feat(inputs.openstack): Allow collection without admin privileges (#15468)
Co-authored-by: Roman Plevka <rplevka@redhat.com>
parent 7dbe28d144 · commit 0d12cc5c4c
go.mod (13 changed lines)

@@ -96,7 +96,7 @@ require (
 	github.com/google/uuid v1.6.0
 	github.com/gopacket/gopacket v1.2.0
 	github.com/gopcua/opcua v0.5.3
-	github.com/gophercloud/gophercloud v1.11.0
+	github.com/gophercloud/gophercloud/v2 v2.0.0-rc.3
 	github.com/gorcon/rcon v1.3.5
 	github.com/gorilla/mux v1.8.1
 	github.com/gorilla/websocket v1.5.1
@@ -199,14 +199,14 @@ require (
 	go.opentelemetry.io/otel/sdk/metric v1.27.0
 	go.starlark.net v0.0.0-20240520160348-046347dcd104
 	go.step.sm/crypto v0.44.1
-	golang.org/x/crypto v0.23.0
+	golang.org/x/crypto v0.24.0
 	golang.org/x/mod v0.17.0
 	golang.org/x/net v0.25.0
 	golang.org/x/oauth2 v0.20.0
 	golang.org/x/sync v0.7.0
-	golang.org/x/sys v0.20.0
-	golang.org/x/term v0.20.0
-	golang.org/x/text v0.15.0
+	golang.org/x/sys v0.21.0
+	golang.org/x/term v0.21.0
+	golang.org/x/text v0.16.0
 	golang.zx2c4.com/wireguard/wgctrl v0.0.0-20211230205640-daad0b7ba671
 	gonum.org/v1/gonum v0.15.0
 	google.golang.org/api v0.178.0
@@ -344,6 +344,7 @@ require (
 	github.com/google/s2a-go v0.1.7 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
 	github.com/googleapis/gax-go/v2 v2.12.4 // indirect
+	github.com/gophercloud/gophercloud v1.12.0 // indirect
 	github.com/gorilla/securecookie v1.1.2 // indirect
 	github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
 	github.com/grid-x/serial v0.0.0-20211107191517-583c7356b3aa // indirect
@@ -483,7 +484,7 @@ require (
 	go.uber.org/zap v1.24.0 // indirect
 	golang.org/x/exp v0.0.0-20240529005216-23cca8864a10 // indirect
 	golang.org/x/time v0.5.0 // indirect
-	golang.org/x/tools v0.21.0 // indirect
+	golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
 	golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
 	golang.zx2c4.com/wireguard v0.0.0-20211209221555-9c9e7e272434 // indirect
 	google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect
go.sum (26 changed lines)

@@ -1416,8 +1416,10 @@ github.com/gopacket/gopacket v1.2.0 h1:eXbzFad7f73P1n2EJHQlsKuvIMJjVXK5tXoSca78I
 github.com/gopacket/gopacket v1.2.0/go.mod h1:BrAKEy5EOGQ76LSqh7DMAr7z0NNPdczWm2GxCG7+I8M=
 github.com/gopcua/opcua v0.5.3 h1:K5QQhjK9KQxQW8doHL/Cd8oljUeXWnJJsNgP7mOGIhw=
 github.com/gopcua/opcua v0.5.3/go.mod h1:nrVl4/Rs3SDQRhNQ50EbAiI5JSpDrTG6Frx3s4HLnw4=
-github.com/gophercloud/gophercloud v1.11.0 h1:ls0O747DIq1D8SUHc7r2vI8BFbMLeLFuENaAIfEx7OM=
-github.com/gophercloud/gophercloud v1.11.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
+github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g=
+github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
+github.com/gophercloud/gophercloud/v2 v2.0.0-rc.3 h1:Gc1oFIROarJoIcvg63BsXrK6G+DPwYVs5gKlmvV2UC8=
+github.com/gophercloud/gophercloud/v2 v2.0.0-rc.3/go.mod h1:ZKbcGNjxFTSaP5wlvtLDdsppllD/UGGvXBPqcjeqA8Y=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0=
 github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@@ -2420,8 +2422,8 @@ golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf
 golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
 golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
 golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
-golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
-golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
+golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
+golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
 golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -2762,8 +2764,8 @@ golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
-golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
+golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -2780,8 +2782,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
 golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
 golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
 golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
-golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
-golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
+golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
+golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -2801,8 +2803,8 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
-golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -2887,8 +2889,8 @@ golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
 golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
 golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
-golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
-golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
 golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -83,13 +83,15 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
   password = "password"
 
   ## Available services are:
-  ## "agents", "aggregates", "cinder_services", "flavors", "hypervisors", "networks",
-  ## "nova_services", "ports", "projects", "servers", "services", "stacks", "storage_pools",
-  ## "subnets", "volumes"
+  ## "agents", "aggregates", "cinder_services", "flavors", "hypervisors",
+  ## "networks", "nova_services", "ports", "projects", "servers",
+  ## "serverdiagnostics", "services", "stacks", "storage_pools", "subnets",
+  ## "volumes"
   # enabled_services = ["services", "projects", "hypervisors", "flavors", "networks", "volumes"]
 
-  ## Collect Server Diagnostics
-  # server_diagnotics = false
+  ## Query all instances of all tenants for the volumes and server services
+  ## NOTE: Usually this is only permitted for administrators!
+  # query_all_tenants = true
 
   ## output secrets (such as adminPass(for server) and UserID(for volume)).
   # output_secrets = false
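For operators, a minimal unprivileged configuration under the new scheme might look like the sketch below. It uses only options that appear in this diff, with placeholder values; setting query_all_tenants = false keeps the volume and server queries scoped to the authenticated project, so no admin role is required.

[[inputs.openstack]]
  ## Authentication options omitted; see the sample configuration above.
  password = "password"

  ## Collect only what an unprivileged user can typically see.
  enabled_services = ["projects", "servers", "volumes"]

  ## Do not query other tenants' resources (usually admin-only).
  query_all_tenants = false

  ## Keep secrets such as adminPass out of the emitted metrics.
  output_secrets = false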
@@ -17,31 +17,31 @@ import (
 	"fmt"
 	"net/http"
 	"regexp"
+	"slices"
 	"sort"
 	"strconv"
 	"strings"
 	"time"
 
-	"github.com/gophercloud/gophercloud"
-	"github.com/gophercloud/gophercloud/openstack"
-	"github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats"
-	cinder_services "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/services"
-	"github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumetenants"
-	"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes"
-	"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates"
-	"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/diagnostics"
-	"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors"
-	nova_services "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services"
-	"github.com/gophercloud/gophercloud/openstack/compute/v2/flavors"
-	"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
-	"github.com/gophercloud/gophercloud/openstack/identity/v3/projects"
-	"github.com/gophercloud/gophercloud/openstack/identity/v3/services"
-	"github.com/gophercloud/gophercloud/openstack/identity/v3/tokens"
-	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/agents"
-	"github.com/gophercloud/gophercloud/openstack/networking/v2/networks"
-	"github.com/gophercloud/gophercloud/openstack/networking/v2/ports"
-	"github.com/gophercloud/gophercloud/openstack/networking/v2/subnets"
-	"github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks"
+	"github.com/gophercloud/gophercloud/v2"
+	"github.com/gophercloud/gophercloud/v2/openstack"
+	"github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/schedulerstats"
+	cinder_services "github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/services"
+	"github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes"
+	"github.com/gophercloud/gophercloud/v2/openstack/compute/v2/aggregates"
+	"github.com/gophercloud/gophercloud/v2/openstack/compute/v2/diagnostics"
+	"github.com/gophercloud/gophercloud/v2/openstack/compute/v2/flavors"
+	"github.com/gophercloud/gophercloud/v2/openstack/compute/v2/hypervisors"
+	"github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers"
+	nova_services "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/services"
+	"github.com/gophercloud/gophercloud/v2/openstack/identity/v3/projects"
+	"github.com/gophercloud/gophercloud/v2/openstack/identity/v3/services"
+	"github.com/gophercloud/gophercloud/v2/openstack/identity/v3/tokens"
+	"github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/agents"
+	"github.com/gophercloud/gophercloud/v2/openstack/networking/v2/networks"
+	"github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports"
+	"github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets"
+	"github.com/gophercloud/gophercloud/v2/openstack/orchestration/v1/stacks"
 
 	"github.com/influxdata/telegraf"
 	httpconfig "github.com/influxdata/telegraf/plugins/common/http"
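The import swap above is the visible part of the gophercloud v1-to-v2 migration: v2 threads a context.Context through every API call. Below is a standalone sketch of the authentication pattern the plugin now follows; the endpoint and credentials are hypothetical and not taken from this commit.

package main

import (
	"context"
	"log"

	"github.com/gophercloud/gophercloud/v2"
	"github.com/gophercloud/gophercloud/v2/openstack"
)

func main() {
	ctx := context.Background()

	// Hypothetical Keystone endpoint; the plugin reads this from its configuration.
	provider, err := openstack.NewClient("https://keystone.example.com:5000/v3")
	if err != nil {
		log.Fatalf("creating client: %v", err)
	}

	authOption := gophercloud.AuthOptions{
		DomainName:  "default",
		TenantName:  "admin",
		Username:    "telegraf",
		Password:    "password",
		AllowReauth: true,
	}

	// In v2 every call takes a context; the v1 Authenticate had none.
	if err := openstack.Authenticate(ctx, provider, authOption); err != nil {
		log.Fatalf("authenticating: %v", err)
	}
	log.Println("authenticated against Keystone")
}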
@@ -57,12 +57,6 @@ var (
 	typeStorage = regexp.MustCompile(`_errors$|_read$|_read_req$|_write$|_write_req$`)
 )
 
-// volume is a structure used to unmarshal raw JSON from the API into.
-type volume struct {
-	volumes.Volume
-	volumetenants.VolumeTenantExt
-}
-
 // OpenStack is the main structure associated with a collection instance.
 type OpenStack struct {
 	// Configuration variables
@@ -72,12 +66,13 @@ type OpenStack struct {
 	Username         string          `toml:"username"`
 	Password         string          `toml:"password"`
 	EnabledServices  []string        `toml:"enabled_services"`
-	ServerDiagnotics bool            `toml:"server_diagnotics"`
+	ServerDiagnotics bool            `toml:"server_diagnotics" deprecated:"1.32.0;1.40.0;add 'serverdiagnostics' to 'enabled_services' instead"`
 	OutputSecrets    bool            `toml:"output_secrets"`
 	TagPrefix        string          `toml:"tag_prefix"`
 	TagValue         string          `toml:"tag_value"`
 	HumanReadableTS  bool            `toml:"human_readable_timestamps"`
 	MeasureRequest   bool            `toml:"measure_openstack_requests"`
+	AllTenants       bool            `toml:"query_all_tenants"`
 	Log              telegraf.Logger `toml:"-"`
 	httpconfig.HTTPClientConfig
 
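Per the deprecated tag above, server_diagnotics keeps working from 1.32.0 until its planned removal in 1.40.0, with Init() mapping it onto the service list. A sketch of the corresponding config migration:

  ## Deprecated since 1.32.0, removal planned for 1.40.0:
  # server_diagnotics = true

  ## Preferred replacement:
  enabled_services = ["servers", "serverdiagnostics"]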
@@ -91,10 +86,9 @@ type OpenStack struct {
 	stack    *gophercloud.ServiceClient
 
 	// Locally cached resources
-	openstackFlavors     map[string]flavors.Flavor
-	openstackHypervisors []hypervisors.Hypervisor
-	openstackProjects    map[string]projects.Project
-	openstackServices    map[string]services.Service
+	openstackFlavors  map[string]flavors.Flavor
+	openstackProjects map[string]projects.Project
+	openstackServices map[string]services.Service
 
 	services map[string]bool
 }
@@ -124,26 +118,29 @@ func (o *OpenStack) Init() error {
 		return errors.New("tag_value option can not be empty string")
 	}
 
+	// For backward compatibility
+	if o.ServerDiagnotics && !slices.Contains(o.EnabledServices, "serverdiagnostics") {
+		o.EnabledServices = append(o.EnabledServices, "serverdiagnostics")
+	}
+
 	// Check the enabled services
 	o.services = make(map[string]bool, len(o.EnabledServices))
 	for _, service := range o.EnabledServices {
 		switch service {
 		case "agents", "aggregates", "cinder_services", "flavors", "hypervisors",
-			"networks", "nova_services", "ports", "projects", "servers", "services",
-			"stacks", "storage_pools", "subnets", "volumes":
+			"networks", "nova_services", "ports", "projects", "servers",
+			"serverdiagnostics", "services", "stacks", "storage_pools",
+			"subnets", "volumes":
 			o.services[service] = true
 		default:
 			return fmt.Errorf("invalid service %q", service)
 		}
 	}
 
 	return nil
 }
 
-func (o *OpenStack) Start(_ telegraf.Accumulator) error {
-	o.openstackFlavors = map[string]flavors.Flavor{}
-	o.openstackHypervisors = []hypervisors.Hypervisor{}
-	o.openstackProjects = map[string]projects.Project{}
+func (o *OpenStack) Start(telegraf.Accumulator) error {
 	// Authenticate against Keystone and get a token provider
 	provider, err := openstack.NewClient(o.IdentityEndpoint)
 	if err != nil {
@@ -168,7 +165,7 @@ func (o *OpenStack) Start(_ telegraf.Accumulator) error {
 		Password:    o.Password,
 		AllowReauth: true,
 	}
-	if err := openstack.Authenticate(provider, authOption); err != nil {
+	if err := openstack.Authenticate(ctx, provider, authOption); err != nil {
 		return fmt.Errorf("unable to authenticate OpenStack user: %w", err)
 	}
 
@@ -194,7 +191,7 @@ func (o *OpenStack) Start(_ telegraf.Accumulator) error {
 			o.Log.Warnf("failed to get services from v3 authentication: %v; falling back to services API", err)
 		}
 		// Determine the services available at the endpoint
-		if err := o.availableServices(); err != nil {
+		if err := o.availableServices(ctx); err != nil {
 			return fmt.Errorf("failed to get resource openstack services: %w", err)
 		}
 	}
@@ -235,6 +232,37 @@ func (o *OpenStack) Start(_ telegraf.Accumulator) error {
 		}
 	}
 
+	// Prepare cross-dependency information
+	o.openstackFlavors = map[string]flavors.Flavor{}
+	o.openstackProjects = map[string]projects.Project{}
+	if slices.Contains(o.EnabledServices, "servers") {
+		// We need the flavors to output machine details for servers
+		page, err := flavors.ListDetail(o.compute, nil).AllPages(ctx)
+		if err != nil {
+			return fmt.Errorf("unable to list flavors: %w", err)
+		}
+		extractedflavors, err := flavors.ExtractFlavors(page)
+		if err != nil {
+			return fmt.Errorf("unable to extract flavors: %w", err)
+		}
+		for _, flavor := range extractedflavors {
+			o.openstackFlavors[flavor.ID] = flavor
+		}
+
+		// We need the project to deliver a human readable name in servers
+		page, err = projects.ListAvailable(o.identity).AllPages(ctx)
+		if err != nil {
+			return fmt.Errorf("unable to list projects: %w", err)
+		}
+		extractedProjects, err := projects.ExtractProjects(page)
+		if err != nil {
+			return fmt.Errorf("unable to extract projects: %w", err)
+		}
+		for _, project := range extractedProjects {
+			o.openstackProjects[project.ID] = project
+		}
+	}
+
 	return nil
 }
 
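The projects.ListAvailable call above is the heart of the non-admin support: unlike projects.List, it only asks Keystone for the projects the current token can access. A compilable sketch of that pattern (package and helper name are illustrative):

package openstackexample

import (
	"context"
	"fmt"

	"github.com/gophercloud/gophercloud/v2"
	"github.com/gophercloud/gophercloud/v2/openstack/identity/v3/projects"
)

// listVisibleProjects returns the projects the current token may access;
// projects.List would enumerate all projects and normally needs admin rights.
func listVisibleProjects(ctx context.Context, identity *gophercloud.ServiceClient) ([]projects.Project, error) {
	page, err := projects.ListAvailable(identity).AllPages(ctx)
	if err != nil {
		return nil, fmt.Errorf("unable to list projects: %w", err)
	}
	return projects.ExtractProjects(page)
}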
@@ -247,27 +275,9 @@ func (o *OpenStack) Stop() {
 // Gather gathers resources from the OpenStack API and accumulates metrics. This
 // implements the Input interface.
 func (o *OpenStack) Gather(acc telegraf.Accumulator) error {
+	ctx := context.Background()
 	callDuration := make(map[string]interface{}, len(o.services))
 
-	// Prepare the shared resources
-	if o.services["hypervisors"] || o.services["servers"] || o.ServerDiagnotics {
-		start := time.Now()
-		if err := o.gatherHypervisors(); err != nil {
-			acc.AddError(fmt.Errorf("failed to get resource \"hypervisors\": %w", err))
-		}
-		if o.services["hypervisors"] {
-			callDuration["hypervisors"] = time.Since(start).Nanoseconds()
-		}
-	}
-	// Servers were already queried, so use this information
-	if o.services["servers"] || o.ServerDiagnotics {
-		start := time.Now()
-		if err := o.gatherServers(acc); err != nil {
-			return fmt.Errorf("failed to get resource \"servers\": %w", err)
-		}
-		callDuration["servers"] = time.Since(start).Nanoseconds()
-	}
-
 	for service := range o.services {
 		var err error
 
@@ -275,38 +285,47 @@ func (o *OpenStack) Gather(acc telegraf.Accumulator) error {
 		switch service {
 		case "services":
 			// As Services are already gathered in Init(), using this to accumulate them.
-			o.accumulateServices(acc)
+			for _, service := range o.openstackServices {
+				tags := map[string]string{
+					"name": service.Type,
+				}
+				fields := map[string]interface{}{
+					"service_id":      service.ID,
+					"service_enabled": service.Enabled,
+				}
+				acc.AddFields("openstack_service", fields, tags)
+			}
 			continue
 		case "projects":
-			err = o.gatherProjects(acc)
+			err = o.gatherProjects(ctx, acc)
 		case "hypervisors":
-			// Gathered as part of the shared resource
-			o.accumulateHypervisor(acc)
-			continue
+			err = o.gatherHypervisors(ctx, acc)
 		case "flavors":
-			err = o.gatherFlavors(acc)
-		case "servers":
-			// Gathered as part of the shared resource
+			err = o.gatherFlavors(ctx, acc)
 		case "volumes":
-			err = o.gatherVolumes(acc)
+			err = o.gatherVolumes(ctx, acc)
 		case "storage_pools":
-			err = o.gatherStoragePools(acc)
+			err = o.gatherStoragePools(ctx, acc)
 		case "subnets":
-			err = o.gatherSubnets(acc)
+			err = o.gatherSubnets(ctx, acc)
 		case "ports":
-			err = o.gatherPorts(acc)
+			err = o.gatherPorts(ctx, acc)
 		case "networks":
-			err = o.gatherNetworks(acc)
+			err = o.gatherNetworks(ctx, acc)
 		case "aggregates":
-			err = o.gatherAggregates(acc)
+			err = o.gatherAggregates(ctx, acc)
 		case "nova_services":
-			err = o.gatherNovaServices(acc)
+			err = o.gatherNovaServices(ctx, acc)
 		case "cinder_services":
-			err = o.gatherCinderServices(acc)
+			err = o.gatherCinderServices(ctx, acc)
 		case "agents":
-			err = o.gatherAgents(acc)
+			err = o.gatherAgents(ctx, acc)
+		case "servers":
+			err = o.gatherServers(ctx, acc)
+		case "serverdiagnostics":
+			err = o.gatherServerDiagnostics(ctx, acc)
 		case "stacks":
-			err = o.gatherStacks(acc)
+			err = o.gatherStacks(ctx, acc)
 		default:
 			return fmt.Errorf("invalid service %q", service)
 		}
@@ -357,8 +376,8 @@ func (o *OpenStack) availableServicesFromAuth(provider *gophercloud.ProviderClie
 }
 
 // availableServices collects the available endpoint services via API
-func (o *OpenStack) availableServices() error {
-	page, err := services.List(o.identity, nil).AllPages()
+func (o *OpenStack) availableServices(ctx context.Context) error {
+	page, err := services.List(o.identity, nil).AllPages(ctx)
 	if err != nil {
 		return fmt.Errorf("unable to list services: %w", err)
 	}
@@ -376,8 +395,8 @@ func (o *OpenStack) availableServices() error {
 }
 
 // gatherStacks collects and accumulates stacks data from the OpenStack API.
-func (o *OpenStack) gatherStacks(acc telegraf.Accumulator) error {
-	page, err := stacks.List(o.stack, &stacks.ListOpts{}).AllPages()
+func (o *OpenStack) gatherStacks(ctx context.Context, acc telegraf.Accumulator) error {
+	page, err := stacks.List(o.stack, nil).AllPages(ctx)
 	if err != nil {
 		return fmt.Errorf("unable to list stacks: %w", err)
 	}
@@ -407,8 +426,8 @@ func (o *OpenStack) gatherStacks(acc telegraf.Accumulator) error {
 }
 
 // gatherNovaServices collects and accumulates nova_services data from the OpenStack API.
-func (o *OpenStack) gatherNovaServices(acc telegraf.Accumulator) error {
-	page, err := nova_services.List(o.compute, &nova_services.ListOpts{}).AllPages()
+func (o *OpenStack) gatherNovaServices(ctx context.Context, acc telegraf.Accumulator) error {
+	page, err := nova_services.List(o.compute, nil).AllPages(ctx)
 	if err != nil {
 		return fmt.Errorf("unable to list nova_services: %w", err)
 	}
@@ -437,8 +456,8 @@ func (o *OpenStack) gatherNovaServices(acc telegraf.Accumulator) error {
 }
 
 // gatherCinderServices collects and accumulates cinder_services data from the OpenStack API.
-func (o *OpenStack) gatherCinderServices(acc telegraf.Accumulator) error {
-	page, err := cinder_services.List(o.volume, &cinder_services.ListOpts{}).AllPages()
+func (o *OpenStack) gatherCinderServices(ctx context.Context, acc telegraf.Accumulator) error {
+	page, err := cinder_services.List(o.volume, nil).AllPages(ctx)
 	if err != nil {
 		return fmt.Errorf("unable to list cinder_services: %w", err)
 	}
@@ -469,8 +488,8 @@ func (o *OpenStack) gatherCinderServices(acc telegraf.Accumulator) error {
 }
 
 // gatherSubnets collects and accumulates subnets data from the OpenStack API.
-func (o *OpenStack) gatherSubnets(acc telegraf.Accumulator) error {
-	page, err := subnets.List(o.network, &subnets.ListOpts{}).AllPages()
+func (o *OpenStack) gatherSubnets(ctx context.Context, acc telegraf.Accumulator) error {
+	page, err := subnets.List(o.network, nil).AllPages(ctx)
 	if err != nil {
 		return fmt.Errorf("unable to list subnets: %w", err)
 	}
@@ -511,8 +530,8 @@ func (o *OpenStack) gatherSubnets(acc telegraf.Accumulator) error {
 }
 
 // gatherPorts collects and accumulates ports data from the OpenStack API.
-func (o *OpenStack) gatherPorts(acc telegraf.Accumulator) error {
-	page, err := ports.List(o.network, &ports.ListOpts{}).AllPages()
+func (o *OpenStack) gatherPorts(ctx context.Context, acc telegraf.Accumulator) error {
+	page, err := ports.List(o.network, nil).AllPages(ctx)
 	if err != nil {
 		return fmt.Errorf("unable to list ports: %w", err)
 	}
@@ -556,8 +575,8 @@ func (o *OpenStack) gatherPorts(acc telegraf.Accumulator) error {
 }
 
 // gatherNetworks collects and accumulates networks data from the OpenStack API.
-func (o *OpenStack) gatherNetworks(acc telegraf.Accumulator) error {
-	page, err := networks.List(o.network, &networks.ListOpts{}).AllPages()
+func (o *OpenStack) gatherNetworks(ctx context.Context, acc telegraf.Accumulator) error {
+	page, err := networks.List(o.network, nil).AllPages(ctx)
 	if err != nil {
 		return fmt.Errorf("unable to list networks: %w", err)
 	}
@@ -598,8 +617,8 @@ func (o *OpenStack) gatherNetworks(acc telegraf.Accumulator) error {
 }
 
 // gatherAgents collects and accumulates agents data from the OpenStack API.
-func (o *OpenStack) gatherAgents(acc telegraf.Accumulator) error {
-	page, err := agents.List(o.network, &agents.ListOpts{}).AllPages()
+func (o *OpenStack) gatherAgents(ctx context.Context, acc telegraf.Accumulator) error {
+	page, err := agents.List(o.network, nil).AllPages(ctx)
 	if err != nil {
 		return fmt.Errorf("unable to list neutron agents: %w", err)
 	}
@@ -631,8 +650,8 @@ func (o *OpenStack) gatherAgents(acc telegraf.Accumulator) error {
 }
 
 // gatherAggregates collects and accumulates aggregates data from the OpenStack API.
-func (o *OpenStack) gatherAggregates(acc telegraf.Accumulator) error {
-	page, err := aggregates.List(o.compute).AllPages()
+func (o *OpenStack) gatherAggregates(ctx context.Context, acc telegraf.Accumulator) error {
+	page, err := aggregates.List(o.compute).AllPages(ctx)
 	if err != nil {
 		return fmt.Errorf("unable to list aggregates: %w", err)
 	}
@@ -666,8 +685,8 @@ func (o *OpenStack) gatherAggregates(acc telegraf.Accumulator) error {
 }
 
 // gatherProjects collects and accumulates projects data from the OpenStack API.
-func (o *OpenStack) gatherProjects(acc telegraf.Accumulator) error {
-	page, err := projects.List(o.identity, &projects.ListOpts{}).AllPages()
+func (o *OpenStack) gatherProjects(ctx context.Context, acc telegraf.Accumulator) error {
+	page, err := projects.List(o.identity, nil).AllPages(ctx)
 	if err != nil {
 		return fmt.Errorf("unable to list projects: %w", err)
 	}
@@ -698,8 +717,8 @@ func (o *OpenStack) gatherProjects(acc telegraf.Accumulator) error {
 }
 
 // gatherHypervisors collects and accumulates hypervisors data from the OpenStack API.
-func (o *OpenStack) gatherHypervisors() error {
-	page, err := hypervisors.List(o.compute, hypervisors.ListOpts{}).AllPages()
+func (o *OpenStack) gatherHypervisors(ctx context.Context, acc telegraf.Accumulator) error {
+	page, err := hypervisors.List(o.compute, nil).AllPages(ctx)
 	if err != nil {
 		return fmt.Errorf("unable to list hypervisors: %w", err)
 	}
@@ -707,14 +726,51 @@ func (o *OpenStack) gatherHypervisors() error {
 	if err != nil {
 		return fmt.Errorf("unable to extract hypervisors: %w", err)
 	}
-	o.openstackHypervisors = extractedHypervisors
+
+	for _, hypervisor := range extractedHypervisors {
+		tags := map[string]string{
+			"cpu_vendor":              hypervisor.CPUInfo.Vendor,
+			"cpu_arch":                hypervisor.CPUInfo.Arch,
+			"cpu_model":               hypervisor.CPUInfo.Model,
+			"status":                  strings.ToLower(hypervisor.Status),
+			"state":                   hypervisor.State,
+			"hypervisor_hostname":     hypervisor.HypervisorHostname,
+			"hypervisor_type":         hypervisor.HypervisorType,
+			"hypervisor_version":      strconv.Itoa(hypervisor.HypervisorVersion),
+			"service_host":            hypervisor.Service.Host,
+			"service_id":              hypervisor.Service.ID,
+			"service_disabled_reason": hypervisor.Service.DisabledReason,
+		}
+		for _, cpuFeature := range hypervisor.CPUInfo.Features {
+			tags["cpu_feature_"+cpuFeature] = "true"
+		}
+		fields := map[string]interface{}{
+			"id":                   hypervisor.ID,
+			"host_ip":              hypervisor.HostIP,
+			"cpu_topology_sockets": hypervisor.CPUInfo.Topology.Sockets,
+			"cpu_topology_cores":   hypervisor.CPUInfo.Topology.Cores,
+			"cpu_topology_threads": hypervisor.CPUInfo.Topology.Threads,
+			"current_workload":     hypervisor.CurrentWorkload,
+			"disk_available_least": hypervisor.DiskAvailableLeast,
+			"free_disk_gb":         hypervisor.FreeDiskGB,
+			"free_ram_mb":          hypervisor.FreeRamMB,
+			"local_gb":             hypervisor.LocalGB,
+			"local_gb_used":        hypervisor.LocalGBUsed,
+			"memory_mb":            hypervisor.MemoryMB,
+			"memory_mb_used":       hypervisor.MemoryMBUsed,
+			"running_vms":          hypervisor.RunningVMs,
+			"vcpus":                hypervisor.VCPUs,
+			"vcpus_used":           hypervisor.VCPUsUsed,
+		}
+		acc.AddFields("openstack_hypervisor", fields, tags)
+	}
 
 	return nil
 }
 
 // gatherFlavors collects and accumulates flavors data from the OpenStack API.
-func (o *OpenStack) gatherFlavors(acc telegraf.Accumulator) error {
-	page, err := flavors.ListDetail(o.compute, &flavors.ListOpts{}).AllPages()
+func (o *OpenStack) gatherFlavors(ctx context.Context, acc telegraf.Accumulator) error {
+	page, err := flavors.ListDetail(o.compute, nil).AllPages(ctx)
 	if err != nil {
 		return fmt.Errorf("unable to list flavors: %w", err)
 	}
@@ -743,16 +799,16 @@ func (o *OpenStack) gatherFlavors(acc telegraf.Accumulator) error {
 }
 
 // gatherVolumes collects and accumulates volumes data from the OpenStack API.
-func (o *OpenStack) gatherVolumes(acc telegraf.Accumulator) error {
-	page, err := volumes.List(o.volume, &volumes.ListOpts{AllTenants: true}).AllPages()
+func (o *OpenStack) gatherVolumes(ctx context.Context, acc telegraf.Accumulator) error {
+	page, err := volumes.List(o.volume, &volumes.ListOpts{AllTenants: o.AllTenants}).AllPages(ctx)
 	if err != nil {
 		return fmt.Errorf("unable to list volumes: %w", err)
 	}
-	v := []volume{}
-	if err := volumes.ExtractVolumesInto(page, &v); err != nil {
+	extractedVolumes, err := volumes.ExtractVolumes(page)
+	if err != nil {
 		return fmt.Errorf("unable to extract volumes: %w", err)
 	}
-	for _, volume := range v {
+	for _, volume := range extractedVolumes {
 		tags := map[string]string{
 			"status":            strings.ToLower(volume.Status),
 			"availability_zone": volume.AvailabilityZone,
@@ -798,8 +854,8 @@ func (o *OpenStack) gatherVolumes(acc telegraf.Accumulator) error {
 }
 
 // gatherStoragePools collects and accumulates storage pools data from the OpenStack API.
-func (o *OpenStack) gatherStoragePools(acc telegraf.Accumulator) error {
-	results, err := schedulerstats.List(o.volume, &schedulerstats.ListOpts{Detail: true}).AllPages()
+func (o *OpenStack) gatherStoragePools(ctx context.Context, acc telegraf.Accumulator) error {
+	results, err := schedulerstats.List(o.volume, &schedulerstats.ListOpts{Detail: true}).AllPages(ctx)
 	if err != nil {
 		return fmt.Errorf("unable to list storage pools: %w", err)
 	}
|
@ -824,210 +880,202 @@ func (o *OpenStack) gatherStoragePools(acc telegraf.Accumulator) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// gatherServers collects servers from the OpenStack API.
|
||||
func (o *OpenStack) gatherServers(acc telegraf.Accumulator) error {
|
||||
for _, hypervisor := range o.openstackHypervisors {
|
||||
page, err := servers.List(o.compute, &servers.ListOpts{AllTenants: true, Host: hypervisor.HypervisorHostname}).AllPages()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list servers: %w", err)
|
||||
func (o *OpenStack) gatherServers(ctx context.Context, acc telegraf.Accumulator) error {
|
||||
page, err := servers.List(o.compute, &servers.ListOpts{AllTenants: o.AllTenants}).AllPages(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list servers: %w", err)
|
||||
}
|
||||
extractedServers, err := servers.ExtractServers(page)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to extract servers: %w", err)
|
||||
}
|
||||
|
||||
for i := range extractedServers {
|
||||
server := &extractedServers[i]
|
||||
|
||||
// Try derive the associated project
|
||||
project := "unknown"
|
||||
if p, ok := o.openstackProjects[server.TenantID]; ok {
|
||||
project = p.Name
|
||||
}
|
||||
extractedServers, err := servers.ExtractServers(page)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to extract servers: %w", err)
|
||||
|
||||
// Try to derive the hostname
|
||||
var hostname string
|
||||
if server.Host != "" {
|
||||
hostname = server.Host
|
||||
} else if server.Hostname != nil && *server.Hostname != "" {
|
||||
hostname = *server.Hostname
|
||||
} else if server.HypervisorHostname != "" {
|
||||
hostname = server.HypervisorHostname
|
||||
} else {
|
||||
hostname = server.HostID
|
||||
}
|
||||
for _, server := range extractedServers {
|
||||
if o.services["servers"] {
|
||||
o.accumulateServer(acc, server, hypervisor.HypervisorHostname)
|
||||
}
|
||||
if o.ServerDiagnotics && server.Status == "ACTIVE" {
|
||||
diagnostic, err := diagnostics.Get(o.compute, server.ID).Extract()
|
||||
if err != nil {
|
||||
acc.AddError(fmt.Errorf("unable to get diagnostics for server %q: %w", server.ID, err))
|
||||
continue
|
||||
|
||||
tags := map[string]string{
|
||||
"tenant_id": server.TenantID,
|
||||
"name": server.Name,
|
||||
"host_id": server.HostID,
|
||||
"status": strings.ToLower(server.Status),
|
||||
"key_name": server.KeyName,
|
||||
"host_name": hostname,
|
||||
"project": project,
|
||||
}
|
||||
|
||||
// Extract the flavor details to avoid joins (ignore errors and leave as zero values)
|
||||
var vcpus, ram, disk int
|
||||
if flavorIDInterface, found := server.Flavor["id"]; found {
|
||||
if flavorID, ok := flavorIDInterface.(string); ok {
|
||||
tags["flavor"] = flavorID
|
||||
if flavor, ok := o.openstackFlavors[flavorID]; ok {
|
||||
vcpus = flavor.VCPUs
|
||||
ram = flavor.RAM
|
||||
disk = flavor.Disk
|
||||
}
|
||||
o.accumulateServerDiagnostics(acc, hypervisor.HypervisorHostname, server.ID, diagnostic)
|
||||
}
|
||||
}
|
||||
if imageIDInterface, found := server.Image["id"]; found {
|
||||
if imageID, ok := imageIDInterface.(string); ok {
|
||||
tags["image"] = imageID
|
||||
}
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"id": server.ID,
|
||||
"progress": server.Progress,
|
||||
"accessIPv4": server.AccessIPv4,
|
||||
"accessIPv6": server.AccessIPv6,
|
||||
"addresses": len(server.Addresses),
|
||||
"security_groups": len(server.SecurityGroups),
|
||||
"volumes_attached": len(server.AttachedVolumes),
|
||||
"fault_code": server.Fault.Code,
|
||||
"fault_details": server.Fault.Details,
|
||||
"fault_message": server.Fault.Message,
|
||||
"vcpus": vcpus,
|
||||
"ram_mb": ram,
|
||||
"disk_gb": disk,
|
||||
"fault_created": o.convertTimeFormat(server.Fault.Created),
|
||||
"updated": o.convertTimeFormat(server.Updated),
|
||||
"created": o.convertTimeFormat(server.Created),
|
||||
}
|
||||
if o.OutputSecrets {
|
||||
tags["user_id"] = server.UserID
|
||||
fields["adminPass"] = server.AdminPass
|
||||
}
|
||||
if len(server.AttachedVolumes) == 0 {
|
||||
acc.AddFields("openstack_server", fields, tags)
|
||||
} else {
|
||||
for _, AttachedVolume := range server.AttachedVolumes {
|
||||
fields["volume_id"] = AttachedVolume.ID
|
||||
acc.AddFields("openstack_server", fields, tags)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *OpenStack) accumulateHypervisor(acc telegraf.Accumulator) {
|
||||
for _, hypervisor := range o.openstackHypervisors {
|
||||
tags := map[string]string{
|
||||
"cpu_vendor": hypervisor.CPUInfo.Vendor,
|
||||
"cpu_arch": hypervisor.CPUInfo.Arch,
|
||||
"cpu_model": hypervisor.CPUInfo.Model,
|
||||
"status": strings.ToLower(hypervisor.Status),
|
||||
"state": hypervisor.State,
|
||||
"hypervisor_hostname": hypervisor.HypervisorHostname,
|
||||
"hypervisor_type": hypervisor.HypervisorType,
|
||||
"hypervisor_version": strconv.Itoa(hypervisor.HypervisorVersion),
|
||||
"service_host": hypervisor.Service.Host,
|
||||
"service_id": hypervisor.Service.ID,
|
||||
"service_disabled_reason": hypervisor.Service.DisabledReason,
|
||||
}
|
||||
for _, cpuFeature := range hypervisor.CPUInfo.Features {
|
||||
tags["cpu_feature_"+cpuFeature] = "true"
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"id": hypervisor.ID,
|
||||
"host_ip": hypervisor.HostIP,
|
||||
"cpu_topology_sockets": hypervisor.CPUInfo.Topology.Sockets,
|
||||
"cpu_topology_cores": hypervisor.CPUInfo.Topology.Cores,
|
||||
"cpu_topology_threads": hypervisor.CPUInfo.Topology.Threads,
|
||||
"current_workload": hypervisor.CurrentWorkload,
|
||||
"disk_available_least": hypervisor.DiskAvailableLeast,
|
||||
"free_disk_gb": hypervisor.FreeDiskGB,
|
||||
"free_ram_mb": hypervisor.FreeRamMB,
|
||||
"local_gb": hypervisor.LocalGB,
|
||||
"local_gb_used": hypervisor.LocalGBUsed,
|
||||
"memory_mb": hypervisor.MemoryMB,
|
||||
"memory_mb_used": hypervisor.MemoryMBUsed,
|
||||
"running_vms": hypervisor.RunningVMs,
|
||||
"vcpus": hypervisor.VCPUs,
|
||||
"vcpus_used": hypervisor.VCPUsUsed,
|
||||
}
|
||||
acc.AddFields("openstack_hypervisor", fields, tags)
|
||||
func (o *OpenStack) gatherServerDiagnostics(ctx context.Context, acc telegraf.Accumulator) error {
|
||||
page, err := servers.List(o.compute, &servers.ListOpts{AllTenants: o.AllTenants}).AllPages(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list servers: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// accumulateServices accumulates statistics of services.
|
||||
func (o *OpenStack) accumulateServices(acc telegraf.Accumulator) {
|
||||
for _, service := range o.openstackServices {
|
||||
tags := map[string]string{
|
||||
"name": service.Type,
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"service_id": service.ID,
|
||||
"service_enabled": service.Enabled,
|
||||
}
|
||||
acc.AddFields("openstack_service", fields, tags)
|
||||
extractedServers, err := servers.ExtractServers(page)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to extract servers: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// accumulateServer accumulates statistics of a server.
|
||||
func (o *OpenStack) accumulateServer(acc telegraf.Accumulator, server servers.Server, hostName string) {
|
||||
tags := map[string]string{}
|
||||
// Extract the flavor details to avoid joins (ignore errors and leave as zero values)
|
||||
var vcpus, ram, disk int
|
||||
if flavorIDInterface, ok := server.Flavor["id"]; ok {
|
||||
if flavorID, ok := flavorIDInterface.(string); ok {
|
||||
tags["flavor"] = flavorID
|
||||
if flavor, ok := o.openstackFlavors[flavorID]; ok {
|
||||
vcpus = flavor.VCPUs
|
||||
ram = flavor.RAM
|
||||
disk = flavor.Disk
|
||||
for i := range extractedServers {
|
||||
server := &extractedServers[i]
|
||||
if server.Status != "ACTIVE" {
|
||||
continue
|
||||
}
|
||||
diagnostic, err := diagnostics.Get(ctx, o.compute, server.ID).Extract()
|
||||
if err != nil {
|
||||
acc.AddError(fmt.Errorf("unable to get diagnostics for server %q: %w", server.ID, err))
|
||||
continue
|
||||
}
|
||||
|
||||
portName := make(map[string]bool)
|
||||
storageName := make(map[string]bool)
|
||||
memoryStats := make(map[string]interface{})
|
||||
cpus := make(map[string]interface{})
|
||||
for k, v := range diagnostic {
|
||||
if typePort.MatchString(k) {
|
||||
portName[strings.Split(k, "_")[0]] = true
|
||||
} else if typeCPU.MatchString(k) {
|
||||
cpus[k] = v
|
||||
} else if typeStorage.MatchString(k) {
|
||||
storageName[strings.Split(k, "_")[0]] = true
|
||||
} else {
|
||||
memoryStats[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
if imageIDInterface, ok := server.Image["id"]; ok {
|
||||
if imageID, ok := imageIDInterface.(string); ok {
|
||||
tags["image"] = imageID
|
||||
}
|
||||
}
|
||||
// Try derive the associated project
|
||||
project := "unknown"
|
||||
if p, ok := o.openstackProjects[server.TenantID]; ok {
|
||||
project = p.Name
|
||||
}
|
||||
tags["tenant_id"] = server.TenantID
|
||||
tags["name"] = server.Name
|
||||
tags["host_id"] = server.HostID
|
||||
tags["status"] = strings.ToLower(server.Status)
|
||||
tags["key_name"] = server.KeyName
|
||||
tags["host_name"] = hostName
|
||||
tags["project"] = project
|
||||
fields := map[string]interface{}{
|
||||
"id": server.ID,
|
||||
"progress": server.Progress,
|
||||
"accessIPv4": server.AccessIPv4,
|
||||
"accessIPv6": server.AccessIPv6,
|
||||
"addresses": len(server.Addresses),
|
||||
"security_groups": len(server.SecurityGroups),
|
||||
"volumes_attached": len(server.AttachedVolumes),
|
||||
"fault_code": server.Fault.Code,
|
||||
"fault_details": server.Fault.Details,
|
||||
"fault_message": server.Fault.Message,
|
||||
"vcpus": vcpus,
|
||||
"ram_mb": ram,
|
||||
"disk_gb": disk,
|
||||
"fault_created": o.convertTimeFormat(server.Fault.Created),
|
||||
"updated": o.convertTimeFormat(server.Updated),
|
||||
"created": o.convertTimeFormat(server.Created),
|
||||
}
|
||||
if o.OutputSecrets {
|
||||
tags["user_id"] = server.UserID
|
||||
fields["adminPass"] = server.AdminPass
|
||||
}
|
||||
if len(server.AttachedVolumes) == 0 {
|
||||
acc.AddFields("openstack_server", fields, tags)
|
||||
} else {
|
||||
for _, AttachedVolume := range server.AttachedVolumes {
|
||||
fields["volume_id"] = AttachedVolume.ID
|
||||
acc.AddFields("openstack_server", fields, tags)
|
||||
}
|
||||
}
|
||||
}
|
||||
nPorts := strconv.Itoa(len(portName))
|
||||
nDisks := strconv.Itoa(len(storageName))
|
||||
|
||||
// accumulateServerDiagnostics accumulates statistics from the compute(nova) service.
|
||||
// currently only supports 'libvirt' driver.
|
||||
func (o *OpenStack) accumulateServerDiagnostics(acc telegraf.Accumulator, _, serverID string, diagnostic map[string]interface{}) {
|
||||
tags := map[string]string{
|
||||
"server_id": serverID,
|
||||
}
|
||||
fields := map[string]interface{}{}
|
||||
portName := make(map[string]bool)
|
||||
storageName := make(map[string]bool)
|
||||
memoryStats := make(map[string]interface{})
|
||||
for k, v := range diagnostic {
|
||||
if typePort.MatchString(k) {
|
||||
portName[strings.Split(k, "_")[0]] = true
|
||||
} else if typeCPU.MatchString(k) {
|
||||
// Add metrics for disks
|
||||
fields := map[string]interface{}{
|
||||
"memory": memoryStats["memory"],
|
||||
"memory-actual": memoryStats["memory-actual"],
|
||||
"memory-rss": memoryStats["memory-rss"],
|
||||
"memory-swap_in": memoryStats["memory-swap_in"],
|
||||
}
|
||||
for k, v := range cpus {
|
||||
fields[k] = v
|
||||
} else if typeStorage.MatchString(k) {
|
||||
storageName[strings.Split(k, "_")[0]] = true
|
||||
} else {
|
||||
memoryStats[k] = v
|
||||
}
|
||||
for key := range storageName {
|
||||
fields["disk_errors"] = diagnostic[key+"_errors"]
|
||||
fields["disk_read"] = diagnostic[key+"_read"]
|
||||
fields["disk_read_req"] = diagnostic[key+"_read_req"]
|
||||
fields["disk_write"] = diagnostic[key+"_write"]
|
||||
fields["disk_write_req"] = diagnostic[key+"_write_req"]
|
||||
tags := map[string]string{
|
||||
"server_id": server.ID,
|
||||
"no_of_ports": nPorts,
|
||||
"no_of_disks": nDisks,
|
||||
"disk_name": key,
|
||||
}
|
||||
acc.AddFields("openstack_server_diagnostics", fields, tags)
|
||||
}
|
||||
|
||||
// Add metrics for network ports
|
||||
fields = map[string]interface{}{
|
||||
"memory": memoryStats["memory"],
|
||||
"memory-actual": memoryStats["memory-actual"],
|
||||
"memory-rss": memoryStats["memory-rss"],
|
||||
"memory-swap_in": memoryStats["memory-swap_in"],
|
||||
}
|
||||
for k, v := range cpus {
|
||||
fields[k] = v
|
||||
}
|
||||
for key := range portName {
|
||||
fields["port_rx"] = diagnostic[key+"_rx"]
|
||||
fields["port_rx_drop"] = diagnostic[key+"_rx_drop"]
|
||||
fields["port_rx_errors"] = diagnostic[key+"_rx_errors"]
|
||||
fields["port_rx_packets"] = diagnostic[key+"_rx_packets"]
|
||||
fields["port_tx"] = diagnostic[key+"_tx"]
|
||||
fields["port_tx_drop"] = diagnostic[key+"_tx_drop"]
|
||||
fields["port_tx_errors"] = diagnostic[key+"_tx_errors"]
|
||||
fields["port_tx_packets"] = diagnostic[key+"_tx_packets"]
|
||||
tags := map[string]string{
|
||||
"server_id": server.ID,
|
||||
"no_of_ports": nPorts,
|
||||
"no_of_disks": nDisks,
|
||||
"port_name": key,
|
||||
}
|
||||
acc.AddFields("openstack_server_diagnostics", fields, tags)
|
||||
}
|
||||
}
|
||||
fields["memory"] = memoryStats["memory"]
|
||||
fields["memory-actual"] = memoryStats["memory-actual"]
|
||||
fields["memory-rss"] = memoryStats["memory-rss"]
|
||||
fields["memory-swap_in"] = memoryStats["memory-swap_in"]
|
||||
tags["no_of_ports"] = strconv.Itoa(len(portName))
|
||||
tags["no_of_disks"] = strconv.Itoa(len(storageName))
|
||||
for key := range storageName {
|
||||
fields["disk_errors"] = diagnostic[key+"_errors"]
|
||||
fields["disk_read"] = diagnostic[key+"_read"]
|
||||
fields["disk_read_req"] = diagnostic[key+"_read_req"]
|
||||
fields["disk_write"] = diagnostic[key+"_write"]
|
||||
fields["disk_write_req"] = diagnostic[key+"_write_req"]
|
||||
tags["disk_name"] = key
|
||||
acc.AddFields("openstack_server_diagnostics", fields, tags)
|
||||
}
|
||||
for key := range portName {
|
||||
fields["port_rx"] = diagnostic[key+"_rx"]
|
||||
fields["port_rx_drop"] = diagnostic[key+"_rx_drop"]
|
||||
fields["port_rx_errors"] = diagnostic[key+"_rx_errors"]
|
||||
fields["port_rx_packets"] = diagnostic[key+"_rx_packets"]
|
||||
fields["port_tx"] = diagnostic[key+"_tx"]
|
||||
fields["port_tx_drop"] = diagnostic[key+"_tx_drop"]
|
||||
fields["port_tx_errors"] = diagnostic[key+"_tx_errors"]
|
||||
fields["port_tx_packets"] = diagnostic[key+"_tx_packets"]
|
||||
tags["port_name"] = key
|
||||
acc.AddFields("openstack_server_diagnostics", fields, tags)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// init registers a callback which creates a new OpenStack input instance.
|
||||
func init() {
|
||||
inputs.Add("openstack", func() telegraf.Input {
|
||||
return &OpenStack{
|
||||
Domain: "default",
|
||||
Project: "admin",
|
||||
TagPrefix: "openstack_tag_",
|
||||
TagValue: "true",
|
||||
Domain: "default",
|
||||
Project: "admin",
|
||||
TagPrefix: "openstack_tag_",
|
||||
TagValue: "true",
|
||||
AllTenants: true,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -16,13 +16,15 @@
   password = "password"
 
   ## Available services are:
-  ## "agents", "aggregates", "cinder_services", "flavors", "hypervisors", "networks",
-  ## "nova_services", "ports", "projects", "servers", "services", "stacks", "storage_pools",
-  ## "subnets", "volumes"
+  ## "agents", "aggregates", "cinder_services", "flavors", "hypervisors",
+  ## "networks", "nova_services", "ports", "projects", "servers",
+  ## "serverdiagnostics", "services", "stacks", "storage_pools", "subnets",
+  ## "volumes"
   # enabled_services = ["services", "projects", "hypervisors", "flavors", "networks", "volumes"]
 
-  ## Collect Server Diagnostics
-  # server_diagnotics = false
+  ## Query all instances of all tenants for the volumes and server services
+  ## NOTE: Usually this is only permitted for administrators!
+  # query_all_tenants = true
 
   ## output secrets (such as adminPass(for server) and UserID(for volume)).
   # output_secrets = false