feat: Openstack input plugin (#9236)
This commit is contained in:
parent
d5afd654c6
commit
a288bc0bf8
@ -109,6 +109,7 @@ following works:
- github.com/googleapis/gax-go [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/gax-go/blob/master/LICENSE)
- github.com/googleapis/gnostic [Apache License 2.0](https://github.com/google/gnostic/blob/master/LICENSE)
- github.com/gopcua/opcua [MIT License](https://github.com/gopcua/opcua/blob/master/LICENSE)
- github.com/gophercloud/gophercloud [Apache License 2.0](https://github.com/gophercloud/gophercloud/blob/master/LICENSE)
- github.com/gorilla/mux [BSD 3-Clause "New" or "Revised" License](https://github.com/gorilla/mux/blob/master/LICENSE)
- github.com/gorilla/websocket [BSD 2-Clause "Simplified" License](https://github.com/gorilla/websocket/blob/master/LICENSE)
- github.com/gosnmp/gosnmp [BSD 2-Clause "Simplified" License](https://github.com/gosnmp/gosnmp/blob/master/LICENSE)
go.mod
@ -127,6 +127,7 @@ require (
	github.com/googleapis/gax-go/v2 v2.0.5 // indirect
	github.com/googleapis/gnostic v0.5.5 // indirect
	github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2
	github.com/gophercloud/gophercloud v0.16.0
	github.com/gorilla/mux v1.8.0
	github.com/gorilla/websocket v1.4.2
	github.com/gosnmp/gosnmp v1.32.0
go.sum
@ -1096,6 +1096,7 @@ github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97Dwqy
github.com/gookit/color v1.3.6/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ=
github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2 h1:OtFKr0Kwe1oLpMR+uNMh/DPgC5fxAq4xRe6HBv8LDqQ=
github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8=
github.com/gophercloud/gophercloud v0.16.0 h1:sWjPfypuzxRxjVbk3/MsU4H8jS0NNlyauZtIUl78BPU=
github.com/gophercloud/gophercloud v0.16.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@ -137,6 +137,7 @@ import (
	_ "github.com/influxdata/telegraf/plugins/inputs/openldap"
	_ "github.com/influxdata/telegraf/plugins/inputs/openntpd"
	_ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd"
	_ "github.com/influxdata/telegraf/plugins/inputs/openstack"
	_ "github.com/influxdata/telegraf/plugins/inputs/opentelemetry"
	_ "github.com/influxdata/telegraf/plugins/inputs/openweathermap"
	_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
@ -0,0 +1,362 @@
# OpenStack Input Plugin

Collects metrics from the following OpenStack services:

* CINDER (Block Storage)
* GLANCE (Image service)
* HEAT (Orchestration)
* KEYSTONE (Identity service)
* NEUTRON (Networking)
* NOVA (Compute Service)

At present this plugin requires the following APIs:

* blockstorage v2
* compute v2
* identity v3
* networking v2
* orchestration v1

## Configuration and Recommendations

### Recommendations

Due to the large number of unique tags that this plugin generates, it is **highly recommended** to keep cardinality down by using [modifiers](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#modifiers) such as `tagexclude` to discard unwanted tags, as sketched below.
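For instance, a minimal sketch of dropping unwanted tags with `tagexclude` (the tag names listed here are only illustrative, not something the plugin requires):

```
[[inputs.openstack]]
  authentication_endpoint = "https://my.openstack.cloud:5000"
  username = "admin"
  password = "password"
  ## Discard high-cardinality tags before points reach the outputs.
  tagexclude = ["tenant_id", "openstack_tag_lab"]
```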
For deployments with only a small number of VMs and hosts, a short polling interval (seconds to minutes) is acceptable. For larger deployments, polling a large number of systems will impact performance, so use the `interval` option to change how often the plugin is run:

`interval`: How often a metric is gathered. Setting this value at the plugin level overrides the global agent interval setting.

Also consider polling the different OpenStack services at different intervals, depending on your requirements. This helps with both load and cardinality:

```
[[inputs.openstack]]
  interval = "5m"
  ....
  authentication_endpoint = "https://my.openstack.cloud:5000"
  ...
  enabled_services = ["nova_services"]
  ....

[[inputs.openstack]]
  interval = "30m"
  ....
  authentication_endpoint = "https://my.openstack.cloud:5000"
  ...
  enabled_services = ["services", "projects", "hypervisors", "flavors", "networks", "volumes"]
  ....
```

### Configuration

```
## The recommended interval to poll is '30m'

## The identity endpoint to authenticate against and get the service catalog from.
authentication_endpoint = "https://my.openstack.cloud:5000"

## The domain to authenticate against when using a V3 identity endpoint.
# domain = "default"

## The project to authenticate as.
# project = "admin"

## User authentication credentials. Must have admin rights.
username = "admin"
password = "password"

## Available services are:
## "agents", "aggregates", "flavors", "hypervisors", "networks", "nova_services",
## "ports", "projects", "servers", "services", "stacks", "storage_pools", "subnets", "volumes"
# enabled_services = ["services", "projects", "hypervisors", "flavors", "networks", "volumes"]

## Collect Server Diagnostics
# server_diagnotics = false

## Output secrets (such as adminPass for a server and UserID for a volume).
# output_secrets = false

## Amount of time allowed to complete the HTTP(s) request.
# timeout = "5s"

## HTTP Proxy support
# http_proxy_url = ""

## Optional TLS Config
# tls_ca = /path/to/cafile
# tls_cert = /path/to/certfile
# tls_key = /path/to/keyfile
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

## Options for tags received from OpenStack
# tag_prefix = "openstack_tag_"
# tag_value = "true"

## Timestamp format for timestamp data received from OpenStack.
## If false, the format is unix nanoseconds.
# human_readable_timestamps = false

## Measure OpenStack call duration
# measure_openstack_requests = false
```

### Measurements, Tags & Fields
|
||||
|
||||
* openstack_aggregate
|
||||
* name
|
||||
* aggregate_host [string]
|
||||
* aggregate_hosts [integer]
|
||||
* created_at [string]
|
||||
* deleted [boolean]
|
||||
* deleted_at [string]
|
||||
* id [integer]
|
||||
* updated_at [string]
|
||||
* openstack_flavor
|
||||
* is_public
|
||||
* name
|
||||
* disk [integer]
|
||||
* ephemeral [integer]
|
||||
* id [string]
|
||||
* ram [integer]
|
||||
* rxtx_factor [float]
|
||||
* swap [integer]
|
||||
* vcpus [integer]
|
||||
* openstack_hypervisor
|
||||
* cpu_arch
|
||||
* cpu_feature_tsc
|
||||
* cpu_feature_tsc-deadline
|
||||
* cpu_feature_tsc_adjust
|
||||
* cpu_feature_tsx-ctrl
|
||||
* cpu_feature_vme
|
||||
* cpu_feature_vmx
|
||||
* cpu_feature_x2apic
|
||||
* cpu_feature_xgetbv1
|
||||
* cpu_feature_xsave
|
||||
* cpu_model
|
||||
* cpu_vendor
|
||||
* hypervisor_hostname
|
||||
* hypervisor_type
|
||||
* hypervisor_version
|
||||
* service_host
|
||||
* service_id
|
||||
* state
|
||||
* status
|
||||
* cpu_topology_cores [integer]
|
||||
* cpu_topology_sockets [integer]
|
||||
* cpu_topology_threads [integer]
|
||||
* current_workload [integer]
|
||||
* disk_available_least [integer]
|
||||
* free_disk_gb [integer]
|
||||
* free_ram_mb [integer]
|
||||
* host_ip [string]
|
||||
* id [string]
|
||||
* local_gb [integer]
|
||||
* local_gb_used [integer]
|
||||
* memory_mb [integer]
|
||||
* memory_mb_used [integer]
|
||||
* running_vms [integer]
|
||||
* vcpus [integer]
|
||||
* vcpus_used [integer]
|
||||
* openstack_identity
|
||||
* description
|
||||
* domain_id
|
||||
* name
|
||||
* parent_id
|
||||
* enabled [boolean]
|
||||
* id [string]
|
||||
* is_domain [boolean]
|
||||
* projects [integer]
|
||||
* openstack_network
|
||||
* name
|
||||
* openstack_tags_xyz
|
||||
* project_id
|
||||
* status
|
||||
* tenant_id
|
||||
* admin_state_up [boolean]
|
||||
* availability_zone_hints [string]
|
||||
* created_at [string]
|
||||
* id [string]
|
||||
* shared [boolean]
|
||||
* subnet_id [string]
|
||||
* subnets [integer]
|
||||
* updated_at [string]
|
||||
* openstack_newtron_agent
|
||||
* agent_host
|
||||
* agent_type
|
||||
* availability_zone
|
||||
* binary
|
||||
* topic
|
||||
* admin_state_up [boolean]
|
||||
* alive [boolean]
|
||||
* created_at [string]
|
||||
* heartbeat_timestamp [string]
|
||||
* id [string]
|
||||
* resources_synced [boolean]
|
||||
* started_at [string]
|
||||
* openstack_nova_service
|
||||
* host_machine
|
||||
* name
|
||||
* state
|
||||
* status
|
||||
* zone
|
||||
* disabled_reason [string]
|
||||
* forced_down [boolean]
|
||||
* id [string]
|
||||
* updated_at [string]
|
||||
* openstack_port
|
||||
* device_id
|
||||
* device_owner
|
||||
* name
|
||||
* network_id
|
||||
* project_id
|
||||
* status
|
||||
* tenant_id
|
||||
* admin_state_up [boolean]
|
||||
* allowed_address_pairs [integer]
|
||||
* fixed_ips [integer]
|
||||
* id [string]
|
||||
* ip_address [string]
|
||||
* mac_address [string]
|
||||
* security_groups [string]
|
||||
* subnet_id [string]
|
||||
* openstack_request_duration
|
||||
* agents [integer]
|
||||
* aggregates [integer]
|
||||
* flavors [integer]
|
||||
* hypervisors [integer]
|
||||
* networks [integer]
|
||||
* nova_services [integer]
|
||||
* ports [integer]
|
||||
* projects [integer]
|
||||
* servers [integer]
|
||||
* stacks [integer]
|
||||
* storage_pools [integer]
|
||||
* subnets [integer]
|
||||
* volumes [integer]
|
||||
* openstack_server
|
||||
* flavor
|
||||
* host_id
|
||||
* host_name
|
||||
* image
|
||||
* key_name
|
||||
* name
|
||||
* project
|
||||
* status
|
||||
* tenant_id
|
||||
* user_id
|
||||
* accessIPv4 [string]
|
||||
* accessIPv6 [string]
|
||||
* addresses [integer]
|
||||
* adminPass [string]
|
||||
* created [string]
|
||||
* disk_gb [integer]
|
||||
* fault_code [integer]
|
||||
* fault_created [string]
|
||||
* fault_details [string]
|
||||
* fault_message [string]
|
||||
* id [string]
|
||||
* progress [integer]
|
||||
* ram_mb [integer]
|
||||
* security_groups [integer]
|
||||
* updated [string]
|
||||
* vcpus [integer]
|
||||
* volume_id [string]
|
||||
* volumes_attached [integer]
|
||||
* openstack_server_diagnostics
|
||||
* disk_name
|
||||
* no_of_disks
|
||||
* no_of_ports
|
||||
* port_name
|
||||
* server_id
|
||||
* cpu0_time [float]
|
||||
* cpu1_time [float]
|
||||
* cpu2_time [float]
|
||||
* cpu3_time [float]
|
||||
* cpu4_time [float]
|
||||
* cpu5_time [float]
|
||||
* cpu6_time [float]
|
||||
* cpu7_time [float]
|
||||
* disk_errors [float]
|
||||
* disk_read [float]
|
||||
* disk_read_req [float]
|
||||
* disk_write [float]
|
||||
* disk_write_req [float]
|
||||
* memory [float]
|
||||
* memory-actual [float]
|
||||
* memory-rss [float]
|
||||
* memory-swap_in [float]
|
||||
* port_rx [float]
|
||||
* port_rx_drop [float]
|
||||
* port_rx_errors [float]
|
||||
* port_rx_packets [float]
|
||||
* port_tx [float]
|
||||
* port_tx_drop [float]
|
||||
* port_tx_errors [float]
|
||||
* port_tx_packets [float]
|
||||
* openstack_service
|
||||
* name
|
||||
* service_enabled [boolean]
|
||||
* service_id [string]
|
||||
* openstack_storage_pool
|
||||
* driver_version
|
||||
* name
|
||||
* storage_protocol
|
||||
* vendor_name
|
||||
* volume_backend_name
|
||||
* free_capacity_gb [float]
|
||||
* total_capacity_gb [float]
|
||||
* openstack_subnet
|
||||
* cidr
|
||||
* gateway_ip
|
||||
* ip_version
|
||||
* name
|
||||
* network_id
|
||||
* openstack_tags_subnet_type_PRV
|
||||
* project_id
|
||||
* tenant_id
|
||||
* allocation_pools [string]
|
||||
* dhcp_enabled [boolean]
|
||||
* dns_nameservers [string]
|
||||
* id [string]
|
||||
* openstack_volume
|
||||
* attachment_attachment_id
|
||||
* attachment_device
|
||||
* attachment_host_name
|
||||
* availability_zone
|
||||
* bootable
|
||||
* description
|
||||
* name
|
||||
* status
|
||||
* user_id
|
||||
* volume_type
|
||||
* attachment_attached_at [string]
|
||||
* attachment_server_id [string]
|
||||
* created_at [string]
|
||||
* encrypted [boolean]
|
||||
* id [string]
|
||||
* multiattach [boolean]
|
||||
* size [integer]
|
||||
* total_attachments [integer]
|
||||
* updated_at [string]
|
||||
|
||||
### Example Output
|
||||
|
||||
```
|
||||
> openstack_newtron_agent,agent_host=vim2,agent_type=DHCP\ agent,availability_zone=nova,binary=neutron-dhcp-agent,host=telegraf_host,topic=dhcp_agent admin_state_up=true,alive=true,created_at="2021-01-07T03:40:53Z",heartbeat_timestamp="2021-10-14T07:46:40Z",id="17e1e446-d7da-4656-9e32-67d3690a306f",resources_synced=false,started_at="2021-07-02T21:47:42Z" 1634197616000000000
|
||||
> openstack_aggregate,host=telegraf_host,name=non-dpdk aggregate_host="vim3",aggregate_hosts=2i,created_at="2021-02-01T18:28:00Z",deleted=false,deleted_at="0001-01-01T00:00:00Z",id=3i,updated_at="0001-01-01T00:00:00Z" 1634197617000000000
|
||||
> openstack_flavor,host=telegraf_host,is_public=true,name=hwflavor disk=20i,ephemeral=0i,id="f89785c0-6b9f-47f5-a02e-f0fcbb223163",ram=8192i,rxtx_factor=1,swap=0i,vcpus=8i 1634197617000000000
|
||||
> openstack_hypervisor,cpu_arch=x86_64,cpu_feature_3dnowprefetch=true,cpu_feature_abm=true,cpu_feature_acpi=true,cpu_feature_adx=true,cpu_feature_aes=true,cpu_feature_apic=true,cpu_feature_xtpr=true,cpu_model=C-Server,cpu_vendor=xyz,host=telegraf_host,hypervisor_hostname=vim3,hypervisor_type=QEMU,hypervisor_version=4002000,service_host=vim3,service_id=192,state=up,status=enabled cpu_topology_cores=28i,cpu_topology_sockets=1i,cpu_topology_threads=2i,current_workload=0i,disk_available_least=2596i,free_disk_gb=2744i,free_ram_mb=374092i,host_ip="xx:xx:xx:x::xxx",id="12",local_gb=3366i,local_gb_used=622i,memory_mb=515404i,memory_mb_used=141312i,running_vms=15i,vcpus=0i,vcpus_used=72i 1634197618000000000
|
||||
> openstack_network,host=telegraf_host,name=Network\ 2,project_id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,status=active,tenant_id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx admin_state_up=true,availability_zone_hints="",created_at="2021-07-29T15:58:25Z",id="f5af5e71-e890-4245-a377-d4d86273c319",shared=false,subnet_id="2f7341c6-074d-42aa-9abc-71c662d9b336",subnets=1i,updated_at="2021-09-02T16:46:48Z" 1634197618000000000
|
||||
> openstack_nova_service,host=telegraf_host,host_machine=vim3,name=nova-compute,state=up,status=enabled,zone=nova disabled_reason="",forced_down=false,id="192",updated_at="2021-10-14T07:46:52Z" 1634197619000000000
|
||||
> openstack_port,device_id=a043b8b3-2831-462a-bba8-19088f3db45a,device_owner=compute:nova,host=telegraf_host,name=offload-port1,network_id=6b40d744-9a48-43f2-a4c8-2e0ccb45ac96,project_id=71f9bc44621234f8af99a3949258fc7b,status=ACTIVE,tenant_id=71f9bc44621234f8af99a3949258fc7b admin_state_up=true,allowed_address_pairs=0i,fixed_ips=1i,id="fb64626a-07e1-4d78-a70d-900e989537cc",ip_address="1.1.1.5",mac_address="xx:xx:xx:xx:xx:xx",security_groups="",subnet_id="eafa1eca-b318-4746-a55a-682478466689" 1634197620000000000
|
||||
> openstack_identity,domain_id=default,host=telegraf_host,name=service,parent_id=default enabled=true,id="a0877dd2ed1d4b5f952f5689bc04b0cb",is_domain=false,projects=7i 1634197621000000000
|
||||
> openstack_server,flavor=0d438971-56cf-4f86-801f-7b04b29384cb,host=telegraf_host,host_id=c0fe05b14261d35cf8748a3f5aae1234b88c2fd62b69fe24ca4a27e9,host_name=vim1,image=b295f1f3-1w23-470c-8734-197676eedd16,name=test-VM7,project=admin,status=active,tenant_id=80ac889731f540498fb1dc78e4bcd5ed,user_id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx accessIPv4="",accessIPv6="",addresses=1i,adminPass="",created="2021-09-07T14:40:11Z",disk_gb=8i,fault_code=0i,fault_created="0001-01-01T00:00:00Z",fault_details="",fault_message="",id="db92ee0d-459b-458e-9fe3-2be5ec7c87e1",progress=0i,ram_mb=16384i,security_groups=1i,updated="2021-09-07T14:40:19Z",vcpus=4i,volumes_attached=0i 1634197656000000000
|
||||
> openstack_service,host=telegraf_host,name=identity service_enabled=true,service_id="ad605eff92444a158d0f78768f2c4668" 1634197656000000000
|
||||
> openstack_storage_pool,driver_version=1.0.0,host=telegraf_host,name=storage_bloack_1,storage_protocol=nfs,vendor_name=xyz,volume_backend_name=abc free_capacity_gb=4847.54,total_capacity_gb=4864 1634197658000000000
|
||||
> openstack_subnet,cidr=10.10.20.10/28,gateway_ip=10.10.20.17,host=telegraf_host,ip_version=4,name=IPv4_Subnet_2,network_id=73c6e1d3-f522-4a3f-8e3c-762a0c06d68b,openstack_tags_lab=True,project_id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,tenant_id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx allocation_pools="10.10.20.11-10.10.20.30",dhcp_enabled=true,dns_nameservers="",id="db69fbb2-9ca1-4370-8c78-82a27951c94b" 1634197660000000000
|
||||
> openstack_volume,attachment_attachment_id=c83ca0d6-c467-44a0-ac1f-f87d769c0c65,attachment_device=/dev/vda,attachment_host_name=vim1,availability_zone=nova,bootable=true,host=telegraf_host,status=in-use,user_id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,volume_type=storage_bloack_1 attachment_attached_at="2021-01-12T21:02:04Z",attachment_server_id="c0c6b4af-0d26-4a0b-a6b4-4ea41fa3bb4a",created_at="2021-01-12T21:01:47Z",encrypted=false,id="d4204f1b-b1ae-1233-b25c-a57d91d2846e",multiattach=false,size=80i,total_attachments=1i,updated_at="2021-01-12T21:02:04Z" 1634197660000000000
|
||||
> openstack_request_duration,host=telegraf_host networks=703214354i 1634197660000000000
|
||||
> openstack_server_diagnostics,disk_name=vda,host=telegraf_host,no_of_disks=1,no_of_ports=2,port_name=vhu1234566c-9c,server_id=fdddb58c-bbb9-1234-894b-7ae140178909 cpu0_time=4924220000000,cpu1_time=218809610000000,cpu2_time=218624300000000,cpu3_time=220505700000000,disk_errors=-1,disk_read=619156992,disk_read_req=35423,disk_write=8432728064,disk_write_req=882445,memory=8388608,memory-actual=8388608,memory-rss=37276,memory-swap_in=0,port_rx=410516469288,port_rx_drop=13373626,port_rx_errors=-1,port_rx_packets=52140392,port_tx=417312195654,port_tx_drop=0,port_tx_errors=0,port_tx_packets=321385978 1634197660000000000
|
||||
```
@ -0,0 +1,958 @@
// Package openstack implements an OpenStack input plugin for Telegraf
|
||||
//
|
||||
// The OpenStack input plugin is a simple two-phase metric collector. In the first
|
||||
// pass a set of gatherers are run against the API to cache collections of resources.
|
||||
// In the second phase the gathered resources are combined and emitted as metrics.
|
||||
//
|
||||
// No aggregation is performed by the input plugin; instead, queries to InfluxDB should
|
||||
// be used to gather global totals of things such as tag frequency.
|
||||
package openstack
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gophercloud/gophercloud"
|
||||
"github.com/gophercloud/gophercloud/openstack"
|
||||
"github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats"
|
||||
"github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumetenants"
|
||||
"github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes"
|
||||
"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates"
|
||||
"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/diagnostics"
|
||||
"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors"
|
||||
nova_services "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services"
|
||||
"github.com/gophercloud/gophercloud/openstack/compute/v2/flavors"
|
||||
"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
|
||||
"github.com/gophercloud/gophercloud/openstack/identity/v3/projects"
|
||||
"github.com/gophercloud/gophercloud/openstack/identity/v3/services"
|
||||
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/agents"
|
||||
"github.com/gophercloud/gophercloud/openstack/networking/v2/networks"
|
||||
"github.com/gophercloud/gophercloud/openstack/networking/v2/ports"
|
||||
"github.com/gophercloud/gophercloud/openstack/networking/v2/subnets"
|
||||
"github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks"
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal/choice"
|
||||
httpconfig "github.com/influxdata/telegraf/plugins/common/http"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
var (
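// Regular expressions used to split the flattened server diagnostics keys into
// per-port, per-CPU and per-disk groups (see accumulateServerDiagnostics).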
|
||||
typePort = regexp.MustCompile(`_rx$|_rx_drop$|_rx_errors$|_rx_packets$|_tx$|_tx_drop$|_tx_errors$|_tx_packets$`)
|
||||
typeCPU = regexp.MustCompile(`cpu[0-9]{1,2}_time$`)
|
||||
typeStorage = regexp.MustCompile(`_errors$|_read$|_read_req$|_write$|_write_req$`)
|
||||
)
|
||||
|
||||
// volume is a structure used to unmarshal raw JSON from the API into.
|
||||
type volume struct {
|
||||
volumes.Volume
|
||||
volumetenants.VolumeTenantExt
|
||||
}
|
||||
|
||||
// OpenStack is the main structure associated with a collection instance.
|
||||
type OpenStack struct {
|
||||
// Configuration variables
|
||||
IdentityEndpoint string `toml:"authentication_endpoint"`
|
||||
Domain string `toml:"domain"`
|
||||
Project string `toml:"project"`
|
||||
Username string `toml:"username"`
|
||||
Password string `toml:"password"`
|
||||
EnabledServices []string `toml:"enabled_services"`
|
||||
ServerDiagnotics bool `toml:"server_diagnotics"`
|
||||
OutputSecrets bool `toml:"output_secrets"`
|
||||
TagPrefix string `toml:"tag_prefix"`
|
||||
TagValue string `toml:"tag_value"`
|
||||
HumanReadableTS bool `toml:"human_readable_timestamps"`
|
||||
MeasureRequest bool `toml:"measure_openstack_requests"`
|
||||
Log telegraf.Logger `toml:"-"`
|
||||
httpconfig.HTTPClientConfig
|
||||
|
||||
// Locally cached clients
|
||||
identity *gophercloud.ServiceClient
|
||||
compute *gophercloud.ServiceClient
|
||||
volume *gophercloud.ServiceClient
|
||||
network *gophercloud.ServiceClient
|
||||
stack *gophercloud.ServiceClient
|
||||
|
||||
// Locally cached resources
|
||||
openstackFlavors map[string]flavors.Flavor
|
||||
openstackHypervisors []hypervisors.Hypervisor
|
||||
diag map[string]interface{}
|
||||
openstackProjects map[string]projects.Project
|
||||
openstackServices map[string]services.Service
|
||||
}
|
||||
|
||||
// containsService indicates whether a particular service is enabled
|
||||
func (o *OpenStack) containsService(t string) bool {
|
||||
for _, service := range o.openstackServices {
|
||||
if service.Type == t {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// convertTimeFormat converts the timestamp format based on HumanReadableTS.
|
||||
func (o *OpenStack) convertTimeFormat(t time.Time) interface{} {
|
||||
if o.HumanReadableTS {
|
||||
return t.Format("2006-01-02T15:04:05.999999999Z07:00")
|
||||
}
|
||||
return t.UnixNano()
|
||||
}
|
||||
|
||||
// Description returns a description string of the input plugin and implements
|
||||
// the Input interface.
|
||||
func (o *OpenStack) Description() string {
|
||||
return "Collects performance metrics from OpenStack services"
|
||||
}
|
||||
|
||||
// sampleConfig is a sample configuration file entry.
|
||||
var sampleConfig = `
|
||||
## The recommended interval to poll is '30m'
|
||||
|
||||
## The identity endpoint to authenticate against and get the service catalog from.
|
||||
authentication_endpoint = "https://my.openstack.cloud:5000"
|
||||
|
||||
## The domain to authenticate against when using a V3 identity endpoint.
|
||||
# domain = "default"
|
||||
|
||||
## The project to authenticate as.
|
||||
# project = "admin"
|
||||
|
||||
## User authentication credentials. Must have admin rights.
|
||||
username = "admin"
|
||||
password = "password"
|
||||
|
||||
## Available services are:
|
||||
## "agents", "aggregates", "flavors", "hypervisors", "networks", "nova_services",
|
||||
## "ports", "projects", "servers", "services", "stacks", "storage_pools", "subnets", "volumes"
|
||||
# enabled_services = ["services", "projects", "hypervisors", "flavors", "networks", "volumes"]
|
||||
|
||||
## Collect Server Diagnostics
|
||||
# server_diagnotics = false
|
||||
|
||||
## Output secrets (such as adminPass for a server and UserID for a volume).
|
||||
# output_secrets = false
|
||||
|
||||
## Amount of time allowed to complete the HTTP(s) request.
|
||||
# timeout = "5s"
|
||||
|
||||
## HTTP Proxy support
|
||||
# http_proxy_url = ""
|
||||
|
||||
## Optional TLS Config
|
||||
# tls_ca = /path/to/cafile
|
||||
# tls_cert = /path/to/certfile
|
||||
# tls_key = /path/to/keyfile
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
|
||||
## Options for tags received from OpenStack
|
||||
# tag_prefix = "openstack_tag_"
|
||||
# tag_value = "true"
|
||||
|
||||
## Timestamp format for timestamp data received from OpenStack.
|
||||
## If false, the format is unix nanoseconds.
|
||||
# human_readable_timestamps = false
|
||||
|
||||
## Measure OpenStack call duration
|
||||
# measure_openstack_requests = false
|
||||
`
|
||||
|
||||
// SampleConfig returns a sample configuration file for auto-generation and
|
||||
// implements the Input interface.
|
||||
func (o *OpenStack) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
// Init performs any necessary initialization and creates the required API clients.
|
||||
func (o *OpenStack) Init() error {
|
||||
if len(o.EnabledServices) == 0 {
|
||||
o.EnabledServices = []string{"services", "projects", "hypervisors", "flavors", "networks", "volumes"}
|
||||
}
|
||||
if o.Username == "" || o.Password == "" {
|
||||
return fmt.Errorf("username or password can not be empty string")
|
||||
}
|
||||
if o.TagValue == "" {
|
||||
return fmt.Errorf("tag_value option can not be empty string")
|
||||
}
|
||||
sort.Strings(o.EnabledServices)
|
||||
o.openstackFlavors = map[string]flavors.Flavor{}
|
||||
o.openstackHypervisors = []hypervisors.Hypervisor{}
|
||||
o.diag = map[string]interface{}{}
|
||||
o.openstackProjects = map[string]projects.Project{}
|
||||
o.openstackServices = map[string]services.Service{}
|
||||
|
||||
// Authenticate against Keystone and get a token provider
|
||||
authOption := gophercloud.AuthOptions{
|
||||
IdentityEndpoint: o.IdentityEndpoint,
|
||||
DomainName: o.Domain,
|
||||
TenantName: o.Project,
|
||||
Username: o.Username,
|
||||
Password: o.Password,
|
||||
}
|
||||
provider, err := openstack.NewClient(authOption.IdentityEndpoint)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create client for OpenStack endpoint %v", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
client, err := o.HTTPClientConfig.CreateClient(ctx, o.Log)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
provider.HTTPClient = *client
|
||||
|
||||
if err := openstack.Authenticate(provider, authOption); err != nil {
|
||||
return fmt.Errorf("unable to authenticate OpenStack user %v", err)
|
||||
}
|
||||
|
||||
// Create required clients and attach to the OpenStack struct
|
||||
if o.identity, err = openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{}); err != nil {
|
||||
return fmt.Errorf("unable to create V3 identity client %v", err)
|
||||
}
|
||||
|
||||
if err := o.gatherServices(); err != nil {
|
||||
return fmt.Errorf("failed to get resource openstack services %v", err)
|
||||
}
|
||||
|
||||
if o.compute, err = openstack.NewComputeV2(provider, gophercloud.EndpointOpts{}); err != nil {
|
||||
return fmt.Errorf("unable to create V2 compute client %v", err)
|
||||
}
|
||||
|
||||
// Create required clients and attach to the OpenStack struct
|
||||
if o.network, err = openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{}); err != nil {
|
||||
return fmt.Errorf("unable to create V2 network client %v", err)
|
||||
}
|
||||
|
||||
// The Orchestration service is optional
|
||||
if o.containsService("orchestration") {
|
||||
if o.stack, err = openstack.NewOrchestrationV1(provider, gophercloud.EndpointOpts{}); err != nil {
|
||||
return fmt.Errorf("unable to create V1 stack client %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// The Cinder volume storage service is optional
|
||||
if o.containsService("volumev2") {
|
||||
if o.volume, err = openstack.NewBlockStorageV2(provider, gophercloud.EndpointOpts{}); err != nil {
|
||||
return fmt.Errorf("unable to create V2 volume client %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Gather gathers resources from the OpenStack API and accumulates metrics. This
|
||||
// implements the Input interface.
|
||||
func (o *OpenStack) Gather(acc telegraf.Accumulator) error {
|
||||
// Gather resources. Note service harvesting must come first as the other
|
||||
// gatherers are dependent on this information.
|
||||
gatherers := map[string]func(telegraf.Accumulator) error{
|
||||
"projects": o.gatherProjects,
|
||||
"hypervisors": o.gatherHypervisors,
|
||||
"flavors": o.gatherFlavors,
|
||||
"servers": o.gatherServers,
|
||||
"volumes": o.gatherVolumes,
|
||||
"storage_pools": o.gatherStoragePools,
|
||||
"subnets": o.gatherSubnets,
|
||||
"ports": o.gatherPorts,
|
||||
"networks": o.gatherNetworks,
|
||||
"aggregates": o.gatherAggregates,
|
||||
"nova_services": o.gatherNovaServices,
|
||||
"agents": o.gatherAgents,
|
||||
"stacks": o.gatherStacks,
|
||||
}
|
||||
|
||||
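// Track how long each enabled gatherer takes; emitted below as the
// openstack_request_duration measurement when measure_openstack_requests is set.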
callDuration := map[string]interface{}{}
|
||||
for _, service := range o.EnabledServices {
|
||||
// Services are already gathered in Init(), so just accumulate them here.
|
||||
if service == "services" {
|
||||
o.accumulateServices(acc)
|
||||
continue
|
||||
}
|
||||
start := time.Now()
|
||||
gatherer := gatherers[service]
|
||||
if err := gatherer(acc); err != nil {
|
||||
acc.AddError(fmt.Errorf("failed to get resource %q %v", service, err))
|
||||
}
|
||||
callDuration[service] = time.Since(start).Nanoseconds()
|
||||
}
|
||||
|
||||
if o.MeasureRequest {
|
||||
for service, duration := range callDuration {
|
||||
acc.AddFields("openstack_request_duration", map[string]interface{}{service: duration}, map[string]string{})
|
||||
}
|
||||
}
|
||||
|
||||
if o.ServerDiagnotics {
|
||||
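// Server diagnostics are collected while gathering servers, so run the server
// gatherer here when "servers" itself is not an enabled service.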
if !choice.Contains("servers", o.EnabledServices) {
|
||||
if err := o.gatherServers(acc); err != nil {
|
||||
acc.AddError(fmt.Errorf("failed to get resource server diagnostics %v", err))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
o.accumulateServerDiagnostics(acc)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// gatherServices collects services from the OpenStack API.
|
||||
func (o *OpenStack) gatherServices() error {
|
||||
page, err := services.List(o.identity, &services.ListOpts{}).AllPages()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list services %v", err)
|
||||
}
|
||||
extractedServices, err := services.ExtractServices(page)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to extract services %v", err)
|
||||
}
|
||||
for _, service := range extractedServices {
|
||||
o.openstackServices[service.ID] = service
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// gatherStacks collects and accumulates stacks data from the OpenStack API.
|
||||
func (o *OpenStack) gatherStacks(acc telegraf.Accumulator) error {
|
||||
page, err := stacks.List(o.stack, &stacks.ListOpts{}).AllPages()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list stacks %v", err)
|
||||
}
|
||||
extractedStacks, err := stacks.ExtractStacks(page)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to extract stacks %v", err)
|
||||
}
|
||||
for _, stack := range extractedStacks {
|
||||
tags := map[string]string{
|
||||
"description": stack.Description,
|
||||
"name": stack.Name,
|
||||
}
|
||||
for _, stackTag := range stack.Tags {
|
||||
tags[o.TagPrefix+stackTag] = o.TagValue
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"status": strings.ToLower(stack.Status),
|
||||
"id": stack.ID,
|
||||
"status_reason": stack.StatusReason,
|
||||
"creation_time": o.convertTimeFormat(stack.CreationTime),
|
||||
"updated_time": o.convertTimeFormat(stack.UpdatedTime),
|
||||
}
|
||||
acc.AddFields("openstack_stack", fields, tags)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// gatherNovaServices collects and accumulates nova_services data from the OpenStack API.
|
||||
func (o *OpenStack) gatherNovaServices(acc telegraf.Accumulator) error {
|
||||
page, err := nova_services.List(o.compute, &nova_services.ListOpts{}).AllPages()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list nova_services %v", err)
|
||||
}
|
||||
novaServices, err := nova_services.ExtractServices(page)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to extract nova_services %v", err)
|
||||
}
|
||||
for _, novaService := range novaServices {
|
||||
tags := map[string]string{
|
||||
"name": novaService.Binary,
|
||||
"host_machine": novaService.Host,
|
||||
"state": novaService.State,
|
||||
"status": strings.ToLower(novaService.Status),
|
||||
"zone": novaService.Zone,
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"id": novaService.ID,
|
||||
"disabled_reason": novaService.DisabledReason,
|
||||
"forced_down": novaService.ForcedDown,
|
||||
"updated_at": o.convertTimeFormat(novaService.UpdatedAt),
|
||||
}
|
||||
acc.AddFields("openstack_nova_service", fields, tags)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// gatherSubnets collects and accumulates subnets data from the OpenStack API.
|
||||
func (o *OpenStack) gatherSubnets(acc telegraf.Accumulator) error {
|
||||
page, err := subnets.List(o.network, &subnets.ListOpts{}).AllPages()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list subnets %v", err)
|
||||
}
|
||||
extractedSubnets, err := subnets.ExtractSubnets(page)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to extract subnets %v", err)
|
||||
}
|
||||
for _, subnet := range extractedSubnets {
|
||||
var allocationPools []string
|
||||
for _, pool := range subnet.AllocationPools {
|
||||
allocationPools = append(allocationPools, pool.Start+"-"+pool.End)
|
||||
}
|
||||
tags := map[string]string{
|
||||
"network_id": subnet.NetworkID,
|
||||
"name": subnet.Name,
|
||||
"description": subnet.Description,
|
||||
"ip_version": strconv.Itoa(subnet.IPVersion),
|
||||
"cidr": subnet.CIDR,
|
||||
"gateway_ip": subnet.GatewayIP,
|
||||
"tenant_id": subnet.TenantID,
|
||||
"project_id": subnet.ProjectID,
|
||||
"ipv6_address_mode": subnet.IPv6AddressMode,
|
||||
"ipv6_ra_mode": subnet.IPv6RAMode,
|
||||
"subnet_pool_id": subnet.SubnetPoolID,
|
||||
}
|
||||
for _, subnetTag := range subnet.Tags {
|
||||
tags[o.TagPrefix+subnetTag] = o.TagValue
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"id": subnet.ID,
|
||||
"dhcp_enabled": subnet.EnableDHCP,
|
||||
"dns_nameservers": strings.Join(subnet.DNSNameservers[:], ","),
|
||||
"allocation_pools": strings.Join(allocationPools[:], ","),
|
||||
}
|
||||
acc.AddFields("openstack_subnet", fields, tags)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// gatherPorts collects and accumulates ports data from the OpenStack API.
|
||||
func (o *OpenStack) gatherPorts(acc telegraf.Accumulator) error {
|
||||
page, err := ports.List(o.network, &ports.ListOpts{}).AllPages()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list ports %v", err)
|
||||
}
|
||||
extractedPorts, err := ports.ExtractPorts(page)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to extract ports %v", err)
|
||||
}
|
||||
for _, port := range extractedPorts {
|
||||
tags := map[string]string{
|
||||
"network_id": port.NetworkID,
|
||||
"name": port.Name,
|
||||
"description": port.Description,
|
||||
"status": strings.ToLower(port.Status),
|
||||
"tenant_id": port.TenantID,
|
||||
"project_id": port.ProjectID,
|
||||
"device_owner": port.DeviceOwner,
|
||||
"device_id": port.DeviceID,
|
||||
}
|
||||
for _, portTag := range port.Tags {
|
||||
tags[o.TagPrefix+portTag] = o.TagValue
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"id": port.ID,
|
||||
"mac_address": port.MACAddress,
|
||||
"admin_state_up": port.AdminStateUp,
|
||||
"fixed_ips": len(port.FixedIPs),
|
||||
"allowed_address_pairs": len(port.AllowedAddressPairs),
|
||||
"security_groups": strings.Join(port.SecurityGroups[:], ","),
|
||||
}
|
||||
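// A port may have several fixed IPs; emit one point per fixed IP so every
// address/subnet pair is captured, otherwise emit a single point.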
if len(port.FixedIPs) > 0 {
|
||||
for _, ip := range port.FixedIPs {
|
||||
fields["subnet_id"] = ip.SubnetID
|
||||
fields["ip_address"] = ip.IPAddress
|
||||
acc.AddFields("openstack_port", fields, tags)
|
||||
}
|
||||
} else {
|
||||
acc.AddFields("openstack_port", fields, tags)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// gatherNetworks collects and accumulates networks data from the OpenStack API.
|
||||
func (o *OpenStack) gatherNetworks(acc telegraf.Accumulator) error {
|
||||
page, err := networks.List(o.network, &networks.ListOpts{}).AllPages()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list networks %v", err)
|
||||
}
|
||||
extractedNetworks, err := networks.ExtractNetworks(page)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to extract networks %v", err)
|
||||
}
|
||||
for _, network := range extractedNetworks {
|
||||
tags := map[string]string{
|
||||
"name": network.Name,
|
||||
"description": network.Description,
|
||||
"status": strings.ToLower(network.Status),
|
||||
"tenant_id": network.TenantID,
|
||||
"project_id": network.ProjectID,
|
||||
}
|
||||
for _, networkTag := range network.Tags {
|
||||
tags[o.TagPrefix+networkTag] = o.TagValue
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"id": network.ID,
|
||||
"admin_state_up": network.AdminStateUp,
|
||||
"subnets": len(network.Subnets),
|
||||
"shared": network.Shared,
|
||||
"availability_zone_hints": strings.Join(network.AvailabilityZoneHints[:], ","),
|
||||
"updated_at": o.convertTimeFormat(network.UpdatedAt),
|
||||
"created_at": o.convertTimeFormat(network.CreatedAt),
|
||||
}
|
||||
if len(network.Subnets) > 0 {
|
||||
for _, subnet := range network.Subnets {
|
||||
fields["subnet_id"] = subnet
|
||||
acc.AddFields("openstack_network", fields, tags)
|
||||
}
|
||||
} else {
|
||||
acc.AddFields("openstack_network", fields, tags)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// gatherAgents collects and accumulates agents data from the OpenStack API.
|
||||
func (o *OpenStack) gatherAgents(acc telegraf.Accumulator) error {
|
||||
page, err := agents.List(o.network, &agents.ListOpts{}).AllPages()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list newtron agents %v", err)
|
||||
}
|
||||
extractedAgents, err := agents.ExtractAgents(page)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to extract newtron agents %v", err)
|
||||
}
|
||||
for _, agent := range extractedAgents {
|
||||
tags := map[string]string{
|
||||
"agent_type": agent.AgentType,
|
||||
"availability_zone": agent.AvailabilityZone,
|
||||
"binary": agent.Binary,
|
||||
"description": agent.Description,
|
||||
"agent_host": agent.Host,
|
||||
"topic": agent.Topic,
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"id": agent.ID,
|
||||
"admin_state_up": agent.AdminStateUp,
|
||||
"alive": agent.Alive,
|
||||
"resources_synced": agent.ResourcesSynced,
|
||||
"created_at": o.convertTimeFormat(agent.CreatedAt),
|
||||
"started_at": o.convertTimeFormat(agent.StartedAt),
|
||||
"heartbeat_timestamp": o.convertTimeFormat(agent.HeartbeatTimestamp),
|
||||
}
|
||||
acc.AddFields("openstack_newtron_agent", fields, tags)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// gatherAggregates collects and accumulates aggregates data from the OpenStack API.
|
||||
func (o *OpenStack) gatherAggregates(acc telegraf.Accumulator) error {
|
||||
page, err := aggregates.List(o.compute).AllPages()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list aggregates %v", err)
|
||||
}
|
||||
extractedAggregates, err := aggregates.ExtractAggregates(page)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to extract aggregates %v", err)
|
||||
}
|
||||
for _, aggregate := range extractedAggregates {
|
||||
tags := map[string]string{
|
||||
"availability_zone": aggregate.AvailabilityZone,
|
||||
"name": aggregate.Name,
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"id": aggregate.ID,
|
||||
"aggregate_hosts": len(aggregate.Hosts),
|
||||
"deleted": aggregate.Deleted,
|
||||
"created_at": o.convertTimeFormat(aggregate.CreatedAt),
|
||||
"updated_at": o.convertTimeFormat(aggregate.UpdatedAt),
|
||||
"deleted_at": o.convertTimeFormat(aggregate.DeletedAt),
|
||||
}
|
||||
if len(aggregate.Hosts) > 0 {
|
||||
for _, host := range aggregate.Hosts {
|
||||
fields["aggregate_host"] = host
|
||||
acc.AddFields("openstack_aggregate", fields, tags)
|
||||
}
|
||||
} else {
|
||||
acc.AddFields("openstack_aggregate", fields, tags)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// gatherProjects collects and accumulates projects data from the OpenStack API.
|
||||
func (o *OpenStack) gatherProjects(acc telegraf.Accumulator) error {
|
||||
page, err := projects.List(o.identity, &projects.ListOpts{}).AllPages()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list projects %v", err)
|
||||
}
|
||||
extractedProjects, err := projects.ExtractProjects(page)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to extract projects %v", err)
|
||||
}
|
||||
for _, project := range extractedProjects {
|
||||
o.openstackProjects[project.ID] = project
|
||||
tags := map[string]string{
|
||||
"description": project.Description,
|
||||
"domain_id": project.DomainID,
|
||||
"name": project.Name,
|
||||
"parent_id": project.ParentID,
|
||||
}
|
||||
for _, projectTag := range project.Tags {
|
||||
tags[o.TagPrefix+projectTag] = o.TagValue
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"id": project.ID,
|
||||
"is_domain": project.IsDomain,
|
||||
"enabled": project.Enabled,
|
||||
"projects": len(extractedProjects),
|
||||
}
|
||||
acc.AddFields("openstack_identity", fields, tags)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// gatherHypervisors collects and accumulates hypervisors data from the OpenStack API.
|
||||
func (o *OpenStack) gatherHypervisors(acc telegraf.Accumulator) error {
|
||||
page, err := hypervisors.List(o.compute).AllPages()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list hypervisors %v", err)
|
||||
}
|
||||
extractedHypervisors, err := hypervisors.ExtractHypervisors(page)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to extract hypervisors %v", err)
|
||||
}
|
||||
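// Cache the hypervisor list; gatherServers iterates it even when "hypervisors"
// is not in enabled_services.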
o.openstackHypervisors = extractedHypervisors
|
||||
if choice.Contains("hypervisors", o.EnabledServices) {
|
||||
for _, hypervisor := range extractedHypervisors {
|
||||
tags := map[string]string{
|
||||
"cpu_vendor": hypervisor.CPUInfo.Vendor,
|
||||
"cpu_arch": hypervisor.CPUInfo.Arch,
|
||||
"cpu_model": hypervisor.CPUInfo.Model,
|
||||
"status": strings.ToLower(hypervisor.Status),
|
||||
"state": hypervisor.State,
|
||||
"hypervisor_hostname": hypervisor.HypervisorHostname,
|
||||
"hypervisor_type": hypervisor.HypervisorType,
|
||||
"hypervisor_version": strconv.Itoa(hypervisor.HypervisorVersion),
|
||||
"service_host": hypervisor.Service.Host,
|
||||
"service_id": hypervisor.Service.ID,
|
||||
"service_disabled_reason": hypervisor.Service.DisabledReason,
|
||||
}
|
||||
for _, cpuFeature := range hypervisor.CPUInfo.Features {
|
||||
tags["cpu_feature_"+cpuFeature] = "true"
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"id": hypervisor.ID,
|
||||
"host_ip": hypervisor.HostIP,
|
||||
"cpu_topology_sockets": hypervisor.CPUInfo.Topology.Sockets,
|
||||
"cpu_topology_cores": hypervisor.CPUInfo.Topology.Cores,
|
||||
"cpu_topology_threads": hypervisor.CPUInfo.Topology.Threads,
|
||||
"current_workload": hypervisor.CurrentWorkload,
|
||||
"disk_available_least": hypervisor.DiskAvailableLeast,
|
||||
"free_disk_gb": hypervisor.FreeDiskGB,
|
||||
"free_ram_mb": hypervisor.FreeRamMB,
|
||||
"local_gb": hypervisor.LocalGB,
|
||||
"local_gb_used": hypervisor.LocalGBUsed,
|
||||
"memory_mb": hypervisor.MemoryMB,
|
||||
"memory_mb_used": hypervisor.MemoryMBUsed,
|
||||
"running_vms": hypervisor.RunningVMs,
|
||||
"vcpus": hypervisor.VCPUs,
|
||||
"vcpus_used": hypervisor.VCPUsUsed,
|
||||
}
|
||||
acc.AddFields("openstack_hypervisor", fields, tags)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// gatherFlavors collects and accumulates flavors data from the OpenStack API.
|
||||
func (o *OpenStack) gatherFlavors(acc telegraf.Accumulator) error {
|
||||
page, err := flavors.ListDetail(o.compute, &flavors.ListOpts{}).AllPages()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list flavors %v", err)
|
||||
}
|
||||
extractedflavors, err := flavors.ExtractFlavors(page)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to extract flavors %v", err)
|
||||
}
|
||||
for _, flavor := range extractedflavors {
|
||||
o.openstackFlavors[flavor.ID] = flavor
|
||||
tags := map[string]string{
|
||||
"name": flavor.Name,
|
||||
"is_public": strconv.FormatBool(flavor.IsPublic),
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"id": flavor.ID,
|
||||
"disk": flavor.Disk,
|
||||
"ram": flavor.RAM,
|
||||
"rxtx_factor": flavor.RxTxFactor,
|
||||
"swap": flavor.Swap,
|
||||
"vcpus": flavor.VCPUs,
|
||||
"ephemeral": flavor.Ephemeral,
|
||||
}
|
||||
acc.AddFields("openstack_flavor", fields, tags)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// gatherVolumes collects and accumulates volumes data from the OpenStack API.
|
||||
func (o *OpenStack) gatherVolumes(acc telegraf.Accumulator) error {
|
||||
page, err := volumes.List(o.volume, &volumes.ListOpts{AllTenants: true}).AllPages()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list volumes %v", err)
|
||||
}
|
||||
v := []volume{}
|
||||
if err := volumes.ExtractVolumesInto(page, &v); err != nil {
|
||||
return fmt.Errorf("unable to extract volumes %v", err)
|
||||
}
|
||||
for _, volume := range v {
|
||||
tags := map[string]string{
|
||||
"status": strings.ToLower(volume.Status),
|
||||
"availability_zone": volume.AvailabilityZone,
|
||||
"name": volume.Name,
|
||||
"description": volume.Description,
|
||||
"volume_type": volume.VolumeType,
|
||||
"snapshot_id": volume.SnapshotID,
|
||||
"source_volid": volume.SourceVolID,
|
||||
"bootable": volume.Bootable,
|
||||
"replication_status": strings.ToLower(volume.ReplicationStatus),
|
||||
"consistency_group_id": volume.ConsistencyGroupID,
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"id": volume.ID,
|
||||
"size": volume.Size,
|
||||
"total_attachments": len(volume.Attachments),
|
||||
"encrypted": volume.Encrypted,
|
||||
"multiattach": volume.Multiattach,
|
||||
"created_at": o.convertTimeFormat(volume.CreatedAt),
|
||||
"updated_at": o.convertTimeFormat(volume.UpdatedAt),
|
||||
}
|
||||
if o.OutputSecrets {
|
||||
tags["user_id"] = volume.UserID
|
||||
}
|
||||
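// Emit one point per attachment so each attachment's tags and fields are kept;
// a volume without attachments still produces a single point.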
if len(volume.Attachments) > 0 {
|
||||
for _, attachment := range volume.Attachments {
|
||||
if !o.HumanReadableTS {
|
||||
fields["attachment_attached_at"] = attachment.AttachedAt.UnixNano()
|
||||
} else {
|
||||
fields["attachment_attached_at"] = attachment.AttachedAt.Format("2006-01-02T15:04:05.999999999Z07:00")
|
||||
}
|
||||
tags["attachment_attachment_id"] = attachment.AttachmentID
|
||||
tags["attachment_device"] = attachment.Device
|
||||
tags["attachment_host_name"] = attachment.HostName
|
||||
fields["attachment_server_id"] = attachment.ServerID
|
||||
acc.AddFields("openstack_volume", fields, tags)
|
||||
}
|
||||
} else {
|
||||
acc.AddFields("openstack_volume", fields, tags)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// gatherStoragePools collects and accumulates storage pools data from the OpenStack API.
|
||||
func (o *OpenStack) gatherStoragePools(acc telegraf.Accumulator) error {
|
||||
results, err := schedulerstats.List(o.volume, &schedulerstats.ListOpts{Detail: true}).AllPages()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list storage pools %v", err)
|
||||
}
|
||||
storagePools, err := schedulerstats.ExtractStoragePools(results)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to extract storage pools %v", err)
|
||||
}
|
||||
for _, storagePool := range storagePools {
|
||||
tags := map[string]string{
|
||||
"name": storagePool.Capabilities.VolumeBackendName,
|
||||
"driver_version": storagePool.Capabilities.DriverVersion,
|
||||
"storage_protocol": storagePool.Capabilities.StorageProtocol,
|
||||
"vendor_name": storagePool.Capabilities.VendorName,
|
||||
"volume_backend_name": storagePool.Capabilities.VolumeBackendName,
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"total_capacity_gb": storagePool.Capabilities.TotalCapacityGB,
|
||||
"free_capacity_gb": storagePool.Capabilities.FreeCapacityGB,
|
||||
}
|
||||
acc.AddFields("openstack_storage_pool", fields, tags)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// gatherServers collects servers from the OpenStack API.
|
||||
func (o *OpenStack) gatherServers(acc telegraf.Accumulator) error {
|
||||
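// Servers are listed per hypervisor, so make sure the hypervisor cache is
// populated even when the hypervisors service itself is not enabled.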
if !choice.Contains("hypervisors", o.EnabledServices) {
|
||||
if err := o.gatherHypervisors(acc); err != nil {
|
||||
acc.AddError(fmt.Errorf("failed to get resource hypervisors %v", err))
|
||||
}
|
||||
}
|
||||
serverGather := choice.Contains("servers", o.EnabledServices)
|
||||
for _, hypervisor := range o.openstackHypervisors {
|
||||
page, err := servers.List(o.compute, &servers.ListOpts{AllTenants: true, Host: hypervisor.HypervisorHostname}).AllPages()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list servers %v", err)
|
||||
}
|
||||
extractedServers, err := servers.ExtractServers(page)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to extract servers %v", err)
|
||||
}
|
||||
for _, server := range extractedServers {
|
||||
if serverGather {
|
||||
o.accumulateServer(acc, server, hypervisor.HypervisorHostname)
|
||||
}
|
||||
if !o.ServerDiagnotics || server.Status != "ACTIVE" {
|
||||
continue
|
||||
}
|
||||
diagnostic, err := diagnostics.Get(o.compute, server.ID).Extract()
|
||||
if err != nil {
|
||||
acc.AddError(fmt.Errorf("unable to get diagnostics for server(%v) %v", server.ID, err))
|
||||
continue
|
||||
}
|
||||
o.diag[server.ID] = diagnostic
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// accumulateServices accumulates statistics of services.
|
||||
func (o *OpenStack) accumulateServices(acc telegraf.Accumulator) {
|
||||
for _, service := range o.openstackServices {
|
||||
tags := map[string]string{
|
||||
"name": service.Type,
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"service_id": service.ID,
|
||||
"service_enabled": service.Enabled,
|
||||
}
|
||||
acc.AddFields("openstack_service", fields, tags)
|
||||
}
|
||||
}
|
||||
|
||||
// accumulateServer accumulates statistics of a server.
|
||||
func (o *OpenStack) accumulateServer(acc telegraf.Accumulator, server servers.Server, hostName string) {
|
||||
tags := map[string]string{}
|
||||
// Extract the flavor details to avoid joins (ignore errors and leave as zero values)
|
||||
var vcpus, ram, disk int
|
||||
if flavorIDInterface, ok := server.Flavor["id"]; ok {
|
||||
if flavorID, ok := flavorIDInterface.(string); ok {
|
||||
tags["flavor"] = flavorID
|
||||
if flavor, ok := o.openstackFlavors[flavorID]; ok {
|
||||
vcpus = flavor.VCPUs
|
||||
ram = flavor.RAM
|
||||
disk = flavor.Disk
|
||||
}
|
||||
}
|
||||
}
|
||||
if imageIDInterface, ok := server.Image["id"]; ok {
|
||||
if imageID, ok := imageIDInterface.(string); ok {
|
||||
tags["image"] = imageID
|
||||
}
|
||||
}
|
||||
// Try to derive the associated project
|
||||
project := "unknown"
|
||||
if p, ok := o.openstackProjects[server.TenantID]; ok {
|
||||
project = p.Name
|
||||
}
|
||||
tags["tenant_id"] = server.TenantID
|
||||
tags["name"] = server.Name
|
||||
tags["host_id"] = server.HostID
|
||||
tags["status"] = strings.ToLower(server.Status)
|
||||
tags["key_name"] = server.KeyName
|
||||
tags["host_name"] = hostName
|
||||
tags["project"] = project
|
||||
fields := map[string]interface{}{
|
||||
"id": server.ID,
|
||||
"progress": server.Progress,
|
||||
"accessIPv4": server.AccessIPv4,
|
||||
"accessIPv6": server.AccessIPv6,
|
||||
"addresses": len(server.Addresses),
|
||||
"security_groups": len(server.SecurityGroups),
|
||||
"volumes_attached": len(server.AttachedVolumes),
|
||||
"fault_code": server.Fault.Code,
|
||||
"fault_details": server.Fault.Details,
|
||||
"fault_message": server.Fault.Message,
|
||||
"vcpus": vcpus,
|
||||
"ram_mb": ram,
|
||||
"disk_gb": disk,
|
||||
"fault_created": o.convertTimeFormat(server.Fault.Created),
|
||||
"updated": o.convertTimeFormat(server.Updated),
|
||||
"created": o.convertTimeFormat(server.Created),
|
||||
}
|
||||
if o.OutputSecrets {
|
||||
tags["user_id"] = server.UserID
|
||||
fields["adminPass"] = server.AdminPass
|
||||
}
|
||||
if len(server.AttachedVolumes) == 0 {
|
||||
acc.AddFields("openstack_server", fields, tags)
|
||||
} else {
|
||||
for _, AttachedVolume := range server.AttachedVolumes {
|
||||
fields["volume_id"] = AttachedVolume.ID
|
||||
acc.AddFields("openstack_server", fields, tags)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// accumulateServerDiagnostics accumulates statistics from the compute(nova) service.
|
||||
// Currently only the 'libvirt' driver is supported.
|
||||
func (o *OpenStack) accumulateServerDiagnostics(acc telegraf.Accumulator) {
|
||||
for serverID, diagnostic := range o.diag {
|
||||
s, ok := diagnostic.(map[string]interface{})
|
||||
if !ok {
|
||||
o.Log.Warnf("unknown type for diagnostics %T", diagnostic)
|
||||
continue
|
||||
}
|
||||
tags := map[string]string{
|
||||
"server_id": serverID,
|
||||
}
|
||||
fields := map[string]interface{}{}
|
||||
portName := make(map[string]bool)
|
||||
storageName := make(map[string]bool)
|
||||
memoryStats := make(map[string]interface{})
|
||||
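// Classify each flattened diagnostics key as a port, CPU, disk or memory statistic.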
for k, v := range s {
|
||||
if typePort.MatchString(k) {
|
||||
portName[strings.Split(k, "_")[0]] = true
|
||||
} else if typeCPU.MatchString(k) {
|
||||
fields[k] = v
|
||||
} else if typeStorage.MatchString(k) {
|
||||
storageName[strings.Split(k, "_")[0]] = true
|
||||
} else {
|
||||
memoryStats[k] = v
|
||||
}
|
||||
}
|
||||
fields["memory"] = memoryStats["memory"]
|
||||
fields["memory-actual"] = memoryStats["memory-actual"]
|
||||
fields["memory-rss"] = memoryStats["memory-rss"]
|
||||
fields["memory-swap_in"] = memoryStats["memory-swap_in"]
|
||||
tags["no_of_ports"] = strconv.Itoa(len(portName))
|
||||
tags["no_of_disks"] = strconv.Itoa(len(storageName))
|
||||
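// Emit one point per disk, copying that disk's counters into the generic disk_* fields.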
for key := range storageName {
|
||||
fields["disk_errors"] = s[key+"_errors"]
|
||||
fields["disk_read"] = s[key+"_read"]
|
||||
fields["disk_read_req"] = s[key+"_read_req"]
|
||||
fields["disk_write"] = s[key+"_write"]
|
||||
fields["disk_write_req"] = s[key+"_write_req"]
|
||||
tags["disk_name"] = key
|
||||
acc.AddFields("openstack_server_diagnostics", fields, tags)
|
||||
}
|
||||
for key := range portName {
|
||||
fields["port_rx"] = s[key+"_rx"]
|
||||
fields["port_rx_drop"] = s[key+"_rx_drop"]
|
||||
fields["port_rx_errors"] = s[key+"_rx_errors"]
|
||||
fields["port_rx_packets"] = s[key+"_rx_packets"]
|
||||
fields["port_tx"] = s[key+"_tx"]
|
||||
fields["port_tx_drop"] = s[key+"_tx_drop"]
|
||||
fields["port_tx_errors"] = s[key+"_tx_errors"]
|
||||
fields["port_tx_packets"] = s[key+"_tx_packets"]
|
||||
tags["port_name"] = key
|
||||
acc.AddFields("openstack_server_diagnostics", fields, tags)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// init registers a callback which creates a new OpenStack input instance.
|
||||
func init() {
|
||||
inputs.Add("openstack", func() telegraf.Input {
|
||||
return &OpenStack{
|
||||
Domain: "default",
|
||||
Project: "admin",
|
||||
TagPrefix: "openstack_tag_",
|
||||
TagValue: "true",
|
||||
}
|
||||
})
|
||||
}