feat: New input plugin for libvirt (#11814)

This commit is contained in:
Paweł Żak 2022-10-13 01:05:42 +02:00 committed by GitHub
parent 39e4bcdc90
commit 94e39fa018
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 2509 additions and 6 deletions

View File

@ -91,6 +91,7 @@ following works:
- github.com/denisenkom/go-mssqldb [BSD 3-Clause "New" or "Revised" License](https://github.com/denisenkom/go-mssqldb/blob/master/LICENSE.txt)
- github.com/devigned/tab [MIT License](https://github.com/devigned/tab/blob/master/LICENSE)
- github.com/dgryski/go-rendezvous [MIT License](https://github.com/dgryski/go-rendezvous/blob/master/LICENSE)
- github.com/digitalocean/go-libvirt [Apache License 2.0](https://github.com/digitalocean/go-libvirt/blob/master/LICENSE.md)
- github.com/dimchansky/utfbom [Apache License 2.0](https://github.com/dimchansky/utfbom/blob/master/LICENSE)
- github.com/djherbis/times [MIT License](https://github.com/djherbis/times/blob/master/LICENSE)
- github.com/docker/distribution [Apache License 2.0](https://github.com/docker/distribution/blob/master/LICENSE)
@ -160,6 +161,7 @@ following works:
- github.com/hashicorp/go-rootcerts [Mozilla Public License 2.0](https://github.com/hashicorp/go-rootcerts/blob/master/LICENSE)
- github.com/hashicorp/go-uuid [Mozilla Public License 2.0](https://github.com/hashicorp/go-uuid/blob/master/LICENSE)
- github.com/hashicorp/golang-lru [Mozilla Public License 2.0](https://github.com/hashicorp/golang-lru/blob/master/LICENSE)
- github.com/hashicorp/packer-plugin-sdk [Mozilla Public License 2.0](https://github.com/hashicorp/packer-plugin-sdk/blob/main/LICENSE)
- github.com/hashicorp/serf [Mozilla Public License 2.0](https://github.com/hashicorp/serf/blob/master/LICENSE)
- github.com/huandu/xstrings [MIT License](https://github.com/huandu/xstrings/blob/master/LICENSE)
- github.com/imdario/mergo [BSD 3-Clause "New" or "Revised" License](https://github.com/imdario/mergo/blob/master/LICENSE)
@ -279,6 +281,7 @@ following works:
- github.com/stretchr/objx [MIT License](https://github.com/stretchr/objx/blob/master/LICENSE)
- github.com/stretchr/testify [MIT License](https://github.com/stretchr/testify/blob/master/LICENSE)
- github.com/testcontainers/testcontainers-go [MIT License](https://github.com/testcontainers/testcontainers-go/blob/main/LICENSE)
- github.com/thomasklein94/packer-plugin-libvirt [Mozilla Public License 2.0](https://github.com/thomasklein94/packer-plugin-libvirt/blob/main/LICENSE)
- github.com/tidwall/gjson [MIT License](https://github.com/tidwall/gjson/blob/master/LICENSE)
- github.com/tidwall/match [MIT License](https://github.com/tidwall/match/blob/master/LICENSE)
- github.com/tidwall/pretty [MIT License](https://github.com/tidwall/pretty/blob/master/LICENSE)

8
go.mod
View File

@ -53,6 +53,7 @@ require (
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
github.com/couchbase/go-couchbase v0.1.1
github.com/denisenkom/go-mssqldb v0.12.0
github.com/digitalocean/go-libvirt v0.0.0-20220811165305-15feff002086
github.com/dimchansky/utfbom v1.1.1
github.com/djherbis/times v1.5.0
github.com/docker/docker v20.10.17+incompatible
@ -147,6 +148,7 @@ require (
github.com/stretchr/testify v1.8.0
github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62
github.com/testcontainers/testcontainers-go v0.13.0
github.com/thomasklein94/packer-plugin-libvirt v0.3.4
github.com/tidwall/gjson v1.14.3
github.com/tinylib/msgp v1.1.6
github.com/urfave/cli/v2 v2.16.3
@ -287,12 +289,13 @@ require (
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.1 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-hclog v0.16.2 // indirect
github.com/hashicorp/go-immutable-radix v1.3.0 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/packer-plugin-sdk v0.3.1 // indirect
github.com/hashicorp/serf v0.9.7 // indirect
github.com/huandu/xstrings v1.3.2 // indirect
github.com/imdario/mergo v0.3.12 // indirect
@ -349,7 +352,6 @@ require (
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect
github.com/pborman/uuid v1.2.1 // indirect
github.com/philhofer/fwd v1.1.1 // indirect
github.com/pierrec/lz4 v2.6.1+incompatible // indirect
github.com/pierrec/lz4/v4 v4.1.15 // indirect
github.com/pion/logging v0.2.2 // indirect
github.com/pion/transport v0.13.0 // indirect

14
go.sum
View File

@ -694,6 +694,8 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/digitalocean/go-libvirt v0.0.0-20220811165305-15feff002086 h1:FTREXo+EVmU9nOCaQ46PvH0hs1Rt2/diCoTAtxzDxrA=
github.com/digitalocean/go-libvirt v0.0.0-20220811165305-15feff002086/go.mod h1:yhKBkgJm/PWVHCFHLlFwqhIzS7FcutIYmS/fmzex5LQ=
github.com/digitalocean/godo v1.58.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
@ -1280,8 +1282,9 @@ github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v0.15.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
@ -1289,8 +1292,9 @@ github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39
github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs=
github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.0 h1:8exGP7ego3OmkfksihtSouGMZ+hQrhxx+FVELeXpVPE=
github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
@ -1330,6 +1334,8 @@ github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn
github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/memberlist v0.3.1 h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1vWZfM=
github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/packer-plugin-sdk v0.3.1 h1:Gr/mnihsdUcPfGiruFL93BQkiFh3EFPwyxxTWkwvRsQ=
github.com/hashicorp/packer-plugin-sdk v0.3.1/go.mod h1:+GzydiXdn0CkueigqXBsX4Poz5gfmFXZ/DkxKt4fmt4=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY=
@ -1950,7 +1956,6 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi
github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4/v4 v4.0.3/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.1/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
@ -2258,6 +2263,8 @@ github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955u
github.com/testcontainers/testcontainers-go v0.13.0 h1:OUujSlEGsXVo/ykPVZk3KanBNGN0TYb/7oKIPVn15JA=
github.com/testcontainers/testcontainers-go v0.13.0/go.mod h1:z1abufU633Eb/FmSBTzV6ntZAC1eZBYPtaFsn4nPuDk=
github.com/tetafro/godot v1.4.4/go.mod h1:FVDd4JuKliW3UgjswZfJfHq4vAx0bD/Jd5brJjGeaz4=
github.com/thomasklein94/packer-plugin-libvirt v0.3.4 h1:K+NkHFcZuiUTp4ZiDdBhWRMZiSMdsXwGuzyg4THKDAU=
github.com/thomasklein94/packer-plugin-libvirt v0.3.4/go.mod h1:FLQTTGhVNak3rFgrZCJ2TZR6Cywz7ef/+z5Pg11EvJg=
github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw=
github.com/tidwall/gjson v1.14.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
@ -2671,6 +2678,7 @@ golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220725212005-46097bf591d3/go.mod h1:AaygXjzTFtRAg2ttMY5RMuhpJ3cNnI0XpyFJD1iQRSM=
golang.org/x/net v0.0.0-20220728211354-c7608f3a8462/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.0.0-20220809184613-07c6da5e1ced/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=

View File

@ -0,0 +1,5 @@
//go:build !custom || inputs || inputs.libvirt
package all
import _ "github.com/influxdata/telegraf/plugins/inputs/libvirt" // register plugin

View File

@ -0,0 +1,263 @@
# Libvirt Input Plugin
The `libvirt` plugin collects statistics about virtualized
guests on a system by using the libvirt virtualization API,
created by Red Hat's Emerging Technology group.
Metrics are gathered directly from the hypervisor on a host
system, which means that Telegraf doesn't have to be installed
and configured on a guest system.
## Prerequisites
For proper operation of the libvirt plugin,
it is required that the host system has:
- enabled virtualization options for host CPU
- libvirtd and its dependencies installed and running
- qemu hypervisor installed and running
- at least one virtual machine for statistics monitoring
Useful links:
- [libvirt](https://libvirt.org/)
- [qemu](https://www.qemu.org/)
## Configuration
```toml
# The libvirt plugin collects statistics from virtualized guests using virtualization libvirt API.
[[inputs.libvirt]]
## Domain names from which libvirt gather statistics.
## By default (empty or missing array) the plugin gather statistics from each domain registered in the host system.
# domains = []
## Libvirt connection URI with hypervisor.
## The plugin supports multiple transport protocols and approaches which are configurable via the URI.
## The general URI form: driver[+transport]://[username@][hostname][:port]/[path][?extraparameters]
## Supported transport protocols: ssh, tcp, tls, unix
## URI examples for each type of transport protocol:
## 1. SSH: qemu+ssh://<USER@IP_OR_HOSTNAME>/system?keyfile=/<PATH_TO_PRIVATE_KEY>&known_hosts=/<PATH_TO_known_hosts>
## 2. TCP: qemu+tcp://<IP_OR_HOSTNAME>/system
## 3. TLS: qemu+tls://<HOSTNAME>/system?pkipath=/certs_dir/<COMMON_LOCATION_OF_CACERT_AND_SERVER_CLIENT_CERTS>
## 4. UNIX: qemu+unix:///system?socket=/<PATH_TO_libvirt-sock>
## Default URI is qemu:///system
# libvirt_uri = "qemu:///system"
## Statistics groups for which libvirt plugin will gather statistics.
## Supported statistics groups: state, cpu_total, balloon, vcpu, interface, block, perf, iothread, memory, dirtyrate
## Empty array means no metrics for statistics groups will be exposed by the plugin.
## By default the plugin will gather all available statistics.
# statistics_groups = ["state", "cpu_total", "balloon", "vcpu", "interface", "block", "perf", "iothread", "memory", "dirtyrate"]
## A list containing additional statistics to be exposed by libvirt plugin.
## Supported additional statistics: vcpu_mapping
## By default (empty or missing array) the plugin will not collect additional statistics.
# additional_statistics = []
```
Useful links:
- [Libvirt URI docs](https://libvirt.org/uri.html)
- [TLS setup for libvirt](https://wiki.libvirt.org/page/TLSSetup)
In cases when one or more of the following occur:
- the global Telegraf variable `interval` is set to a low value (e.g. 1s),
- a significant number of VMs are monitored,
- the medium connecting the plugin to the hypervisor is inefficient,
it is possible that the following warning appears in the logs:
`Collection took longer than expected`.
For that case, `interval` should be set inside plugin configuration.
Its value should be adjusted to plugin's runtime environment.
Example:
```toml
[[inputs.libvirt]]
interval = "30s"
```
### Example configuration
```toml
[[inputs.libvirt]]
domains = ["ubuntu_20"]
libvirt_uri = "qemu:///system"
statistics_groups = ["state", "interface"]
additional_statistics = ["vcpu_mapping"]
```
## Metrics
See the table below for a list of metrics produced by the plugin.
The exact metric format depends on the statistics libvirt reports,
which may vary depending on the version of libvirt on your system.
The metrics are divided into the following groups of statistics:
- state
- cpu_total
- balloon
- vcpu
- net
- perf
- block
- iothread
- memory
- dirtyrate
- vcpu_mapping - additional statistics
Statistics groups from the plugin correspond to the grouping of
metrics directly read from libvirtd using the `virsh domstats` command.
More details about metrics can be found at the links below:
- [Domain statistics](https://libvirt.org/manpages/virsh.html#domstats)
- [Performance monitoring events](https://libvirt.org/formatdomain.html#performance-monitoring-events)
| **Statistics group** | **Metric name** | **Exposed Telegraf field** | **Description** |
|:---|:---|:---|:---|
| **state** | state.state | state | state of the VM, returned as number from virDomainState enum |
||state.reason | reason | reason for entering given state, returned as int from virDomain*Reason enum corresponding to given state |
| **cpu_total** | cpu.time | time | total cpu time spent for this domain in nanoseconds |
|| cpu.user | user | user cpu time spent in nanoseconds |
|| cpu.system | system | system cpu time spent in nanoseconds |
|| cpu.haltpoll.success.time | haltpoll_success_time | cpu halt polling success time spent in nanoseconds |
|| cpu.haltpoll.fail.time | haltpoll_fail_time | cpu halt polling fail time spent in nanoseconds |
|| cpu.cache.monitor.count |count | the number of cache monitors for this domain |
|| cpu.cache.monitor.\<num\>.name | name | the name of cache monitor \<num\>, not available for kernels from 4.14 upwards |
|| cpu.cache.monitor.\<num\>.vcpus| vcpus |vcpu list of cache monitor \<num\>, not available for kernels from 4.14 upwards |
|| cpu.cache.monitor.\<num\>.bank.count | bank_count | the number of cache banks in cache monitor \<num\>, not available for kernels from 4.14 upwards |
|| cpu.cache.monitor.\<num\>.bank.\<index\>.id | id|host allocated cache id for bank \<index\> in cache monitor \<num\>, not available for kernels from 4.14 upwards |
|| cpu.cache.monitor.\<num\>.bank.\<index\>.bytes | bytes | the number of bytes of last level cache that the domain is using on cache bank \<index\>, not available for kernels from 4.14 upwards|
| **balloon** | balloon.current | current | the memory in KiB currently used |
|| balloon.maximum | maximum | the maximum memory in KiB allowed |
|| balloon.swap_in | swap_in | the amount of data read from swap space (in KiB) |
|| balloon.swap_out | swap_out | the amount of memory written out to swap space (in KiB) |
|| balloon.major_fault | major_fault | the number of page faults when disk IO was required |
|| balloon.minor_fault | minor_fault | the number of other page faults |
|| balloon.unused | unused | the amount of memory left unused by the system (in KiB) |
|| balloon.available | available | the amount of usable memory as seen by the domain (in KiB) |
|| balloon.rss | rss | Resident Set Size of running domain's process (in KiB) |
|| balloon.usable | usable | the amount of memory which can be reclaimed by balloon without causing host swapping (in KiB) |
|| balloon.last-update | last_update | timestamp of the last update of statistics (in seconds) |
|| balloon.disk_caches | disk_caches | the amount of memory that can be reclaimed without additional I/O, typically disk (in KiB) |
|| balloon.hugetlb_pgalloc | hugetlb_pgalloc | the number of successful huge page allocations from inside the domain via virtio balloon |
|| balloon.hugetlb_pgfail | hugetlb_pgfail | the number of failed huge page allocations from inside the domain via virtio balloon |
| **vcpu** | vcpu.current | current | current number of online virtual CPUs |
|| vcpu.maximum | maximum | maximum number of online virtual CPUs |
|| vcpu.\<num\>.state | state | state of the virtual CPU \<num\>, as number from virVcpuState enum |
|| vcpu.\<num\>.time | time | virtual cpu time spent by virtual CPU \<num\> (in microseconds) |
|| vcpu.\<num\>.wait | wait | virtual cpu time spent by virtual CPU \<num\> waiting on I/O (in microseconds) |
|| vcpu.\<num\>.halted | halted | virtual CPU \<num\> is halted: yes or no (may indicate the processor is idle or even disabled, depending on the architecture) |
|| vcpu.\<num\>.halted | halted_i | virtual CPU \<num\> is halted: 1 (for "yes") or 0 (for other values) (may indicate the processor is idle or even disabled, depending on the architecture) |
|| vcpu.\<num\>.delay | delay | time the vCPU \<num\> thread was enqueued by the host scheduler, but was waiting in the queue instead of running. Exposed to the VM as a steal time. |
|| --- | cpu_id | Information about mapping vcpu_id to cpu_id (id of physical cpu). Should only be exposed when statistics_group contains vcpu and additional_statistics contains vcpu_mapping (in config) |
| **interface** | net.count | count | number of network interfaces on this domain |
|| net.\<num\>.name | name | name of the interface \<num\> |
|| net.\<num\>.rx.bytes | rx_bytes | number of bytes received |
|| net.\<num\>.rx.pkts | rx_pkts | number of packets received |
|| net.\<num\>.rx.errs | rx_errs | number of receive errors |
|| net.\<num\>.rx.drop | rx_drop | number of receive packets dropped |
|| net.\<num\>.tx.bytes | tx_bytes | number of bytes transmitted |
|| net.\<num\>.tx.pkts | tx_pkts | number of packets transmitted |
|| net.\<num\>.tx.errs | tx_errs | number of transmission errors |
|| net.\<num\>.tx.drop | tx_drop | number of transmit packets dropped |
| **perf** | perf.cmt | cmt | the cache usage in Byte currently used, not available for kernels from 4.14 upwards |
|| perf.mbmt | mbmt | total system bandwidth from one level of cache, not available for kernels from 4.14 upwards |
|| perf.mbml | mbml | bandwidth of memory traffic for a memory controller, not available for kernels from 4.14 upwards |
|| perf.cpu_cycles | cpu_cycles | the count of cpu cycles (total/elapsed) |
|| perf.instructions | instructions | the count of instructions |
|| perf.cache_references | cache_references | the count of cache hits |
|| perf.cache_misses | cache_misses | the count of caches misses |
|| perf.branch_instructions | branch_instructions | the count of branch instructions |
|| perf.branch_misses | branch_misses | the count of branch misses |
|| perf.bus_cycles | bus_cycles | the count of bus cycles |
|| perf.stalled_cycles_frontend | stalled_cycles_frontend | the count of stalled frontend cpu cycles |
|| perf.stalled_cycles_backend | stalled_cycles_backend | the count of stalled backend cpu cycles |
|| perf.ref_cpu_cycles | ref_cpu_cycles | the count of ref cpu cycles |
|| perf.cpu_clock | cpu_clock | the count of cpu clock time |
|| perf.task_clock | task_clock | the count of task clock time |
|| perf.page_faults | page_faults | the count of page faults |
|| perf.context_switches | context_switches | the count of context switches |
|| perf.cpu_migrations | cpu_migrations | the count of cpu migrations |
|| perf.page_faults_min | page_faults_min | the count of minor page faults |
|| perf.page_faults_maj | page_faults_maj | the count of major page faults |
|| perf.alignment_faults | alignment_faults | the count of alignment faults |
|| perf.emulation_faults | emulation_faults | the count of emulation faults |
| **block** | block.count | count | number of block devices being listed |
|| block.\<num\>.name | name | name of the target of the block device \<num\> (the same name for multiple entries if --backing is present) |
|| block.\<num\>.backingIndex | backingIndex | when --backing is present, matches up with the \<backingStore\> index listed in domain XML for backing files |
|| block.\<num\>.path | path | file source of block device \<num\>, if it is a local file or block device |
|| block.\<num\>.rd.reqs | rd_reqs | number of read requests |
|| block.\<num\>.rd.bytes | rd_bytes | number of read bytes |
|| block.\<num\>.rd.times | rd_times | total time (ns) spent on reads |
|| block.\<num\>.wr.reqs | wr_reqs | number of write requests |
|| block.\<num\>.wr.bytes | wr_bytes | number of written bytes |
|| block.\<num\>.wr.times | wr_times | total time (ns) spent on writes |
|| block.\<num\>.fl.reqs | fl_reqs | total flush requests |
|| block.\<num\>.fl.times | fl_times | total time (ns) spent on cache flushing |
|| block.\<num\>.errors | errors | Xen only: the 'oo_req' value |
|| block.\<num\>.allocation | allocation | offset of highest written sector in bytes |
|| block.\<num\>.capacity | capacity | logical size of source file in bytes |
|| block.\<num\>.physical | physical | physical size of source file in bytes |
|| block.\<num\>.threshold | threshold | threshold (in bytes) for delivering the VIR_DOMAIN_EVENT_ID_BLOCK_THRESHOLD event. See domblkthreshold |
| **iothread** | iothread.count | count | maximum number of IOThreads in the subsequent list as unsigned int. Each IOThread in the list will use its iothread_id value as the \<id\>. There may be fewer \<id\> entries than the iothread.count value if the polling values are not supported |
|| iothread.\<id\>.poll-max-ns | poll_max_ns | maximum polling time in nanoseconds used by the \<id\> IOThread. A value of 0 (zero) indicates polling is disabled |
|| iothread.\<id\>.poll-grow | poll_grow | polling time grow value. A value of 0 (zero) growth is managed by the hypervisor |
|| iothread.\<id\>.poll-shrink | poll_shrink | polling time shrink value. A value of 0 (zero) indicates shrink is managed by the hypervisor |
| **memory** | memory.bandwidth.monitor.count | count | the number of memory bandwidth monitors for this domain, not available for kernels from 4.14 upwards |
|| memory.bandwidth.monitor.\<num\>.name | name | the name of monitor \<num\>, not available for kernels from 4.14 upwards |
|| memory.bandwidth.monitor.\<num\>.vcpus | vcpus | the vcpu list of monitor \<num\>, not available for kernels from 4.14 upwards |
|| memory.bandwidth.monitor.\<num\>.node.count | node_count | the number of memory controller in monitor \<num\>, not available for kernels from 4.14 upwards |
|| memory.bandwidth.monitor.\<num\>.node.\<index\>.id | id | host allocated memory controller id for controller \<index\> of monitor \<num\>, not available for kernels from 4.14 upwards |
|| memory.bandwidth.monitor.\<num\>.node.\<index\>.bytes.local | bytes_local | the accumulative bytes consumed by \@vcpus that passing through the memory controller in the same processor that the scheduled host CPU belongs to, not available for kernels from 4.14 upwards |
|| memory.bandwidth.monitor.\<num\>.node.\<index\>.bytes.total | bytes_total | the total bytes consumed by \@vcpus that passing through all memory controllers, either local or remote controller, not available for kernels from 4.14 upwards |
| **dirtyrate** | dirtyrate.calc_status | calc_status | the status of last memory dirty rate calculation, returned as number from virDomainDirtyRateStatus enum |
|| dirtyrate.calc_start_time | calc_start_time | the start time of last memory dirty rate calculation |
|| dirtyrate.calc_period | calc_period | the period of last memory dirty rate calculation |
|| dirtyrate.megabytes_per_second | megabytes_per_second | the calculated memory dirty rate in MiB/s |
|| dirtyrate.calc_mode | calc_mode | the calculation mode used last measurement (page-sampling/dirty-bitmap/dirty-ring) |
|| dirtyrate.vcpu.\<num\>.megabytes_per_second | megabytes_per_second | the calculated memory dirty rate for a virtual cpu in MiB/s |
### Additional statistics
| **Statistics group** | **Exposed Telegraf tag** | **Exposed Telegraf field** |**Description** |
|:-------------------------------|:-----------------------------:|:-------------------------------:|:-----------------------|
| **vcpu_mapping** | vcpu_id | --- | ID of Virtual CPU |
|| --- | cpu_id | Comma separated list (exposed as a string) of Physical CPU IDs |
## Example Output
```text
libvirt_cpu_affinity,domain_name=U22,host=localhost,vcpu_id=0 cpu_id="1,2,3" 1662383707000000000
libvirt_cpu_affinity,domain_name=U22,host=localhost,vcpu_id=1 cpu_id="1,2,3,4,5,6,7,8,9,10" 1662383707000000000
libvirt_balloon,domain_name=U22,host=localhost current=4194304i,maximum=4194304i,swap_in=0i,swap_out=0i,major_fault=0i,minor_fault=0i,unused=3928628i,available=4018480i,rss=1036012i,usable=3808724i,last_update=1654611373i,disk_caches=68820i,hugetlb_pgalloc=0i,hugetlb_pgfail=0i 1662383709000000000
libvirt_vcpu_total,domain_name=U22,host=localhost maximum=2i,current=2i 1662383709000000000
libvirt_vcpu,domain_name=U22,host=localhost,vcpu_id=0 state=1i,time=17943740000000i,wait=0i,halted="no",halted_i=0i,delay=14246609424i,cpu_id=1i 1662383709000000000
libvirt_vcpu,domain_name=U22,host=localhost,vcpu_id=1 state=1i,time=18288400000000i,wait=0i,halted="yes",halted_i=1i,delay=12902231142i,cpu_id=3i 1662383709000000000
libvirt_net_total,domain_name=U22,host=localhost count=1i 1662383709000000000
libvirt_net,domain_name=U22,host=localhost,interface_id=0 name="vnet0",rx_bytes=110i,rx_pkts=1i,rx_errs=0i,rx_drop=31007i,tx_bytes=0i,tx_pkts=0i,tx_errs=0i,tx_drop=0i 1662383709000000000
libvirt_block_total,domain_name=U22,host=localhost count=1i 1662383709000000000
libvirt_block,domain_name=U22,host=localhost,block_id=0 name="vda",backingIndex=1i,path="/tmp/ubuntu_image.img",rd_reqs=11354i,rd_bytes=330314752i,rd_times=6240559566i,wr_reqs=52440i,wr_bytes=1183828480i,wr_times=21887150375i,fl_reqs=32250i,fl_times=23158998353i,errors=0i,allocation=770048000i,capacity=2361393152i,physical=770052096i,threshold=2147483648i
libvirt_perf,domain_name=U22,host=localhost cmt=19087360i,mbmt=77168640i,mbml=67788800i,cpu_cycles=29858995122i,instructions=0i,cache_references=3053301695i,cache_misses=609441024i,branch_instructions=2623890194i,branch_misses=103707961i,bus_cycles=188105628i,stalled_cycles_frontend=0i,stalled_cycles_backend=0i,ref_cpu_cycles=30766094039i,cpu_clock=25166642695i,task_clock=25263578917i,page_faults=2670i,context_switches=294284i,cpu_migrations=17949i,page_faults_min=2670i,page_faults_maj=0i,alignment_faults=0i,emulation_faults=0i 1662383709000000000
libvirt_dirtyrate,domain_name=U22,host=localhost calc_status=2i,calc_start_time=348414i,calc_period=1i,dirtyrate.megabytes_per_second=4i,calc_mode="dirty-ring" 1662383709000000000
libvirt_dirtyrate_vcpu,domain_name=U22,host=localhost,vcpu_id=0 megabytes_per_second=2i 1662383709000000000
libvirt_dirtyrate_vcpu,domain_name=U22,host=localhost,vcpu_id=1 megabytes_per_second=2i 1662383709000000000
libvirt_state,domain_name=U22,host=localhost state=1i,reason=5i 1662383709000000000
libvirt_cpu,domain_name=U22,host=localhost time=67419144867000i,user=63886161852000i,system=3532983015000i,haltpoll_success_time=516907915i,haltpoll_fail_time=2727253643i 1662383709000000000
libvirt_cpu_cache_monitor_total,domain_name=U22,host=localhost count=1i 1662383709000000000
libvirt_cpu_cache_monitor,domain_name=U22,host=localhost,cache_monitor_id=0 name="any_name_vcpus_0-3",vcpus="0-3",bank_count=1i 1662383709000000000
libvirt_cpu_cache_monitor_bank,domain_name=U22,host=localhost,cache_monitor_id=0,bank_index=0 id=0i,bytes=5406720i 1662383709000000000
libvirt_iothread_total,domain_name=U22,host=localhost count=1i 1662383709000000000
libvirt_iothread,domain_name=U22,host=localhost,iothread_id=0 poll_max_ns=32768i,poll_grow=0i,poll_shrink=0i 1662383709000000000
libvirt_memory_bandwidth_monitor_total,domain_name=U22,host=localhost count=2i 1662383709000000000
libvirt_memory_bandwidth_monitor,domain_name=U22,host=localhost,memory_bandwidth_monitor_id=0 name="any_name_vcpus_0-4",vcpus="0-4",node_count=2i 1662383709000000000
libvirt_memory_bandwidth_monitor,domain_name=U22,host=localhost,memory_bandwidth_monitor_id=1 name="vcpus_7",vcpus="7",node_count=2i 1662383709000000000
libvirt_memory_bandwidth_monitor_node,domain_name=U22,host=localhost,memory_bandwidth_monitor_id=0,controller_index=0 id=0i,bytes_total=10208067584i,bytes_local=4807114752i 1662383709000000000
libvirt_memory_bandwidth_monitor_node,domain_name=U22,host=localhost,memory_bandwidth_monitor_id=0,controller_index=1 id=1i,bytes_total=8693735424i,bytes_local=5850161152i 1662383709000000000
libvirt_memory_bandwidth_monitor_node,domain_name=U22,host=localhost,memory_bandwidth_monitor_id=1,controller_index=0 id=0i,bytes_total=853811200i,bytes_local=290701312i 1662383709000000000
libvirt_memory_bandwidth_monitor_node,domain_name=U22,host=localhost,memory_bandwidth_monitor_id=1,controller_index=1 id=1i,bytes_total=406044672i,bytes_local=229425152i 1662383709000000000
```

View File

@ -0,0 +1,264 @@
package libvirt
import (
	_ "embed"
	"errors"
	"fmt"
	"sync"

	golibvirt "github.com/digitalocean/go-libvirt"
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
	libvirtutils "github.com/thomasklein94/packer-plugin-libvirt/libvirt-utils"
	"golang.org/x/sync/errgroup"
)
//go:embed sample.conf
var sampleConfig string
const (
	// Bit flags selecting which libvirt statistics groups to request.
	// They mirror libvirt's VIR_DOMAIN_STATS_* values and may be OR-ed
	// together into a single request mask.
	domainStatsState     uint32 = 1 << iota // 1
	domainStatsCPUTotal                     // 2
	domainStatsBalloon                      // 4
	domainStatsVCPU                         // 8
	domainStatsInterface                    // 16
	domainStatsBlock                        // 32
	domainStatsPerf                         // 64
	domainStatsIothread                     // 128
	domainStatsMemory                       // 256
	domainStatsDirtyrate                    // 512

	// domainStatsAll selects every statistics group above (all ten bits set).
	domainStatsAll uint32 = 1<<10 - 1 // 1023
)

const (
	// defaultLibvirtURI is used when libvirt_uri is absent from the config.
	defaultLibvirtURI = "qemu:///system"
	pluginName        = "libvirt"
)
// Libvirt is the telegraf input plugin state; the exported fields are
// populated from the TOML configuration before Init is called.
type Libvirt struct {
	LibvirtURI           string          `toml:"libvirt_uri"`           // hypervisor connection URI; defaults to qemu:///system
	Domains              []string        `toml:"domains"`               // domain names to collect from; empty means all
	StatisticsGroups     []string        `toml:"statistics_groups"`     // statistics groups to gather; nil means all
	AdditionalStatistics []string        `toml:"additional_statistics"` // extra collections, currently only "vcpu_mapping"
	Log                  telegraf.Logger `toml:"-"`

	utils              utils               // libvirt access layer, mockable in tests
	metricNumber       uint32              // OR-ed domainStats* flags computed in Init
	vcpuMappingEnabled bool                // true when "vcpu_mapping" was requested
	domainsMap         map[string]struct{} // set built from Domains for O(1) filtering
}
// SampleConfig returns the embedded example configuration for the plugin.
func (l *Libvirt) SampleConfig() string {
	return sampleConfig
}
// Init validates the configuration and precomputes internal state
// (domain set, statistics-group bitmask) before the first Gather call.
func (l *Libvirt) Init() error {
	if len(l.Domains) == 0 {
		l.Log.Debugf("No domains given. Collecting metrics from all available domains.")
	}
	l.domainsMap = make(map[string]struct{}, len(l.Domains))
	for _, name := range l.Domains {
		l.domainsMap[name] = struct{}{}
	}

	if l.LibvirtURI == "" {
		l.Log.Debugf("Using default libvirt url - %q", defaultLibvirtURI)
		l.LibvirtURI = defaultLibvirtURI
	}
	if err := l.validateLibvirtURI(); err != nil {
		return err
	}

	// A missing statistics_groups key means "gather everything";
	// an explicit (possibly empty) list is translated into a bitmask.
	if l.StatisticsGroups == nil {
		l.Log.Debugf("Setting libvirt to gather all metrics.")
		l.metricNumber = domainStatsAll
	} else if err := l.calculateMetricNumber(); err != nil {
		return err
	}

	if err := l.validateAdditionalStatistics(); err != nil {
		return err
	}
	if !l.isThereAnythingToGather() {
		return fmt.Errorf("all configuration options are empty or invalid. Did not find anything to gather")
	}
	return nil
}
// validateLibvirtURI parses the configured URI so that a malformed value
// is rejected at Init time rather than on the first Gather.
func (l *Libvirt) validateLibvirtURI() error {
	var uri libvirtutils.LibvirtUri
	if err := uri.Unmarshal(l.LibvirtURI); err != nil {
		return err
	}
	// dialer not needed, calling this just for validating libvirt URI as soon as possible:
	_, err := libvirtutils.NewDialerFromLibvirtUri(uri)
	return err
}
// calculateMetricNumber folds the configured statistics_groups into the
// bitmask passed to libvirt, rejecting unknown and duplicated names.
func (l *Libvirt) calculateMetricNumber() error {
	groupFlags := map[string]uint32{
		"state":     domainStatsState,
		"cpu_total": domainStatsCPUTotal,
		"balloon":   domainStatsBalloon,
		"vcpu":      domainStatsVCPU,
		"interface": domainStatsInterface,
		"block":     domainStatsBlock,
		"perf":      domainStatsPerf,
		"iothread":  domainStatsIothread,
		"memory":    domainStatsMemory,
		"dirtyrate": domainStatsDirtyrate,
	}

	seen := make(map[string]struct{}, len(l.StatisticsGroups))
	for _, name := range l.StatisticsGroups {
		flag, known := groupFlags[name]
		if !known {
			return fmt.Errorf("unrecognized metrics name %q", name)
		}
		if _, duplicate := seen[name]; duplicate {
			return fmt.Errorf("duplicated statistics group in config: %q", name)
		}
		seen[name] = struct{}{}
		l.metricNumber += flag
	}
	return nil
}
// validateAdditionalStatistics checks the additional_statistics list;
// "vcpu_mapping" is the only supported entry and may appear only once.
func (l *Libvirt) validateAdditionalStatistics() error {
	for _, name := range l.AdditionalStatistics {
		if name != "vcpu_mapping" {
			return fmt.Errorf("additional statistics: %v is not supported by this plugin", name)
		}
		if l.vcpuMappingEnabled {
			return fmt.Errorf("duplicated additional statistic in config: %q", name)
		}
		l.vcpuMappingEnabled = true
	}
	return nil
}
// isThereAnythingToGather reports whether at least one statistics group
// or one additional statistic was configured.
func (l *Libvirt) isThereAnythingToGather() bool {
	if l.metricNumber > 0 {
		return true
	}
	return len(l.AdditionalStatistics) > 0
}
// Gather is the telegraf entry point: it (re)connects to libvirt,
// selects the configured domains, optionally collects the vCPU mapping,
// and emits all requested metrics into acc.
func (l *Libvirt) Gather(acc telegraf.Accumulator) error {
	if err := l.utils.EnsureConnected(l.LibvirtURI); err != nil {
		return err
	}

	// Collect every domain known to the hypervisor, then narrow it down.
	allDomains, err := l.utils.GatherAllDomains()
	if handledErr := handleError(err, "error occurred while gathering all domains", l.utils); handledErr != nil {
		return handledErr
	}
	if len(allDomains) == 0 {
		l.Log.Debug("Couldn't find any domains on system")
		return nil
	}

	domains := l.filterDomains(allDomains)
	if len(domains) == 0 {
		l.Log.Debug("Configured domains are not available on system")
		return nil
	}

	var vcpuInfos map[string][]vcpuAffinity
	if l.vcpuMappingEnabled {
		vcpuInfos, err = l.getVcpuMapping(domains)
		if handledErr := handleError(err, "error occurred while gathering vcpu mapping", l.utils); handledErr != nil {
			return handledErr
		}
	}

	err = l.gatherMetrics(domains, vcpuInfos, acc)
	return handleError(err, "error occurred while gathering metrics", l.utils)
}
// handleError enriches err with errMessage and drops the libvirt
// connection so the next Gather dials a fresh one (a failed RPC usually
// means the connection is unusable). Returns nil when err is nil.
func handleError(err error, errMessage string, utils utils) error {
	if err == nil {
		return nil
	}
	if chanErr := utils.Disconnect(); chanErr != nil {
		return fmt.Errorf("%s: %v; error occurred when disconnecting: %v", errMessage, err, chanErr)
	}
	// Wrap with %w (instead of %v) so callers can still match the root
	// cause with errors.Is / errors.As; the message text is unchanged.
	return fmt.Errorf("%s: %w", errMessage, err)
}
// filterDomains keeps only the domains listed in the configuration;
// with no configured domains, every available domain passes through.
func (l *Libvirt) filterDomains(availableDomains []golibvirt.Domain) []golibvirt.Domain {
	if len(l.domainsMap) == 0 {
		return availableDomains
	}
	var wanted []golibvirt.Domain
	for _, candidate := range availableDomains {
		if _, found := l.domainsMap[candidate.Name]; found {
			wanted = append(wanted, candidate)
		}
	}
	return wanted
}
// gatherMetrics fetches the requested statistics groups for the given
// domains in one libvirt call and translates them into telegraf metrics.
func (l *Libvirt) gatherMetrics(domains []golibvirt.Domain, vcpuInfos map[string][]vcpuAffinity, acc telegraf.Accumulator) error {
	stats, err := l.utils.GatherStatsForDomains(domains, l.metricNumber)
	if err != nil {
		return err
	}
	l.addMetrics(stats, vcpuInfos, acc)
	return nil
}
// getVcpuMapping returns, keyed by domain name, the vCPU→pCPU affinity
// data for every given domain, collected concurrently.
func (l *Libvirt) getVcpuMapping(domains []golibvirt.Domain) (map[string][]vcpuAffinity, error) {
	pCPUs, err := l.utils.GatherNumberOfPCPUs()
	if err != nil {
		return nil, err
	}

	vcpuInfos := make(map[string][]vcpuAffinity, len(domains))
	var group errgroup.Group
	// Plain Mutex instead of RWMutex: the map is only ever written here,
	// so the reader/writer distinction bought nothing.
	var mu sync.Mutex
	for i := range domains {
		domain := domains[i] // per-iteration copy captured by the goroutine
		// Executing GatherVcpuMapping can take some time, it is worth to call it in parallel
		group.Go(func() error {
			vcpuInfo, err := l.utils.GatherVcpuMapping(domain, pCPUs, l.shouldGetCurrentPCPU())
			if err != nil {
				return err
			}
			mu.Lock()
			vcpuInfos[domain.Name] = vcpuInfo
			mu.Unlock()
			return nil
		})
	}

	if err := group.Wait(); err != nil {
		return nil, err
	}
	return vcpuInfos, nil
}
// shouldGetCurrentPCPU reports whether the realtime vCPU→pCPU placement
// is needed: only when vcpu_mapping is enabled AND the vcpu statistics
// group is gathered (the value is attached to libvirt_vcpu points).
func (l *Libvirt) shouldGetCurrentPCPU() bool {
	return l.vcpuMappingEnabled && (l.metricNumber&domainStatsVCPU) != 0
}
// init registers the plugin constructor with telegraf's input registry
// under the "libvirt" name.
func init() {
	inputs.Add(pluginName, func() telegraf.Input {
		return &Libvirt{
			utils: &utilsImpl{},
		}
	})
}

View File

@ -0,0 +1,571 @@
package libvirt
import (
"regexp"
"strings"
golibvirt "github.com/digitalocean/go-libvirt"
"github.com/influxdata/telegraf"
)
var (
	// cache.monitor.<id>.(name|vcpus|bank_count) — per-cache-monitor attributes
	cpuCacheMonitorRegexp = regexp.MustCompile(`^cache\.monitor\..+?\.(name|vcpus|bank_count)$`)
	// cache.monitor.<id>.bank.<index>.(id|bytes) — per-bank counters of a cache monitor
	cpuCacheMonitorBankRegexp = regexp.MustCompile(`^cache\.monitor\..+?\.bank\..+?\.(id|bytes)$`)
	// bandwidth.monitor.<id>.(name|vcpus|node_count) — per-bandwidth-monitor attributes
	memoryBandwidthMonitorRegexp = regexp.MustCompile(`^bandwidth\.monitor\..+?\.(name|vcpus|node_count)$`)
	// bandwidth.monitor.<id>.node.<index>.(id|bytes_local|bytes_total) — per-node counters
	memoryBandwidthMonitorNodeRegexp = regexp.MustCompile(`^bandwidth\.monitor\..+?\.node\..+?\.(id|bytes_local|bytes_total)$`)
)
// addMetrics groups the raw stats records by domain and statistic group
// and dispatches each group to its dedicated handler; when vcpu_mapping
// is enabled it additionally emits one libvirt_cpu_affinity point per vCPU.
func (l *Libvirt) addMetrics(stats []golibvirt.DomainStatsRecord, vcpuInfos map[string][]vcpuAffinity, acc telegraf.Accumulator) {
	for domainName, groups := range l.translateMetrics(stats) {
		for groupName, values := range groups {
			switch groupName {
			case "state":
				l.addStateMetrics(values, domainName, acc)
			case "cpu":
				l.addCPUMetrics(values, domainName, acc)
			case "balloon":
				l.addBalloonMetrics(values, domainName, acc)
			case "vcpu":
				l.addVcpuMetrics(values, domainName, vcpuInfos[domainName], acc)
			case "net":
				l.addInterfaceMetrics(values, domainName, acc)
			case "perf":
				l.addPerfMetrics(values, domainName, acc)
			case "block":
				l.addBlockMetrics(values, domainName, acc)
			case "iothread":
				l.addIothreadMetrics(values, domainName, acc)
			case "memory":
				l.addMemoryMetrics(values, domainName, acc)
			case "dirtyrate":
				l.addDirtyrateMetrics(values, domainName, acc)
			}
		}
	}

	if !l.vcpuMappingEnabled {
		return
	}
	for domainName, vcpuInfo := range vcpuInfos {
		for _, vcpu := range vcpuInfo {
			tags := map[string]string{
				"domain_name": domainName,
				"vcpu_id":     vcpu.vcpuID,
			}
			fields := map[string]interface{}{
				"cpu_id": vcpu.coresAffinity,
			}
			acc.AddFields("libvirt_cpu_affinity", fields, tags)
		}
	}
}
// translateMetrics reshapes the flat stats records into a nested map:
// domain name → statistic group (the first dot-separated segment of the
// field name) → remaining field name → value.
func (l *Libvirt) translateMetrics(stats []golibvirt.DomainStatsRecord) map[string]map[string]map[string]golibvirt.TypedParamValue {
	translated := make(map[string]map[string]map[string]golibvirt.TypedParamValue)
	for _, record := range stats {
		if record.Params == nil {
			continue
		}
		domainGroups, ok := translated[record.Dom.Name]
		if !ok {
			domainGroups = make(map[string]map[string]golibvirt.TypedParamValue)
			translated[record.Dom.Name] = domainGroups
		}
		for _, param := range record.Params {
			group := strings.Split(param.Field, ".")[0]
			groupFields, ok := domainGroups[group]
			if !ok {
				groupFields = make(map[string]golibvirt.TypedParamValue)
				domainGroups[group] = groupFields
			}
			groupFields[strings.TrimPrefix(param.Field, group+".")] = param.Value
		}
	}
	return translated
}
// addStateMetrics emits the libvirt_state measurement (state and reason
// fields) for one domain; nothing is emitted when neither field exists.
func (l *Libvirt) addStateMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) {
	fields := make(map[string]interface{})
	for key, metric := range metrics {
		if key == "state" || key == "reason" {
			fields[key] = metric.I
		}
	}
	if len(fields) == 0 {
		return
	}
	acc.AddFields("libvirt_state", fields, map[string]string{"domain_name": domainName})
}
// addCPUMetrics emits up to four measurements from the "cpu" statistics
// group of one domain: libvirt_cpu (plain counters),
// libvirt_cpu_cache_monitor_total (monitor count),
// libvirt_cpu_cache_monitor (per-monitor attributes) and
// libvirt_cpu_cache_monitor_bank (per-monitor, per-bank counters).
func (l *Libvirt) addCPUMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) {
	var cpuFields = make(map[string]interface{})
	var cpuCacheMonitorTotalFields = make(map[string]interface{})
	// keyed by cache monitor ID
	var cpuCacheMonitorData = make(map[string]map[string]interface{})
	// keyed by cache monitor ID, then bank index
	var cpuCacheMonitorBankData = make(map[string]map[string]map[string]interface{})
	var cpuTags = map[string]string{
		"domain_name": domainName,
	}
	for key, metric := range metrics {
		switch key {
		case "time", "user", "system":
			cpuFields[key] = metric.I
		case "haltpoll.success.time", "haltpoll.fail.time":
			// dots become underscores so the field name is line-protocol friendly
			cpuFields[strings.ReplaceAll(key, ".", "_")] = metric.I
		case "cache.monitor.count":
			cpuCacheMonitorTotalFields["count"] = metric.I
		default:
			// Collapse "bank.count" into one segment so the monitor keys
			// split into exactly 4 parts and the bank keys into 6.
			if strings.Contains(key, "bank.count") {
				key = strings.ReplaceAll(key, "bank.count", "bank_count")
			}
			cpuStat := strings.Split(key, ".")
			if len(cpuStat) == 4 && cpuCacheMonitorRegexp.MatchString(key) {
				// cache.monitor.<id>.<attr>
				cacheMonitorID := cpuStat[2]
				cpuCacheMonitorFields, ok := cpuCacheMonitorData[cacheMonitorID]
				if !ok {
					cpuCacheMonitorFields = make(map[string]interface{})
					cpuCacheMonitorData[cacheMonitorID] = cpuCacheMonitorFields
				}
				cpuCacheMonitorFields[cpuStat[3]] = metric.I
			} else if len(cpuStat) == 6 && cpuCacheMonitorBankRegexp.MatchString(key) {
				// cache.monitor.<id>.bank.<index>.<attr>
				cacheMonitorID := cpuStat[2]
				bankIndex := cpuStat[4]
				bankData, ok := cpuCacheMonitorBankData[cacheMonitorID]
				if !ok {
					bankData = make(map[string]map[string]interface{})
					cpuCacheMonitorBankData[cacheMonitorID] = bankData
				}
				bankFields, ok := cpuCacheMonitorBankData[cacheMonitorID][bankIndex]
				if !ok {
					bankFields = make(map[string]interface{})
					bankData[bankIndex] = bankFields
				}
				bankFields[cpuStat[5]] = metric.I
			}
		}
	}
	// Emit only non-empty field sets to avoid producing empty points.
	if len(cpuFields) > 0 {
		acc.AddFields("libvirt_cpu", cpuFields, cpuTags)
	}
	if len(cpuCacheMonitorTotalFields) > 0 {
		acc.AddFields("libvirt_cpu_cache_monitor_total", cpuCacheMonitorTotalFields, cpuTags)
	}
	for cpuID, cpuCacheMonitorFields := range cpuCacheMonitorData {
		if len(cpuCacheMonitorFields) > 0 {
			cpuCacheMonitorTags := map[string]string{
				"domain_name":      domainName,
				"cache_monitor_id": cpuID,
			}
			acc.AddFields("libvirt_cpu_cache_monitor", cpuCacheMonitorFields, cpuCacheMonitorTags)
		}
	}
	for cacheMonitorID, bankData := range cpuCacheMonitorBankData {
		for bankIndex, bankFields := range bankData {
			if len(bankFields) > 0 {
				bankTags := map[string]string{
					"domain_name":      domainName,
					"cache_monitor_id": cacheMonitorID,
					"bank_index":       bankIndex,
				}
				acc.AddFields("libvirt_cpu_cache_monitor_bank", bankFields, bankTags)
			}
		}
	}
}
// addBalloonMetrics emits the libvirt_balloon measurement for one
// domain; libvirt's "last-update" key is renamed to "last_update" so all
// field names share the underscore convention.
func (l *Libvirt) addBalloonMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) {
	fields := make(map[string]interface{})
	for key, metric := range metrics {
		switch key {
		case "current", "maximum", "swap_in", "swap_out", "major_fault", "minor_fault", "unused", "available",
			"rss", "usable", "disk_caches", "hugetlb_pgalloc", "hugetlb_pgfail":
			fields[key] = metric.I
		case "last-update":
			fields["last_update"] = metric.I
		}
	}
	if len(fields) == 0 {
		return
	}
	acc.AddFields("libvirt_balloon", fields, map[string]string{"domain_name": domainName})
}
// addVcpuMetrics emits libvirt_vcpu_total (current/maximum counts) and
// one libvirt_vcpu point per virtual CPU; when the current-pCPU lookup
// is enabled, each per-vCPU point also carries a cpu_id field.
func (l *Libvirt) addVcpuMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, vcpuInfos []vcpuAffinity, acc telegraf.Accumulator) {
	var vcpuTotalFields = make(map[string]interface{})
	// keyed by vCPU ID (keys look like "<id>.<field>")
	var vcpuData = make(map[string]map[string]interface{})
	var vcpuTotalTags = map[string]string{
		"domain_name": domainName,
	}
	for key, metric := range metrics {
		switch key {
		case "current", "maximum":
			vcpuTotalFields[key] = metric.I
		default:
			vcpuStat := strings.Split(key, ".")
			if len(vcpuStat) != 2 {
				continue
			}
			vcpuID := vcpuStat[0]
			fieldName := vcpuStat[1]
			vcpuFields, ok := vcpuData[vcpuID]
			if !ok {
				vcpuFields = make(map[string]interface{})
				vcpuData[vcpuID] = vcpuFields
			}
			switch fieldName {
			case "halted":
				// Store both a numeric 0/1 variant ("halted_i") and —
				// via the fallthrough below — the raw yes/no value
				// under "halted".
				haltedIntegerValue := 0
				if metric.I == "yes" {
					haltedIntegerValue = 1
				}
				vcpuFields["halted_i"] = haltedIntegerValue
				fallthrough
			case "state", "time", "wait", "delay":
				vcpuFields[fieldName] = metric.I
			}
		}
	}
	if len(vcpuTotalFields) > 0 {
		acc.AddFields("libvirt_vcpu_total", vcpuTotalFields, vcpuTotalTags)
	}
	for vcpuID, vcpuFields := range vcpuData {
		if len(vcpuFields) > 0 {
			vcpuTags := map[string]string{
				"domain_name": domainName,
				"vcpu_id":     vcpuID,
			}
			// -1 means "unknown / not requested"; only attach real IDs.
			if pCPUID := l.getCurrentPCPUForVCPU(vcpuID, vcpuInfos); pCPUID >= 0 {
				vcpuFields["cpu_id"] = pCPUID
			}
			acc.AddFields("libvirt_vcpu", vcpuFields, vcpuTags)
		}
	}
}
// getCurrentPCPUForVCPU returns the physical CPU the given vCPU is
// currently running on, or -1 when unknown or not requested.
func (l *Libvirt) getCurrentPCPUForVCPU(vcpuID string, vcpuInfos []vcpuAffinity) int32 {
	if !l.shouldGetCurrentPCPU() {
		return -1
	}
	for i := range vcpuInfos {
		if vcpuInfos[i].vcpuID == vcpuID {
			return vcpuInfos[i].currentPCPUID
		}
	}
	return -1
}
// addInterfaceMetrics emits libvirt_net_total (interface count) and one
// libvirt_net point per network interface of the domain. Keys look like
// "<interface-id>.<field>" with further dots normalized to underscores.
func (l *Libvirt) addInterfaceMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) {
	totals := make(map[string]interface{})
	perInterface := make(map[string]map[string]interface{})
	for key, metric := range metrics {
		if key == "count" {
			totals[key] = metric.I
			continue
		}
		parts := strings.SplitN(key, ".", 2)
		if len(parts) < 2 {
			continue
		}
		interfaceID := parts[0]
		fieldName := strings.ReplaceAll(parts[1], ".", "_")
		switch fieldName {
		case "name", "rx_bytes", "rx_pkts", "rx_errs", "rx_drop", "tx_bytes", "tx_pkts", "tx_errs", "tx_drop":
			fields := perInterface[interfaceID]
			if fields == nil {
				fields = make(map[string]interface{})
				perInterface[interfaceID] = fields
			}
			fields[fieldName] = metric.I
		}
	}
	if len(totals) > 0 {
		acc.AddFields("libvirt_net_total", totals, map[string]string{"domain_name": domainName})
	}
	for interfaceID, fields := range perInterface {
		if len(fields) == 0 {
			continue
		}
		acc.AddFields("libvirt_net", fields, map[string]string{
			"domain_name":  domainName,
			"interface_id": interfaceID,
		})
	}
}
// addPerfMetrics emits the libvirt_perf measurement (hardware and
// software perf-event counters) for one domain.
func (l *Libvirt) addPerfMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) {
	fields := make(map[string]interface{})
	for key, metric := range metrics {
		switch key {
		case "cmt", "mbmt", "mbml", "cpu_cycles", "instructions", "cache_references", "cache_misses",
			"branch_instructions", "branch_misses", "bus_cycles", "stalled_cycles_frontend", "stalled_cycles_backend",
			"ref_cpu_cycles", "cpu_clock", "task_clock", "page_faults", "context_switches",
			"cpu_migrations", "page_faults_min", "page_faults_maj", "alignment_faults", "emulation_faults":
			fields[key] = metric.I
		}
	}
	if len(fields) == 0 {
		return
	}
	acc.AddFields("libvirt_perf", fields, map[string]string{"domain_name": domainName})
}
// addBlockMetrics emits libvirt_block_total (device count) and one
// libvirt_block point per block device. Keys look like
// "<block-id>.<field>" with further dots normalized to underscores.
func (l *Libvirt) addBlockMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) {
	var blockTotalFields = make(map[string]interface{})
	// keyed by block device ID
	var blockData = make(map[string]map[string]interface{})
	var blockTotalTags = map[string]string{
		"domain_name": domainName,
	}
	for key, metric := range metrics {
		if key == "count" {
			blockTotalFields["count"] = metric.I
		} else {
			// split only once: everything after the device ID is the field name
			blockStat := strings.SplitN(key, ".", 2)
			if len(blockStat) < 2 {
				continue
			}
			blockID := blockStat[0]
			blockFields, ok := blockData[blockID]
			if !ok {
				blockFields = make(map[string]interface{})
				blockData[blockID] = blockFields
			}
			fieldName := strings.ReplaceAll(blockStat[1], ".", "_")
			switch fieldName {
			case "name", "backingIndex", "path", "rd_reqs", "rd_bytes", "rd_times", "wr_reqs", "wr_bytes", "wr_times",
				"fl_reqs", "fl_times", "errors", "allocation", "capacity", "physical", "threshold":
				blockFields[fieldName] = metric.I
			}
		}
	}
	if len(blockTotalFields) > 0 {
		acc.AddFields("libvirt_block_total", blockTotalFields, blockTotalTags)
	}
	// Skip devices for which no recognized field was seen.
	for blockID, blockFields := range blockData {
		if len(blockFields) > 0 {
			blockTags := map[string]string{
				"domain_name": domainName,
				"block_id":    blockID,
			}
			acc.AddFields("libvirt_block", blockFields, blockTags)
		}
	}
}
// addIothreadMetrics emits libvirt_iothread_total (thread count) and
// one libvirt_iothread point per I/O thread. Keys look like
// "<iothread-id>.<field>" with dashes normalized to underscores.
func (l *Libvirt) addIothreadMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) {
	totals := make(map[string]interface{})
	perThread := make(map[string]map[string]interface{})
	for key, metric := range metrics {
		if key == "count" {
			totals["count"] = metric.I
			continue
		}
		parts := strings.Split(key, ".")
		if len(parts) != 2 {
			continue
		}
		threadID := parts[0]
		fields := perThread[threadID]
		if fields == nil {
			fields = make(map[string]interface{})
			perThread[threadID] = fields
		}
		switch fieldName := strings.ReplaceAll(parts[1], "-", "_"); fieldName {
		case "poll_max_ns", "poll_grow", "poll_shrink":
			fields[fieldName] = metric.I
		}
	}
	if len(totals) > 0 {
		acc.AddFields("libvirt_iothread_total", totals, map[string]string{"domain_name": domainName})
	}
	for threadID, fields := range perThread {
		if len(fields) == 0 {
			continue
		}
		acc.AddFields("libvirt_iothread", fields, map[string]string{
			"domain_name": domainName,
			"iothread_id": threadID,
		})
	}
}
// addMemoryMetrics emits up to three measurements from the "memory"
// statistics group: libvirt_memory_bandwidth_monitor_total (monitor
// count), libvirt_memory_bandwidth_monitor (per-monitor attributes) and
// libvirt_memory_bandwidth_monitor_node (per-monitor, per-node counters).
func (l *Libvirt) addMemoryMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) {
	var memoryBandwidthMonitorTotalFields = make(map[string]interface{})
	// keyed by bandwidth monitor ID
	var memoryBandwidthMonitorData = make(map[string]map[string]interface{})
	// keyed by bandwidth monitor ID, then controller index
	var memoryBandwidthMonitorNodeData = make(map[string]map[string]map[string]interface{})
	var memoryBandwidthMonitorTotalTags = map[string]string{
		"domain_name": domainName,
	}
	for key, metric := range metrics {
		switch key {
		case "bandwidth.monitor.count":
			memoryBandwidthMonitorTotalFields["count"] = metric.I
		default:
			// Collapse the two-segment suffixes into one segment so the
			// monitor keys split into exactly 4 parts and the node keys
			// into 6.
			if strings.Contains(key, "node.count") {
				key = strings.ReplaceAll(key, "node.count", "node_count")
			} else if strings.Contains(key, "bytes.local") {
				key = strings.ReplaceAll(key, "bytes.local", "bytes_local")
			} else if strings.Contains(key, "bytes.total") {
				key = strings.ReplaceAll(key, "bytes.total", "bytes_total")
			}
			memoryStat := strings.Split(key, ".")
			if len(memoryStat) == 4 && memoryBandwidthMonitorRegexp.MatchString(key) {
				// bandwidth.monitor.<id>.<attr>
				memoryBandwidthMonitorID := memoryStat[2]
				memoryBandwidthMonitorFields, ok := memoryBandwidthMonitorData[memoryBandwidthMonitorID]
				if !ok {
					memoryBandwidthMonitorFields = make(map[string]interface{})
					memoryBandwidthMonitorData[memoryBandwidthMonitorID] = memoryBandwidthMonitorFields
				}
				memoryBandwidthMonitorFields[memoryStat[3]] = metric.I
			} else if len(memoryStat) == 6 && memoryBandwidthMonitorNodeRegexp.MatchString(key) {
				// bandwidth.monitor.<id>.node.<index>.<attr>
				memoryBandwidthMonitorID := memoryStat[2]
				controllerIndex := memoryStat[4]
				nodeData, ok := memoryBandwidthMonitorNodeData[memoryBandwidthMonitorID]
				if !ok {
					nodeData = make(map[string]map[string]interface{})
					memoryBandwidthMonitorNodeData[memoryBandwidthMonitorID] = nodeData
				}
				nodeFields, ok := memoryBandwidthMonitorNodeData[memoryBandwidthMonitorID][controllerIndex]
				if !ok {
					nodeFields = make(map[string]interface{})
					nodeData[controllerIndex] = nodeFields
				}
				nodeFields[memoryStat[5]] = metric.I
			}
		}
	}
	// Emit only non-empty field sets to avoid producing empty points.
	if len(memoryBandwidthMonitorTotalFields) > 0 {
		acc.AddFields("libvirt_memory_bandwidth_monitor_total", memoryBandwidthMonitorTotalFields, memoryBandwidthMonitorTotalTags)
	}
	for memoryBandwidthMonitorID, memoryFields := range memoryBandwidthMonitorData {
		if len(memoryFields) > 0 {
			tags := map[string]string{
				"domain_name":                 domainName,
				"memory_bandwidth_monitor_id": memoryBandwidthMonitorID,
			}
			acc.AddFields("libvirt_memory_bandwidth_monitor", memoryFields, tags)
		}
	}
	for memoryBandwidthMonitorID, nodeData := range memoryBandwidthMonitorNodeData {
		for controllerIndex, nodeFields := range nodeData {
			if len(nodeFields) > 0 {
				tags := map[string]string{
					"domain_name":                 domainName,
					"memory_bandwidth_monitor_id": memoryBandwidthMonitorID,
					"controller_index":            controllerIndex,
				}
				acc.AddFields("libvirt_memory_bandwidth_monitor_node", nodeFields, tags)
			}
		}
	}
}
// addDirtyrateMetrics emits libvirt_dirtyrate (domain-level calculation
// results) and one libvirt_dirtyrate_vcpu point per vCPU for keys of
// the form "vcpu.<id>.megabytes_per_second".
func (l *Libvirt) addDirtyrateMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) {
	domainFields := make(map[string]interface{})
	perVcpu := make(map[string]map[string]interface{})
	for key, metric := range metrics {
		switch key {
		case "calc_status", "calc_start_time", "calc_period",
			"megabytes_per_second", "calc_mode":
			domainFields[key] = metric.I
		default:
			parts := strings.Split(key, ".")
			if len(parts) != 3 || parts[0] != "vcpu" || parts[2] != "megabytes_per_second" {
				continue
			}
			vcpuID := parts[1]
			fields := perVcpu[vcpuID]
			if fields == nil {
				fields = make(map[string]interface{})
				perVcpu[vcpuID] = fields
			}
			fields[parts[2]] = metric.I
		}
	}
	if len(domainFields) > 0 {
		acc.AddFields("libvirt_dirtyrate", domainFields, map[string]string{"domain_name": domainName})
	}
	for vcpuID, fields := range perVcpu {
		if len(fields) == 0 {
			continue
		}
		acc.AddFields("libvirt_dirtyrate_vcpu", fields, map[string]string{
			"domain_name": domainName,
			"vcpu_id":     vcpuID,
		})
	}
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,146 @@
package libvirt
import (
"strconv"
"strings"
golibvirt "github.com/digitalocean/go-libvirt"
libvirtutils "github.com/thomasklein94/packer-plugin-libvirt/libvirt-utils"
)
// utils abstracts all libvirt access so the plugin can be unit-tested
// against a mock (see MockLibvirtUtils).
type utils interface {
	// GatherAllDomains lists every domain known to the hypervisor.
	GatherAllDomains() (domains []golibvirt.Domain, err error)
	// GatherStatsForDomains fetches the statistics groups encoded in metricNumber.
	GatherStatsForDomains(domains []golibvirt.Domain, metricNumber uint32) ([]golibvirt.DomainStatsRecord, error)
	// GatherNumberOfPCPUs returns the host's physical CPU count.
	GatherNumberOfPCPUs() (int, error)
	// GatherVcpuMapping returns per-vCPU affinity (and optionally the current pCPU) for a domain.
	GatherVcpuMapping(domain golibvirt.Domain, pCPUs int, shouldGetCurrentPCPU bool) ([]vcpuAffinity, error)
	// EnsureConnected dials libvirtURI unless a live connection already exists.
	EnsureConnected(libvirtURI string) error
	// Disconnect drops the current connection.
	Disconnect() error
}
// utilsImpl is the production implementation of utils, backed by a
// go-libvirt connection (nil until EnsureConnected succeeds).
type utilsImpl struct {
	libvirt *golibvirt.Libvirt
}
// vcpuAffinity describes where a single virtual CPU may and does run.
type vcpuAffinity struct {
	vcpuID        string // vCPU index, kept as a string for use as a tag value
	coresAffinity string // comma-separated list of pCPUs this vCPU may run on
	currentPCPUID int32  // pCPU the vCPU was observed running on, or -1 when unknown
}
// GatherAllDomains gathers all domains on system
func (l *utilsImpl) GatherAllDomains() ([]golibvirt.Domain, error) {
	// Combine every domain state so nothing is filtered out; the flags
	// are distinct bits, so OR-ing them equals the previous addition.
	allStates := golibvirt.ConnectListDomainsRunning | golibvirt.ConnectListDomainsPaused |
		golibvirt.ConnectListDomainsShutoff | golibvirt.ConnectListDomainsOther
	domains, _, err := l.libvirt.ConnectListAllDomains(1, allStates)
	return domains, err
}
// GatherStatsForDomains gathers stats for given domains based on number that was previously calculated
func (l *utilsImpl) GatherStatsForDomains(domains []golibvirt.Domain, metricNumber uint32) ([]golibvirt.DomainStatsRecord, error) {
	if metricNumber == 0 {
		// Skip the expensive RPC entirely when no statistics groups were requested.
		return []golibvirt.DomainStatsRecord{}, nil
	}
	// The state flags are distinct bits, so OR-ing them equals the
	// previous addition.
	allStates := golibvirt.ConnectGetAllDomainsStatsRunning | golibvirt.ConnectGetAllDomainsStatsPaused |
		golibvirt.ConnectGetAllDomainsStatsShutoff | golibvirt.ConnectGetAllDomainsStatsOther
	return l.libvirt.ConnectGetAllDomainStats(domains, metricNumber, allStates)
}
// GatherNumberOfPCPUs derives the host's physical CPU count from the
// node topology reported by libvirt (nodes × sockets × cores × threads).
func (l *utilsImpl) GatherNumberOfPCPUs() (int, error) {
	//nolint:dogsled //Using only needed values from library function
	_, _, _, _, nodes, sockets, cores, threads, err := l.libvirt.NodeGetInfo()
	if err != nil {
		return 0, err
	}
	total := nodes * sockets * cores * threads
	return int(total), nil
}
// GatherVcpuMapping is based on official go-libvirt library:
// https://github.com/libvirt/libvirt-go-module/blob/268a5d02e00cc9b3d5d7fa6c08d753071e7d14b8/domain.go#L4516
// (this library cannot be used here because of C bindings)
//
// It returns one vcpuAffinity entry per vCPU that has at least one
// allowed pCPU; currentPCPUID is filled only when shouldGetCurrentPCPU
// is true and the domain is active enough for DomainGetVcpus to succeed.
func (l *utilsImpl) GatherVcpuMapping(domain golibvirt.Domain, pCPUs int, shouldGetCurrentPCPU bool) ([]vcpuAffinity, error) {
	//nolint:dogsled //Using only needed values from library function
	_, _, _, vCPUs, _, err := l.libvirt.DomainGetInfo(domain)
	if err != nil {
		return nil, err
	}
	// The affinity data is a bitmap: one row of bytesToHoldPCPUs bytes
	// per vCPU, one bit per physical CPU (rounded up to whole bytes).
	bytesToHoldPCPUs := (pCPUs + 7) / 8
	cpuInfo, vcpuPinInfo, err := l.libvirt.DomainGetVcpus(domain, int32(vCPUs), int32(bytesToHoldPCPUs))
	if err != nil {
		// DomainGetVcpus gets not only affinity (1:N mapping from VCPU to PCPU)
		// but also realtime 1:1 mapping from VCPU to PCPU
		// Unfortunately it will return nothing (only error) for inactive domains -> for that case use
		// DomainGetVcpuPinInfo (which only gets affinity but even for inactive domains)
		vcpuPinInfo, _, err = l.libvirt.DomainGetVcpuPinInfo(domain, int32(vCPUs), int32(bytesToHoldPCPUs), uint32(golibvirt.DomainAffectCurrent))
		if err != nil {
			return nil, err
		}
	}
	var vcpuAffinities []vcpuAffinity
	for i := 0; i < int(vCPUs); i++ {
		var coresAffinity []string
		for j := 0; j < pCPUs; j++ {
			// Locate bit j of row i in the flattened bitmap.
			aByte := (i * bytesToHoldPCPUs) + (j / 8)
			aBit := j % 8
			if (vcpuPinInfo[aByte] & (1 << uint(aBit))) != 0 {
				coresAffinity = append(coresAffinity, strconv.Itoa(j))
			}
		}
		vcpu := vcpuAffinity{
			vcpuID:        strconv.FormatInt(int64(i), 10),
			coresAffinity: strings.Join(coresAffinity, ","),
			currentPCPUID: -1, // stays -1 when the realtime placement is unavailable
		}
		// cpuInfo is only populated when DomainGetVcpus succeeded above.
		if shouldGetCurrentPCPU && i < len(cpuInfo) {
			vcpu.currentPCPUID = cpuInfo[i].CPU
		}
		// Skip vCPUs with an empty affinity mask.
		if len(coresAffinity) > 0 {
			vcpuAffinities = append(vcpuAffinities, vcpu)
		}
	}
	return vcpuAffinities, nil
}
// EnsureConnected dials the given libvirt URI, reusing the existing
// connection when it is still alive.
func (l *utilsImpl) EnsureConnected(libvirtURI string) error {
	if !isConnected(l.libvirt) {
		driver, err := libvirtutils.ConnectByUriString(libvirtURI)
		if err != nil {
			return err
		}
		l.libvirt = driver
	}
	return nil
}
// Disconnect closes and drops the cached libvirt connection so the next
// EnsureConnected call dials a fresh one.
func (l *utilsImpl) Disconnect() error {
	driver := l.libvirt
	l.libvirt = nil
	if driver == nil {
		return nil
	}
	// Previously the handle was only dropped, leaking the underlying
	// socket; close it explicitly. NOTE(review): on an already-broken
	// connection this may now surface a close error — confirm callers
	// (handleError) tolerate that.
	return driver.Disconnect()
}
// isConnected reports whether driver holds a live libvirt connection:
// non-nil and its Disconnected channel has not been closed yet.
func isConnected(driver *golibvirt.Libvirt) bool {
	if driver == nil {
		return false
	}
	select {
	case <-driver.Disconnected():
		return false
	default:
		return true
	}
}

View File

@ -0,0 +1,146 @@
// Code generated by mockery v2.14.0. DO NOT EDIT.
package libvirt
import (
go_libvirt "github.com/digitalocean/go-libvirt"
mock "github.com/stretchr/testify/mock"
)
// MockLibvirtUtils is an autogenerated mock type for the utils type.
// Expectations are configured with On(...) before exercising the plugin in tests.
type MockLibvirtUtils struct {
	mock.Mock
}
// Disconnect provides a mock function with given fields:
// It returns the error configured for this call, or nil by default.
func (_m *MockLibvirtUtils) Disconnect() error {
	ret := _m.Called()

	var r0 error
	if rf, ok := ret.Get(0).(func() error); ok {
		r0 = rf()
	} else {
		r0 = ret.Error(0)
	}

	return r0
}
// EnsureConnected provides a mock function with given fields: libvirtURI
// It returns the error configured for this call, or nil by default.
func (_m *MockLibvirtUtils) EnsureConnected(libvirtURI string) error {
	ret := _m.Called(libvirtURI)

	var r0 error
	if rf, ok := ret.Get(0).(func(string) error); ok {
		r0 = rf(libvirtURI)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}
// GatherAllDomains provides a mock function with given fields:
// It returns the domain slice and error configured for this call.
func (_m *MockLibvirtUtils) GatherAllDomains() ([]go_libvirt.Domain, error) {
	ret := _m.Called()

	var r0 []go_libvirt.Domain
	if rf, ok := ret.Get(0).(func() []go_libvirt.Domain); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]go_libvirt.Domain)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func() error); ok {
		r1 = rf()
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}
// GatherNumberOfPCPUs provides a mock function with given fields:
// It returns the pCPU count and error configured for this call.
func (_m *MockLibvirtUtils) GatherNumberOfPCPUs() (int, error) {
	ret := _m.Called()

	var r0 int
	if rf, ok := ret.Get(0).(func() int); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(int)
	}

	var r1 error
	if rf, ok := ret.Get(1).(func() error); ok {
		r1 = rf()
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}
// GatherStatsForDomains provides a mock function with given fields: domains, metricNumber
// It returns the stats records and error configured for this call.
func (_m *MockLibvirtUtils) GatherStatsForDomains(domains []go_libvirt.Domain, metricNumber uint32) ([]go_libvirt.DomainStatsRecord, error) {
	ret := _m.Called(domains, metricNumber)

	var r0 []go_libvirt.DomainStatsRecord
	if rf, ok := ret.Get(0).(func([]go_libvirt.Domain, uint32) []go_libvirt.DomainStatsRecord); ok {
		r0 = rf(domains, metricNumber)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]go_libvirt.DomainStatsRecord)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func([]go_libvirt.Domain, uint32) error); ok {
		r1 = rf(domains, metricNumber)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}
// GatherVcpuMapping provides a mock function with given fields: domain, pCPUs, shouldGetCurrentPCPU
// It returns the vcpuAffinity slice and error configured for this call.
func (_m *MockLibvirtUtils) GatherVcpuMapping(domain go_libvirt.Domain, pCPUs int, shouldGetCurrentPCPU bool) ([]vcpuAffinity, error) {
	ret := _m.Called(domain, pCPUs, shouldGetCurrentPCPU)

	var r0 []vcpuAffinity
	if rf, ok := ret.Get(0).(func(go_libvirt.Domain, int, bool) []vcpuAffinity); ok {
		r0 = rf(domain, pCPUs, shouldGetCurrentPCPU)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]vcpuAffinity)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(go_libvirt.Domain, int, bool) error); ok {
		r1 = rf(domain, pCPUs, shouldGetCurrentPCPU)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}
// mockConstructorTestingTNewMockLibvirtUtils is the subset of testing.T
// that NewMockLibvirtUtils needs.
type mockConstructorTestingTNewMockLibvirtUtils interface {
	mock.TestingT
	Cleanup(func())
}

// NewMockLibvirtUtils creates a new instance of MockLibvirtUtils. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewMockLibvirtUtils(t mockConstructorTestingTNewMockLibvirtUtils) *MockLibvirtUtils {
	mock := &MockLibvirtUtils{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}

View File

@ -0,0 +1,29 @@
# The libvirt plugin collects statistics from virtualized guests using the libvirt virtualization API.
[[inputs.libvirt]]
## Domain names from which libvirt gathers statistics.
## By default (empty or missing array) the plugin gathers statistics from each domain registered in the host system.
# domains = []
## Libvirt connection URI with hypervisor.
## The plugin supports multiple transport protocols and approaches which are configurable via the URI.
## The general URI form: driver[+transport]://[username@][hostname][:port]/[path][?extraparameters]
## Supported transport protocols: ssh, tcp, tls, unix
## URI examples for each type of transport protocol:
## 1. SSH: qemu+ssh://<USER@IP_OR_HOSTNAME>/system?keyfile=/<PATH_TO_PRIVATE_KEY>&known_hosts=/<PATH_TO_known_hosts>
## 2. TCP: qemu+tcp://<IP_OR_HOSTNAME>/system
## 3. TLS: qemu+tls://<HOSTNAME>/system?pkipath=/certs_dir/<COMMON_LOCATION_OF_CACERT_AND_SERVER_CLIENT_CERTS>
## 4. UNIX: qemu+unix:///system?socket=/<PATH_TO_libvirt-sock>
## Default URI is qemu:///system
# libvirt_uri = "qemu:///system"
## Statistics groups for which the libvirt plugin will gather statistics.
## Supported statistics groups: state, cpu_total, balloon, vcpu, interface, block, perf, iothread, memory, dirtyrate
## Empty array means no metrics for statistics groups will be exposed by the plugin.
## By default the plugin will gather all available statistics.
# statistics_groups = ["state", "cpu_total", "balloon", "vcpu", "interface", "block", "perf", "iothread", "memory", "dirtyrate"]
## A list containing additional statistics to be exposed by libvirt plugin.
## Supported additional statistics: vcpu_mapping
## By default (empty or missing array) the plugin will not collect additional statistics.
# additional_statistics = []