fix(inputs.zfs): Parse metrics correctly on FreeBSD 14 (#14176)

Sven Rebhan 2023-10-25 23:21:06 +02:00 committed by GitHub
parent 4e84fc8925
commit 57eb71688f
9 changed files with 376 additions and 111 deletions

View File

@ -0,0 +1,16 @@
zfs_pool,health=ONLINE,pool=zroot allocated=11959578624i,capacity=17i,dedupratio=1,fragmentation=4i,free=55149285376i,size=67108864000i 1698172931876196974
zfs_dataset,dataset=zroot avail=53069045760i,used=11959443456i,usedds=0i,usedsnap=0i 1698172931876204237
zfs_dataset,dataset=zroot/ROOT avail=53069045760i,used=4685123584i,usedds=0i,usedsnap=0i 1698172931876207310
zfs_dataset,dataset=zroot/ROOT/default avail=53069045760i,used=4684689408i,usedds=0i,usedsnap=0i 1698172931876213735
zfs_dataset,dataset=zroot/home avail=53069045760i,used=7263580160i,usedds=0i,usedsnap=0i 1698172931876217926
zfs_dataset,dataset=zroot/tmp avail=53069045760i,used=684032i,usedds=0i,usedsnap=0i 1698172931876220720
zfs_dataset,dataset=zroot/usr avail=53069045760i,used=1724416i,usedds=0i,usedsnap=0i 1698172931876224631
zfs_dataset,dataset=zroot/usr/obj avail=53069045760i,used=430080i,usedds=0i,usedsnap=0i 1698172931876227424
zfs_dataset,dataset=zroot/usr/ports avail=53069045760i,used=430080i,usedds=0i,usedsnap=0i 1698172931876230218
zfs_dataset,dataset=zroot/usr/src avail=53069045760i,used=430080i,usedds=0i,usedsnap=0i 1698172931876233291
zfs_dataset,dataset=zroot/var avail=53069045760i,used=2269184i,usedds=0i,usedsnap=0i 1698172931876237481
zfs_dataset,dataset=zroot/var/audit avail=53069045760i,used=438272i,usedds=0i,usedsnap=0i 1698172931876240554
zfs_dataset,dataset=zroot/var/log avail=53069045760i,used=544768i,usedds=0i,usedsnap=0i 1698172931876243348
zfs_dataset,dataset=zroot/var/mail avail=53069045760i,used=425984i,usedds=0i,usedsnap=0i 1698172931876246980
zfs_dataset,dataset=zroot/var/tmp avail=53069045760i,used=425984i,usedds=0i,usedsnap=0i 1698172931876249774
zfs,datasets=zroot::zroot/ROOT::zroot/ROOT/default::zroot/home::zroot/tmp::zroot/usr::zroot/usr/obj::zroot/usr/ports::zroot/usr/src::zroot/var::zroot/var/audit::zroot/var/log::zroot/var/mail::zroot/var/tmp,pools=zroot zfetchstats_hits=6439i,zfetchstats_io_active=0i,zfetchstats_io_issued=2197i,zfetchstats_max_streams=9411i,zfetchstats_misses=14538i 1698172931876287767

View File

@ -0,0 +1,159 @@
{
"archstats": [
"kstat.zfs.misc.arcstats.abd_chunk_waste_size: 626176",
"kstat.zfs.misc.arcstats.cached_only_in_progress: 0",
"kstat.zfs.misc.arcstats.arc_raw_size: 0",
"kstat.zfs.misc.arcstats.arc_sys_free: 0",
"kstat.zfs.misc.arcstats.arc_need_free: 0",
"kstat.zfs.misc.arcstats.demand_iohit_prescient_prefetch: 1",
"kstat.zfs.misc.arcstats.demand_hit_prescient_prefetch: 14",
"kstat.zfs.misc.arcstats.prescient_prefetch: 15",
"kstat.zfs.misc.arcstats.demand_iohit_predictive_prefetch: 218",
"kstat.zfs.misc.arcstats.demand_hit_predictive_prefetch: 1778",
"kstat.zfs.misc.arcstats.predictive_prefetch: 5099",
"kstat.zfs.misc.arcstats.async_upgrade_sync: 135",
"kstat.zfs.misc.arcstats.arc_dnode_limit: 317881139",
"kstat.zfs.misc.arcstats.arc_meta_used: 80185232",
"kstat.zfs.misc.arcstats.arc_prune: 25",
"kstat.zfs.misc.arcstats.arc_loaned_bytes: 0",
"kstat.zfs.misc.arcstats.arc_tempreserve: 0",
"kstat.zfs.misc.arcstats.arc_no_grow: 0",
"kstat.zfs.misc.arcstats.memory_available_bytes: 3663839232",
"kstat.zfs.misc.arcstats.memory_free_bytes: 3752042496",
"kstat.zfs.misc.arcstats.memory_all_bytes: 4252553216",
"kstat.zfs.misc.arcstats.memory_indirect_count: 0",
"kstat.zfs.misc.arcstats.memory_direct_count: 0",
"kstat.zfs.misc.arcstats.memory_throttle_count: 0",
"kstat.zfs.misc.arcstats.l2_rebuild_log_blks: 0",
"kstat.zfs.misc.arcstats.l2_rebuild_bufs_precached: 0",
"kstat.zfs.misc.arcstats.l2_rebuild_bufs: 0",
"kstat.zfs.misc.arcstats.l2_rebuild_asize: 0",
"kstat.zfs.misc.arcstats.l2_rebuild_size: 0",
"kstat.zfs.misc.arcstats.l2_rebuild_lowmem: 0",
"kstat.zfs.misc.arcstats.l2_rebuild_cksum_lb_errors: 0",
"kstat.zfs.misc.arcstats.l2_rebuild_dh_errors: 0",
"kstat.zfs.misc.arcstats.l2_rebuild_io_errors: 0",
"kstat.zfs.misc.arcstats.l2_rebuild_unsupported: 0",
"kstat.zfs.misc.arcstats.l2_rebuild_success: 0",
"kstat.zfs.misc.arcstats.l2_data_to_meta_ratio: 0",
"kstat.zfs.misc.arcstats.l2_log_blk_count: 0",
"kstat.zfs.misc.arcstats.l2_log_blk_asize: 0",
"kstat.zfs.misc.arcstats.l2_log_blk_avg_asize: 0",
"kstat.zfs.misc.arcstats.l2_log_blk_writes: 0",
"kstat.zfs.misc.arcstats.l2_hdr_size: 0",
"kstat.zfs.misc.arcstats.l2_asize: 0",
"kstat.zfs.misc.arcstats.l2_size: 0",
"kstat.zfs.misc.arcstats.l2_io_error: 0",
"kstat.zfs.misc.arcstats.l2_cksum_bad: 0",
"kstat.zfs.misc.arcstats.l2_abort_lowmem: 0",
"kstat.zfs.misc.arcstats.l2_free_on_write: 0",
"kstat.zfs.misc.arcstats.l2_evict_l1cached: 0",
"kstat.zfs.misc.arcstats.l2_evict_reading: 0",
"kstat.zfs.misc.arcstats.l2_evict_lock_retry: 0",
"kstat.zfs.misc.arcstats.l2_writes_lock_retry: 0",
"kstat.zfs.misc.arcstats.l2_writes_error: 0",
"kstat.zfs.misc.arcstats.l2_writes_done: 0",
"kstat.zfs.misc.arcstats.l2_writes_sent: 0",
"kstat.zfs.misc.arcstats.l2_write_bytes: 0",
"kstat.zfs.misc.arcstats.l2_read_bytes: 0",
"kstat.zfs.misc.arcstats.l2_rw_clash: 0",
"kstat.zfs.misc.arcstats.l2_feeds: 0",
"kstat.zfs.misc.arcstats.l2_bufc_metadata_asize: 0",
"kstat.zfs.misc.arcstats.l2_bufc_data_asize: 0",
"kstat.zfs.misc.arcstats.l2_mfu_asize: 0",
"kstat.zfs.misc.arcstats.l2_mru_asize: 0",
"kstat.zfs.misc.arcstats.l2_prefetch_asize: 0",
"kstat.zfs.misc.arcstats.l2_misses: 0",
"kstat.zfs.misc.arcstats.l2_hits: 0",
"kstat.zfs.misc.arcstats.uncached_evictable_metadata: 0",
"kstat.zfs.misc.arcstats.uncached_evictable_data: 0",
"kstat.zfs.misc.arcstats.uncached_metadata: 0",
"kstat.zfs.misc.arcstats.uncached_data: 0",
"kstat.zfs.misc.arcstats.uncached_size: 0",
"kstat.zfs.misc.arcstats.mfu_ghost_evictable_metadata: 35231232",
"kstat.zfs.misc.arcstats.mfu_ghost_evictable_data: 31780352",
"kstat.zfs.misc.arcstats.mfu_ghost_metadata: 35231232",
"kstat.zfs.misc.arcstats.mfu_ghost_data: 31780352",
"kstat.zfs.misc.arcstats.mfu_ghost_size: 67011584",
"kstat.zfs.misc.arcstats.mfu_evictable_metadata: 648192",
"kstat.zfs.misc.arcstats.mfu_evictable_data: 29985280",
"kstat.zfs.misc.arcstats.mfu_metadata: 9697280",
"kstat.zfs.misc.arcstats.mfu_data: 31448576",
"kstat.zfs.misc.arcstats.mfu_size: 41145856",
"kstat.zfs.misc.arcstats.mru_ghost_evictable_metadata: 27670528",
"kstat.zfs.misc.arcstats.mru_ghost_evictable_data: 39195136",
"kstat.zfs.misc.arcstats.mru_ghost_metadata: 27670528",
"kstat.zfs.misc.arcstats.mru_ghost_data: 39195136",
"kstat.zfs.misc.arcstats.mru_ghost_size: 66865664",
"kstat.zfs.misc.arcstats.mru_evictable_metadata: 4007424",
"kstat.zfs.misc.arcstats.mru_evictable_data: 21310976",
"kstat.zfs.misc.arcstats.mru_metadata: 32238592",
"kstat.zfs.misc.arcstats.mru_data: 25252864",
"kstat.zfs.misc.arcstats.mru_size: 57491456",
"kstat.zfs.misc.arcstats.anon_evictable_metadata: 0",
"kstat.zfs.misc.arcstats.anon_evictable_data: 0",
"kstat.zfs.misc.arcstats.anon_metadata: 0",
"kstat.zfs.misc.arcstats.anon_data: 0",
"kstat.zfs.misc.arcstats.anon_size: 0",
"kstat.zfs.misc.arcstats.other_size: 36376512",
"kstat.zfs.misc.arcstats.bonus_size: 7930560",
"kstat.zfs.misc.arcstats.dnode_size: 20521584",
"kstat.zfs.misc.arcstats.dbuf_size: 7924368",
"kstat.zfs.misc.arcstats.metadata_size: 41935872",
"kstat.zfs.misc.arcstats.data_size: 56701440",
"kstat.zfs.misc.arcstats.hdr_size: 1872848",
"kstat.zfs.misc.arcstats.overhead_size: 29338112",
"kstat.zfs.misc.arcstats.uncompressed_size: 121843712",
"kstat.zfs.misc.arcstats.compressed_size: 69299200",
"kstat.zfs.misc.arcstats.size: 137512848",
"kstat.zfs.misc.arcstats.c_max: 3178811392",
"kstat.zfs.misc.arcstats.c_min: 132892288",
"kstat.zfs.misc.arcstats.c: 176014976",
"kstat.zfs.misc.arcstats.pm: 2306048980",
"kstat.zfs.misc.arcstats.pd: 1302662522",
"kstat.zfs.misc.arcstats.meta: 1110189320",
"kstat.zfs.misc.arcstats.hash_chain_max: 4",
"kstat.zfs.misc.arcstats.hash_chains: 68",
"kstat.zfs.misc.arcstats.hash_collisions: 15034",
"kstat.zfs.misc.arcstats.hash_elements_max: 91215",
"kstat.zfs.misc.arcstats.hash_elements: 7890",
"kstat.zfs.misc.arcstats.evict_l2_skip: 0",
"kstat.zfs.misc.arcstats.evict_l2_ineligible: 65306624",
"kstat.zfs.misc.arcstats.evict_l2_eligible_mru: 1369077760",
"kstat.zfs.misc.arcstats.evict_l2_eligible_mfu: 940835840",
"kstat.zfs.misc.arcstats.evict_l2_eligible: 2309913600",
"kstat.zfs.misc.arcstats.evict_l2_cached: 0",
"kstat.zfs.misc.arcstats.evict_not_enough: 12",
"kstat.zfs.misc.arcstats.evict_skip: 445",
"kstat.zfs.misc.arcstats.access_skip: 0",
"kstat.zfs.misc.arcstats.mutex_miss: 0",
"kstat.zfs.misc.arcstats.deleted: 90151",
"kstat.zfs.misc.arcstats.uncached_hits: 0",
"kstat.zfs.misc.arcstats.mfu_ghost_hits: 6108",
"kstat.zfs.misc.arcstats.mfu_hits: 1240173",
"kstat.zfs.misc.arcstats.mru_ghost_hits: 2692",
"kstat.zfs.misc.arcstats.mru_hits: 233633",
"kstat.zfs.misc.arcstats.prefetch_metadata_misses: 406",
"kstat.zfs.misc.arcstats.prefetch_metadata_iohits: 2315",
"kstat.zfs.misc.arcstats.prefetch_metadata_hits: 196",
"kstat.zfs.misc.arcstats.prefetch_data_misses: 2092",
"kstat.zfs.misc.arcstats.prefetch_data_iohits: 0",
"kstat.zfs.misc.arcstats.prefetch_data_hits: 105",
"kstat.zfs.misc.arcstats.demand_metadata_misses: 12021",
"kstat.zfs.misc.arcstats.demand_metadata_iohits: 87",
"kstat.zfs.misc.arcstats.demand_metadata_hits: 906938",
"kstat.zfs.misc.arcstats.demand_data_misses: 48482",
"kstat.zfs.misc.arcstats.demand_data_iohits: 128",
"kstat.zfs.misc.arcstats.demand_data_hits: 566567",
"kstat.zfs.misc.arcstats.misses: 63001",
"kstat.zfs.misc.arcstats.iohits: 2530",
"kstat.zfs.misc.arcstats.hits: 1473806"
],
"zfetchstats": [
"kstat.zfs.misc.zfetchstats.io_active: 0",
"kstat.zfs.misc.zfetchstats.io_issued: 2197",
"kstat.zfs.misc.zfetchstats.max_streams: 9411",
"kstat.zfs.misc.zfetchstats.misses: 14538",
"kstat.zfs.misc.zfetchstats.hits: 6439"
]
}
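
Each entry in the test fixture above is a raw sysctl line of the form kstat.zfs.misc.<group>.<stat>: <value>. The reworked Gather() later in this diff splits such a line on ": ", takes the fifth dot-separated component as the stat name, prefixes it with the metric group, and parses the value as a signed 64-bit integer. Below is a minimal standalone sketch of that conversion; the parseKstatLine helper and its error handling are the editor's own and not part of the plugin.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseKstatLine is a hypothetical helper mirroring the plugin's parsing:
// for "kstat.zfs.misc.arcstats.hits: 1473806" the field key becomes
// "<group>_<stat>" (stat = fifth dot-separated component) and the value is
// parsed as a signed 64-bit integer.
func parseKstatLine(metric, line string) (string, int64, error) {
	rawData := strings.SplitN(line, ": ", 2)
	if len(rawData) != 2 {
		return "", 0, fmt.Errorf("unexpected sysctl line %q", line)
	}
	parts := strings.Split(rawData[0], ".")
	if len(parts) < 5 {
		return "", 0, fmt.Errorf("unexpected sysctl key %q", rawData[0])
	}
	value, err := strconv.ParseInt(rawData[1], 10, 64)
	return metric + "_" + parts[4], value, err
}

func main() {
	key, value, err := parseKstatLine("arcstats", "kstat.zfs.misc.arcstats.hits: 1473806")
	if err != nil {
		panic(err)
	}
	fmt.Println(key, value) // arcstats_hits 1473806
}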

View File

@ -0,0 +1,4 @@
[[inputs.zfs]]
kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
poolMetrics = true
datasetMetrics = true

View File

@ -0,0 +1 @@
14.0-RC2
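
This one-line fixture is the FreeBSD release string that the test feeds into the plugin's new uname hook. The Init() added in zfs_freebsd.go below derives the major version from it by splitting on the first dot and parsing the leading component, so 14.0-RC2 selects the FreeBSD 14 behaviour. A tiny standalone sketch of that step, using a helper name of our own:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// majorVersion mirrors the version detection added in Init(): everything
// before the first "." of the uname release string is parsed as an integer,
// so "14.0-RC2" yields 14 and "13.2-STABLE" yields 13.
func majorVersion(release string) (int64, error) {
	parts := strings.SplitN(release, ".", 2)
	return strconv.ParseInt(parts[0], 10, 64)
}

func main() {
	version, err := majorVersion("14.0-RC2")
	if err != nil {
		panic(err)
	}
	fmt.Println(version) // 14
}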

View File

@ -0,0 +1,14 @@
zroot 53069045760 11959443456 - -
zroot/ROOT 53069045760 4685123584 - -
zroot/ROOT/default 53069045760 4684689408 - -
zroot/home 53069045760 7263580160 - -
zroot/tmp 53069045760 684032 - -
zroot/usr 53069045760 1724416 - -
zroot/usr/obj 53069045760 430080 - -
zroot/usr/ports 53069045760 430080 - -
zroot/usr/src 53069045760 430080 - -
zroot/var 53069045760 2269184 - -
zroot/var/audit 53069045760 438272 - -
zroot/var/log 53069045760 544768 - -
zroot/var/mail 53069045760 425984 - -
zroot/var/tmp 53069045760 425984 - -
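
Note that the last two columns of every dataset line above are "-", which is why expected.out reports usedds=0i and usedsnap=0i: gatherDatasetStats (later in this diff) treats a "-" entry as zero. A minimal standalone sketch of that per-line conversion follows; the parseDatasetLine helper and the exact property order are illustrative, since the property list passed to zfs list lives outside the hunks shown here.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseDatasetLine is a hypothetical helper mirroring how gatherDatasetStats
// maps one tab-separated "zfs list" line onto the requested properties;
// a "-" entry (such as usedds/usedsnap in the fixture) is treated as zero.
func parseDatasetLine(line string, properties []string) (string, map[string]interface{}, error) {
	col := strings.Split(line, "\t")
	if len(col) != len(properties) {
		return "", nil, fmt.Errorf("invalid number of columns for line: %s", line)
	}
	fields := make(map[string]interface{}, len(properties)-1)
	for i, key := range properties[1:] {
		if col[i+1] == "-" {
			fields[key] = int64(0)
			continue
		}
		value, err := strconv.ParseInt(col[i+1], 10, 64)
		if err != nil {
			return "", nil, fmt.Errorf("parsing %s %q: %w", key, col[i+1], err)
		}
		fields[key] = value
	}
	return col[0], fields, nil
}

func main() {
	// Illustrative property order; the real list is defined outside this diff.
	properties := []string{"name", "avail", "used", "usedsnap", "usedds"}
	name, fields, err := parseDatasetLine("zroot/home\t53069045760\t7263580160\t-\t-", properties)
	if err != nil {
		panic(err)
	}
	fmt.Println(name, fields) // zroot/home map[avail:53069045760 used:7263580160 usedds:0 usedsnap:0]
}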

View File

@ -0,0 +1 @@
zroot ONLINE 67108864000 11959578624 55149285376 4 17 1.00
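
This single fixture line carries the eight tab-separated columns gatherPoolStats expects: name, health, size, allocated, free, fragmentation, capacity, and dedupratio. Two of them get special handling: a fragmentation value of "-" (seen on read-only devices) falls back to 0, and a trailing "x" on dedupratio is trimmed before float parsing. A small sketch of just those two conversions, with hypothetical helper names:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseFragmentation mirrors the plugin's fallback: a fragmentation column of
// "-" (possible on read-only devices) simply becomes 0 instead of an error.
func parseFragmentation(col string) int64 {
	frag, err := strconv.ParseInt(strings.TrimSuffix(col, "%"), 10, 0)
	if err != nil {
		return 0
	}
	return frag
}

// parseDedupRatio trims an optional trailing "x" (as printed by zpool) before
// parsing the ratio as a float.
func parseDedupRatio(col string) (float64, error) {
	return strconv.ParseFloat(strings.TrimSuffix(col, "x"), 32)
}

func main() {
	fmt.Println(parseFragmentation("4")) // 4
	fmt.Println(parseFragmentation("-")) // 0
	ratio, err := parseDedupRatio("1.00")
	if err != nil {
		panic(err)
	}
	fmt.Println(ratio) // 1
}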

View File

@ -13,16 +13,20 @@ var sampleConfig string
type Sysctl func(metric string) ([]string, error)
type Zpool func() ([]string, error)
type Zdataset func(properties []string) ([]string, error)
type Uname func() (string, error)
type Zfs struct {
KstatPath string
KstatMetrics []string
PoolMetrics bool
DatasetMetrics bool
sysctl Sysctl //nolint:unused // False positive - this var is used for non-default build tag: freebsd
zpool Zpool //nolint:unused // False positive - this var is used for non-default build tag: freebsd
zdataset Zdataset //nolint:unused // False positive - this var is used for non-default build tag: freebsd
Log telegraf.Logger `toml:"-"`
sysctl Sysctl //nolint:unused // False positive - this var is used for non-default build tag: freebsd
zpool Zpool //nolint:unused // False positive - this var is used for non-default build tag: freebsd
zdataset Zdataset //nolint:unused // False positive - this var is used for non-default build tag: freebsd
uname Uname //nolint:unused // False positive - this var is used for non-default build tag: freebsd
version int64 //nolint:unused // False positive - this var is used for non-default build tag: freebsd
}
func (*Zfs) SampleConfig() string {

View File

@ -11,8 +11,84 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"golang.org/x/sys/unix"
)
func (z *Zfs) Init() error {
// Determine the kernel version to adapt parsing
release, err := z.uname()
if err != nil {
return fmt.Errorf("determining uname failed: %w", err)
}
parts := strings.SplitN(release, ".", 2)
z.version, err = strconv.ParseInt(parts[0], 10, 64)
if err != nil {
return fmt.Errorf("determining version from %q failed: %w", release, err)
}
// Set up default metrics if they are not specified.
// Please note that starting from FreeBSD 14 the 'vdev_cache_stats' are
// no longer available.
if len(z.KstatMetrics) == 0 {
if z.version < 14 {
z.KstatMetrics = []string{"arcstats", "zfetchstats", "vdev_cache_stats"}
} else {
z.KstatMetrics = []string{"arcstats", "zfetchstats"}
}
}
return nil
}
func (z *Zfs) Gather(acc telegraf.Accumulator) error {
tags := map[string]string{}
poolNames, err := z.gatherPoolStats(acc)
if err != nil {
return err
}
if poolNames != "" {
tags["pools"] = poolNames
}
datasetNames, err := z.gatherDatasetStats(acc)
if err != nil {
return err
}
if datasetNames != "" {
tags["datasets"] = datasetNames
}
// Gather information from the kernel using sysctl
fields := make(map[string]interface{})
var removeIndices []int
for i, metric := range z.KstatMetrics {
stdout, err := z.sysctl(metric)
if err != nil {
z.Log.Warnf("sysctl for 'kstat.zfs.misc.%s' failed: %v; removing metric", metric, err)
removeIndices = append(removeIndices, i)
continue
}
for _, line := range stdout {
rawData := strings.Split(line, ": ")
key := metric + "_" + strings.Split(rawData[0], ".")[4]
value, _ := strconv.ParseInt(rawData[1], 10, 64)
fields[key] = value
}
}
acc.AddFields("zfs", fields, tags)
// Remove the invalid kstat metrics
if len(removeIndices) > 0 {
for i := len(removeIndices) - 1; i >= 0; i-- {
idx := removeIndices[i]
z.KstatMetrics = append(z.KstatMetrics[:idx], z.KstatMetrics[idx+1:]...)
}
}
return nil
}
func (z *Zfs) gatherPoolStats(acc telegraf.Accumulator) (string, error) {
lines, err := z.zpool()
if err != nil {
@ -22,62 +98,63 @@ func (z *Zfs) gatherPoolStats(acc telegraf.Accumulator) (string, error) {
pools := []string{}
for _, line := range lines {
col := strings.Split(line, "\t")
pools = append(pools, col[0])
}
if z.PoolMetrics {
for _, line := range lines {
col := strings.Split(line, "\t")
if len(col) != 8 {
continue
}
if !z.PoolMetrics {
return strings.Join(pools, "::"), nil
}
tags := map[string]string{"pool": col[0], "health": col[1]}
fields := map[string]interface{}{}
if tags["health"] == "UNAVAIL" {
fields["size"] = int64(0)
} else {
size, err := strconv.ParseInt(col[2], 10, 64)
if err != nil {
return "", fmt.Errorf("Error parsing size: %s", err)
}
fields["size"] = size
alloc, err := strconv.ParseInt(col[3], 10, 64)
if err != nil {
return "", fmt.Errorf("Error parsing allocation: %s", err)
}
fields["allocated"] = alloc
free, err := strconv.ParseInt(col[4], 10, 64)
if err != nil {
return "", fmt.Errorf("Error parsing free: %s", err)
}
fields["free"] = free
frag, err := strconv.ParseInt(strings.TrimSuffix(col[5], "%"), 10, 0)
if err != nil { // This might be - for RO devs
frag = 0
}
fields["fragmentation"] = frag
capval, err := strconv.ParseInt(col[6], 10, 0)
if err != nil {
return "", fmt.Errorf("Error parsing capacity: %s", err)
}
fields["capacity"] = capval
dedup, err := strconv.ParseFloat(strings.TrimSuffix(col[7], "x"), 32)
if err != nil {
return "", fmt.Errorf("Error parsing dedupratio: %s", err)
}
fields["dedupratio"] = dedup
}
acc.AddFields("zfs_pool", fields, tags)
for _, line := range lines {
col := strings.Split(line, "\t")
if len(col) != 8 {
continue
}
tags := map[string]string{"pool": col[0], "health": col[1]}
fields := map[string]interface{}{}
if tags["health"] == "UNAVAIL" {
fields["size"] = int64(0)
} else {
size, err := strconv.ParseInt(col[2], 10, 64)
if err != nil {
return "", fmt.Errorf("Error parsing size: %s", err)
}
fields["size"] = size
alloc, err := strconv.ParseInt(col[3], 10, 64)
if err != nil {
return "", fmt.Errorf("Error parsing allocation: %s", err)
}
fields["allocated"] = alloc
free, err := strconv.ParseInt(col[4], 10, 64)
if err != nil {
return "", fmt.Errorf("Error parsing free: %s", err)
}
fields["free"] = free
frag, err := strconv.ParseInt(strings.TrimSuffix(col[5], "%"), 10, 0)
if err != nil { // This might be - for RO devs
frag = 0
}
fields["fragmentation"] = frag
capval, err := strconv.ParseInt(col[6], 10, 0)
if err != nil {
return "", fmt.Errorf("Error parsing capacity: %s", err)
}
fields["capacity"] = capval
dedup, err := strconv.ParseFloat(strings.TrimSuffix(col[7], "x"), 32)
if err != nil {
return "", fmt.Errorf("Error parsing dedupratio: %s", err)
}
fields["dedupratio"] = dedup
}
acc.AddFields("zfs_pool", fields, tags)
}
return strings.Join(pools, "::"), nil
@ -97,72 +174,39 @@ func (z *Zfs) gatherDatasetStats(acc telegraf.Accumulator) (string, error) {
datasets = append(datasets, col[0])
}
if z.DatasetMetrics {
for _, line := range lines {
col := strings.Split(line, "\t")
if len(col) != len(properties) {
z.Log.Warnf("Invalid number of columns for line: %s", line)
if !z.DatasetMetrics {
return strings.Join(datasets, "::"), nil
}
for _, line := range lines {
col := strings.Split(line, "\t")
if len(col) != len(properties) {
z.Log.Warnf("Invalid number of columns for line: %s", line)
continue
}
tags := map[string]string{"dataset": col[0]}
fields := map[string]interface{}{}
for i, key := range properties[1:] {
// Treat '-' entries as zero
if col[i+1] == "-" {
fields[key] = int64(0)
continue
}
tags := map[string]string{"dataset": col[0]}
fields := map[string]interface{}{}
for i, key := range properties[1:] {
value, err := strconv.ParseInt(col[i+1], 10, 64)
if err != nil {
return "", fmt.Errorf("Error parsing %s %q: %s", key, col[i+1], err)
}
fields[key] = value
value, err := strconv.ParseInt(col[i+1], 10, 64)
if err != nil {
return "", fmt.Errorf("Error parsing %s %q: %s", key, col[i+1], err)
}
acc.AddFields("zfs_dataset", fields, tags)
fields[key] = value
}
acc.AddFields("zfs_dataset", fields, tags)
}
return strings.Join(datasets, "::"), nil
}
func (z *Zfs) Gather(acc telegraf.Accumulator) error {
kstatMetrics := z.KstatMetrics
if len(kstatMetrics) == 0 {
kstatMetrics = []string{"arcstats", "zfetchstats", "vdev_cache_stats"}
}
tags := map[string]string{}
poolNames, err := z.gatherPoolStats(acc)
if err != nil {
return err
}
if poolNames != "" {
tags["pools"] = poolNames
}
datasetNames, err := z.gatherDatasetStats(acc)
if err != nil {
return err
}
if datasetNames != "" {
tags["datasets"] = datasetNames
}
fields := make(map[string]interface{})
for _, metric := range kstatMetrics {
stdout, err := z.sysctl(metric)
if err != nil {
return err
}
for _, line := range stdout {
rawData := strings.Split(line, ": ")
key := metric + "_" + strings.Split(rawData[0], ".")[4]
value, _ := strconv.ParseInt(rawData[1], 10, 64)
fields[key] = value
}
}
acc.AddFields("zfs", fields, tags)
return nil
}
func run(command string, args ...string) ([]string, error) {
cmd := exec.Command(command, args...)
var outbuf, errbuf bytes.Buffer
@ -194,12 +238,22 @@ func sysctl(metric string) ([]string, error) {
return run("sysctl", []string{"-q", fmt.Sprintf("kstat.zfs.misc.%s", metric)}...)
}
func uname() (string, error) {
var info unix.Utsname
if err := unix.Uname(&info); err != nil {
return "", err
}
release := unix.ByteSliceToString(info.Release[:])
return release, nil
}
func init() {
inputs.Add("zfs", func() telegraf.Input {
return &Zfs{
sysctl: sysctl,
zpool: zpool,
zdataset: zdataset,
uname: uname,
}
})
}

View File

@ -44,6 +44,7 @@ func TestCases(t *testing.T) {
inputSysctlFilename := filepath.Join(testcasePath, "sysctl.json")
inputZPoolFilename := filepath.Join(testcasePath, "zpool.txt")
inputZDatasetFilename := filepath.Join(testcasePath, "zdataset.txt")
inputUnameFilename := filepath.Join(testcasePath, "uname.txt")
expectedFilename := filepath.Join(testcasePath, "expected.out")
// Load the input data
@ -51,11 +52,20 @@ func TestCases(t *testing.T) {
require.NoError(t, err)
var sysctl map[string][]string
require.NoError(t, json.Unmarshal(buf, &sysctl))
zpool, err := testutil.ParseLinesFromFile(inputZPoolFilename)
require.NoError(t, err)
zdataset, err := testutil.ParseLinesFromFile(inputZDatasetFilename)
require.NoError(t, err)
// Try to read release from file and default to FreeBSD 13 if
// an error occurs.
uname := "13.2-STABLE"
if buf, err := os.ReadFile(inputUnameFilename); err == nil {
uname = string(buf)
}
// Prepare the influx parser for expectations
parser := &influx.Parser{}
require.NoError(t, parser.Init())
@ -79,7 +89,9 @@ func TestCases(t *testing.T) {
}
plugin.zpool = func() ([]string, error) { return zpool, nil }
plugin.zdataset = func(_ []string) ([]string, error) { return zdataset, nil }
plugin.uname = func() (string, error) { return uname, nil }
plugin.Log = testutil.Logger{}
require.NoError(t, plugin.Init())
// Gather and test
var acc testutil.Accumulator