diff --git a/.golangci.yml b/.golangci.yml
index b89e9e190..d622da74a 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -266,6 +266,7 @@ linters-settings:
       - name: errorf
       - name: function-result-limit
         arguments: [ 3 ]
+      - name: get-return
       - name: identical-branches
       - name: if-return
       - name: import-shadowing
diff --git a/config/config.go b/config/config.go
index 5ac7700a2..2b1cf117c 100644
--- a/config/config.go
+++ b/config/config.go
@@ -568,7 +568,7 @@ func (c *Config) LoadConfigData(data []byte) error {
 		})
 	}
 
-	// Setup the persister if requested
+	// Set up the persister if requested
 	if c.Agent.Statefile != "" {
 		c.Persister = &persister.Persister{
 			Filename: c.Agent.Statefile,
@@ -887,13 +887,12 @@ func (c *Config) addSecretStore(name string, table *ast.Table) error {
 		return nil
 	}
 
-	var storeid string
-	c.getFieldString(table, "id", &storeid)
-	if storeid == "" {
+	storeID := c.getFieldString(table, "id")
+	if storeID == "" {
 		return fmt.Errorf("%q secret-store without ID", name)
 	}
-	if !secretStorePattern.MatchString(storeid) {
-		return fmt.Errorf("invalid secret-store ID %q, must only contain letters, numbers or underscore", storeid)
+	if !secretStorePattern.MatchString(storeID) {
+		return fmt.Errorf("invalid secret-store ID %q, must only contain letters, numbers or underscore", storeID)
 	}
 
 	creator, ok := secretstores.SecretStores[name]
@@ -905,7 +904,7 @@ func (c *Config) addSecretStore(name string, table *ast.Table) error {
 		}
 		return fmt.Errorf("undefined but requested secretstores: %s", name)
 	}
-	store := creator(storeid)
+	store := creator(storeID)
 
 	if err := c.toml.UnmarshalTable(table, store); err != nil {
 		return err
@@ -919,13 +918,13 @@ func (c *Config) addSecretStore(name string, table *ast.Table) error {
 	models.SetLoggerOnPlugin(store, logger)
 
 	if err := store.Init(); err != nil {
-		return fmt.Errorf("error initializing secret-store %q: %w", storeid, err)
+		return fmt.Errorf("error initializing secret-store %q: %w", storeID, err)
 	}
 
-	if _, found := c.SecretStores[storeid]; found {
-		return fmt.Errorf("duplicate ID %q for secretstore %q", storeid, name)
+	if _, found := c.SecretStores[storeID]; found {
+		return fmt.Errorf("duplicate ID %q for secretstore %q", storeID, name)
 	}
-	c.SecretStores[storeid] = store
+	c.SecretStores[storeID] = store
 
 	return nil
 }
@@ -934,8 +933,8 @@ func (c *Config) LinkSecrets() error {
 		resolvers := make(map[string]telegraf.ResolveFunc)
 		for _, ref := range s.GetUnlinked() {
 			// Split the reference and lookup the resolver
-			storeid, key := splitLink(ref)
-			store, found := c.SecretStores[storeid]
+			storeID, key := splitLink(ref)
+			store, found := c.SecretStores[storeID]
 			if !found {
 				return fmt.Errorf("unknown secret-store for %q", ref)
 			}
@@ -954,13 +953,12 @@ func (c *Config) LinkSecrets() error {
 }
 
 func (c *Config) probeParser(parentcategory string, parentname string, table *ast.Table) bool {
-	var dataformat string
-	c.getFieldString(table, "data_format", &dataformat)
-	if dataformat == "" {
-		dataformat = setDefaultParser(parentcategory, parentname)
+	dataFormat := c.getFieldString(table, "data_format")
+	if dataFormat == "" {
+		dataFormat = setDefaultParser(parentcategory, parentname)
 	}
 
-	creator, ok := parsers.Parsers[dataformat]
+	creator, ok := parsers.Parsers[dataFormat]
 	if !ok {
 		return false
 	}
@@ -978,17 +976,16 @@ func (c *Config) addParser(parentcategory, parentname string, table *ast.Table)
 		Parent: parentname,
 	}
 
-	c.getFieldString(table, "data_format", &conf.DataFormat)
+	conf.DataFormat = c.getFieldString(table, "data_format")
 	if conf.DataFormat == "" {
 		conf.DataFormat = setDefaultParser(parentcategory, parentname)
 	} else if conf.DataFormat == "influx" {
-		var influxParserType string
-		c.getFieldString(table, "influx_parser_type", &influxParserType)
+		influxParserType := c.getFieldString(table, "influx_parser_type")
 		if influxParserType == "upstream" {
 			conf.DataFormat = "influx_upstream"
 		}
 	}
-	c.getFieldString(table, "log_level", &conf.LogLevel)
+	conf.LogLevel = c.getFieldString(table, "log_level")
 
 	creator, ok := parsers.Parsers[conf.DataFormat]
 	if !ok {
@@ -1017,11 +1014,11 @@ func (c *Config) addSerializer(parentname string, table *ast.Table) (*models.Run
 	conf := &models.SerializerConfig{
 		Parent: parentname,
 	}
-	c.getFieldString(table, "data_format", &conf.DataFormat)
+	conf.DataFormat = c.getFieldString(table, "data_format")
 	if conf.DataFormat == "" {
 		conf.DataFormat = "influx"
 	}
-	c.getFieldString(table, "log_level", &conf.LogLevel)
+	conf.LogLevel = c.getFieldString(table, "log_level")
 
 	creator, ok := serializers.Serializers[conf.DataFormat]
 	if !ok {
@@ -1060,7 +1057,7 @@ func (c *Config) addProcessor(name string, table *ast.Table) error {
 	c.setLocalMissingTomlFieldTracker(missCount)
 	defer c.resetMissingTomlFieldTracker()
 
-	// Setup the processor running before the aggregators
+	// Set up the processor running before the aggregators
 	processorBeforeConfig, err := c.buildProcessor("processors", name, table)
 	if err != nil {
 		return err
@@ -1330,15 +1327,22 @@ func (c *Config) buildAggregator(name string, tbl *ast.Table) (*models.Aggregato
 		Grace:  time.Second * 0,
 	}
 
-	c.getFieldDuration(tbl, "period", &conf.Period)
-	c.getFieldDuration(tbl, "delay", &conf.Delay)
-	c.getFieldDuration(tbl, "grace", &conf.Grace)
-	c.getFieldBool(tbl, "drop_original", &conf.DropOriginal)
-	c.getFieldString(tbl, "name_prefix", &conf.MeasurementPrefix)
-	c.getFieldString(tbl, "name_suffix", &conf.MeasurementSuffix)
-	c.getFieldString(tbl, "name_override", &conf.NameOverride)
-	c.getFieldString(tbl, "alias", &conf.Alias)
-	c.getFieldString(tbl, "log_level", &conf.LogLevel)
+	if period, found := c.getFieldDuration(tbl, "period"); found {
+		conf.Period = period
+	}
+	if delay, found := c.getFieldDuration(tbl, "delay"); found {
+		conf.Delay = delay
+	}
+	if grace, found := c.getFieldDuration(tbl, "grace"); found {
+		conf.Grace = grace
+	}
+
+	conf.DropOriginal = c.getFieldBool(tbl, "drop_original")
+	conf.MeasurementPrefix = c.getFieldString(tbl, "name_prefix")
+	conf.MeasurementSuffix = c.getFieldString(tbl, "name_suffix")
+	conf.NameOverride = c.getFieldString(tbl, "name_override")
+	conf.Alias = c.getFieldString(tbl, "alias")
+	conf.LogLevel = c.getFieldString(tbl, "log_level")
 
 	conf.Tags = make(map[string]string)
 	if node, ok := tbl.Fields["tags"]; ok {
@@ -1370,9 +1374,9 @@ func (c *Config) buildAggregator(name string, tbl *ast.Table) (*models.Aggregato
 func (c *Config) buildProcessor(category, name string, tbl *ast.Table) (*models.ProcessorConfig, error) {
 	conf := &models.ProcessorConfig{Name: name}
 
-	c.getFieldInt64(tbl, "order", &conf.Order)
-	c.getFieldString(tbl, "alias", &conf.Alias)
-	c.getFieldString(tbl, "log_level", &conf.LogLevel)
+	conf.Order = c.getFieldInt64(tbl, "order")
+	conf.Alias = c.getFieldString(tbl, "alias")
+	conf.LogLevel = c.getFieldString(tbl, "log_level")
 
 	if c.hasErrs() {
 		return nil, c.firstErr()
@@ -1396,13 +1400,12 @@ func (c *Config) buildProcessor(category, name string, tbl *ast.Table) (*models.
 func (c *Config) buildFilter(plugin string, tbl *ast.Table) (models.Filter, error) {
 	f := models.Filter{}
 
-	c.getFieldStringSlice(tbl, "namepass", &f.NamePass)
-	c.getFieldString(tbl, "namepass_separator", &f.NamePassSeparators)
-	c.getFieldStringSlice(tbl, "namedrop", &f.NameDrop)
-	c.getFieldString(tbl, "namedrop_separator", &f.NameDropSeparators)
+	f.NamePass = c.getFieldStringSlice(tbl, "namepass")
+	f.NamePassSeparators = c.getFieldString(tbl, "namepass_separator")
+	f.NameDrop = c.getFieldStringSlice(tbl, "namedrop")
+	f.NameDropSeparators = c.getFieldString(tbl, "namedrop_separator")
 
-	var oldPass []string
-	c.getFieldStringSlice(tbl, "pass", &oldPass)
+	oldPass := c.getFieldStringSlice(tbl, "pass")
 	if len(oldPass) > 0 {
 		PrintOptionDeprecationNotice(plugin, "pass", telegraf.DeprecationInfo{
 			Since: "0.10.4",
@@ -1411,8 +1414,8 @@ func (c *Config) buildFilter(plugin string, tbl *ast.Table) (models.Filter, erro
 		})
 		f.FieldInclude = append(f.FieldInclude, oldPass...)
 	}
-	var oldFieldPass []string
-	c.getFieldStringSlice(tbl, "fieldpass", &oldFieldPass)
+
+	oldFieldPass := c.getFieldStringSlice(tbl, "fieldpass")
 	if len(oldFieldPass) > 0 {
 		PrintOptionDeprecationNotice(plugin, "fieldpass", telegraf.DeprecationInfo{
 			Since: "1.29.0",
@@ -1421,10 +1424,13 @@ func (c *Config) buildFilter(plugin string, tbl *ast.Table) (models.Filter, erro
 		})
 		f.FieldInclude = append(f.FieldInclude, oldFieldPass...)
 	}
-	c.getFieldStringSlice(tbl, "fieldinclude", &f.FieldInclude)
 
-	var oldDrop []string
-	c.getFieldStringSlice(tbl, "drop", &oldDrop)
+	fieldInclude := c.getFieldStringSlice(tbl, "fieldinclude")
+	if len(fieldInclude) > 0 {
+		f.FieldInclude = append(f.FieldInclude, fieldInclude...)
+	}
+
+	oldDrop := c.getFieldStringSlice(tbl, "drop")
 	if len(oldDrop) > 0 {
 		PrintOptionDeprecationNotice(plugin, "drop", telegraf.DeprecationInfo{
 			Since: "0.10.4",
@@ -1433,8 +1439,8 @@ func (c *Config) buildFilter(plugin string, tbl *ast.Table) (models.Filter, erro
 		})
 		f.FieldExclude = append(f.FieldExclude, oldDrop...)
 	}
-	var oldFieldDrop []string
-	c.getFieldStringSlice(tbl, "fielddrop", &oldFieldDrop)
+
+	oldFieldDrop := c.getFieldStringSlice(tbl, "fielddrop")
 	if len(oldFieldDrop) > 0 {
 		PrintOptionDeprecationNotice(plugin, "fielddrop", telegraf.DeprecationInfo{
 			Since: "1.29.0",
@@ -1443,15 +1449,19 @@ func (c *Config) buildFilter(plugin string, tbl *ast.Table) (models.Filter, erro
 		})
 		f.FieldExclude = append(f.FieldExclude, oldFieldDrop...)
 	}
-	c.getFieldStringSlice(tbl, "fieldexclude", &f.FieldExclude)
 
-	c.getFieldTagFilter(tbl, "tagpass", &f.TagPassFilters)
-	c.getFieldTagFilter(tbl, "tagdrop", &f.TagDropFilters)
+	fieldExclude := c.getFieldStringSlice(tbl, "fieldexclude")
+	if len(fieldExclude) > 0 {
+		f.FieldExclude = append(f.FieldExclude, fieldExclude...)
+	}
 
-	c.getFieldStringSlice(tbl, "tagexclude", &f.TagExclude)
-	c.getFieldStringSlice(tbl, "taginclude", &f.TagInclude)
+	f.TagPassFilters = c.getFieldTagFilter(tbl, "tagpass")
+	f.TagDropFilters = c.getFieldTagFilter(tbl, "tagdrop")
 
-	c.getFieldString(tbl, "metricpass", &f.MetricPass)
+	f.TagExclude = c.getFieldStringSlice(tbl, "tagexclude")
+	f.TagInclude = c.getFieldStringSlice(tbl, "taginclude")
+
+	f.MetricPass = c.getFieldString(tbl, "metricpass")
 
 	if c.hasErrs() {
 		return f, c.firstErr()
@@ -1473,16 +1483,16 @@ func (c *Config) buildInput(name string, tbl *ast.Table) (*models.InputConfig, e
 		AlwaysIncludeLocalTags:  c.Agent.AlwaysIncludeLocalTags,
 		AlwaysIncludeGlobalTags: c.Agent.AlwaysIncludeGlobalTags,
 	}
-	c.getFieldDuration(tbl, "interval", &cp.Interval)
-	c.getFieldDuration(tbl, "precision", &cp.Precision)
-	c.getFieldDuration(tbl, "collection_jitter", &cp.CollectionJitter)
-	c.getFieldDuration(tbl, "collection_offset", &cp.CollectionOffset)
-	c.getFieldString(tbl, "startup_error_behavior", &cp.StartupErrorBehavior)
-	c.getFieldString(tbl, "name_prefix", &cp.MeasurementPrefix)
-	c.getFieldString(tbl, "name_suffix", &cp.MeasurementSuffix)
-	c.getFieldString(tbl, "name_override", &cp.NameOverride)
-	c.getFieldString(tbl, "alias", &cp.Alias)
-	c.getFieldString(tbl, "log_level", &cp.LogLevel)
+	cp.Interval, _ = c.getFieldDuration(tbl, "interval")
+	cp.Precision, _ = c.getFieldDuration(tbl, "precision")
+	cp.CollectionJitter, _ = c.getFieldDuration(tbl, "collection_jitter")
+	cp.CollectionOffset, _ = c.getFieldDuration(tbl, "collection_offset")
+	cp.StartupErrorBehavior = c.getFieldString(tbl, "startup_error_behavior")
+	cp.MeasurementPrefix = c.getFieldString(tbl, "name_prefix")
+	cp.MeasurementSuffix = c.getFieldString(tbl, "name_suffix")
+	cp.NameOverride = c.getFieldString(tbl, "name_override")
+	cp.Alias = c.getFieldString(tbl, "alias")
+	cp.LogLevel = c.getFieldString(tbl, "log_level")
 
 	cp.Tags = make(map[string]string)
 	if node, ok := tbl.Fields["tags"]; ok {
@@ -1526,16 +1536,16 @@ func (c *Config) buildOutput(name string, tbl *ast.Table) (*models.OutputConfig,
 
 	// TODO: support FieldPass/FieldDrop on outputs
 
-	c.getFieldDuration(tbl, "flush_interval", &oc.FlushInterval)
-	c.getFieldDuration(tbl, "flush_jitter", &oc.FlushJitter)
-	c.getFieldInt(tbl, "metric_buffer_limit", &oc.MetricBufferLimit)
-	c.getFieldInt(tbl, "metric_batch_size", &oc.MetricBatchSize)
-	c.getFieldString(tbl, "alias", &oc.Alias)
-	c.getFieldString(tbl, "name_override", &oc.NameOverride)
-	c.getFieldString(tbl, "name_suffix", &oc.NameSuffix)
-	c.getFieldString(tbl, "name_prefix", &oc.NamePrefix)
-	c.getFieldString(tbl, "startup_error_behavior", &oc.StartupErrorBehavior)
-	c.getFieldString(tbl, "log_level", &oc.LogLevel)
+	oc.FlushInterval, _ = c.getFieldDuration(tbl, "flush_interval")
+	oc.FlushJitter, _ = c.getFieldDuration(tbl, "flush_jitter")
+	oc.MetricBufferLimit = c.getFieldInt(tbl, "metric_buffer_limit")
+	oc.MetricBatchSize = c.getFieldInt(tbl, "metric_batch_size")
+	oc.Alias = c.getFieldString(tbl, "alias")
+	oc.NameOverride = c.getFieldString(tbl, "name_override")
+	oc.NameSuffix = c.getFieldString(tbl, "name_suffix")
+	oc.NamePrefix = c.getFieldString(tbl, "name_prefix")
+	oc.StartupErrorBehavior = c.getFieldString(tbl, "startup_error_behavior")
+	oc.LogLevel = c.getFieldString(tbl, "log_level")
 
 	if c.hasErrs() {
 		return nil, c.firstErr()
@@ -1615,115 +1625,129 @@ func (c *Config) resetMissingTomlFieldTracker() {
 	c.toml.MissingField = c.missingTomlField
 }
 
-func (c *Config) getFieldString(tbl *ast.Table, fieldName string, target *string) {
+func (c *Config) getFieldString(tbl *ast.Table, fieldName string) string {
 	if node, ok := tbl.Fields[fieldName]; ok {
 		if kv, ok := node.(*ast.KeyValue); ok {
 			if str, ok := kv.Value.(*ast.String); ok {
-				*target = str.Value
+				return str.Value
 			}
 		}
 	}
+
+	return ""
 }
 
-func (c *Config) getFieldDuration(tbl *ast.Table, fieldName string, target interface{}) {
+func (c *Config) getFieldDuration(tbl *ast.Table, fieldName string) (time.Duration, bool) {
 	if node, ok := tbl.Fields[fieldName]; ok {
 		if kv, ok := node.(*ast.KeyValue); ok {
 			if str, ok := kv.Value.(*ast.String); ok {
 				d, err := time.ParseDuration(str.Value)
 				if err != nil {
 					c.addError(tbl, fmt.Errorf("error parsing duration: %w", err))
-					return
+					return 0, false
 				}
-				targetVal := reflect.ValueOf(target).Elem()
-				targetVal.Set(reflect.ValueOf(d))
+				return d, true
 			}
 		}
 	}
+
+	return 0, false
 }
 
-func (c *Config) getFieldBool(tbl *ast.Table, fieldName string, target *bool) {
-	var err error
+func (c *Config) getFieldBool(tbl *ast.Table, fieldName string) bool {
 	if node, ok := tbl.Fields[fieldName]; ok {
 		if kv, ok := node.(*ast.KeyValue); ok {
 			switch t := kv.Value.(type) {
 			case *ast.Boolean:
-				*target, err = t.Boolean()
+				target, err := t.Boolean()
 				if err != nil {
 					c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value))
-					return
+					return false
 				}
+				return target
 			case *ast.String:
-				*target, err = strconv.ParseBool(t.Value)
+				target, err := strconv.ParseBool(t.Value)
 				if err != nil {
 					c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value))
-					return
+					return false
 				}
+				return target
 			default:
 				c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value.Source()))
-				return
+				return false
 			}
 		}
 	}
+
+	return false
}
 
-func (c *Config) getFieldInt(tbl *ast.Table, fieldName string, target *int) {
+func (c *Config) getFieldInt(tbl *ast.Table, fieldName string) int {
 	if node, ok := tbl.Fields[fieldName]; ok {
 		if kv, ok := node.(*ast.KeyValue); ok {
 			if iAst, ok := kv.Value.(*ast.Integer); ok {
 				i, err := iAst.Int()
 				if err != nil {
 					c.addError(tbl, fmt.Errorf("unexpected int type %q, expecting int", iAst.Value))
-					return
+					return 0
 				}
-				*target = int(i)
+				return int(i)
 			}
 		}
 	}
+
+	return 0
 }
 
-func (c *Config) getFieldInt64(tbl *ast.Table, fieldName string, target *int64) {
+func (c *Config) getFieldInt64(tbl *ast.Table, fieldName string) int64 {
 	if node, ok := tbl.Fields[fieldName]; ok {
 		if kv, ok := node.(*ast.KeyValue); ok {
 			if iAst, ok := kv.Value.(*ast.Integer); ok {
 				i, err := iAst.Int()
 				if err != nil {
 					c.addError(tbl, fmt.Errorf("unexpected int type %q, expecting int", iAst.Value))
-					return
+					return 0
 				}
-				*target = i
-			} else {
-				c.addError(tbl, fmt.Errorf("found unexpected format while parsing %q, expecting int", fieldName))
+				return i
 			}
+			c.addError(tbl, fmt.Errorf("found unexpected format while parsing %q, expecting int", fieldName))
+			return 0
 		}
 	}
+
+	return 0
 }
 
-func (c *Config) getFieldStringSlice(tbl *ast.Table, fieldName string, target *[]string) {
+func (c *Config) getFieldStringSlice(tbl *ast.Table, fieldName string) []string {
+	var target []string
 	if node, ok := tbl.Fields[fieldName]; ok {
 		if kv, ok := node.(*ast.KeyValue); ok {
 			ary, ok := kv.Value.(*ast.Array)
 			if !ok {
 				c.addError(tbl, fmt.Errorf("found unexpected format while parsing %q, expecting string array/slice format", fieldName))
-				return
+				return target
 			}
 			for _, elem := range ary.Value {
 				if str, ok := elem.(*ast.String); ok {
-					*target = append(*target, str.Value)
+					target = append(target, str.Value)
 				}
 			}
 		}
 	}
+
+	return target
 }
 
-func (c *Config) getFieldTagFilter(tbl *ast.Table, fieldName string, target *[]models.TagFilter) {
+func (c *Config) getFieldTagFilter(tbl *ast.Table, fieldName string) []models.TagFilter {
+	var target []models.TagFilter
 	if node, ok := tbl.Fields[fieldName]; ok {
-		if subtbl, ok := node.(*ast.Table); ok {
-			for name, val := range subtbl.Fields {
+		if subTbl, ok := node.(*ast.Table); ok {
+			for name, val := range subTbl.Fields {
 				if kv, ok := val.(*ast.KeyValue); ok {
 					ary, ok := kv.Value.(*ast.Array)
 					if !ok {
 						c.addError(tbl, fmt.Errorf("found unexpected format while parsing %q, expecting string array/slice format on each entry", fieldName))
-						return
+						return nil
 					}
 
 					tagFilter := models.TagFilter{Name: name}
@@ -1732,11 +1756,13 @@ func (c *Config) getFieldTagFilter(tbl *ast.Table, fieldName string, target *[]m
 							tagFilter.Values = append(tagFilter.Values, str.Value)
 						}
 					}
-					*target = append(*target, tagFilter)
+					target = append(target, tagFilter)
 				}
 			}
 		}
 	}
+
+	return target
 }
 
 func keys(m map[string]bool) []string {
diff --git a/plugins/aggregators/basicstats/basicstats.go b/plugins/aggregators/basicstats/basicstats.go
index 0c70f03c3..6ba76e986 100644
--- a/plugins/aggregators/basicstats/basicstats.go
+++ b/plugins/aggregators/basicstats/basicstats.go
@@ -262,7 +262,7 @@ func (b *BasicStats) parseStats() *configuredStats {
 	return parsed
 }
 
-func (b *BasicStats) getConfiguredStats() {
+func (b *BasicStats) initConfiguredStats() {
 	if b.Stats == nil {
 		b.statsConfig = &configuredStats{
 			count: true,
@@ -303,7 +303,7 @@ func convert(in interface{}) (float64, bool) {
 }
 
 func (b *BasicStats) Init() error {
-	b.getConfiguredStats()
+	b.initConfiguredStats()
 
 	return nil
 }
diff --git a/plugins/aggregators/basicstats/basicstats_test.go b/plugins/aggregators/basicstats/basicstats_test.go
index e255ecb88..99b8f0884 100644
--- a/plugins/aggregators/basicstats/basicstats_test.go
+++ b/plugins/aggregators/basicstats/basicstats_test.go
@@ -41,7 +41,7 @@ var m2 = metric.New("m1",
 func BenchmarkApply(b *testing.B) {
 	minmax := NewBasicStats()
 	minmax.Log = testutil.Logger{}
-	minmax.getConfiguredStats()
+	minmax.initConfiguredStats()
 
 	for n := 0; n < b.N; n++ {
 		minmax.Add(m1)
@@ -54,7 +54,7 @@ func TestBasicStatsWithPeriod(t *testing.T) {
 	acc := testutil.Accumulator{}
 	minmax := NewBasicStats()
 	minmax.Log = testutil.Logger{}
-	minmax.getConfiguredStats()
+	minmax.initConfiguredStats()
 
 	minmax.Add(m1)
 	minmax.Add(m2)
@@ -113,7 +113,7 @@ func TestBasicStatsDifferentPeriods(t *testing.T) {
 	minmax := NewBasicStats()
 	minmax.Stats = []string{"count", "max", "min", "mean", "last"}
 	minmax.Log = testutil.Logger{}
-	minmax.getConfiguredStats()
+	minmax.initConfiguredStats()
 
 	minmax.Add(m1)
 	minmax.Push(&acc)
@@ -201,7 +201,7 @@ func TestBasicStatsWithOnlyCount(t *testing.T) {
 	aggregator := NewBasicStats()
 	aggregator.Stats = []string{"count"}
 	aggregator.Log = testutil.Logger{}
-	aggregator.getConfiguredStats()
+	aggregator.initConfiguredStats()
 
 	aggregator.Add(m1)
 	aggregator.Add(m2)
@@ -229,7 +229,7 @@ func TestBasicStatsWithOnlyMin(t *testing.T) {
 	aggregator := NewBasicStats()
 	aggregator.Stats = []string{"min"}
 	aggregator.Log = testutil.Logger{}
-	aggregator.getConfiguredStats()
+	aggregator.initConfiguredStats()
 
 	aggregator.Add(m1)
 	aggregator.Add(m2)
@@ -257,7 +257,7 @@ func TestBasicStatsWithOnlyMax(t *testing.T) {
 	aggregator := NewBasicStats()
 	aggregator.Stats = []string{"max"}
 	aggregator.Log = testutil.Logger{}
-	aggregator.getConfiguredStats()
+	aggregator.initConfiguredStats()
 
 	aggregator.Add(m1)
 	aggregator.Add(m2)
@@ -285,7 +285,7 @@ func TestBasicStatsWithOnlyMean(t *testing.T) {
 	aggregator := NewBasicStats()
 	aggregator.Stats = []string{"mean"}
 	aggregator.Log = testutil.Logger{}
-	aggregator.getConfiguredStats()
+	aggregator.initConfiguredStats()
 
 	aggregator.Add(m1)
 	aggregator.Add(m2)
@@ -313,7 +313,7 @@ func TestBasicStatsWithOnlySum(t *testing.T) {
 	aggregator := NewBasicStats()
 	aggregator.Stats = []string{"sum"}
 	aggregator.Log = testutil.Logger{}
-	aggregator.getConfiguredStats()
+	aggregator.initConfiguredStats()
 
 	aggregator.Add(m1)
 	aggregator.Add(m2)
@@ -372,7 +372,7 @@ func TestBasicStatsWithOnlySumFloatingPointErrata(t *testing.T) {
 	aggregator := NewBasicStats()
 	aggregator.Stats = []string{"sum"}
 	aggregator.Log = testutil.Logger{}
-	aggregator.getConfiguredStats()
+	aggregator.initConfiguredStats()
 
 	aggregator.Add(sum1)
 	aggregator.Add(sum2)
@@ -394,7 +394,7 @@ func TestBasicStatsWithOnlyVariance(t *testing.T) {
 	aggregator := NewBasicStats()
 	aggregator.Stats = []string{"s2"}
 	aggregator.Log = testutil.Logger{}
-	aggregator.getConfiguredStats()
+	aggregator.initConfiguredStats()
 
 	aggregator.Add(m1)
 	aggregator.Add(m2)
@@ -420,7 +420,7 @@ func TestBasicStatsWithOnlyStandardDeviation(t *testing.T) {
 	aggregator := NewBasicStats()
 	aggregator.Stats = []string{"stdev"}
 	aggregator.Log = testutil.Logger{}
-	aggregator.getConfiguredStats()
+	aggregator.initConfiguredStats()
 
 	aggregator.Add(m1)
 	aggregator.Add(m2)
@@ -446,7 +446,7 @@ func TestBasicStatsWithMinAndMax(t *testing.T) {
 	aggregator := NewBasicStats()
 	aggregator.Stats = []string{"min", "max"}
 	aggregator.Log = testutil.Logger{}
-	aggregator.getConfiguredStats()
+	aggregator.initConfiguredStats()
 
 	aggregator.Add(m1)
 	aggregator.Add(m2)
@@ -481,7 +481,7 @@ func TestBasicStatsWithDiff(t *testing.T) {
 	aggregator := NewBasicStats()
 	aggregator.Stats = []string{"diff"}
 	aggregator.Log = testutil.Logger{}
-	aggregator.getConfiguredStats()
+	aggregator.initConfiguredStats()
 
 	aggregator.Add(m1)
 	aggregator.Add(m2)
@@ -506,7 +506,7 @@ func TestBasicStatsWithRate(t *testing.T) {
 	aggregator := NewBasicStats()
 	aggregator.Stats = []string{"rate"}
 	aggregator.Log = testutil.Logger{}
-	aggregator.getConfiguredStats()
+	aggregator.initConfiguredStats()
 
 	aggregator.Add(m1)
 	aggregator.Add(m2)
@@ -530,7 +530,7 @@ func TestBasicStatsWithNonNegativeRate(t *testing.T) {
 	aggregator := NewBasicStats()
 	aggregator.Stats = []string{"non_negative_rate"}
 	aggregator.Log = testutil.Logger{}
-	aggregator.getConfiguredStats()
+	aggregator.initConfiguredStats()
 
 	aggregator.Add(m1)
 	aggregator.Add(m2)
@@ -554,7 +554,7 @@ func TestBasicStatsWithPctChange(t *testing.T) {
 	aggregator := NewBasicStats()
 	aggregator.Stats = []string{"percent_change"}
 	aggregator.Log = testutil.Logger{}
-	aggregator.getConfiguredStats()
+	aggregator.initConfiguredStats()
 
 	aggregator.Add(m1)
 	aggregator.Add(m2)
@@ -578,7 +578,7 @@ func TestBasicStatsWithInterval(t *testing.T) {
 	aggregator := NewBasicStats()
 	aggregator.Stats = []string{"interval"}
 	aggregator.Log = testutil.Logger{}
-	aggregator.getConfiguredStats()
+	aggregator.initConfiguredStats()
 
 	aggregator.Add(m1)
 	aggregator.Add(m2)
@@ -604,7 +604,7 @@ func TestBasicStatsWithNonNegativeDiff(t *testing.T) {
 	aggregator := NewBasicStats()
 	aggregator.Stats = []string{"non_negative_diff"}
 	aggregator.Log = testutil.Logger{}
-	aggregator.getConfiguredStats()
+	aggregator.initConfiguredStats()
 
 	aggregator.Add(m1)
 	aggregator.Add(m2)
@@ -630,7 +630,7 @@ func TestBasicStatsWithAllStats(t *testing.T) {
 	minmax := NewBasicStats()
 	minmax.Log = testutil.Logger{}
 	minmax.Stats = []string{"count", "min", "max", "mean", "stdev", "s2", "sum", "last"}
-	minmax.getConfiguredStats()
+	minmax.initConfiguredStats()
 
 	minmax.Add(m1)
 	minmax.Add(m2)
@@ -701,7 +701,7 @@ func TestBasicStatsWithNoStats(t *testing.T) {
 	aggregator := NewBasicStats()
 	aggregator.Stats = []string{}
 	aggregator.Log = testutil.Logger{}
-	aggregator.getConfiguredStats()
+	aggregator.initConfiguredStats()
 
 	aggregator.Add(m1)
 	aggregator.Add(m2)
@@ -717,7 +717,7 @@ func TestBasicStatsWithUnknownStat(t *testing.T) {
 	aggregator := NewBasicStats()
 	aggregator.Stats = []string{"crazy"}
 	aggregator.Log = testutil.Logger{}
-	aggregator.getConfiguredStats()
+	aggregator.initConfiguredStats()
 
 	aggregator.Add(m1)
 	aggregator.Add(m2)
@@ -735,7 +735,7 @@ func TestBasicStatsWithUnknownStat(t *testing.T) {
 func TestBasicStatsWithDefaultStats(t *testing.T) {
 	aggregator := NewBasicStats()
 	aggregator.Log = testutil.Logger{}
-	aggregator.getConfiguredStats()
+	aggregator.initConfiguredStats()
 
 	aggregator.Add(m1)
 	aggregator.Add(m2)
@@ -756,7 +756,7 @@ func TestBasicStatsWithOnlyLast(t *testing.T) {
 	aggregator := NewBasicStats()
 	aggregator.Stats = []string{"last"}
 	aggregator.Log = testutil.Logger{}
-	aggregator.getConfiguredStats()
+	aggregator.initConfiguredStats()
 
 	aggregator.Add(m1)
 	aggregator.Add(m2)
diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go
index b595cf62b..df1264ab8 100644
--- a/plugins/inputs/smart/smart.go
+++ b/plugins/inputs/smart/smart.go
@@ -445,7 +445,7 @@ func (m *Smart) Gather(acc telegraf.Accumulator) error {
 	isVendorExtension := len(m.EnableExtensions) != 0
 
 	if len(m.Devices) != 0 {
-		m.getAttributes(acc, devicesFromConfig)
+		m.addAttributes(acc, devicesFromConfig)
 
 		// if nvme-cli is present, vendor specific attributes can be gathered
 		if isVendorExtension && isNVMe {
@@ -455,7 +455,7 @@ func (m *Smart) Gather(acc telegraf.Accumulator) error {
 			}
 
 			nvmeDevices := distinguishNVMeDevices(devicesFromConfig, scannedNVMeDevices)
-			m.getVendorNVMeAttributes(acc, nvmeDevices)
+			m.addVendorNVMeAttributes(acc, nvmeDevices)
 		}
 		return nil
 	}
@@ -467,9 +467,9 @@ func (m *Smart) Gather(acc telegraf.Accumulator) error {
 	devicesFromScan = append(devicesFromScan, scannedNVMeDevices...)
 	devicesFromScan = append(devicesFromScan, scannedNonNVMeDevices...)
 
-	m.getAttributes(acc, devicesFromScan)
+	m.addAttributes(acc, devicesFromScan)
 	if isVendorExtension && isNVMe {
-		m.getVendorNVMeAttributes(acc, scannedNVMeDevices)
+		m.addVendorNVMeAttributes(acc, scannedNVMeDevices)
 	}
 	return nil
 }
@@ -551,8 +551,8 @@ func excludedDev(excludes []string, deviceLine string) bool {
 	return false
 }
 
-// Get info and attributes for each S.M.A.R.T. device
-func (m *Smart) getAttributes(acc telegraf.Accumulator, devices []string) {
+// Add info and attributes for each S.M.A.R.T. device
+func (m *Smart) addAttributes(acc telegraf.Accumulator, devices []string) {
 	var wg sync.WaitGroup
 	wg.Add(len(devices))
 	for _, device := range devices {
@@ -569,7 +569,7 @@ func (m *Smart) getAttributes(acc telegraf.Accumulator, devices []string) {
 	wg.Wait()
 }
 
-func (m *Smart) getVendorNVMeAttributes(acc telegraf.Accumulator, devices []string) {
+func (m *Smart) addVendorNVMeAttributes(acc telegraf.Accumulator, devices []string) {
 	nvmeDevices := getDeviceInfoForNVMeDisks(acc, devices, m.PathNVMe, m.Timeout, m.UseSudo)
 
 	var wg sync.WaitGroup