chore: Fix linter findings for `revive:enforce-slice-style` in `plugins/parsers`, `plugins/processors`, `plugins/secretstores` and `plugins/serializers` (#15980)

Paweł Żak 2024-10-15 13:02:45 +02:00 committed by GitHub
parent 23fc01ce9c
commit f8af593d33
26 changed files with 64 additions and 111 deletions
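
For context: revive's `enforce-slice-style` rule flags empty-slice literals such as `[]telegraf.Metric{}` and asks for either a `nil` declaration or an explicit `make`, depending on how the rule is configured (the configuration itself is not part of this diff). A minimal sketch of the three styles involved:

```go
package main

import "fmt"

func main() {
	// The style flagged by the linter: an empty literal allocates a
	// zero-length backing array that is never strictly needed.
	flagged := []int{}

	// Preferred when the slice may legitimately stay empty: a nil slice.
	// len, cap, range, and append all treat nil exactly like empty.
	var preferred []int

	// Preferred when an upper bound on the size is known up front:
	// the capacity hint avoids re-allocations during append.
	sized := make([]int, 0, 8)

	fmt.Println(len(flagged), len(preferred), len(sized)) // 0 0 0
	fmt.Println(flagged == nil, preferred == nil)         // false true
}
```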

View File

@@ -71,7 +71,7 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
return nil, fmt.Errorf("collectd parser error: %w", err)
}
-metrics := []telegraf.Metric{}
+metrics := make([]telegraf.Metric, 0, len(valueLists))
for _, valueList := range valueLists {
metrics = append(metrics, p.unmarshalValueList(valueList)...)
}
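
This first hunk is the most common shape in the commit: a slice filled by `append` in a loop gets its capacity from the known input size. Here the capacity is a lower bound, since each value list can unmarshal into several metrics, but it still saves the initial growth steps. A sketch of the pattern with hypothetical names:

```go
package main

import "fmt"

// squares stands in for Parse: the output length is known to be at
// least len(inputs), so the backing array is allocated only once.
func squares(inputs []int) []int {
	out := make([]int, 0, len(inputs))
	for _, v := range inputs {
		out = append(out, v*v)
	}
	return out
}

func main() {
	fmt.Println(squares([]int{1, 2, 3})) // [1 4 9]
}
```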

View File

@@ -221,7 +221,7 @@ func TestParse_SignSecurityLevel(t *testing.T) {
metrics, err = parser.Parse(bytes)
require.NoError(t, err)
-require.Equal(t, []telegraf.Metric{}, metrics)
+require.Empty(t, metrics)
// Wrong password error
buf, err = writeValueList(singleMetric.vl)
@@ -250,7 +250,7 @@ func TestParse_EncryptSecurityLevel(t *testing.T) {
metrics, err := parser.Parse(bytes)
require.NoError(t, err)
-require.Equal(t, []telegraf.Metric{}, metrics)
+require.Empty(t, metrics)
// Encrypted data
buf, err = writeValueList(singleMetric.vl)
@@ -271,7 +271,7 @@ func TestParse_EncryptSecurityLevel(t *testing.T) {
metrics, err = parser.Parse(bytes)
require.NoError(t, err)
-require.Equal(t, []telegraf.Metric{}, metrics)
+require.Empty(t, metrics)
// Wrong password error
buf, err = writeValueList(singleMetric.vl)
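
`require.Empty` is the more robust assertion here: `require.Equal(t, []telegraf.Metric{}, metrics)` fails if the parser returns a nil slice, even though nil and empty are interchangeable for callers. A small testify sketch of the difference:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestEmptyVersusEqual(t *testing.T) {
	var nilSlice []string
	emptySlice := make([]string, 0)

	// Empty passes for both the nil and the non-nil empty slice.
	require.Empty(t, nilSlice)
	require.Empty(t, emptySlice)

	// Equal uses deep equality, which distinguishes the two:
	// require.Equal(t, emptySlice, nilSlice) would fail here.
}
```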

View File

@@ -84,7 +84,6 @@ func (record metadataPattern) Less(i, j int) bool {
func (p *Parser) initializeMetadataSeparators() error {
// initialize metadata
p.metadataTags = map[string]string{}
-p.metadataSeparatorList = []string{}
if p.MetadataRows <= 0 {
return nil
@@ -94,7 +93,7 @@ func (p *Parser) initializeMetadataSeparators() error {
return errors.New("csv_metadata_separators required when specifying csv_metadata_rows")
}
-p.metadataSeparatorList = metadataPattern{}
+p.metadataSeparatorList = make(metadataPattern, 0, len(p.MetadataSeparators))
patternList := map[string]bool{}
for _, pattern := range p.MetadataSeparators {
if patternList[pattern] {

View File

@@ -80,7 +80,7 @@ func TestHeaderOverride(t *testing.T) {
require.NoError(t, err)
metrics, err = p.Parse([]byte(testCSVRows[0]))
require.NoError(t, err)
-require.Equal(t, []telegraf.Metric{}, metrics)
+require.Empty(t, metrics)
m, err := p.ParseLine(testCSVRows[1])
require.NoError(t, err)
require.Equal(t, "test_name", m.Name())
@@ -849,14 +849,12 @@ func TestParseMetadataSeparators(t *testing.T) {
p := &Parser{
ColumnNames: []string{"a", "b"},
MetadataRows: 0,
-MetadataSeparators: []string{},
}
err := p.Init()
require.NoError(t, err)
p = &Parser{
ColumnNames: []string{"a", "b"},
MetadataRows: 1,
-MetadataSeparators: []string{},
}
err = p.Init()
require.Error(t, err)

View File

@@ -263,7 +263,7 @@ func (sp *StreamParser) Next() (telegraf.Metric, error) {
m, err := nextMetric(sp.decoder, sp.precision, sp.defaultTime, false)
if err != nil {
-return nil, convertToParseError([]byte{}, err)
+return nil, convertToParseError(nil, err)
}
return m, nil
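
Passing `nil` rather than `[]byte{}` is safe because everything a callee can reasonably do with the buffer — `len`, slicing, `range`, `append` — treats a nil slice as empty. A minimal sketch (the helper is hypothetical, not the parser's actual code):

```go
package main

import "fmt"

// preview returns at most n leading bytes; it never has to care
// whether the caller passed nil or an allocated empty slice.
func preview(buf []byte, n int) []byte {
	if len(buf) < n {
		n = len(buf)
	}
	return buf[:n]
}

func main() {
	fmt.Println(preview(nil, 8))               // []
	fmt.Println(preview([]byte{}, 8))          // []
	fmt.Println(preview([]byte("metrics"), 3)) // [109 101 116]
}
```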

View File

@@ -685,7 +685,6 @@ func TestSeriesParser(t *testing.T) {
{
name: "empty",
input: []byte(""),
-metrics: []telegraf.Metric{},
},
{
name: "minimal",
@@ -717,7 +716,6 @@ func TestSeriesParser(t *testing.T) {
{
name: "missing tag value",
input: []byte("cpu,a="),
-metrics: []telegraf.Metric{},
err: &ParseError{
DecodeError: &lineprotocol.DecodeError{
Line: 1,
@@ -730,7 +728,6 @@ func TestSeriesParser(t *testing.T) {
{
name: "error with carriage return in long line",
input: []byte("cpu,a=" + strings.Repeat("x", maxErrorBufferSize) + "\rcd,b"),
-metrics: []telegraf.Metric{},
err: &ParseError{
DecodeError: &lineprotocol.DecodeError{
Line: 1,

View File

@@ -763,7 +763,6 @@ func TestSeriesParser(t *testing.T) {
{
name: "empty",
input: []byte(""),
-metrics: []telegraf.Metric{},
},
{
name: "minimal",
@@ -795,7 +794,6 @@ func TestSeriesParser(t *testing.T) {
{
name: "missing tag value",
input: []byte("cpu,a="),
-metrics: []telegraf.Metric{},
err: &ParseError{
Offset: 6,
LineNumber: 1,
@@ -807,7 +805,6 @@ func TestSeriesParser(t *testing.T) {
{
name: "error with carriage return in long line",
input: []byte("cpu,a=" + strings.Repeat("x", maxErrorBufferSize) + "\rcd,b"),
-metrics: []telegraf.Metric{},
err: &ParseError{
Offset: 1031,
LineNumber: 1,

View File

@@ -602,7 +602,6 @@ func TestJSONQueryErrorOnArray(t *testing.T) {
parser := &Parser{
MetricName: "json_test",
-TagKeys: []string{},
Query: "shares.myArr",
}
require.NoError(t, parser.Init())
@@ -913,19 +912,16 @@ func TestParse(t *testing.T) {
name: "parse empty array",
parser: &Parser{},
input: []byte(`[]`),
-expected: []telegraf.Metric{},
},
{
name: "parse null",
parser: &Parser{},
input: []byte(`null`),
-expected: []telegraf.Metric{},
},
{
name: "parse null with query",
parser: &Parser{Query: "result.data"},
input: []byte(`{"error":null,"result":{"data":null,"items_per_page":10,"total_items":0,"total_pages":0}}`),
-expected: []telegraf.Metric{},
},
{
name: "parse simple array",

View File

@@ -101,10 +101,7 @@ func TestMultipleConfigs(t *testing.T) {
}
func TestParserEmptyConfig(t *testing.T) {
-plugin := &json_v2.Parser{
-Configs: []json_v2.Config{},
-}
+plugin := &json_v2.Parser{}
require.ErrorContains(t, plugin.Init(), "no configuration provided")
}

View File

@@ -21,7 +21,6 @@ func TestParse(t *testing.T) {
}{
{
name: "no bytes returns no metrics",
-want: []telegraf.Metric{},
},
{
name: "test without trailing end",
@@ -104,27 +103,23 @@ func TestParse(t *testing.T) {
{
name: "keys without = or values are ignored",
bytes: []byte(`i am no data.`),
-want: []telegraf.Metric{},
wantErr: false,
},
{
name: "keys without values are ignored",
bytes: []byte(`foo="" bar=`),
-want: []telegraf.Metric{},
wantErr: false,
},
{
name: "unterminated quote produces error",
measurement: "testlog",
bytes: []byte(`bar=baz foo="bar`),
-want: []telegraf.Metric{},
wantErr: true,
},
{
name: "malformed key",
measurement: "testlog",
bytes: []byte(`"foo=" bar=baz`),
-want: []telegraf.Metric{},
wantErr: true,
},
}

View File

@@ -148,7 +148,7 @@ func TestTryAddState(t *testing.T) {
runErrF: func() error {
return nil
},
-metrics: []telegraf.Metric{},
+metrics: make([]telegraf.Metric, 0),
assertF: func(t *testing.T, metrics []telegraf.Metric) {
require.Len(t, metrics, 1)
m := metrics[0]
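
Note that this hunk keeps a non-nil empty slice (`make([]telegraf.Metric, 0)`) instead of dropping the field. Presumably the test needs a value that deep equality or serialization can tell apart from nil; that is the one observable difference between the two forms, sketched below:

```go
package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

func main() {
	var nilSlice []int
	emptySlice := make([]int, 0)

	// Deep equality distinguishes nil from empty...
	fmt.Println(reflect.DeepEqual(nilSlice, emptySlice)) // false

	// ...and so does JSON encoding.
	a, _ := json.Marshal(nilSlice)
	b, _ := json.Marshal(emptySlice)
	fmt.Println(string(a), string(b)) // null []
}
```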

View File

@@ -53,7 +53,7 @@ func (v *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
if v.DataType != "string" {
values := strings.Fields(vStr)
if len(values) < 1 {
-return []telegraf.Metric{}, nil
+return nil, nil
}
vStr = values[len(values)-1]
}

View File

@@ -562,7 +562,7 @@ func splitLastPathElement(query string) []string {
// Nothing left
if query == "" || query == "/" || query == "//" || query == "." {
-return []string{}
+return nil
}
separatorIdx := strings.LastIndex(query, "/")

View File

@@ -1388,7 +1388,6 @@ func TestProtobufImporting(t *testing.T) {
ProtobufMessageDef: "person.proto",
ProtobufMessageType: "importtest.Person",
ProtobufImportPaths: []string{"testcases/protos"},
-Configs: []Config{},
Log: testutil.Logger{Name: "parsers.protobuf"},
}
require.NoError(t, parser.Init())

View File

@@ -65,7 +65,6 @@ func TestBasicStartupWithTagCacheSize(t *testing.T) {
func TestBasicInitNoTagsReturnAnError(t *testing.T) {
p := newAwsEc2Processor()
p.Log = &testutil.Logger{}
-p.ImdsTags = []string{}
err := p.Init()
require.Error(t, err)
}

View File

@@ -95,7 +95,7 @@ func TestNoMetric(t *testing.T) {
}
require.NoError(t, plugin.Init())
-input := []telegraf.Metric{}
+var input []telegraf.Metric
require.Empty(t, plugin.Apply(input...))
}

View File

@@ -46,9 +46,9 @@ func (p *Parser) SetParser(parser telegraf.Parser) {
}
func (p *Parser) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
-results := []telegraf.Metric{}
+results := make([]telegraf.Metric, 0, len(metrics))
for _, metric := range metrics {
-newMetrics := []telegraf.Metric{}
+var newMetrics []telegraf.Metric
if !p.DropOriginal {
newMetrics = append(newMetrics, metric)
} else {

View File

@@ -69,7 +69,6 @@ func NewReverseDNSCache(ttl, lookupTimeout time.Duration, workerPoolSize int) *R
ttl: ttl,
lookupTimeout: lookupTimeout,
cache: map[string]*dnslookup{},
-expireList: []*dnslookup{},
maxWorkers: workerPoolSize,
sem: semaphore.NewWeighted(int64(workerPoolSize)),
cancelCleanupWorker: cancel,
@@ -272,7 +271,7 @@ func (d *ReverseDNSCache) cleanup() {
d.expireListLock.Unlock()
return
}
-ipsToDelete := []string{}
+ipsToDelete := make([]string, 0, len(d.expireList))
for i := 0; i < len(d.expireList); i++ {
if !d.expireList[i].expiresAt.Before(now) {
break // done. Nothing after this point is expired.

View File

@@ -65,7 +65,7 @@ func (s *Split) Init() error {
}
func (s *Split) Apply(in ...telegraf.Metric) []telegraf.Metric {
-newMetrics := []telegraf.Metric{}
+newMetrics := make([]telegraf.Metric, 0, len(in)*(len(s.Templates)+1))
for _, point := range in {
if s.DropOriginal {
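
Here the capacity is a worst-case estimate rather than an exact count: every input metric can produce one output per template, plus the original when it is not dropped. Reserving slightly too much once is cheaper than letting `append` grow the slice repeatedly. The same estimation pattern, with simplified types:

```go
package main

import "fmt"

// expand derives one value per factor for each input, optionally
// keeping the original; capacity covers the worst case up front.
func expand(in []int, factors []int, keepOriginal bool) []int {
	out := make([]int, 0, len(in)*(len(factors)+1))
	for _, v := range in {
		if keepOriginal {
			out = append(out, v)
		}
		for _, f := range factors {
			out = append(out, v*f)
		}
	}
	return out
}

func main() {
	fmt.Println(expand([]int{1, 2}, []int{10, 100}, true))
	// [1 10 100 2 20 200]
}
```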

View File

@@ -113,7 +113,6 @@ def apply(metric):
time.Unix(0, 0),
),
},
-expected: []telegraf.Metric{},
},
{
name: "passthrough",
@@ -185,7 +184,6 @@ def apply(metric):
time.Unix(0, 0),
),
},
-expected: []telegraf.Metric{},
expectedErrorStr: "append: cannot append to frozen list",
},
{
@@ -348,7 +346,6 @@ def apply(metric):
time.Unix(0, 0),
),
},
-expected: []telegraf.Metric{},
expectedErrorStr: "type error",
},
{
@@ -417,7 +414,6 @@ def apply(metric):
time.Unix(0, 0),
),
},
-expected: []telegraf.Metric{},
expectedErrorStr: "cannot set tags",
},
{
@@ -546,7 +542,6 @@ def apply(metric):
time.Unix(0, 0),
),
},
-expected: []telegraf.Metric{},
expectedErrorStr: `key "foo" not in Tags`,
},
{
@@ -661,7 +656,6 @@ def apply(metric):
time.Unix(0, 0),
),
},
-expected: []telegraf.Metric{},
expectedErrorStr: "tag value must be of type 'str'",
},
{
@@ -773,7 +767,6 @@ def apply(metric):
time.Unix(0, 0),
),
},
-expected: []telegraf.Metric{},
expectedErrorStr: "popitem(): tag dictionary is empty",
},
{
@@ -1238,7 +1231,6 @@ def apply(metric):
time.Unix(0, 0),
),
},
-expected: []telegraf.Metric{},
expectedErrorStr: "pop: cannot delete during iteration",
},
{
@@ -1261,7 +1253,6 @@ def apply(metric):
time.Unix(0, 0),
),
},
-expected: []telegraf.Metric{},
expectedErrorStr: "cannot delete during iteration",
},
{
@@ -1284,7 +1275,6 @@ def apply(metric):
time.Unix(0, 0),
),
},
-expected: []telegraf.Metric{},
expectedErrorStr: "cannot delete during iteration",
},
{
@@ -1307,7 +1297,6 @@ def apply(metric):
time.Unix(0, 0),
),
},
-expected: []telegraf.Metric{},
expectedErrorStr: "cannot insert during iteration",
},
{
@@ -1378,7 +1367,6 @@ def apply(metric):
time.Unix(0, 0),
),
},
-expected: []telegraf.Metric{},
expectedErrorStr: "cannot set fields",
},
{
@@ -1585,7 +1573,6 @@ def apply(metric):
time.Unix(0, 0),
),
},
-expected: []telegraf.Metric{},
expectedErrorStr: `key "foo" not in Fields`,
},
{
@@ -1771,7 +1758,6 @@ def apply(metric):
time.Unix(0, 0),
),
},
-expected: []telegraf.Metric{},
expectedErrorStr: "invalid starlark type",
},
{
@@ -1887,7 +1873,6 @@ def apply(metric):
time.Unix(0, 0),
),
},
-expected: []telegraf.Metric{},
expectedErrorStr: "popitem(): field dictionary is empty",
},
{
@@ -2309,7 +2294,6 @@ def apply(metric):
time.Unix(0, 0),
),
},
-expected: []telegraf.Metric{},
expectedErrorStr: "pop: cannot delete during iteration",
},
{
@@ -2327,7 +2311,6 @@ def apply(metric):
time.Unix(0, 0),
),
},
-expected: []telegraf.Metric{},
expectedErrorStr: "cannot delete during iteration",
},
{
@@ -2345,7 +2328,6 @@ def apply(metric):
time.Unix(0, 0),
),
},
-expected: []telegraf.Metric{},
expectedErrorStr: "cannot delete during iteration",
},
{
@@ -2363,7 +2345,6 @@ def apply(metric):
time.Unix(0, 0),
),
},
-expected: []telegraf.Metric{},
expectedErrorStr: "cannot insert during iteration",
},
{
@@ -2435,7 +2416,6 @@ def apply(metric):
time.Unix(0, 0).UTC(),
),
},
-expected: []telegraf.Metric{},
expectedErrorStr: "type error",
},
{
@@ -2909,7 +2889,6 @@ func TestScript(t *testing.T) {
time.Unix(0, 0),
),
},
-expected: []telegraf.Metric{},
expectedErrorStr: "fail: The field value should be greater than 1",
},
}
@@ -3306,7 +3285,7 @@ func TestAllScriptTestData(t *testing.T) {
lines := strings.Split(string(b), "\n")
inputMetrics := parseMetricsFrom(t, lines, "Example Input:")
expectedErrorStr := parseErrorMessage(t, lines, "Example Output Error:")
-outputMetrics := []telegraf.Metric{}
+var outputMetrics []telegraf.Metric
if expectedErrorStr == "" {
outputMetrics = parseMetricsFrom(t, lines, "Example Output:")
}

View File

@@ -48,8 +48,6 @@ func New() *TopK {
topk.Aggregation = "mean"
topk.GroupBy = []string{"*"}
topk.AddGroupByTag = ""
-topk.AddRankFields = []string{}
-topk.AddAggregateFields = []string{}
// Initialize cache
topk.Reset()
@@ -187,7 +185,7 @@ func (t *TopK) Apply(in ...telegraf.Metric) []telegraf.Metric {
return t.push()
}
-return []telegraf.Metric{}
+return nil
}
func convert(in interface{}) (float64, bool) {
@@ -211,7 +209,7 @@ func (t *TopK) push() []telegraf.Metric {
// If we could not generate the aggregation
// function, fail hard by dropping all metrics
t.Log.Errorf("%v", err)
-return []telegraf.Metric{}
+return nil
}
for k, ms := range t.cache {
aggregations = append(aggregations, MetricAggregation{groupbykey: k, values: aggregator(ms, t.Fields)})

View File

@@ -55,7 +55,7 @@ type metricChange struct {
// they are semantically equal.
// Therefore the fields and tags must be in the same order that the processor would add them
func generateAns(input []telegraf.Metric, changeSet map[int]metricChange) []telegraf.Metric {
-answer := []telegraf.Metric{}
+answer := make([]telegraf.Metric, 0, len(input))
// For every input metric, we check if there is a change we need to apply
// If there is no change for a given input metric, the metric is dropped
@@ -411,7 +411,7 @@ func TestTopkGroupbyMetricName1(t *testing.T) {
topk.K = 1
topk.Aggregation = "sum"
topk.AddAggregateFields = []string{"value"}
-topk.GroupBy = []string{}
+topk.GroupBy = make([]string, 0)
// Get the input
input := deepCopy(MetricsSet2)

View File

@@ -14,7 +14,7 @@ func TestCreateAESFail(t *testing.T) {
}
func TestTrimPKCSFail(t *testing.T) {
-_, err := PKCS5or7Trimming([]byte{})
+_, err := PKCS5or7Trimming(nil)
require.ErrorContains(t, err, "empty value to trim")
_, err = PKCS5or7Trimming([]byte{0x00, 0x05})

View File

@@ -84,7 +84,7 @@ func (s *GraphiteSerializer) Init() error {
}
func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]byte, error) {
-out := []byte{}
+var out []byte
// Convert UnixNano to Unix timestamps
timestamp := metric.Time().UnixNano() / 1000000000

View File

@@ -70,7 +70,7 @@ func (s *Serializer) Serialize(metric telegraf.Metric) ([]byte, error) {
serialized, err := json.Marshal(obj)
if err != nil {
-return []byte{}, err
+return nil, err
}
serialized = append(serialized, '\n')
@@ -101,7 +101,7 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
serialized, err := json.Marshal(obj)
if err != nil {
-return []byte{}, err
+return nil, err
}
serialized = append(serialized, '\n')
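
Returning `nil` with a non-nil error is the conventional Go shape: callers must check the error before touching the payload, so allocating `[]byte{}` for the failure path buys nothing. A minimal sketch of the same serialize-and-append flow:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func serialize(v any) ([]byte, error) {
	out, err := json.Marshal(v)
	if err != nil {
		// The zero value accompanies the error; callers are expected
		// to check err before using the slice.
		return nil, err
	}
	return append(out, '\n'), nil
}

func main() {
	b, err := serialize(map[string]int{"answer": 42})
	fmt.Printf("%q %v\n", b, err) // "{\"answer\":42}\n" <nil>

	_, err = serialize(make(chan int)) // channels cannot be marshaled
	fmt.Println(err != nil)            // true
}
```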

View File

@@ -49,7 +49,7 @@ func TestCollectionExpire(t *testing.T) {
Type: dto.MetricType_UNTYPED.Enum(),
Metric: []*dto.Metric{
{
-Label: []*dto.LabelPair{},
+Label: make([]*dto.LabelPair, 0),
Untyped: &dto.Untyped{Value: proto.Float64(42.0)},
},
},
@@ -91,7 +91,7 @@ func TestCollectionExpire(t *testing.T) {
Type: dto.MetricType_UNTYPED.Enum(),
Metric: []*dto.Metric{
{
-Label: []*dto.LabelPair{},
+Label: make([]*dto.LabelPair, 0),
Untyped: &dto.Untyped{Value: proto.Float64(43.0)},
},
},
@@ -132,7 +132,7 @@ func TestCollectionExpire(t *testing.T) {
Type: dto.MetricType_UNTYPED.Enum(),
Metric: []*dto.Metric{
{
-Label: []*dto.LabelPair{},
+Label: make([]*dto.LabelPair, 0),
Untyped: &dto.Untyped{Value: proto.Float64(42.0)},
},
},
@@ -156,7 +156,7 @@ func TestCollectionExpire(t *testing.T) {
addtime: time.Unix(0, 0),
},
},
-expected: []*dto.MetricFamily{},
+expected: make([]*dto.MetricFamily, 0),
},
{
name: "expired one metric in metric family",
@@ -192,7 +192,7 @@ func TestCollectionExpire(t *testing.T) {
Type: dto.MetricType_UNTYPED.Enum(),
Metric: []*dto.Metric{
{
-Label: []*dto.LabelPair{},
+Label: make([]*dto.LabelPair, 0),
Untyped: &dto.Untyped{Value: proto.Float64(42.0)},
},
},
@@ -282,7 +282,7 @@ func TestCollectionExpire(t *testing.T) {
Type: dto.MetricType_HISTOGRAM.Enum(),
Metric: []*dto.Metric{
{
-Label: []*dto.LabelPair{},
+Label: make([]*dto.LabelPair, 0),
Histogram: &dto.Histogram{
SampleCount: proto.Uint64(4),
SampleSum: proto.Float64(20.0),
@@ -343,7 +343,7 @@ func TestCollectionExpire(t *testing.T) {
addtime: time.Unix(0, 0),
},
},
-expected: []*dto.MetricFamily{},
+expected: make([]*dto.MetricFamily, 0),
},
{
name: "histogram does not expire because of addtime from bucket",
@@ -393,7 +393,7 @@ func TestCollectionExpire(t *testing.T) {
Type: dto.MetricType_HISTOGRAM.Enum(),
Metric: []*dto.Metric{
{
-Label: []*dto.LabelPair{},
+Label: make([]*dto.LabelPair, 0),
Histogram: &dto.Histogram{
SampleCount: proto.Uint64(2),
SampleSum: proto.Float64(10.0),
@@ -474,7 +474,7 @@ func TestCollectionExpire(t *testing.T) {
Type: dto.MetricType_SUMMARY.Enum(),
Metric: []*dto.Metric{
{
-Label: []*dto.LabelPair{},
+Label: make([]*dto.LabelPair, 0),
Summary: &dto.Summary{
SampleCount: proto.Uint64(2),
SampleSum: proto.Float64(2.0),
@@ -520,7 +520,7 @@ func TestCollectionExpire(t *testing.T) {
addtime: time.Unix(0, 0),
},
},
-expected: []*dto.MetricFamily{},
+expected: make([]*dto.MetricFamily, 0),
},
{
name: "summary does not expire because of quantile addtime",
@@ -570,7 +570,7 @@ func TestCollectionExpire(t *testing.T) {
Type: dto.MetricType_SUMMARY.Enum(),
Metric: []*dto.Metric{
{
-Label: []*dto.LabelPair{},
+Label: make([]*dto.LabelPair, 0),
Summary: &dto.Summary{
SampleSum: proto.Float64(1),
SampleCount: proto.Uint64(1),
@@ -614,7 +614,7 @@ func TestCollectionExpire(t *testing.T) {
Type: dto.MetricType_UNTYPED.Enum(),
Metric: []*dto.Metric{
{
-Label: []*dto.LabelPair{},
+Label: make([]*dto.LabelPair, 0),
Untyped: &dto.Untyped{Value: proto.Float64(42.0)},
},
},
@@ -728,7 +728,7 @@ func TestExportTimestamps(t *testing.T) {
Type: dto.MetricType_HISTOGRAM.Enum(),
Metric: []*dto.Metric{
{
-Label: []*dto.LabelPair{},
+Label: make([]*dto.LabelPair, 0),
TimestampMs: proto.Int64(time.Unix(20, 0).UnixNano() / int64(time.Millisecond)),
Histogram: &dto.Histogram{
SampleCount: proto.Uint64(4),
@@ -810,7 +810,7 @@ func TestExportTimestamps(t *testing.T) {
Type: dto.MetricType_SUMMARY.Enum(),
Metric: []*dto.Metric{
{
-Label: []*dto.LabelPair{},
+Label: make([]*dto.LabelPair, 0),
TimestampMs: proto.Int64(time.Unix(20, 0).UnixNano() / int64(time.Millisecond)),
Summary: &dto.Summary{
SampleCount: proto.Uint64(2),