From 960a1f7b1483eb801ec3f40cf66296ca3162f2a2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?=
Date: Mon, 21 Nov 2022 21:53:55 +0100
Subject: [PATCH] chore: Fix linter findings for prealloc (part3) (#12246)

---
 .golangci.yml                                 |  5 +++
 plugins/inputs/hddtemp/go-hddtemp/hddtemp.go  |  5 +--
 plugins/inputs/modbus/modbus_test.go          |  3 +-
 plugins/inputs/pgbouncer/pgbouncer.go         |  2 +-
 plugins/inputs/postgresql/postgresql.go       |  2 +-
 .../postgresql_extensible.go                  |  2 +-
 plugins/inputs/sqlserver/sqlserver.go         |  3 +-
 .../cmd/thrift_serialize/thrift_serialize.go  | 15 ++++----
 plugins/outputs/timestream/timestream.go      | 11 +++---
 plugins/outputs/timestream/timestream_test.go | 34 +++++++------------
 plugins/parsers/influx/machine_test.go        |  4 +--
 plugins/parsers/json_v2/parser.go             |  3 +-
 plugins/parsers/prometheus/parser.go          |  4 +--
 plugins/serializers/graphite/graphite.go      |  5 ++-
 plugins/serializers/nowmetric/nowmetric.go    |  3 +-
 testutil/container.go                         |  3 +-
 16 files changed, 46 insertions(+), 58 deletions(-)

diff --git a/.golangci.yml b/.golangci.yml
index 13ef97f19..5f3d0af0c 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -19,6 +19,7 @@ linters:
     - lll
     - nakedret
     - nilerr
+    - prealloc
     - predeclared
     - revive
     - sqlclosecheck
@@ -77,6 +78,10 @@ linters-settings:
     # Tab width in spaces.
    # Default: 1
     tab-width: 4
+  prealloc:
+    # Report pre-allocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them.
+    # Default: true
+    simple: false
   revive:
     rules:
       - name: argument-limit
diff --git a/plugins/inputs/hddtemp/go-hddtemp/hddtemp.go b/plugins/inputs/hddtemp/go-hddtemp/hddtemp.go
index 2d0e67fee..8f0fdee8a 100644
--- a/plugins/inputs/hddtemp/go-hddtemp/hddtemp.go
+++ b/plugins/inputs/hddtemp/go-hddtemp/hddtemp.go
@@ -28,7 +28,6 @@ func (h *hddtemp) Fetch(address string) ([]Disk, error) {
 		err    error
 		conn   net.Conn
 		buffer bytes.Buffer
-		disks  []Disk
 	)
 
 	if conn, err = net.Dial("tcp", address); err != nil {
@@ -41,7 +40,9 @@ func (h *hddtemp) Fetch(address string) ([]Disk, error) {
 
 	fields := strings.Split(buffer.String(), "|")
 
-	for index := 0; index < len(fields)/5; index++ {
+	size := len(fields) / 5
+	disks := make([]Disk, 0, size)
+	for index := 0; index < size; index++ {
 		status := ""
 		offset := index * 5
 		device := fields[offset+1]
diff --git a/plugins/inputs/modbus/modbus_test.go b/plugins/inputs/modbus/modbus_test.go
index 73f367670..1f1805074 100644
--- a/plugins/inputs/modbus/modbus_test.go
+++ b/plugins/inputs/modbus/modbus_test.go
@@ -3473,8 +3473,7 @@ func generateRequestDefinitions(ranges []rangeDefinition) []requestFieldDefiniti
 }
 
 func generateExpectation(defs []requestExpectation) []request {
-	var requests []request
-
+	requests := make([]request, 0, len(defs))
 	for _, def := range defs {
 		r := def.req
 		r.fields = make([]field, 0)
diff --git a/plugins/inputs/pgbouncer/pgbouncer.go b/plugins/inputs/pgbouncer/pgbouncer.go
index d657c657c..4e8d31c21 100644
--- a/plugins/inputs/pgbouncer/pgbouncer.go
+++ b/plugins/inputs/pgbouncer/pgbouncer.go
@@ -138,7 +138,6 @@ type scanner interface {
 
 func (p *PgBouncer) accRow(row scanner, columns []string) (map[string]string,
 	map[string]*interface{}, error) {
-	var columnVars []interface{}
 	var dbname bytes.Buffer
 
 	// this is where we'll store the column name with its *interface{}
@@ -148,6 +147,7 @@ func (p *PgBouncer) accRow(row scanner, columns []string) (map[string]string,
 		columnMap[column] = new(interface{})
 	}
 
+	columnVars := make([]interface{}, 0, len(columnMap))
 	// populate the array of interface{} with the pointers in the right order
 	for i := 0; i < len(columnMap); i++ {
 		columnVars = append(columnVars, columnMap[columns[i]])
diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go
index 872ca492e..835d69d50 100644
--- a/plugins/inputs/postgresql/postgresql.go
+++ b/plugins/inputs/postgresql/postgresql.go
@@ -105,7 +105,6 @@ type scanner interface {
 }
 
 func (p *Postgresql) accRow(row scanner, acc telegraf.Accumulator, columns []string) error {
-	var columnVars []interface{}
 	var dbname bytes.Buffer
 
 	// this is where we'll store the column name with its *interface{}
@@ -115,6 +114,7 @@ func (p *Postgresql) accRow(row scanner, acc telegraf.Accumulator, columns []str
 		columnMap[column] = new(interface{})
 	}
 
+	columnVars := make([]interface{}, 0, len(columnMap))
 	// populate the array of interface{} with the pointers in the right order
 	for i := 0; i < len(columnMap); i++ {
 		columnVars = append(columnVars, columnMap[columns[i]])
diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go
index 686669f07..0c8c08bfb 100644
--- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go
+++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go
@@ -170,7 +170,6 @@ type scanner interface {
 func (p *Postgresql) accRow(measName string, row scanner, acc telegraf.Accumulator, columns []string) error {
 	var (
 		err        error
-		columnVars []interface{}
 		dbname     bytes.Buffer
 		tagAddress string
 		timestamp  time.Time
@@ -183,6 +182,7 @@ func (p *Postgresql) accRow(measName string, row scanner, acc telegraf.Accumulat
 		columnMap[column] = new(interface{})
 	}
 
+	columnVars := make([]interface{}, 0, len(columnMap))
 	// populate the array of interface{} with the pointers in the right order
 	for i := 0; i < len(columnMap); i++ {
 		columnVars = append(columnVars, columnMap[columns[i]])
diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go
index 36c726f55..96e3c3ac0 100644
--- a/plugins/inputs/sqlserver/sqlserver.go
+++ b/plugins/inputs/sqlserver/sqlserver.go
@@ -325,7 +325,6 @@ func (s *SQLServer) gatherServer(pool *sql.DB, query Query, acc telegraf.Accumul
 }
 
 func (s *SQLServer) accRow(query Query, acc telegraf.Accumulator, row scanner) error {
-	var columnVars []interface{}
 	var fields = make(map[string]interface{})
 
 	// store the column name with its *interface{}
@@ -333,6 +332,8 @@ func (s *SQLServer) accRow(query Query, acc telegraf.Accumulator, row scanner) e
 	for _, column := range query.OrderedColumns {
 		columnMap[column] = new(interface{})
 	}
+
+	columnVars := make([]interface{}, 0, len(columnMap))
 	// populate the array of interface{} with the pointers in the right order
 	for i := 0; i < len(columnMap); i++ {
 		columnVars = append(columnVars, columnMap[query.OrderedColumns[i]])
diff --git a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go
index f56ade624..b8a0ff344 100644
--- a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go
+++ b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go
@@ -31,6 +31,7 @@ import (
 	"os"
 
 	"github.com/apache/thrift/lib/go/thrift"
+
 	"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore"
 )
 
@@ -122,31 +123,27 @@ func jsonToZipkinThrift(jsonRaw []byte) ([]byte, error) {
 func thriftToJSONSpans(thriftData []byte) ([]byte, error) {
 	buffer := thrift.NewTMemoryBuffer()
 	if _, err := buffer.Write(thriftData); err != nil {
-		err = fmt.Errorf("error in buffer write: %v", err)
-		return nil, err
+		return nil, fmt.Errorf("error in buffer write: %w", err)
 	}
 
 	transport := thrift.NewTBinaryProtocolConf(buffer, nil)
 	_, size, err := transport.ReadListBegin(context.Background())
 	if err != nil {
-		err = fmt.Errorf("error in ReadListBegin: %v", err)
-		return nil, err
+		return nil, fmt.Errorf("error in ReadListBegin: %w", err)
 	}
 
-	var spans []*zipkincore.Span
+	spans := make([]*zipkincore.Span, 0, size)
 	for i := 0; i < size; i++ {
 		zs := &zipkincore.Span{}
 		if err = zs.Read(context.Background(), transport); err != nil {
-			err = fmt.Errorf("Error reading into zipkin struct: %v", err)
-			return nil, err
+			return nil, fmt.Errorf("error reading into zipkin struct: %w", err)
 		}
 		spans = append(spans, zs)
 	}
 
 	err = transport.ReadListEnd(context.Background())
 	if err != nil {
-		err = fmt.Errorf("error ending thrift read: %v", err)
-		return nil, err
+		return nil, fmt.Errorf("error ending thrift read: %w", err)
 	}
 
 	out, _ := json.MarshalIndent(spans, "", " ")
diff --git a/plugins/outputs/timestream/timestream.go b/plugins/outputs/timestream/timestream.go
index 1b8ababc0..4f0ab83b0 100644
--- a/plugins/outputs/timestream/timestream.go
+++ b/plugins/outputs/timestream/timestream.go
@@ -357,7 +357,7 @@ func (t *Timestream) createTable(tableName *string) error {
 			MemoryStoreRetentionPeriodInHours: t.CreateTableMemoryStoreRetentionPeriodInHours,
 		},
 	}
-	var tags []types.Tag
+	tags := make([]types.Tag, 0, len(t.CreateTableTags))
 	for key, val := range t.CreateTableTags {
 		tags = append(tags, types.Tag{
 			Key: aws.String(key),
@@ -434,7 +434,7 @@ func (t *Timestream) TransformMetrics(metrics []telegraf.Metric) []*timestreamwr
 }
 
 func (t *Timestream) buildDimensions(point telegraf.Metric) []types.Dimension {
-	var dimensions []types.Dimension
+	dimensions := make([]types.Dimension, 0, len(point.Tags()))
 	for tagName, tagValue := range point.Tags() {
 		dimension := types.Dimension{
 			Name: aws.String(tagName),
@@ -464,10 +464,8 @@ func (t *Timestream) buildWriteRecords(point telegraf.Metric) []types.Record {
 }
 
 func (t *Timestream) buildSingleWriteRecords(point telegraf.Metric) []types.Record {
-	var records []types.Record
-
 	dimensions := t.buildDimensions(point)
-
+	records := make([]types.Record, 0, len(point.Fields()))
 	for fieldName, fieldValue := range point.Fields() {
 		stringFieldValue, stringFieldValueType, ok := convertValue(fieldValue)
 		if !ok {
@@ -501,8 +499,7 @@ func (t *Timestream) buildMultiMeasureWriteRecords(point telegraf.Metric) []type
 		multiMeasureName = point.Name()
 	}
 
-	var multiMeasures []types.MeasureValue
-
+	multiMeasures := make([]types.MeasureValue, 0, len(point.Fields()))
 	for fieldName, fieldValue := range point.Fields() {
 		stringFieldValue, stringFieldValueType, ok := convertValue(fieldValue)
 		if !ok {
diff --git a/plugins/outputs/timestream/timestream_test.go b/plugins/outputs/timestream/timestream_test.go
index 29fedc0f3..28ea7ee45 100644
--- a/plugins/outputs/timestream/timestream_test.go
+++ b/plugins/outputs/timestream/timestream_test.go
@@ -231,8 +231,7 @@ func TestWriteMultiMeasuresSingleTableMode(t *testing.T) {
 
 	localTime, _ := strconv.Atoi(time1Epoch)
 
-	var inputs []telegraf.Metric
-
+	inputs := make([]telegraf.Metric, 0, recordCount+1)
 	for i := 1; i <= recordCount+1; i++ {
 		localTime++
 
@@ -289,8 +288,7 @@ func TestWriteMultiMeasuresMultiTableMode(t *testing.T) {
 
 	localTime, _ := strconv.Atoi(time1Epoch)
 
-	var inputs []telegraf.Metric
-
+	inputs := make([]telegraf.Metric, 0, recordCount)
 	for i := 1; i <= recordCount; i++ {
 		localTime++
 
@@ -584,8 +582,7 @@ func TestWriteWhenRequestsGreaterThanMaxWriteGoRoutinesCount(t *testing.T) {
 
 	require.NoError(t, plugin.Connect())
 
-	var inputs []telegraf.Metric
-
+	inputs := make([]telegraf.Metric, 0, totalRecords)
 	for i := 1; i <= totalRecords; i++ {
 		fieldName := "value_supported" + strconv.Itoa(i)
 		inputs = append(inputs, testutil.MustMetric(
@@ -624,8 +621,7 @@ func TestWriteWhenRequestsLesserThanMaxWriteGoRoutinesCount(t *testing.T) {
 	}
 	require.NoError(t, plugin.Connect())
 
-	var inputs []telegraf.Metric
-
+	inputs := make([]telegraf.Metric, 0, totalRecords)
 	for i := 1; i <= totalRecords; i++ {
 		fieldName := "value_supported" + strconv.Itoa(i)
 		inputs = append(inputs, testutil.MustMetric(
@@ -724,7 +720,7 @@ func TestTransformMetricsSkipEmptyMetric(t *testing.T) {
 func TestTransformMetricsRequestsAboveLimitAreSplit(t *testing.T) {
 	const maxRecordsInWriteRecordsCall = 100
 
-	var inputs []telegraf.Metric
+	inputs := make([]telegraf.Metric, 0, maxRecordsInWriteRecordsCall+1)
 	for i := 1; i <= maxRecordsInWriteRecordsCall+1; i++ {
 		fieldName := "value_supported" + strconv.Itoa(i)
 		inputs = append(inputs, testutil.MustMetric(
@@ -781,8 +777,7 @@ func TestTransformMetricsRequestsAboveLimitAreSplitSingleTable(t *testing.T) {
 
 	localTime, _ := strconv.Atoi(time1Epoch)
 
-	var inputs []telegraf.Metric
-
+	inputs := make([]telegraf.Metric, 0, maxRecordsInWriteRecordsCall+1)
 	for i := 1; i <= maxRecordsInWriteRecordsCall+1; i++ {
 		localTime++
 
@@ -1275,7 +1270,7 @@ type SimpleInput struct {
 }
 
 func buildExpectedInput(i SimpleInput) *timestreamwrite.WriteRecordsInput {
-	var tsDimensions []types.Dimension
+	tsDimensions := make([]types.Dimension, 0, len(i.dimensions))
 	for k, v := range i.dimensions {
 		tsDimensions = append(tsDimensions, types.Dimension{
 			Name: aws.String(k),
@@ -1283,7 +1278,7 @@ func buildExpectedInput(i SimpleInput) *timestreamwrite.WriteRecordsInput {
 		})
 	}
 
-	var tsRecords []types.Record
+	tsRecords := make([]types.Record, 0, len(i.measureValues))
 	for k, v := range i.measureValues {
 		tsRecords = append(tsRecords, types.Record{
 			MeasureName: aws.String(k),
@@ -1316,10 +1311,7 @@ func buildRecords(inputs []SimpleInput) []types.Record {
 }
 
 func buildRecord(input SimpleInput) []types.Record {
-	var tsRecords []types.Record
-
-	var tsDimensions []types.Dimension
-
+	tsDimensions := make([]types.Dimension, 0, len(input.dimensions))
 	for k, v := range input.dimensions {
 		tsDimensions = append(tsDimensions, types.Dimension{
 			Name: aws.String(k),
@@ -1327,6 +1319,7 @@ func buildRecord(input SimpleInput) []types.Record {
 		})
 	}
 
+	tsRecords := make([]types.Record, 0, len(input.measureValues))
 	for k, v := range input.measureValues {
 		tsRecords = append(tsRecords, types.Record{
 			MeasureName: aws.String(k),
@@ -1342,11 +1335,9 @@ func buildRecord(input SimpleInput) []types.Record {
 }
 
 func buildMultiRecords(inputs []SimpleInput, multiMeasureName string, measureType types.MeasureValueType) []types.Record {
-	var tsRecords []types.Record
+	tsRecords := make([]types.Record, 0, len(inputs))
 	for _, input := range inputs {
-		var multiMeasures []types.MeasureValue
-		var tsDimensions []types.Dimension
-
+		tsDimensions := make([]types.Dimension, 0, len(input.dimensions))
 		for k, v := range input.dimensions {
 			tsDimensions = append(tsDimensions, types.Dimension{
 				Name: aws.String(k),
@@ -1354,6 +1345,7 @@ func buildMultiRecords(inputs []SimpleInput, multiMeasureName string, measureTyp
 			})
 		}
 
+		multiMeasures := make([]types.MeasureValue, 0, len(input.measureValues))
 		for k, v := range input.measureValues {
 			multiMeasures = append(multiMeasures, types.MeasureValue{
 				Name: aws.String(k),
diff --git a/plugins/parsers/influx/machine_test.go b/plugins/parsers/influx/machine_test.go
index 861fbbc69..d8caff2f0 100644
--- a/plugins/parsers/influx/machine_test.go
+++ b/plugins/parsers/influx/machine_test.go
@@ -2158,7 +2158,7 @@ func TestStreamMachine(t *testing.T) {
 		err     error
 	}
 
-	var tc []testcase
+	tc := make([]testcase, 0, len(tests))
 	for _, tt := range tests {
 		tc = append(tc, testcase{
 			name: tt.name,
@@ -2197,7 +2197,7 @@ func TestStreamMachinePosition(t *testing.T) {
 		column int
 	}
 
-	var tc []testcase
+	tc := make([]testcase, 0, len(positionTests))
 	for _, tt := range positionTests {
 		tc = append(tc, testcase{
 			name: tt.name,
diff --git a/plugins/parsers/json_v2/parser.go b/plugins/parsers/json_v2/parser.go
index c621b885b..a37ba9332 100644
--- a/plugins/parsers/json_v2/parser.go
+++ b/plugins/parsers/json_v2/parser.go
@@ -161,8 +161,7 @@ func (p *Parser) processMetric(input []byte, data []json_v2.DataSet, tag bool, t
 	}
 
 	p.iterateObjects = false
-	var metrics [][]telegraf.Metric
-
+	metrics := make([][]telegraf.Metric, 0, len(data))
 	for _, c := range data {
 		if c.Path == "" {
 			return nil, fmt.Errorf("GJSON path is required")
diff --git a/plugins/parsers/prometheus/parser.go b/plugins/parsers/prometheus/parser.go
index c4b03d8e9..bb586f3ea 100644
--- a/plugins/parsers/prometheus/parser.go
+++ b/plugins/parsers/prometheus/parser.go
@@ -120,7 +120,7 @@ func (p *Parser) SetDefaultTags(tags map[string]string) {
 
 // Get Quantiles for summary metric & Buckets for histogram
 func makeQuantiles(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, t time.Time) []telegraf.Metric {
-	var metrics []telegraf.Metric
+	metrics := make([]telegraf.Metric, 0, len(m.GetSummary().Quantile)+1)
 	fields := make(map[string]interface{})
 
 	fields[metricName+"_count"] = float64(m.GetSummary().GetSampleCount())
@@ -143,7 +143,7 @@ func makeQuantiles(m *dto.Metric, tags map[string]string, metricName string, met
 
 // Get Buckets from histogram metric
 func makeBuckets(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, t time.Time) []telegraf.Metric {
-	var metrics []telegraf.Metric
+	metrics := make([]telegraf.Metric, 0, len(m.GetHistogram().Bucket)+2)
 	fields := make(map[string]interface{})
 
 	fields[metricName+"_count"] = float64(m.GetHistogram().GetSampleCount())
diff --git a/plugins/serializers/graphite/graphite.go b/plugins/serializers/graphite/graphite.go
index 074b36e82..4679e09fb 100644
--- a/plugins/serializers/graphite/graphite.go
+++ b/plugins/serializers/graphite/graphite.go
@@ -205,9 +205,8 @@ func SerializeBucketName(
 }
 
 func InitGraphiteTemplates(templates []string) ([]*GraphiteTemplate, string, error) {
-	var graphiteTemplates []*GraphiteTemplate
 	defaultTemplate := ""
-
+	graphiteTemplates := make([]*GraphiteTemplate, 0, len(templates))
 	for i, t := range templates {
 		parts := strings.Fields(t)
 
@@ -299,7 +298,7 @@ func InsertField(bucket, fieldName string) string {
 }
 
 func buildTags(tags map[string]string) string {
-	var keys []string
+	keys := make([]string, 0, len(tags))
 	for k := range tags {
 		keys = append(keys, k)
 	}
diff --git a/plugins/serializers/nowmetric/nowmetric.go b/plugins/serializers/nowmetric/nowmetric.go
index 0462a79f1..b67c8c7f4 100644
--- a/plugins/serializers/nowmetric/nowmetric.go
+++ b/plugins/serializers/nowmetric/nowmetric.go
@@ -77,9 +77,8 @@ func (s *serializer) createObject(metric telegraf.Metric) ([]byte, error) {
 	** ci2metric_id: List of key-value pairs to identify the CI.
 	** source: Data source monitoring the metric type
 	*/
-	var allmetrics OIMetrics
+	var allmetrics OIMetrics //nolint:prealloc // Pre-allocating may change format of marshaled JSON
 	var oimetric OIMetric
-
 	oimetric.Source = "Telegraf"
 
 	// Process Tags to extract node & resource name info
diff --git a/testutil/container.go b/testutil/container.go
index 84cac6547..65390d2a1 100644
--- a/testutil/container.go
+++ b/testutil/container.go
@@ -42,8 +42,7 @@ type Container struct {
 func (c *Container) Start() error {
 	c.ctx = context.Background()
 
-	var containerMounts []testcontainers.ContainerMount
-
+	containerMounts := make([]testcontainers.ContainerMount, 0, len(c.BindMounts))
 	for k, v := range c.BindMounts {
 		containerMounts = append(containerMounts, testcontainers.BindMount(v, testcontainers.ContainerMountTarget(k)))
 	}
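
Every hunk above applies the same prealloc fix: a slice that was declared as "var xs []T" and then grown with append inside a loop of known size is now created with make([]T, 0, n), so the backing array is allocated once up front. Below is a minimal, stand-alone sketch of that before/after shape; the names (buildItems, items) are hypothetical and do not come from the patch.

package main

import "fmt"

// buildItems illustrates the prealloc pattern: the slice starts with
// length 0 but capacity len(inputs), so the appends in the loop never
// have to grow and copy the backing array.
func buildItems(inputs []int) []string {
	// Before (what the prealloc linter flags): var items []string
	items := make([]string, 0, len(inputs))
	for _, v := range inputs {
		items = append(items, fmt.Sprintf("item-%d", v))
	}
	return items
}

func main() {
	fmt.Println(buildItems([]int{1, 2, 3})) // prints [item-1 item-2 item-3]
}

The nowmetric change goes the other way: allmetrics stays a nil slice with a //nolint:prealloc comment, presumably because encoding/json marshals a nil slice as null but an empty pre-allocated slice as [], which would change the serializer's output.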