chore: Fix linter findings for prealloc (part3) (#12246)

Paweł Żak 2022-11-21 21:53:55 +01:00 committed by GitHub
parent e84df8983a
commit 960a1f7b14
16 changed files with 46 additions and 58 deletions

View File

@@ -19,6 +19,7 @@ linters:
     - lll
     - nakedret
     - nilerr
+    - prealloc
     - predeclared
     - revive
     - sqlclosecheck
@@ -77,6 +78,10 @@ linters-settings:
     # Tab width in spaces.
    # Default: 1
     tab-width: 4
+  prealloc:
+    # Report pre-allocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them.
+    # Default: true
+    simple: false
   revive:
     rules:
       - name: argument-limit
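
For context, `prealloc` flags append-only loops that build on a slice declared with `var`, because a slice growing from nil may reallocate and copy its backing array several times. A minimal sketch of the finding and the fix, with illustrative names that do not appear in this diff:

```go
package main

import "fmt"

// Before: `items` starts nil, so append may reallocate and copy
// the backing array repeatedly as the slice grows.
func squaresSlow(n int) []int {
	var items []int // prealloc would flag this declaration
	for i := 0; i < n; i++ {
		items = append(items, i*i)
	}
	return items
}

// After: the final length is known up front, so a single
// allocation with the right capacity suffices.
func squaresFast(n int) []int {
	items := make([]int, 0, n)
	for i := 0; i < n; i++ {
		items = append(items, i*i)
	}
	return items
}

func main() {
	fmt.Println(squaresSlow(5)) // [0 1 4 9 16]
	fmt.Println(squaresFast(5)) // [0 1 4 9 16]
}
```

Setting `simple: false` widens the check beyond simple loops to ones that also contain returns, breaks, continues, or gotos, which is why several hunks below touch loops with early exits.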

View File

@@ -28,7 +28,6 @@ func (h *hddtemp) Fetch(address string) ([]Disk, error) {
 		err    error
 		conn   net.Conn
 		buffer bytes.Buffer
-		disks  []Disk
 	)
 
 	if conn, err = net.Dial("tcp", address); err != nil {
@@ -41,7 +40,9 @@ func (h *hddtemp) Fetch(address string) ([]Disk, error) {
 	fields := strings.Split(buffer.String(), "|")
-	for index := 0; index < len(fields)/5; index++ {
+	size := len(fields) / 5
+	disks := make([]Disk, 0, size)
+	for index := 0; index < size; index++ {
 		status := ""
 		offset := index * 5
 		device := fields[offset+1]

View File

@@ -3473,8 +3473,7 @@ func generateRequestDefinitions(ranges []rangeDefinition) []requestFieldDefiniti
 }
 
 func generateExpectation(defs []requestExpectation) []request {
-	var requests []request
-
+	requests := make([]request, 0, len(defs))
 	for _, def := range defs {
 		r := def.req
 		r.fields = make([]field, 0)

View File

@@ -138,7 +138,6 @@ type scanner interface {
 func (p *PgBouncer) accRow(row scanner, columns []string) (map[string]string,
 	map[string]*interface{}, error) {
-	var columnVars []interface{}
 	var dbname bytes.Buffer
 
 	// this is where we'll store the column name with its *interface{}
@@ -148,6 +147,7 @@ func (p *PgBouncer) accRow(row scanner, columns []string) (map[string]string,
 		columnMap[column] = new(interface{})
 	}
 
+	columnVars := make([]interface{}, 0, len(columnMap))
 	// populate the array of interface{} with the pointers in the right order
 	for i := 0; i < len(columnMap); i++ {
 		columnVars = append(columnVars, columnMap[columns[i]])
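
A note on this hunk and the three that follow (the two PostgreSQL variants and SQL Server): the declaration moves rather than just changing form, because the capacity hint `len(columnMap)` only becomes meaningful after the map has been populated. A minimal standalone sketch of the shape, with illustrative column names:

```go
package main

import "fmt"

func main() {
	columns := []string{"db", "user", "cl_active"}

	// Build the map first; only then is len(columnMap) a usable capacity.
	columnMap := make(map[string]*interface{})
	for _, column := range columns {
		columnMap[column] = new(interface{})
	}

	// Pre-allocate with the now-known size, then fill in column order.
	columnVars := make([]interface{}, 0, len(columnMap))
	for i := 0; i < len(columnMap); i++ {
		columnVars = append(columnVars, columnMap[columns[i]])
	}
	fmt.Println(len(columnVars)) // 3
}
```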

View File

@@ -105,7 +105,6 @@ type scanner interface {
 }
 
 func (p *Postgresql) accRow(row scanner, acc telegraf.Accumulator, columns []string) error {
-	var columnVars []interface{}
 	var dbname bytes.Buffer
 
 	// this is where we'll store the column name with its *interface{}
@@ -115,6 +114,7 @@ func (p *Postgresql) accRow(row scanner, acc telegraf.Accumulator, columns []str
 		columnMap[column] = new(interface{})
 	}
 
+	columnVars := make([]interface{}, 0, len(columnMap))
 	// populate the array of interface{} with the pointers in the right order
 	for i := 0; i < len(columnMap); i++ {
 		columnVars = append(columnVars, columnMap[columns[i]])

View File

@@ -170,7 +170,6 @@ type scanner interface {
 func (p *Postgresql) accRow(measName string, row scanner, acc telegraf.Accumulator, columns []string) error {
 	var (
 		err        error
-		columnVars []interface{}
 		dbname     bytes.Buffer
 		tagAddress string
 		timestamp  time.Time
@@ -183,6 +182,7 @@ func (p *Postgresql) accRow(measName string, row scanner, acc telegraf.Accumulat
 		columnMap[column] = new(interface{})
 	}
 
+	columnVars := make([]interface{}, 0, len(columnMap))
 	// populate the array of interface{} with the pointers in the right order
 	for i := 0; i < len(columnMap); i++ {
 		columnVars = append(columnVars, columnMap[columns[i]])

View File

@@ -325,7 +325,6 @@ func (s *SQLServer) gatherServer(pool *sql.DB, query Query, acc telegraf.Accumul
 }
 
 func (s *SQLServer) accRow(query Query, acc telegraf.Accumulator, row scanner) error {
-	var columnVars []interface{}
 	var fields = make(map[string]interface{})
 
 	// store the column name with its *interface{}
@@ -333,6 +332,8 @@ func (s *SQLServer) accRow(query Query, acc telegraf.Accumulator, row scanner) e
 	for _, column := range query.OrderedColumns {
 		columnMap[column] = new(interface{})
 	}
+
+	columnVars := make([]interface{}, 0, len(columnMap))
 	// populate the array of interface{} with the pointers in the right order
 	for i := 0; i < len(columnMap); i++ {
 		columnVars = append(columnVars, columnMap[query.OrderedColumns[i]])

View File

@@ -31,6 +31,7 @@ import (
 	"os"
 
+	"github.com/apache/thrift/lib/go/thrift"
 	"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore"
 )
@@ -122,31 +123,27 @@ func jsonToZipkinThrift(jsonRaw []byte) ([]byte, error) {
 func thriftToJSONSpans(thriftData []byte) ([]byte, error) {
 	buffer := thrift.NewTMemoryBuffer()
 	if _, err := buffer.Write(thriftData); err != nil {
-		err = fmt.Errorf("error in buffer write: %v", err)
-		return nil, err
+		return nil, fmt.Errorf("error in buffer write: %w", err)
 	}
 
 	transport := thrift.NewTBinaryProtocolConf(buffer, nil)
 	_, size, err := transport.ReadListBegin(context.Background())
 	if err != nil {
-		err = fmt.Errorf("error in ReadListBegin: %v", err)
-		return nil, err
+		return nil, fmt.Errorf("error in ReadListBegin: %w", err)
 	}
 
-	var spans []*zipkincore.Span
+	spans := make([]*zipkincore.Span, 0, size)
 	for i := 0; i < size; i++ {
 		zs := &zipkincore.Span{}
 		if err = zs.Read(context.Background(), transport); err != nil {
-			err = fmt.Errorf("Error reading into zipkin struct: %v", err)
-			return nil, err
+			return nil, fmt.Errorf("error reading into zipkin struct: %w", err)
 		}
 		spans = append(spans, zs)
 	}
 
 	err = transport.ReadListEnd(context.Background())
 	if err != nil {
-		err = fmt.Errorf("error ending thrift read: %v", err)
-		return nil, err
+		return nil, fmt.Errorf("error ending thrift read: %w", err)
 	}
 
 	out, _ := json.MarshalIndent(spans, "", " ")
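
Besides the pre-allocation, this hunk folds each `err = fmt.Errorf(...); return nil, err` pair into a single return and switches the verb from `%v` to `%w`, so callers can still match the underlying error. A minimal sketch of the difference, not taken from the plugin:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

func wrapped() error {
	// %w keeps io.ErrUnexpectedEOF reachable through errors.Is/As;
	// %v would flatten it into a plain string.
	return fmt.Errorf("error in buffer write: %w", io.ErrUnexpectedEOF)
}

func main() {
	fmt.Println(errors.Is(wrapped(), io.ErrUnexpectedEOF)) // true
}
```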

View File

@@ -357,7 +357,7 @@ func (t *Timestream) createTable(tableName *string) error {
 			MemoryStoreRetentionPeriodInHours: t.CreateTableMemoryStoreRetentionPeriodInHours,
 		},
 	}
-	var tags []types.Tag
+	tags := make([]types.Tag, 0, len(t.CreateTableTags))
 	for key, val := range t.CreateTableTags {
 		tags = append(tags, types.Tag{
 			Key:   aws.String(key),
@@ -434,7 +434,7 @@ func (t *Timestream) TransformMetrics(metrics []telegraf.Metric) []*timestreamwr
 }
 
 func (t *Timestream) buildDimensions(point telegraf.Metric) []types.Dimension {
-	var dimensions []types.Dimension
+	dimensions := make([]types.Dimension, 0, len(point.Tags()))
 	for tagName, tagValue := range point.Tags() {
 		dimension := types.Dimension{
 			Name:  aws.String(tagName),
@@ -464,10 +464,8 @@ func (t *Timestream) buildWriteRecords(point telegraf.Metric) []types.Record {
 }
 
 func (t *Timestream) buildSingleWriteRecords(point telegraf.Metric) []types.Record {
-	var records []types.Record
-
 	dimensions := t.buildDimensions(point)
+	records := make([]types.Record, 0, len(point.Fields()))
 	for fieldName, fieldValue := range point.Fields() {
 		stringFieldValue, stringFieldValueType, ok := convertValue(fieldValue)
 		if !ok {
@@ -501,8 +499,7 @@ func (t *Timestream) buildMultiMeasureWriteRecords(point telegraf.Metric) []type
 		multiMeasureName = point.Name()
 	}
 
-	var multiMeasures []types.MeasureValue
+	multiMeasures := make([]types.MeasureValue, 0, len(point.Fields()))
 	for fieldName, fieldValue := range point.Fields() {
 		stringFieldValue, stringFieldValueType, ok := convertValue(fieldValue)
 		if !ok {

View File

@@ -231,8 +231,7 @@ func TestWriteMultiMeasuresSingleTableMode(t *testing.T) {
 	localTime, _ := strconv.Atoi(time1Epoch)
 
-	var inputs []telegraf.Metric
-
+	inputs := make([]telegraf.Metric, 0, recordCount+1)
 	for i := 1; i <= recordCount+1; i++ {
 		localTime++
@@ -289,8 +288,7 @@ func TestWriteMultiMeasuresMultiTableMode(t *testing.T) {
 	localTime, _ := strconv.Atoi(time1Epoch)
 
-	var inputs []telegraf.Metric
-
+	inputs := make([]telegraf.Metric, 0, recordCount)
 	for i := 1; i <= recordCount; i++ {
 		localTime++
@@ -584,8 +582,7 @@ func TestWriteWhenRequestsGreaterThanMaxWriteGoRoutinesCount(t *testing.T) {
 	require.NoError(t, plugin.Connect())
 
-	var inputs []telegraf.Metric
-
+	inputs := make([]telegraf.Metric, 0, totalRecords)
 	for i := 1; i <= totalRecords; i++ {
 		fieldName := "value_supported" + strconv.Itoa(i)
 		inputs = append(inputs, testutil.MustMetric(
@@ -624,8 +621,7 @@ func TestWriteWhenRequestsLesserThanMaxWriteGoRoutinesCount(t *testing.T) {
 	}
 	require.NoError(t, plugin.Connect())
 
-	var inputs []telegraf.Metric
-
+	inputs := make([]telegraf.Metric, 0, totalRecords)
 	for i := 1; i <= totalRecords; i++ {
 		fieldName := "value_supported" + strconv.Itoa(i)
 		inputs = append(inputs, testutil.MustMetric(
@@ -724,7 +720,7 @@ func TestTransformMetricsSkipEmptyMetric(t *testing.T) {
 func TestTransformMetricsRequestsAboveLimitAreSplit(t *testing.T) {
 	const maxRecordsInWriteRecordsCall = 100
 
-	var inputs []telegraf.Metric
+	inputs := make([]telegraf.Metric, 0, maxRecordsInWriteRecordsCall+1)
 	for i := 1; i <= maxRecordsInWriteRecordsCall+1; i++ {
 		fieldName := "value_supported" + strconv.Itoa(i)
 		inputs = append(inputs, testutil.MustMetric(
@@ -781,8 +777,7 @@ func TestTransformMetricsRequestsAboveLimitAreSplitSingleTable(t *testing.T) {
 	localTime, _ := strconv.Atoi(time1Epoch)
 
-	var inputs []telegraf.Metric
-
+	inputs := make([]telegraf.Metric, 0, maxRecordsInWriteRecordsCall+1)
 	for i := 1; i <= maxRecordsInWriteRecordsCall+1; i++ {
 		localTime++
@@ -1275,7 +1270,7 @@ type SimpleInput struct {
 }
 
 func buildExpectedInput(i SimpleInput) *timestreamwrite.WriteRecordsInput {
-	var tsDimensions []types.Dimension
+	tsDimensions := make([]types.Dimension, 0, len(i.dimensions))
 	for k, v := range i.dimensions {
 		tsDimensions = append(tsDimensions, types.Dimension{
 			Name:  aws.String(k),
@@ -1283,7 +1278,7 @@ func buildExpectedInput(i SimpleInput) *timestreamwrite.WriteRecordsInput {
 		})
 	}
 
-	var tsRecords []types.Record
+	tsRecords := make([]types.Record, 0, len(i.measureValues))
 	for k, v := range i.measureValues {
 		tsRecords = append(tsRecords, types.Record{
 			MeasureName: aws.String(k),
@@ -1316,10 +1311,7 @@ func buildRecords(inputs []SimpleInput) []types.Record {
 }
 
 func buildRecord(input SimpleInput) []types.Record {
-	var tsRecords []types.Record
-	var tsDimensions []types.Dimension
-
+	tsDimensions := make([]types.Dimension, 0, len(input.dimensions))
 	for k, v := range input.dimensions {
 		tsDimensions = append(tsDimensions, types.Dimension{
 			Name:  aws.String(k),
@@ -1327,6 +1319,7 @@ func buildRecord(input SimpleInput) []types.Record {
 		})
 	}
 
+	tsRecords := make([]types.Record, 0, len(input.measureValues))
 	for k, v := range input.measureValues {
 		tsRecords = append(tsRecords, types.Record{
 			MeasureName: aws.String(k),
@@ -1342,11 +1335,9 @@ func buildMultiRecords(inputs []SimpleInput, multiMeasureName string, measureTyp
 }
 
 func buildMultiRecords(inputs []SimpleInput, multiMeasureName string, measureType types.MeasureValueType) []types.Record {
-	var tsRecords []types.Record
+	tsRecords := make([]types.Record, 0, len(inputs))
 	for _, input := range inputs {
-		var multiMeasures []types.MeasureValue
-		var tsDimensions []types.Dimension
+		tsDimensions := make([]types.Dimension, 0, len(input.dimensions))
 		for k, v := range input.dimensions {
 			tsDimensions = append(tsDimensions, types.Dimension{
 				Name:  aws.String(k),
@@ -1354,6 +1345,7 @@ func buildMultiRecords(inputs []SimpleInput, multiMeasureName string, measureTyp
 			})
 		}
 
+		multiMeasures := make([]types.MeasureValue, 0, len(input.measureValues))
 		for k, v := range input.measureValues {
 			multiMeasures = append(multiMeasures, types.MeasureValue{
 				Name:  aws.String(k),

View File

@@ -2158,7 +2158,7 @@ func TestStreamMachine(t *testing.T) {
 		err     error
 	}
 
-	var tc []testcase
+	tc := make([]testcase, 0, len(tests))
 	for _, tt := range tests {
 		tc = append(tc, testcase{
 			name:    tt.name,
@@ -2197,7 +2197,7 @@ func TestStreamMachinePosition(t *testing.T) {
 		column  int
 	}
 
-	var tc []testcase
+	tc := make([]testcase, 0, len(positionTests))
 	for _, tt := range positionTests {
 		tc = append(tc, testcase{
 			name:   tt.name,

View File

@@ -161,8 +161,7 @@ func (p *Parser) processMetric(input []byte, data []json_v2.DataSet, tag bool, t
 	}
 	p.iterateObjects = false
 
-	var metrics [][]telegraf.Metric
-
+	metrics := make([][]telegraf.Metric, 0, len(data))
 	for _, c := range data {
 		if c.Path == "" {
 			return nil, fmt.Errorf("GJSON path is required")

View File

@@ -120,7 +120,7 @@ func (p *Parser) SetDefaultTags(tags map[string]string) {
 // Get Quantiles for summary metric & Buckets for histogram
 func makeQuantiles(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, t time.Time) []telegraf.Metric {
-	var metrics []telegraf.Metric
+	metrics := make([]telegraf.Metric, 0, len(m.GetSummary().Quantile)+1)
 	fields := make(map[string]interface{})
 	fields[metricName+"_count"] = float64(m.GetSummary().GetSampleCount())
@@ -143,7 +143,7 @@
 
 // Get Buckets from histogram metric
 func makeBuckets(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, t time.Time) []telegraf.Metric {
-	var metrics []telegraf.Metric
+	metrics := make([]telegraf.Metric, 0, len(m.GetHistogram().Bucket)+2)
 	fields := make(map[string]interface{})
 	fields[metricName+"_count"] = float64(m.GetHistogram().GetSampleCount())

View File

@@ -205,9 +205,8 @@ func SerializeBucketName(
 }
 
 func InitGraphiteTemplates(templates []string) ([]*GraphiteTemplate, string, error) {
-	var graphiteTemplates []*GraphiteTemplate
 	defaultTemplate := ""
 
+	graphiteTemplates := make([]*GraphiteTemplate, 0, len(templates))
 	for i, t := range templates {
 		parts := strings.Fields(t)
@@ -299,7 +298,7 @@ func InsertField(bucket, fieldName string) string {
 }
 
 func buildTags(tags map[string]string) string {
-	var keys []string
+	keys := make([]string, 0, len(tags))
 	for k := range tags {
 		keys = append(keys, k)
 	}
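
`buildTags` uses the collect-then-sort idiom for deterministic map iteration, and since `len(tags)` is known up front, the keys slice can be sized exactly. A runnable sketch of the idiom only; the serializer's actual output format may differ:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

func buildTags(tags map[string]string) string {
	// Capacity is known from the map, so append never reallocates.
	keys := make([]string, 0, len(tags))
	for k := range tags {
		keys = append(keys, k)
	}
	sort.Strings(keys) // deterministic order regardless of map iteration

	parts := make([]string, 0, len(keys))
	for _, k := range keys {
		parts = append(parts, tags[k])
	}
	return strings.Join(parts, ".")
}

func main() {
	fmt.Println(buildTags(map[string]string{"host": "a", "dc": "b"})) // b.a
}
```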

View File

@@ -77,9 +77,8 @@ func (s *serializer) createObject(metric telegraf.Metric) ([]byte, error) {
 	** ci2metric_id: List of key-value pairs to identify the CI.
 	** source: Data source monitoring the metric type
 	*/
-	var allmetrics OIMetrics
+	var allmetrics OIMetrics //nolint:prealloc // Pre-allocating may change format of marshaled JSON
 	var oimetric OIMetric
 
 	oimetric.Source = "Telegraf"
 
 	// Process Tags to extract node & resource name info
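
The `nolint` directive here marks the one place in this change where pre-allocation is deliberately skipped: a nil slice and an empty pre-allocated slice marshal to different JSON, so the fix could alter the serializer's output. A minimal sketch of that difference, assuming a slice-backed type like `OIMetrics`:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var nilSlice []int              // analogous to `var allmetrics OIMetrics`
	emptySlice := make([]int, 0, 4) // what a prealloc fix would produce

	a, _ := json.Marshal(nilSlice)
	b, _ := json.Marshal(emptySlice)
	fmt.Println(string(a)) // null
	fmt.Println(string(b)) // []
}
```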

View File

@@ -42,8 +42,7 @@ type Container struct {
 
 func (c *Container) Start() error {
 	c.ctx = context.Background()
 
-	var containerMounts []testcontainers.ContainerMount
-
+	containerMounts := make([]testcontainers.ContainerMount, 0, len(c.BindMounts))
 	for k, v := range c.BindMounts {
 		containerMounts = append(containerMounts, testcontainers.BindMount(v, testcontainers.ContainerMountTarget(k)))
 	}