chore(tools): Bump golangci-lint from v1.53.2 to v1.54.2 (#13838)

This commit is contained in:
Sven Rebhan 2023-08-29 15:07:41 +02:00 committed by GitHub
parent 8dbc177d3f
commit 1d24efe55c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 48 additions and 56 deletions

View File

@@ -62,7 +62,7 @@ commands:
- run: 'sh ./scripts/installgo_windows.sh' - run: 'sh ./scripts/installgo_windows.sh'
- run: choco install mingw - run: choco install mingw
- run: go env - run: go env
- run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.53.2 - run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.54.2
- when: - when:
condition: condition:
equal: [ linux, << parameters.os >> ] equal: [ linux, << parameters.os >> ]

View File

@@ -174,7 +174,7 @@ vet:
.PHONY: lint-install .PHONY: lint-install
lint-install: lint-install:
@echo "Installing golangci-lint" @echo "Installing golangci-lint"
go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.53.2 go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.54.2
@echo "Installing markdownlint" @echo "Installing markdownlint"
npm install -g markdownlint-cli npm install -g markdownlint-cli

View File

@@ -150,21 +150,23 @@ func (s *AliyunCMS) Init() error {
} }
//check metrics dimensions consistency //check metrics dimensions consistency
for _, metric := range s.Metrics { for i := range s.Metrics {
if metric.Dimensions != "" { metric := s.Metrics[i]
metric.dimensionsUdObj = map[string]string{} if metric.Dimensions == "" {
metric.dimensionsUdArr = []map[string]string{} continue
}
metric.dimensionsUdObj = map[string]string{}
metric.dimensionsUdArr = []map[string]string{}
// first try to unmarshal as an object // first try to unmarshal as an object
err := json.Unmarshal([]byte(metric.Dimensions), &metric.dimensionsUdObj) if err := json.Unmarshal([]byte(metric.Dimensions), &metric.dimensionsUdObj); err == nil {
if err != nil { // We were successful, so stop here
// then try to unmarshal as an array continue
err := json.Unmarshal([]byte(metric.Dimensions), &metric.dimensionsUdArr) }
if err != nil { // then try to unmarshal as an array
return fmt.Errorf("cannot parse dimensions (neither obj, nor array) %q: %w", metric.Dimensions, err) if err := json.Unmarshal([]byte(metric.Dimensions), &metric.dimensionsUdArr); err != nil {
} return fmt.Errorf("cannot parse dimensions (neither obj, nor array) %q: %w", metric.Dimensions, err)
}
} }
} }

View File

@@ -94,7 +94,7 @@ func (cb *Couchbase) gatherServer(acc telegraf.Accumulator, addr string) error {
fields := cb.basicBucketStats(bucket.BasicStats) fields := cb.basicBucketStats(bucket.BasicStats)
tags := map[string]string{"cluster": cluster, "bucket": name} tags := map[string]string{"cluster": cluster, "bucket": name}
err := cb.gatherDetailedBucketStats(addr, name, nil, fields) err := cb.gatherDetailedBucketStats(addr, name, "", fields)
if err != nil { if err != nil {
return err return err
} }
@@ -107,7 +107,7 @@ func (cb *Couchbase) gatherServer(acc telegraf.Accumulator, addr string) error {
fields := cb.basicBucketStats(bucket.BasicStats) fields := cb.basicBucketStats(bucket.BasicStats)
tags := map[string]string{"cluster": cluster, "bucket": name, "hostname": node.Hostname} tags := map[string]string{"cluster": cluster, "bucket": name, "hostname": node.Hostname}
err := cb.gatherDetailedBucketStats(addr, name, &node.Hostname, fields) err := cb.gatherDetailedBucketStats(addr, name, node.Hostname, fields)
if err != nil { if err != nil {
return err return err
} }
@@ -133,7 +133,7 @@ func (cb *Couchbase) basicBucketStats(basicStats map[string]interface{}) map[str
return fields return fields
} }
func (cb *Couchbase) gatherDetailedBucketStats(server, bucket string, nodeHostname *string, fields map[string]interface{}) error { func (cb *Couchbase) gatherDetailedBucketStats(server, bucket string, nodeHostname string, fields map[string]interface{}) error {
extendedBucketStats := &BucketStats{} extendedBucketStats := &BucketStats{}
err := cb.queryDetailedBucketStats(server, bucket, nodeHostname, extendedBucketStats) err := cb.queryDetailedBucketStats(server, bucket, nodeHostname, extendedBucketStats)
if err != nil { if err != nil {
@@ -374,10 +374,10 @@ func (cb *Couchbase) addBucketFieldChecked(fields map[string]interface{}, fieldK
cb.addBucketField(fields, fieldKey, values[len(values)-1]) cb.addBucketField(fields, fieldKey, values[len(values)-1])
} }
func (cb *Couchbase) queryDetailedBucketStats(server, bucket string, nodeHostname *string, bucketStats *BucketStats) error { func (cb *Couchbase) queryDetailedBucketStats(server, bucket string, nodeHostname string, bucketStats *BucketStats) error {
url := server + "/pools/default/buckets/" + bucket url := server + "/pools/default/buckets/" + bucket
if nodeHostname != nil { if nodeHostname != "" {
url += "/nodes/" + *nodeHostname url += "/nodes/" + nodeHostname
} }
url += "/stats?" url += "/stats?"

View File

@@ -90,7 +90,7 @@ func TestGatherDetailedBucketMetrics(t *testing.T) {
tests := []struct { tests := []struct {
name string name string
node *string node string
response []byte response []byte
}{ }{
{ {
@@ -104,7 +104,7 @@ func TestGatherDetailedBucketMetrics(t *testing.T) {
{ {
name: "node-level with all fields", name: "node-level with all fields",
response: nodeBucketStatsResponse, response: nodeBucketStatsResponse,
node: &node, node: node,
}, },
} }

View File

@@ -150,7 +150,7 @@ func (ki *KubernetesInventory) createSelectorFilters() error {
return nil return nil
} }
var ( const (
daemonSetMeasurement = "kubernetes_daemonset" daemonSetMeasurement = "kubernetes_daemonset"
deploymentMeasurement = "kubernetes_deployment" deploymentMeasurement = "kubernetes_deployment"
endpointMeasurement = "kubernetes_endpoint" endpointMeasurement = "kubernetes_endpoint"
@@ -158,10 +158,10 @@ var (
nodeMeasurement = "kubernetes_node" nodeMeasurement = "kubernetes_node"
persistentVolumeMeasurement = "kubernetes_persistentvolume" persistentVolumeMeasurement = "kubernetes_persistentvolume"
persistentVolumeClaimMeasurement = "kubernetes_persistentvolumeclaim" persistentVolumeClaimMeasurement = "kubernetes_persistentvolumeclaim"
podContainerMeasurement = "kubernetes_pod_container" podContainerMeasurement = "kubernetes_pod_container" //nolint:gosec // G101: Potential hardcoded credentials - false positive
serviceMeasurement = "kubernetes_service" serviceMeasurement = "kubernetes_service"
statefulSetMeasurement = "kubernetes_statefulset" statefulSetMeasurement = "kubernetes_statefulset"
resourcequotaMeasurement = "kubernetes_resourcequota" resourcequotaMeasurement = "kubernetes_resourcequota" //nolint:gosec // G101: Potential hardcoded credentials - false positive
certificateMeasurement = "kubernetes_certificate" certificateMeasurement = "kubernetes_certificate"
) )

View File

@@ -325,11 +325,7 @@ func (logstash *Logstash) gatherPluginsStats(
return nil return nil
} }
func (logstash *Logstash) gatherQueueStats( func (logstash *Logstash) gatherQueueStats(queue PipelineQueue, tags map[string]string, acc telegraf.Accumulator) error {
queue *PipelineQueue,
tags map[string]string,
accumulator telegraf.Accumulator,
) error {
queueTags := map[string]string{ queueTags := map[string]string{
"queue_type": queue.Type, "queue_type": queue.Type,
} }
@@ -369,7 +365,7 @@ func (logstash *Logstash) gatherQueueStats(
} }
} }
accumulator.AddFields("logstash_queue", queueFields, queueTags) acc.AddFields("logstash_queue", queueFields, queueTags)
return nil return nil
} }
@@ -410,7 +406,7 @@ func (logstash *Logstash) gatherPipelineStats(address string, accumulator telegr
return err return err
} }
err = logstash.gatherQueueStats(&pipelineStats.Pipeline.Queue, tags, accumulator) err = logstash.gatherQueueStats(pipelineStats.Pipeline.Queue, tags, accumulator)
if err != nil { if err != nil {
return err return err
} }
@@ -456,7 +452,7 @@ func (logstash *Logstash) gatherPipelinesStats(address string, accumulator teleg
return err return err
} }
err = logstash.gatherQueueStats(&pipeline.Queue, tags, accumulator) err = logstash.gatherQueueStats(pipeline.Queue, tags, accumulator)
if err != nil { if err != nil {
return err return err
} }

View File

@@ -651,7 +651,7 @@ func getDatacenters(ctx context.Context, e *Endpoint, resourceFilter *ResourceFi
ref: r.ExtensibleManagedObject.Reference(), ref: r.ExtensibleManagedObject.Reference(),
parentRef: r.Parent, parentRef: r.Parent,
dcname: r.Name, dcname: r.Name,
customValues: e.loadCustomAttributes(&r.ManagedEntity), customValues: e.loadCustomAttributes(r.ManagedEntity),
} }
} }
return m, nil return m, nil
@@ -697,7 +697,7 @@ func getClusters(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilte
name: r.Name, name: r.Name,
ref: r.ExtensibleManagedObject.Reference(), ref: r.ExtensibleManagedObject.Reference(),
parentRef: p, parentRef: p,
customValues: e.loadCustomAttributes(&r.ManagedEntity), customValues: e.loadCustomAttributes(r.ManagedEntity),
} }
return nil return nil
}() }()
@@ -721,7 +721,7 @@ func getResourcePools(ctx context.Context, e *Endpoint, resourceFilter *Resource
name: r.Name, name: r.Name,
ref: r.ExtensibleManagedObject.Reference(), ref: r.ExtensibleManagedObject.Reference(),
parentRef: r.Parent, parentRef: r.Parent,
customValues: e.loadCustomAttributes(&r.ManagedEntity), customValues: e.loadCustomAttributes(r.ManagedEntity),
} }
} }
return m, nil return m, nil
@@ -750,7 +750,7 @@ func getHosts(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter)
name: r.Name, name: r.Name,
ref: r.ExtensibleManagedObject.Reference(), ref: r.ExtensibleManagedObject.Reference(),
parentRef: r.Parent, parentRef: r.Parent,
customValues: e.loadCustomAttributes(&r.ManagedEntity), customValues: e.loadCustomAttributes(r.ManagedEntity),
} }
} }
return m, nil return m, nil
@@ -856,7 +856,7 @@ func getVMs(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (o
guest: guest, guest: guest,
altID: uuid, altID: uuid,
rpname: rpname, rpname: rpname,
customValues: e.loadCustomAttributes(&r.ManagedEntity), customValues: e.loadCustomAttributes(r.ManagedEntity),
lookup: lookup, lookup: lookup,
} }
} }
@@ -885,13 +885,13 @@ func getDatastores(ctx context.Context, e *Endpoint, resourceFilter *ResourceFil
ref: r.ExtensibleManagedObject.Reference(), ref: r.ExtensibleManagedObject.Reference(),
parentRef: r.Parent, parentRef: r.Parent,
altID: lunID, altID: lunID,
customValues: e.loadCustomAttributes(&r.ManagedEntity), customValues: e.loadCustomAttributes(r.ManagedEntity),
} }
} }
return m, nil return m, nil
} }
func (e *Endpoint) loadCustomAttributes(entity *mo.ManagedEntity) map[string]string { func (e *Endpoint) loadCustomAttributes(entity mo.ManagedEntity) map[string]string {
if !e.customAttrEnabled { if !e.customAttrEnabled {
return map[string]string{} return map[string]string{}
} }

View File

@@ -375,7 +375,7 @@ func (c *CloudWatchLogs) Write(metrics []telegraf.Metric) error {
continue continue
} }
} else { } else {
putLogEvents.SequenceToken = &elem.sequenceToken putLogEvents.SequenceToken = &c.ls[logStream].sequenceToken
} }
//Upload log events //Upload log events

View File

@@ -265,7 +265,8 @@ func TestMetricConversionToRecordsWithTags(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
// Ignore the tags-list for comparison // Ignore the tags-list for comparison
actual.TagsList = nil actual.TagsList = nil
require.EqualValues(t, &tt.expected, actual) expected := tt.expected
require.EqualValues(t, &expected, actual)
}) })
} }
} }
@@ -343,9 +344,10 @@ func TestTagsHandling(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
input := tt.input
tt.plugin.Log = &testutil.Logger{} tt.plugin.Log = &testutil.Logger{}
require.NoError(t, tt.plugin.Init()) require.NoError(t, tt.plugin.Init())
require.NoError(t, tt.plugin.modifyRecordsWithTags(&tt.input)) require.NoError(t, tt.plugin.modifyRecordsWithTags(&input))
// Ignore the tags-list for comparison // Ignore the tags-list for comparison
tt.input.TagsList = nil tt.input.TagsList = nil
require.EqualValues(t, tt.expected, tt.input) require.EqualValues(t, tt.expected, tt.input)
@@ -478,10 +480,11 @@ func TestEntireMetricConversion(t *testing.T) {
require.NoError(t, tt.plugin.modifyRecordsWithTags(actual)) require.NoError(t, tt.plugin.modifyRecordsWithTags(actual))
// Ignore the tags-list for comparison // Ignore the tags-list for comparison
actual.TagsList = nil actual.TagsList = nil
expected := tt.expected
if tt.requireEqual { if tt.requireEqual {
require.EqualValues(t, &tt.expected, actual) require.EqualValues(t, &expected, actual)
} else { } else {
require.NotEqualValues(t, &tt.expected, actual) require.NotEqualValues(t, &expected, actual)
} }
}) })
} }

View File

@@ -2,7 +2,6 @@ package influx
import ( import (
"bytes" "bytes"
"reflect"
"strconv" "strconv"
"strings" "strings"
"unsafe" "unsafe"
@@ -81,13 +80,5 @@ func parseBoolBytes(b []byte) (bool, error) {
// //
// It is unsafe, and is intended to prepare input to short-lived functions that require strings. // It is unsafe, and is intended to prepare input to short-lived functions that require strings.
func unsafeBytesToString(in []byte) string { func unsafeBytesToString(in []byte) string {
//nolint:gosec // G103: Valid use of unsafe call to convert []byte to SliceHeader (without a heap allocation) return unsafe.String(&in[0], len(in))
src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
dst := reflect.StringHeader{
Data: src.Data,
Len: src.Len,
}
//nolint:gosec // G103: Valid use of unsafe call to convert StringHeader to string (without a heap allocation)
s := *(*string)(unsafe.Pointer(&dst))
return s
} }