chore(tools): Bump golangci-lint from v1.53.2 to v1.54.2 (#13838)
parent 8dbc177d3f
commit 1d24efe55c
@@ -62,7 +62,7 @@ commands:
       - run: 'sh ./scripts/installgo_windows.sh'
       - run: choco install mingw
       - run: go env
-      - run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.53.2
+      - run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.54.2
       - when:
           condition:
             equal: [ linux, << parameters.os >> ]
Makefile
@@ -174,7 +174,7 @@ vet:
 .PHONY: lint-install
 lint-install:
 	@echo "Installing golangci-lint"
-	go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.53.2
+	go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.54.2

 	@echo "Installing markdownlint"
 	npm install -g markdownlint-cli
@@ -150,23 +150,25 @@ func (s *AliyunCMS) Init() error {
 	}

 	//check metrics dimensions consistency
-	for _, metric := range s.Metrics {
-		if metric.Dimensions != "" {
+	for i := range s.Metrics {
+		metric := s.Metrics[i]
+		if metric.Dimensions == "" {
+			continue
+		}
 		metric.dimensionsUdObj = map[string]string{}
 		metric.dimensionsUdArr = []map[string]string{}

 		// first try to unmarshal as an object
-		err := json.Unmarshal([]byte(metric.Dimensions), &metric.dimensionsUdObj)
-		if err != nil {
-			// then try to unmarshal as an array
-			err := json.Unmarshal([]byte(metric.Dimensions), &metric.dimensionsUdArr)
-			if err != nil {
-				return fmt.Errorf("cannot parse dimensions (neither obj, nor array) %q: %w", metric.Dimensions, err)
-			}
-		}
+		if err := json.Unmarshal([]byte(metric.Dimensions), &metric.dimensionsUdObj); err == nil {
+			// We were successful, so stop here
+			continue
+		}
+
+		// then try to unmarshal as an array
+		if err := json.Unmarshal([]byte(metric.Dimensions), &metric.dimensionsUdArr); err != nil {
+			return fmt.Errorf("cannot parse dimensions (neither obj, nor array) %q: %w", metric.Dimensions, err)
+		}
 	}

 	s.measurement = formatMeasurement(s.Project)
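The reworked Init loop above trades the nested ifs for an index-based range with early continues, trying the JSON object form first and falling back to the array form. A minimal, runnable sketch of that object-or-array fallback; the `parseDimensions` helper and sample values are illustrative, not the plugin's own code:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// parseDimensions accepts either a single JSON object ({"k":"v"}) or a JSON
// array of objects ([{"k":"v"}, ...]), mirroring the fallback in Init above.
func parseDimensions(raw string) (map[string]string, []map[string]string, error) {
	obj := map[string]string{}
	arr := []map[string]string{}

	// first try to unmarshal as an object
	if err := json.Unmarshal([]byte(raw), &obj); err == nil {
		return obj, nil, nil
	}

	// then try to unmarshal as an array
	if err := json.Unmarshal([]byte(raw), &arr); err != nil {
		return nil, nil, fmt.Errorf("cannot parse dimensions (neither obj, nor array) %q: %w", raw, err)
	}
	return nil, arr, nil
}

func main() {
	obj, _, _ := parseDimensions(`{"instanceId": "i-abc"}`)
	_, arr, _ := parseDimensions(`[{"instanceId": "i-abc"}, {"instanceId": "i-def"}]`)
	fmt.Println(obj, arr)
}
```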
@@ -94,7 +94,7 @@ func (cb *Couchbase) gatherServer(acc telegraf.Accumulator, addr string) error {
 		fields := cb.basicBucketStats(bucket.BasicStats)
 		tags := map[string]string{"cluster": cluster, "bucket": name}

-		err := cb.gatherDetailedBucketStats(addr, name, nil, fields)
+		err := cb.gatherDetailedBucketStats(addr, name, "", fields)
 		if err != nil {
 			return err
 		}
@@ -107,7 +107,7 @@ func (cb *Couchbase) gatherServer(acc telegraf.Accumulator, addr string) error {
 			fields := cb.basicBucketStats(bucket.BasicStats)
 			tags := map[string]string{"cluster": cluster, "bucket": name, "hostname": node.Hostname}

-			err := cb.gatherDetailedBucketStats(addr, name, &node.Hostname, fields)
+			err := cb.gatherDetailedBucketStats(addr, name, node.Hostname, fields)
 			if err != nil {
 				return err
 			}
@@ -133,7 +133,7 @@ func (cb *Couchbase) basicBucketStats(basicStats map[string]interface{}) map[str
 	return fields
 }

-func (cb *Couchbase) gatherDetailedBucketStats(server, bucket string, nodeHostname *string, fields map[string]interface{}) error {
+func (cb *Couchbase) gatherDetailedBucketStats(server, bucket string, nodeHostname string, fields map[string]interface{}) error {
 	extendedBucketStats := &BucketStats{}
 	err := cb.queryDetailedBucketStats(server, bucket, nodeHostname, extendedBucketStats)
 	if err != nil {
@@ -374,10 +374,10 @@ func (cb *Couchbase) addBucketFieldChecked(fields map[string]interface{}, fieldK
 	cb.addBucketField(fields, fieldKey, values[len(values)-1])
 }

-func (cb *Couchbase) queryDetailedBucketStats(server, bucket string, nodeHostname *string, bucketStats *BucketStats) error {
+func (cb *Couchbase) queryDetailedBucketStats(server, bucket string, nodeHostname string, bucketStats *BucketStats) error {
 	url := server + "/pools/default/buckets/" + bucket
-	if nodeHostname != nil {
-		url += "/nodes/" + *nodeHostname
+	if nodeHostname != "" {
+		url += "/nodes/" + nodeHostname
 	}
 	url += "/stats?"

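The couchbase change drops the optional `*string` parameter in favor of a plain string, with the empty string meaning "no per-node stats": callers pass `""` or `node.Hostname` directly, and the nil check becomes a comparison against `""`. A small sketch of the resulting URL-building shape; `buildStatsURL` and the sample values are illustrative, not the plugin's code:

```go
package main

import "fmt"

// buildStatsURL follows the refactored style: an empty nodeHostname selects
// bucket-level stats, a non-empty one selects the per-node endpoint.
func buildStatsURL(server, bucket, nodeHostname string) string {
	url := server + "/pools/default/buckets/" + bucket
	if nodeHostname != "" {
		url += "/nodes/" + nodeHostname
	}
	return url + "/stats?"
}

func main() {
	fmt.Println(buildStatsURL("http://localhost:8091", "mybucket", ""))
	fmt.Println(buildStatsURL("http://localhost:8091", "mybucket", "10.0.0.5:8091"))
}
```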
@@ -90,7 +90,7 @@ func TestGatherDetailedBucketMetrics(t *testing.T) {

 	tests := []struct {
 		name     string
-		node     *string
+		node     string
 		response []byte
 	}{
 		{
@@ -104,7 +104,7 @@ func TestGatherDetailedBucketMetrics(t *testing.T) {
 		{
 			name:     "node-level with all fields",
 			response: nodeBucketStatsResponse,
-			node:     &node,
+			node:     node,
 		},
 	}

@@ -150,7 +150,7 @@ func (ki *KubernetesInventory) createSelectorFilters() error {
 	return nil
 }

-var (
+const (
 	daemonSetMeasurement             = "kubernetes_daemonset"
 	deploymentMeasurement            = "kubernetes_deployment"
 	endpointMeasurement              = "kubernetes_endpoint"
@@ -158,10 +158,10 @@ var (
 	nodeMeasurement                  = "kubernetes_node"
 	persistentVolumeMeasurement      = "kubernetes_persistentvolume"
 	persistentVolumeClaimMeasurement = "kubernetes_persistentvolumeclaim"
-	podContainerMeasurement          = "kubernetes_pod_container"
+	podContainerMeasurement          = "kubernetes_pod_container" //nolint:gosec // G101: Potential hardcoded credentials - false positive
 	serviceMeasurement               = "kubernetes_service"
 	statefulSetMeasurement           = "kubernetes_statefulset"
-	resourcequotaMeasurement         = "kubernetes_resourcequota"
+	resourcequotaMeasurement         = "kubernetes_resourcequota" //nolint:gosec // G101: Potential hardcoded credentials - false positive
 	certificateMeasurement           = "kubernetes_certificate"
 )

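The two added directives suppress gosec's G101 ("potential hardcoded credentials") check, which the commit treats as a false positive for these measurement-name constants. For reference, golangci-lint's inline suppression format is `//nolint:<linter>` followed by a `// reason` comment; a trivial standalone example (the constant here is illustrative):

```go
package main

import "fmt"

// The part after "nolint:" names the linter(s) being suppressed; the trailing
// "// ..." comment documents why, which the nolintlint checker can require.
const resourceQuotaMeasurement = "kubernetes_resourcequota" //nolint:gosec // G101: false positive, not a credential

func main() {
	fmt.Println(resourceQuotaMeasurement)
}
```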
@@ -325,11 +325,7 @@ func (logstash *Logstash) gatherPluginsStats(
 	return nil
 }

-func (logstash *Logstash) gatherQueueStats(
-	queue *PipelineQueue,
-	tags map[string]string,
-	accumulator telegraf.Accumulator,
-) error {
+func (logstash *Logstash) gatherQueueStats(queue PipelineQueue, tags map[string]string, acc telegraf.Accumulator) error {
 	queueTags := map[string]string{
 		"queue_type": queue.Type,
 	}
@@ -369,7 +365,7 @@ func (logstash *Logstash) gatherQueueStats(
 		}
 	}

-	accumulator.AddFields("logstash_queue", queueFields, queueTags)
+	acc.AddFields("logstash_queue", queueFields, queueTags)

 	return nil
 }
@@ -410,7 +406,7 @@ func (logstash *Logstash) gatherPipelineStats(address string, accumulator telegr
 		return err
 	}

-	err = logstash.gatherQueueStats(&pipelineStats.Pipeline.Queue, tags, accumulator)
+	err = logstash.gatherQueueStats(pipelineStats.Pipeline.Queue, tags, accumulator)
 	if err != nil {
 		return err
 	}
@@ -456,7 +452,7 @@ func (logstash *Logstash) gatherPipelinesStats(address string, accumulator teleg
 		return err
 	}

-	err = logstash.gatherQueueStats(&pipeline.Queue, tags, accumulator)
+	err = logstash.gatherQueueStats(pipeline.Queue, tags, accumulator)
 	if err != nil {
 		return err
 	}
@@ -651,7 +651,7 @@ func getDatacenters(ctx context.Context, e *Endpoint, resourceFilter *ResourceFi
 			ref:          r.ExtensibleManagedObject.Reference(),
 			parentRef:    r.Parent,
 			dcname:       r.Name,
-			customValues: e.loadCustomAttributes(&r.ManagedEntity),
+			customValues: e.loadCustomAttributes(r.ManagedEntity),
 		}
 	}
 	return m, nil
@@ -697,7 +697,7 @@ func getClusters(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilte
 				name:         r.Name,
 				ref:          r.ExtensibleManagedObject.Reference(),
 				parentRef:    p,
-				customValues: e.loadCustomAttributes(&r.ManagedEntity),
+				customValues: e.loadCustomAttributes(r.ManagedEntity),
 			}
 			return nil
 		}()
@@ -721,7 +721,7 @@ func getResourcePools(ctx context.Context, e *Endpoint, resourceFilter *Resource
 			name:         r.Name,
 			ref:          r.ExtensibleManagedObject.Reference(),
 			parentRef:    r.Parent,
-			customValues: e.loadCustomAttributes(&r.ManagedEntity),
+			customValues: e.loadCustomAttributes(r.ManagedEntity),
 		}
 	}
 	return m, nil
@@ -750,7 +750,7 @@ func getHosts(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter)
 			name:         r.Name,
 			ref:          r.ExtensibleManagedObject.Reference(),
 			parentRef:    r.Parent,
-			customValues: e.loadCustomAttributes(&r.ManagedEntity),
+			customValues: e.loadCustomAttributes(r.ManagedEntity),
 		}
 	}
 	return m, nil
@@ -856,7 +856,7 @@ func getVMs(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (o
 			guest:        guest,
 			altID:        uuid,
 			rpname:       rpname,
-			customValues: e.loadCustomAttributes(&r.ManagedEntity),
+			customValues: e.loadCustomAttributes(r.ManagedEntity),
 			lookup:       lookup,
 		}
 	}
@@ -885,13 +885,13 @@ func getDatastores(ctx context.Context, e *Endpoint, resourceFilter *ResourceFil
 			ref:          r.ExtensibleManagedObject.Reference(),
 			parentRef:    r.Parent,
 			altID:        lunID,
-			customValues: e.loadCustomAttributes(&r.ManagedEntity),
+			customValues: e.loadCustomAttributes(r.ManagedEntity),
 		}
 	}
 	return m, nil
 }

-func (e *Endpoint) loadCustomAttributes(entity *mo.ManagedEntity) map[string]string {
+func (e *Endpoint) loadCustomAttributes(entity mo.ManagedEntity) map[string]string {
 	if !e.customAttrEnabled {
 		return map[string]string{}
 	}
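loadCustomAttributes now takes mo.ManagedEntity by value, so every call site drops the `&`. The diff does not state the motivation, but the new signature is safe because the function only reads the entity; a stand-in sketch with illustrative types (not the govmomi ones):

```go
package main

import "fmt"

// managedEntity stands in for mo.ManagedEntity; only read access is needed.
type managedEntity struct {
	Name        string
	CustomValue []string
}

// loadCustomAttributes reads from its parameter, so passing the struct by
// value (instead of a pointer) changes nothing for the caller's data.
func loadCustomAttributes(entity managedEntity) map[string]string {
	attrs := make(map[string]string, len(entity.CustomValue))
	for i, v := range entity.CustomValue {
		attrs[fmt.Sprintf("attr_%d", i)] = v
	}
	return attrs
}

func main() {
	e := managedEntity{Name: "dc-1", CustomValue: []string{"gold", "prod"}}
	fmt.Println(loadCustomAttributes(e)) // called with e, not &e
}
```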
@@ -375,7 +375,7 @@ func (c *CloudWatchLogs) Write(metrics []telegraf.Metric) error {
 				continue
 			}
 		} else {
-			putLogEvents.SequenceToken = &elem.sequenceToken
+			putLogEvents.SequenceToken = &c.ls[logStream].sequenceToken
 		}

 		//Upload log events
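The cloudwatch_logs fix stops taking the address of a field on the loop-local `elem` and instead addresses the entry through the owning map, the usual remedy when gosec's G601 ("implicit memory aliasing in for loop") or a similar check fires. A compact illustration with hypothetical `logStream`/map names, not the plugin's types:

```go
package main

import "fmt"

type logStream struct {
	sequenceToken string
}

func main() {
	// Map values are pointers, so a field of an entry is addressable via the
	// index expression, as in the fixed code above.
	ls := map[string]*logStream{"stream-a": {sequenceToken: "token-1"}}

	// Flagged shape: &elem.sequenceToken aliases the per-iteration copy, not
	// the element stored in the collection.
	for name, elem := range map[string]logStream{"stream-a": {sequenceToken: "token-1"}} {
		p := &elem.sequenceToken
		fmt.Println("copy:", name, *p)
	}

	// Fixed shape: address the field through the owning map so the pointer
	// tracks the stored element.
	p := &ls["stream-a"].sequenceToken
	*p = "token-2"
	fmt.Println("map entry:", ls["stream-a"].sequenceToken)
}
```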
@@ -265,7 +265,8 @@ func TestMetricConversionToRecordsWithTags(t *testing.T) {
 			require.NoError(t, err)
 			// Ignore the tags-list for comparison
 			actual.TagsList = nil
-			require.EqualValues(t, &tt.expected, actual)
+			expected := tt.expected
+			require.EqualValues(t, &expected, actual)
 		})
 	}
 }
@@ -343,9 +344,10 @@ func TestTagsHandling(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
+			input := tt.input
 			tt.plugin.Log = &testutil.Logger{}
 			require.NoError(t, tt.plugin.Init())
-			require.NoError(t, tt.plugin.modifyRecordsWithTags(&tt.input))
+			require.NoError(t, tt.plugin.modifyRecordsWithTags(&input))
 			// Ignore the tags-list for comparison
 			tt.input.TagsList = nil
 			require.EqualValues(t, tt.expected, tt.input)
@@ -478,10 +480,11 @@ func TestEntireMetricConversion(t *testing.T) {
 			require.NoError(t, tt.plugin.modifyRecordsWithTags(actual))
 			// Ignore the tags-list for comparison
 			actual.TagsList = nil
+			expected := tt.expected
 			if tt.requireEqual {
-				require.EqualValues(t, &tt.expected, actual)
+				require.EqualValues(t, &expected, actual)
 			} else {
-				require.NotEqualValues(t, &tt.expected, actual)
+				require.NotEqualValues(t, &expected, actual)
 			}
 		})
 	}
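These test edits copy `tt.expected` (and `tt.input`) into locals before taking their addresses, so the pointers handed to `require` no longer alias the shared loop variable `tt` — the usual answer to aliasing warnings in table-driven tests on Go versions before 1.22's per-iteration loop variables. A stripped-down, hypothetical table-driven test showing the same pattern:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

type record struct{ Name string }

func TestExpectedCopy(t *testing.T) {
	tests := []struct {
		name     string
		expected record
	}{
		{name: "one", expected: record{Name: "a"}},
		{name: "two", expected: record{Name: "b"}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			actual := &record{Name: tt.expected.Name}

			// Copy first, then take the address: &expected points at a local,
			// not at a field of the shared loop variable tt.
			expected := tt.expected
			require.EqualValues(t, &expected, actual)
		})
	}
}
```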
@@ -2,7 +2,6 @@ package influx

 import (
 	"bytes"
-	"reflect"
 	"strconv"
 	"strings"
 	"unsafe"
@@ -81,13 +80,5 @@ func parseBoolBytes(b []byte) (bool, error) {
 //
 // It is unsafe, and is intended to prepare input to short-lived functions that require strings.
 func unsafeBytesToString(in []byte) string {
-	//nolint:gosec // G103: Valid use of unsafe call to convert []byte to SliceHeader (without a heap allocation)
-	src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
-	dst := reflect.StringHeader{
-		Data: src.Data,
-		Len:  src.Len,
-	}
-	//nolint:gosec // G103: Valid use of unsafe call to convert StringHeader to string (without a heap allocation)
-	s := *(*string)(unsafe.Pointer(&dst))
-	return s
+	return unsafe.String(&in[0], len(in))
 }
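`unsafe.String` (Go 1.20+) builds a string directly from a byte pointer and a length, replacing the reflect.SliceHeader/StringHeader dance without copying; the caller still must not mutate the bytes while the string is in use. A minimal sketch — the empty-slice guard is my addition, since `&b[0]` would panic on a zero-length slice, which the parser presumably never passes:

```go
package main

import (
	"fmt"
	"unsafe"
)

// bytesToString converts b to a string without copying. The bytes must not be
// modified for as long as the returned string is in use.
func bytesToString(b []byte) string {
	if len(b) == 0 {
		return "" // guard: &b[0] panics on an empty slice
	}
	return unsafe.String(&b[0], len(b))
}

func main() {
	buf := []byte("weather,location=us-midwest temperature=82")
	fmt.Println(bytesToString(buf))
}
```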