chore(linters): Enable rangeValCopy and hugeParams checkers for gocritic (#14454)

Paweł Żak 2023-12-15 15:36:34 +01:00 committed by GitHub
parent 4ad8f6b814
commit e48b72f965
30 changed files with 163 additions and 113 deletions


@ -118,10 +118,26 @@ linters-settings:
- weakCond
# performance
- equalFold
- hugeParam
- preferDecodeRune
- preferFprint
- preferStringWriter
- rangeValCopy
- stringXbytes
# Settings passed to gocritic.
# The settings key is the name of a supported gocritic checker.
# The list of supported checkers can be found at https://go-critic.github.io/overview.
settings:
hugeParam:
# Size in bytes that makes the warning trigger.
# Default: 80
sizeThreshold: 512
rangeValCopy:
# Size in bytes that makes the warning trigger.
# Default: 128
sizeThreshold: 512
gosec:
# To select a subset of rules to run.
# Available rules: https://github.com/securego/gosec#available-rules
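
Both new checkers work against the thresholds configured above. As a rough, hypothetical sketch of what they flag (the wide type and both functions below are invented for illustration and exceed the 512-byte limit):

package example

// wide is well above the configured sizeThreshold of 512 bytes.
type wide struct {
	buf [1024]byte
}

// hugeParam flags this signature: w is copied on every call.
// The usual fix is to accept *wide instead.
func first(w wide) byte { return w.buf[0] }

// rangeValCopy flags this loop: v copies 1024 bytes per iteration.
// The usual fix is to range over the index and take &items[i].
func sum(items []wide) (total int) {
	for _, v := range items {
		total += int(v.buf[0])
	}
	return total
}

The rest of this commit applies exactly those two fixes across the plugins below.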


@ -131,13 +131,14 @@ type metric struct {
func genTagsFields(gpus map[string]GPU, system map[string]sysInfo) []metric {
metrics := []metric{}
for cardID, payload := range gpus {
for cardID := range gpus {
if strings.Contains(cardID, "card") {
tags := map[string]string{
"name": cardID,
}
fields := map[string]interface{}{}
payload := gpus[cardID]
totVRAM, _ := strconv.ParseInt(payload.GpuVRAMTotalMemory, 10, 64)
usdVRAM, _ := strconv.ParseInt(payload.GpuVRAMTotalUsedMemory, 10, 64)
strFree := strconv.FormatInt(totVRAM-usdVRAM, 10)
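
This hunk shows the map flavor of the rangeValCopy fix: ranging over only the key avoids copying each GPU payload up front, and the single deliberate copy via gpus[cardID] happens only for keys that survive the filter. A hedged sketch of the same idiom; filterCards is hypothetical and GPU stands in for the plugin's struct:

package example

import "strings"

type GPU struct {
	GpuVRAMTotalMemory string
	// ... many more fields, pushing the struct past the threshold
}

func filterCards(gpus map[string]GPU) []GPU {
	out := make([]GPU, 0, len(gpus))
	for cardID := range gpus { // keys only: no value copy per iteration
		if !strings.Contains(cardID, "card") {
			continue
		}
		out = append(out, gpus[cardID]) // one copy, only for matches
	}
	return out
}

Note that Go map values are not addressable, so an explicit lookup (or pointer-valued maps, as the ECS hunks below use) is the only way to sidestep the per-iteration copy.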


@ -5,9 +5,10 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/plugins/parsers/influx"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
const cgroupID = "c69461b2c836cc3f0e3e5deb07b1f16e25f6009da2a48bb0adc7dd580befaf55"
@ -53,7 +54,7 @@ func TestParseCgroupV2Meta(t *testing.T) {
}
var acc testutil.Accumulator
metastats(cgroupID, validMeta.Containers[0], &acc, tags, time.Now())
metastats(cgroupID, &validMeta.Containers[0], &acc, tags, time.Now())
actual := acc.GetTelegrafMetrics()
testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime())


@ -24,7 +24,7 @@ var (
// Client is the ECS client contract
type Client interface {
Task() (*Task, error)
ContainerStats() (map[string]types.StatsJSON, error)
ContainerStats() (map[string]*types.StatsJSON, error)
}
type httpClient interface {
@ -129,11 +129,11 @@ func (c *EcsClient) Task() (*Task, error) {
}
// ContainerStats calls the ECS stats endpoint and returns a populated container stats map
func (c *EcsClient) ContainerStats() (map[string]types.StatsJSON, error) {
func (c *EcsClient) ContainerStats() (map[string]*types.StatsJSON, error) {
req, _ := http.NewRequest("GET", c.statsURL, nil)
resp, err := c.client.Do(req)
if err != nil {
return map[string]types.StatsJSON{}, err
return nil, err
}
defer resp.Body.Close()
@ -144,19 +144,14 @@ func (c *EcsClient) ContainerStats() (map[string]types.StatsJSON, error) {
return nil, fmt.Errorf("%s returned HTTP status %s: %q", c.statsURL, resp.Status, body)
}
statsMap, err := unmarshalStats(resp.Body)
if err != nil {
return map[string]types.StatsJSON{}, err
}
return statsMap, nil
return unmarshalStats(resp.Body)
}
// PollSync executes Task and ContainerStats in parallel. If both succeed, both structs are returned.
// If either errors, a single error is returned.
func PollSync(c Client) (*Task, map[string]types.StatsJSON, error) {
func PollSync(c Client) (*Task, map[string]*types.StatsJSON, error) {
var task *Task
var stats map[string]types.StatsJSON
var stats map[string]*types.StatsJSON
var err error
if stats, err = c.ContainerStats(); err != nil {
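
Switching the map to *types.StatsJSON keeps each large stats struct behind a single pointer, so lookups, assignments, and the PollSync plumbing move one word instead of the whole struct, and a nil map becomes the natural error return. A minimal sketch of the technique with a hypothetical payload type:

package example

type payload struct {
	raw [2048]byte // stand-in for a wide decoded stats struct
}

func index(keys []string, items []payload) map[string]*payload {
	m := make(map[string]*payload, len(items))
	for i, k := range keys { // keys and items are assumed parallel
		m[k] = &items[i] // the map stores an 8-byte pointer, not 2 KiB
	}
	return m
}

A side benefit: pointer values are mutable through the map, whereas value-typed map entries would have to be copied out, modified, and stored back.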


@ -15,14 +15,14 @@ import (
type pollMock struct {
task func() (*Task, error)
stats func() (map[string]types.StatsJSON, error)
stats func() (map[string]*types.StatsJSON, error)
}
func (p *pollMock) Task() (*Task, error) {
return p.task()
}
func (p *pollMock) ContainerStats() (map[string]types.StatsJSON, error) {
func (p *pollMock) ContainerStats() (map[string]*types.StatsJSON, error) {
return p.stats()
}
@ -31,7 +31,7 @@ func TestEcsClient_PollSync(t *testing.T) {
name string
mock *pollMock
want *Task
want1 map[string]types.StatsJSON
want1 map[string]*types.StatsJSON
wantErr bool
}{
{
@ -40,7 +40,7 @@ func TestEcsClient_PollSync(t *testing.T) {
task: func() (*Task, error) {
return &validMeta, nil
},
stats: func() (map[string]types.StatsJSON, error) {
stats: func() (map[string]*types.StatsJSON, error) {
return validStats, nil
},
},
@ -53,7 +53,7 @@ func TestEcsClient_PollSync(t *testing.T) {
task: func() (*Task, error) {
return nil, errors.New("err")
},
stats: func() (map[string]types.StatsJSON, error) {
stats: func() (map[string]*types.StatsJSON, error) {
return validStats, nil
},
},
@ -65,7 +65,7 @@ func TestEcsClient_PollSync(t *testing.T) {
task: func() (*Task, error) {
return &validMeta, nil
},
stats: func() (map[string]types.StatsJSON, error) {
stats: func() (map[string]*types.StatsJSON, error) {
return nil, errors.New("err")
},
},
@ -170,7 +170,7 @@ func TestEcsClient_ContainerStats(t *testing.T) {
tests := []struct {
name string
client httpClient
want map[string]types.StatsJSON
want map[string]*types.StatsJSON
wantErr bool
}{
{
@ -192,7 +192,7 @@ func TestEcsClient_ContainerStats(t *testing.T) {
return nil, errors.New("err")
},
},
want: map[string]types.StatsJSON{},
want: nil,
wantErr: true,
},
{
@ -205,7 +205,7 @@ func TestEcsClient_ContainerStats(t *testing.T) {
}, nil
},
},
want: map[string]types.StatsJSON{},
want: nil,
wantErr: true,
},
{


@ -157,7 +157,8 @@ func (ecs *Ecs) accTask(task *Task, tags map[string]string, acc telegraf.Accumul
}
func (ecs *Ecs) accContainers(task *Task, taskTags map[string]string, acc telegraf.Accumulator) {
for _, c := range task.Containers {
for i := range task.Containers {
c := &task.Containers[i]
if !ecs.containerNameFilter.Match(c.Name) {
continue
}
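
This is the canonical rangeValCopy remedy for slices: iterate by index and bind a pointer to the element. It avoids the per-iteration copy and, unlike the range value, the pointer sees (and can make) changes to the underlying slice. A minimal sketch; the Container type here is a hypothetical stand-in:

package example

type Container struct {
	Name string
	Data [512]byte // wide enough to trip the 512-byte threshold
}

func names(containers []Container) []string {
	out := make([]string, 0, len(containers))
	for i := range containers {
		c := &containers[i] // pointer to the element; nothing is copied
		out = append(out, c.Name)
	}
	return out
}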


@ -20,7 +20,7 @@ var pauseStatsPreRead, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:39:59.933
var nginxStatsRead, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:40:00.93733207Z")
var nginxStatsPreRead, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:39:59.934291009Z")
var validStats = map[string]types.StatsJSON{
var validStats = map[string]*types.StatsJSON{
pauseStatsKey: {
Stats: types.Stats{
Read: pauseStatsRead,


@ -6,13 +6,14 @@ import (
"time"
"github.com/docker/docker/api/types"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs/docker"
)
func parseContainerStats(c Container, acc telegraf.Accumulator, tags map[string]string) {
func parseContainerStats(c *Container, acc telegraf.Accumulator, tags map[string]string) {
id := c.ID
stats := c.Stats
stats := &c.Stats
tm := stats.Read
if tm.Before(time.Unix(0, 0)) {
@ -26,7 +27,7 @@ func parseContainerStats(c Container, acc telegraf.Accumulator, tags map[string]
blkstats(id, stats, acc, tags, tm)
}
func metastats(id string, c Container, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
func metastats(id string, c *Container, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
metafields := map[string]interface{}{
"container_id": id,
"docker_name": c.DockerName,
@ -44,7 +45,7 @@ func metastats(id string, c Container, acc telegraf.Accumulator, tags map[string
acc.AddFields("ecs_container_meta", metafields, tags, tm)
}
func memstats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
func memstats(id string, stats *types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
memfields := map[string]interface{}{
"container_id": id,
}
@ -101,7 +102,7 @@ func memstats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags m
acc.AddFields("ecs_container_mem", memfields, tags, tm)
}
func cpustats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
func cpustats(id string, stats *types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
cpufields := map[string]interface{}{
"usage_total": stats.CPUStats.CPUUsage.TotalUsage,
"usage_in_usermode": stats.CPUStats.CPUUsage.UsageInUsermode,
@ -115,7 +116,7 @@ func cpustats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags m
previousCPU := stats.PreCPUStats.CPUUsage.TotalUsage
previousSystem := stats.PreCPUStats.SystemUsage
cpuPercent := docker.CalculateCPUPercentUnix(previousCPU, previousSystem, &stats)
cpuPercent := docker.CalculateCPUPercentUnix(previousCPU, previousSystem, stats)
cpufields["usage_percent"] = cpuPercent
cputags := copyTags(tags)
@ -142,7 +143,7 @@ func cpustats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags m
}
}
func netstats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
func netstats(id string, stats *types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
totalNetworkStatMap := make(map[string]interface{})
for network, netstats := range stats.Networks {
netfields := map[string]interface{}{
@ -194,7 +195,7 @@ func netstats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags m
}
}
func blkstats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
func blkstats(id string, stats *types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
blkioStats := stats.BlkioStats
// Make a map of devices to their block io stats
deviceStatMap := make(map[string]map[string]interface{})
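
All of the stats helpers above now take *types.StatsJSON, so each call moves one pointer rather than a struct several hundred bytes wide; the &stats at the CalculateCPUPercentUnix call site disappears because stats is already a pointer. To check whether a type crosses the 512-byte hugeParam threshold, unsafe.Sizeof gives essentially the static size the checker compares against. A self-contained sketch with a hypothetical stats type:

package main

import (
	"fmt"
	"unsafe"
)

type statsJSON struct {
	cpu, mem [40]uint64 // stand-ins; the real types.StatsJSON is wider
}

// Passing a pointer moves 8 bytes no matter how wide statsJSON grows.
func report(s *statsJSON) uint64 { return s.cpu[0] + s.mem[0] }

func main() {
	var s statsJSON
	fmt.Println(unsafe.Sizeof(s)) // 640, above the 512-byte threshold
	fmt.Println(report(&s))       // 0
}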


@ -15,7 +15,7 @@ func Test_metastats(t *testing.T) {
}
tm := time.Now()
metastats(nginxStatsKey, validMeta.Containers[1], &mockAcc, tags, tm)
metastats(nginxStatsKey, &validMeta.Containers[1], &mockAcc, tags, tm)
mockAcc.AssertContainsTaggedFields(
t,
"ecs_container_meta",


@ -54,15 +54,18 @@ func unmarshalTask(r io.Reader) (*Task, error) {
}
// docker parsers
func unmarshalStats(r io.Reader) (map[string]types.StatsJSON, error) {
var statsMap map[string]types.StatsJSON
err := json.NewDecoder(r).Decode(&statsMap)
return statsMap, err
func unmarshalStats(r io.Reader) (map[string]*types.StatsJSON, error) {
var statsMap map[string]*types.StatsJSON
if err := json.NewDecoder(r).Decode(&statsMap); err != nil {
return nil, err
}
return statsMap, nil
}
// interleaves Stats into the Container objects in the Task
func mergeTaskStats(task *Task, stats map[string]types.StatsJSON) {
for i, c := range task.Containers {
func mergeTaskStats(task *Task, stats map[string]*types.StatsJSON) {
for i := range task.Containers {
c := &task.Containers[i]
if strings.Trim(c.ID, " ") == "" {
continue
}
@ -70,6 +73,6 @@ func mergeTaskStats(task *Task, stats map[string]types.StatsJSON) {
if !ok {
continue
}
task.Containers[i].Stats = stat
c.Stats = *stat
}
}
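
mergeTaskStats now combines both fixes: the loop binds a pointer to each container instead of copying it, and the one remaining copy (c.Stats = *stat) is deliberate, materializing the decoded stats inside the container. A hedged sketch of the same shape with invented types:

package example

type stats struct{ rx, tx uint64 }

type container struct {
	ID    string
	Stats stats
}

func merge(containers []container, byID map[string]*stats) {
	for i := range containers {
		c := &containers[i] // no per-iteration copy of the element
		if s, ok := byID[c.ID]; ok {
			c.Stats = *s // single intentional copy into the container
		}
	}
}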


@ -41,7 +41,7 @@ func Test_mergeTaskStats(t *testing.T) {
mergeTaskStats(parsedMetadata, parsedStats)
for _, cont := range parsedMetadata.Containers {
require.Equal(t, validStats[cont.ID], cont.Stats)
for i := range parsedMetadata.Containers {
require.Equal(t, validStats[parsedMetadata.Containers[i].ID], &parsedMetadata.Containers[i].Stats)
}
}


@ -14,12 +14,12 @@ func collectDaemonSets(ctx context.Context, acc telegraf.Accumulator, ki *Kubern
acc.AddError(err)
return
}
for _, d := range list.Items {
ki.gatherDaemonSet(d, acc)
for i := range list.Items {
ki.gatherDaemonSet(&list.Items[i], acc)
}
}
func (ki *KubernetesInventory) gatherDaemonSet(d v1.DaemonSet, acc telegraf.Accumulator) {
func (ki *KubernetesInventory) gatherDaemonSet(d *v1.DaemonSet, acc telegraf.Accumulator) {
fields := map[string]interface{}{
"generation": d.Generation,
"current_number_scheduled": d.Status.CurrentNumberScheduled,


@ -5,12 +5,12 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
func TestDaemonSet(t *testing.T) {
@ -108,8 +108,9 @@ func TestDaemonSet(t *testing.T) {
}
require.NoError(t, ks.createSelectorFilters())
acc := new(testutil.Accumulator)
for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items {
ks.gatherDaemonSet(dset, acc)
items := ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items
for i := range items {
ks.gatherDaemonSet(&items[i], acc)
}
err := acc.FirstError()
@ -268,8 +269,9 @@ func TestDaemonSetSelectorFilter(t *testing.T) {
ks.SelectorExclude = v.exclude
require.NoError(t, ks.createSelectorFilters())
acc := new(testutil.Accumulator)
for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items {
ks.gatherDaemonSet(dset, acc)
items := ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items
for i := range items {
ks.gatherDaemonSet(&items[i], acc)
}
// Grab selector tags


@ -3,8 +3,9 @@ package kube_inventory
import (
"context"
"github.com/influxdata/telegraf"
v1 "k8s.io/api/apps/v1"
"github.com/influxdata/telegraf"
)
func collectDeployments(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) {
@ -13,12 +14,12 @@ func collectDeployments(ctx context.Context, acc telegraf.Accumulator, ki *Kuber
acc.AddError(err)
return
}
for _, d := range list.Items {
ki.gatherDeployment(d, acc)
for i := range list.Items {
ki.gatherDeployment(&list.Items[i], acc)
}
}
func (ki *KubernetesInventory) gatherDeployment(d v1.Deployment, acc telegraf.Accumulator) {
func (ki *KubernetesInventory) gatherDeployment(d *v1.Deployment, acc telegraf.Accumulator) {
fields := map[string]interface{}{
"replicas_available": d.Status.AvailableReplicas,
"replicas_unavailable": d.Status.UnavailableReplicas,


@ -5,13 +5,13 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
func TestDeployment(t *testing.T) {
@ -113,8 +113,9 @@ func TestDeployment(t *testing.T) {
}
require.NoError(t, ks.createSelectorFilters())
acc := new(testutil.Accumulator)
for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items {
ks.gatherDeployment(deployment, acc)
items := ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items
for i := range items {
ks.gatherDeployment(&items[i], acc)
}
err := acc.FirstError()
@ -284,8 +285,9 @@ func TestDeploymentSelectorFilter(t *testing.T) {
ks.SelectorExclude = v.exclude
require.NoError(t, ks.createSelectorFilters())
acc := new(testutil.Accumulator)
for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items {
ks.gatherDeployment(deployment, acc)
items := ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items
for i := range items {
ks.gatherDeployment(&items[i], acc)
}
// Grab selector tags


@ -17,8 +17,8 @@ func collectNodes(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesI
ki.gatherNodeCount(len(list.Items), acc)
for _, n := range list.Items {
ki.gatherNode(n, acc)
for i := range list.Items {
ki.gatherNode(&list.Items[i], acc)
}
}
@ -29,7 +29,7 @@ func (ki *KubernetesInventory) gatherNodeCount(count int, acc telegraf.Accumulat
acc.AddFields(nodeMeasurement, fields, tags)
}
func (ki *KubernetesInventory) gatherNode(n corev1.Node, acc telegraf.Accumulator) {
func (ki *KubernetesInventory) gatherNode(n *corev1.Node, acc telegraf.Accumulator) {
fields := map[string]interface{}{}
tags := map[string]string{
"node_name": n.Name,


@ -4,13 +4,13 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
func TestNode(t *testing.T) {
@ -157,8 +157,9 @@ func TestNode(t *testing.T) {
client: cli,
}
acc := new(testutil.Accumulator)
for _, node := range ((v.handler.responseMap["/nodes/"]).(corev1.NodeList)).Items {
ks.gatherNode(node, acc)
items := ((v.handler.responseMap["/nodes/"]).(corev1.NodeList)).Items
for i := range items {
ks.gatherNode(&items[i], acc)
}
err := acc.FirstError()


@ -15,12 +15,12 @@ func collectPersistentVolumes(ctx context.Context, acc telegraf.Accumulator, ki
acc.AddError(err)
return
}
for _, pv := range list.Items {
ki.gatherPersistentVolume(pv, acc)
for i := range list.Items {
ki.gatherPersistentVolume(&list.Items[i], acc)
}
}
func (ki *KubernetesInventory) gatherPersistentVolume(pv corev1.PersistentVolume, acc telegraf.Accumulator) {
func (ki *KubernetesInventory) gatherPersistentVolume(pv *corev1.PersistentVolume, acc telegraf.Accumulator) {
phaseType := 5
switch strings.ToLower(string(pv.Status.Phase)) {
case "bound":


@ -4,12 +4,12 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
func TestPersistentVolume(t *testing.T) {
@ -81,8 +81,9 @@ func TestPersistentVolume(t *testing.T) {
client: cli,
}
acc := new(testutil.Accumulator)
for _, pv := range ((v.handler.responseMap["/persistentvolumes/"]).(*corev1.PersistentVolumeList)).Items {
ks.gatherPersistentVolume(pv, acc)
items := ((v.handler.responseMap["/persistentvolumes/"]).(*corev1.PersistentVolumeList)).Items
for i := range items {
ks.gatherPersistentVolume(&items[i], acc)
}
err := acc.FirstError()


@ -25,12 +25,12 @@ func collectPods(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesIn
acc.AddError(err)
return
}
for _, p := range listRef.Items {
ki.gatherPod(p, acc)
for i := range listRef.Items {
ki.gatherPod(&listRef.Items[i], acc)
}
}
func (ki *KubernetesInventory) gatherPod(p corev1.Pod, acc telegraf.Accumulator) {
func (ki *KubernetesInventory) gatherPod(p *corev1.Pod, acc telegraf.Accumulator) {
creationTs := p.GetCreationTimestamp()
if creationTs.IsZero() {
return
@ -50,7 +50,7 @@ func (ki *KubernetesInventory) gatherPod(p corev1.Pod, acc telegraf.Accumulator)
}
}
func (ki *KubernetesInventory) gatherPodContainer(p corev1.Pod, cs corev1.ContainerStatus, c corev1.Container, acc telegraf.Accumulator) {
func (ki *KubernetesInventory) gatherPodContainer(p *corev1.Pod, cs corev1.ContainerStatus, c corev1.Container, acc telegraf.Accumulator) {
stateCode := 3
stateReason := ""
state := "unknown"


@ -5,13 +5,13 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
func TestPod(t *testing.T) {
@ -447,8 +447,9 @@ func TestPod(t *testing.T) {
}
require.NoError(t, ks.createSelectorFilters())
acc := new(testutil.Accumulator)
for _, pod := range ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items {
ks.gatherPod(pod, acc)
items := ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items
for i := range items {
ks.gatherPod(&items[i], acc)
}
err := acc.FirstError()
@ -682,8 +683,9 @@ func TestPodSelectorFilter(t *testing.T) {
ks.SelectorExclude = v.exclude
require.NoError(t, ks.createSelectorFilters())
acc := new(testutil.Accumulator)
for _, pod := range ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items {
ks.gatherPod(pod, acc)
items := ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items
for i := range items {
ks.gatherPod(&items[i], acc)
}
// Grab selector tags
@ -992,8 +994,9 @@ func TestPodPendingContainers(t *testing.T) {
}
require.NoError(t, ks.createSelectorFilters())
acc := new(testutil.Accumulator)
for _, pod := range ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items {
ks.gatherPod(pod, acc)
items := ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items
for i := range items {
ks.gatherPod(&items[i], acc)
}
err := acc.FirstError()


@ -14,12 +14,12 @@ func collectServices(ctx context.Context, acc telegraf.Accumulator, ki *Kubernet
acc.AddError(err)
return
}
for _, i := range list.Items {
ki.gatherService(i, acc)
for i := range list.Items {
ki.gatherService(&list.Items[i], acc)
}
}
func (ki *KubernetesInventory) gatherService(s corev1.Service, acc telegraf.Accumulator) {
func (ki *KubernetesInventory) gatherService(s *corev1.Service, acc telegraf.Accumulator) {
creationTs := s.GetCreationTimestamp()
if creationTs.IsZero() {
return


@ -5,13 +5,13 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
func TestService(t *testing.T) {
@ -107,8 +107,9 @@ func TestService(t *testing.T) {
ks.SelectorExclude = v.exclude
require.NoError(t, ks.createSelectorFilters())
acc := new(testutil.Accumulator)
for _, service := range ((v.handler.responseMap["/service/"]).(*corev1.ServiceList)).Items {
ks.gatherService(service, acc)
items := ((v.handler.responseMap["/service/"]).(*corev1.ServiceList)).Items
for i := range items {
ks.gatherService(&items[i], acc)
}
err := acc.FirstError()
@ -266,8 +267,9 @@ func TestServiceSelectorFilter(t *testing.T) {
ks.SelectorExclude = v.exclude
require.NoError(t, ks.createSelectorFilters())
acc := new(testutil.Accumulator)
for _, service := range ((v.handler.responseMap["/service/"]).(*corev1.ServiceList)).Items {
ks.gatherService(service, acc)
items := ((v.handler.responseMap["/service/"]).(*corev1.ServiceList)).Items
for i := range items {
ks.gatherService(&items[i], acc)
}
// Grab selector tags


@ -14,12 +14,12 @@ func collectStatefulSets(ctx context.Context, acc telegraf.Accumulator, ki *Kube
acc.AddError(err)
return
}
for _, s := range list.Items {
ki.gatherStatefulSet(s, acc)
for i := range list.Items {
ki.gatherStatefulSet(&list.Items[i], acc)
}
}
func (ki *KubernetesInventory) gatherStatefulSet(s v1.StatefulSet, acc telegraf.Accumulator) {
func (ki *KubernetesInventory) gatherStatefulSet(s *v1.StatefulSet, acc telegraf.Accumulator) {
status := s.Status
fields := map[string]interface{}{
"created": s.GetCreationTimestamp().UnixNano(),


@ -5,12 +5,12 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
func TestStatefulSet(t *testing.T) {
@ -210,8 +210,9 @@ func TestStatefulSet(t *testing.T) {
}
require.NoError(t, ks.createSelectorFilters())
acc := &testutil.Accumulator{}
for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items {
ks.gatherStatefulSet(ss, acc)
items := ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items
for i := range items {
ks.gatherStatefulSet(&items[i], acc)
}
err := acc.FirstError()
@ -367,8 +368,9 @@ func TestStatefulSetSelectorFilter(t *testing.T) {
ks.SelectorExclude = v.exclude
require.NoError(t, ks.createSelectorFilters())
acc := new(testutil.Accumulator)
for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items {
ks.gatherStatefulSet(ss, acc)
items := ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items
for i := range items {
ks.gatherStatefulSet(&items[i], acc)
}
// Grab selector tags


@ -127,8 +127,10 @@ func getNodeURLs(log telegraf.Logger) ([]string, error) {
}
nodeUrls := make([]string, 0, len(nodes.Items))
for _, n := range nodes.Items {
address := getNodeAddress(n)
for i := range nodes.Items {
n := &nodes.Items[i]
address := getNodeAddress(n.Status.Addresses)
if address == "" {
log.Warnf("Unable to node addresses for Node %q", n.Name)
continue
@ -140,10 +142,9 @@ func getNodeURLs(log telegraf.Logger) ([]string, error) {
}
// Prefer internal addresses; if none are found, use ExternalIP
func getNodeAddress(node v1.Node) string {
func getNodeAddress(addresses []v1.NodeAddress) string {
extAddresses := make([]string, 0)
for _, addr := range node.Status.Addresses {
for _, addr := range addresses {
if addr.Type == v1.NodeInternalIP {
return addr.Address
}
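
getNodeAddress shows a different hugeParam remedy: instead of taking a pointer to the whole v1.Node, the signature narrows to the one slice the function reads. A slice header is three words (24 bytes on 64-bit platforms) regardless of length, so passing it by value stays cheap. A sketch with a local stand-in for v1.NodeAddress:

package example

type NodeAddress struct {
	Type    string
	Address string
}

// internalAddress is hypothetical; it mirrors the preference order above.
func internalAddress(addresses []NodeAddress) string {
	for _, addr := range addresses {
		if addr.Type == "InternalIP" { // v1.NodeInternalIP in the real code
			return addr.Address
		}
	}
	return "" // caller falls back to external addresses
}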


@ -14,7 +14,9 @@ func Parse(acc telegraf.Accumulator, buf []byte) error {
return err
}
for i, gpu := range s.GPU {
for i := range s.GPU {
gpu := &s.GPU[i]
tags := map[string]string{
"index": strconv.Itoa(i),
}


@ -22,7 +22,9 @@ func Parse(acc telegraf.Accumulator, buf []byte) error {
}
}
for i, gpu := range s.Gpu {
for i := range s.Gpu {
gpu := &s.Gpu[i]
tags := map[string]string{
"index": strconv.Itoa(i),
}


@ -645,7 +645,9 @@ func getDatacenters(ctx context.Context, e *Endpoint, resourceFilter *ResourceFi
return nil, err
}
m := make(objectMap, len(resources))
for _, r := range resources {
for i := range resources {
r := &resources[i]
m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{
name: r.Name,
ref: r.ExtensibleManagedObject.Reference(),
@ -667,7 +669,9 @@ func getClusters(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilte
}
cache := make(map[string]*types.ManagedObjectReference)
m := make(objectMap, len(resources))
for _, r := range resources {
for i := range resources {
r := &resources[i]
// Wrap in a function to make defer work correctly.
err := func() error {
// We're not interested in the immediate parent (a folder), but the data center.
@ -716,7 +720,9 @@ func getResourcePools(ctx context.Context, e *Endpoint, resourceFilter *Resource
return nil, err
}
m := make(objectMap)
for _, r := range resources {
for i := range resources {
r := &resources[i]
m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{
name: r.Name,
ref: r.ExtensibleManagedObject.Reference(),
@ -745,7 +751,9 @@ func getHosts(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter)
return nil, err
}
m := make(objectMap)
for _, r := range resources {
for i := range resources {
r := &resources[i]
m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{
name: r.Name,
ref: r.ExtensibleManagedObject.Reference(),
@ -779,7 +787,9 @@ func getVMs(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (o
if err != nil {
return nil, err
}
for _, r := range resources {
for i := range resources {
r := &resources[i]
if r.Runtime.PowerState != "poweredOn" {
continue
}
@ -872,7 +882,9 @@ func getDatastores(ctx context.Context, e *Endpoint, resourceFilter *ResourceFil
return nil, err
}
m := make(objectMap)
for _, r := range resources {
for i := range resources {
r := &resources[i]
lunID := ""
if r.Info != nil {
info := r.Info.GetDatastoreInfo()


@ -255,7 +255,8 @@ func testLookupVM(ctx context.Context, t *testing.T, f *Finder, path string, exp
if expectedName != "" {
require.Equal(t, expectedName, vm[0].Name)
}
for _, v := range vm {
for i := range vm {
v := &vm[i]
require.Equal(t, poweredOn, v.Runtime.PowerState)
}
}