chore(linters): Enable rangeValCopy and hugeParams checkers for gocritic (#14454)
parent 4ad8f6b814
commit e48b72f965
@@ -118,10 +118,26 @@ linters-settings:
       - weakCond
       # performance
       - equalFold
+      - hugeParam
       - preferDecodeRune
       - preferFprint
       - preferStringWriter
+      - rangeValCopy
       - stringXbytes
+
+    # Settings passed to gocritic.
+    # The settings key is the name of a supported gocritic checker.
+    # The list of supported checkers can be find in https://go-critic.github.io/overview.
+    settings:
+      hugeParam:
+        # Size in bytes that makes the warning trigger.
+        # Default: 80
+        sizeThreshold: 512
+      rangeValCopy:
+        # Size in bytes that makes the warning trigger.
+        # Default: 128
+        sizeThreshold: 512
+
   gosec:
     # To select a subset of rules to run.
     # Available rules: https://github.com/securego/gosec#available-rules
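For context on the two new checkers: hugeParam flags function parameters passed by value whose size exceeds `sizeThreshold`, and rangeValCopy flags `range` loops whose per-iteration value copy exceeds its threshold. Both are set to 512 bytes here, deliberately looser than the defaults (80 and 128). A minimal sketch of code that trips both, using a made-up `payload` type rather than anything in this repo:

```go
package main

// payload is illustrative only: large enough (>512 bytes under the
// thresholds configured above) to trigger both checkers.
type payload struct {
	buf [1024]byte
}

// hugeParam: p is copied (1 KiB) on every call; `p *payload` avoids it.
func sum(p payload) int {
	total := 0
	for _, b := range p.buf {
		total += int(b)
	}
	return total
}

// rangeValCopy: each iteration copies a full payload out of the slice;
// `for i := range ps { p := &ps[i]; ... }` avoids it.
func sumAll(ps []payload) int {
	total := 0
	for _, p := range ps {
		total += sum(p)
	}
	return total
}

func main() {
	println(sumAll(make([]payload, 8)))
}
```

The rest of the commit is the mechanical cleanup those two checkers demanded across the tree.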
@@ -131,13 +131,14 @@ type metric struct {

 func genTagsFields(gpus map[string]GPU, system map[string]sysInfo) []metric {
     metrics := []metric{}
-    for cardID, payload := range gpus {
+    for cardID := range gpus {
         if strings.Contains(cardID, "card") {
             tags := map[string]string{
                 "name": cardID,
             }
             fields := map[string]interface{}{}

+            payload := gpus[cardID]
             totVRAM, _ := strconv.ParseInt(payload.GpuVRAMTotalMemory, 10, 64)
             usdVRAM, _ := strconv.ParseInt(payload.GpuVRAMTotalUsedMemory, 10, 64)
             strFree := strconv.FormatInt(totVRAM-usdVRAM, 10)
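Ranging over a map with `for k, v := range m` copies each value into `v`. In the genTagsFields hunk above, dropping the value and indexing by key both silences rangeValCopy and moves the copy behind the `strings.Contains` filter, so non-matching entries are never copied at all. A reduced sketch of the pattern with illustrative types:

```go
package main

import "strings"

type gpu struct{ vram [512]byte } // illustrative large value type

func main() {
	gpus := map[string]gpu{"card0": {}, "misc": {}}

	// `for id, g := range gpus` would copy every value, wanted or not;
	// ranging over keys defers the copy until after the filter.
	for id := range gpus {
		if !strings.Contains(id, "card") {
			continue
		}
		g := gpus[id] // copy happens only for matching entries
		_ = g
	}
}
```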
@@ -5,9 +5,10 @@ import (
     "testing"
     "time"

+    "github.com/stretchr/testify/require"
+
     "github.com/influxdata/telegraf/plugins/parsers/influx"
     "github.com/influxdata/telegraf/testutil"
-    "github.com/stretchr/testify/require"
 )

 const cgroupID = "c69461b2c836cc3f0e3e5deb07b1f16e25f6009da2a48bb0adc7dd580befaf55"
@@ -53,7 +54,7 @@ func TestParseCgroupV2Meta(t *testing.T) {
     }

     var acc testutil.Accumulator
-    metastats(cgroupID, validMeta.Containers[0], &acc, tags, time.Now())
+    metastats(cgroupID, &validMeta.Containers[0], &acc, tags, time.Now())

     actual := acc.GetTelegrafMetrics()
     testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime())
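Call sites change in step with the signatures: `validMeta.Containers[0]` (a copy of the element) becomes `&validMeta.Containers[0]` (a pointer into the backing array). Indexed slice elements are addressable in Go, so this is a cheap, safe rewrite; a tiny sketch:

```go
package main

import "fmt"

type container struct{ Name string }

func meta(c *container) string { return c.Name }

func main() {
	containers := []container{{Name: "pause"}, {Name: "nginx"}}
	// &containers[0] points into the slice's backing array;
	// nothing is copied to make the call.
	fmt.Println(meta(&containers[0]))
}
```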
@@ -24,7 +24,7 @@ var (
 // Client is the ECS client contract
 type Client interface {
     Task() (*Task, error)
-    ContainerStats() (map[string]types.StatsJSON, error)
+    ContainerStats() (map[string]*types.StatsJSON, error)
 }

 type httpClient interface {
@@ -129,11 +129,11 @@ func (c *EcsClient) Task() (*Task, error) {
 }

 // ContainerStats calls the ECS stats endpoint and returns a populated container stats map
-func (c *EcsClient) ContainerStats() (map[string]types.StatsJSON, error) {
+func (c *EcsClient) ContainerStats() (map[string]*types.StatsJSON, error) {
     req, _ := http.NewRequest("GET", c.statsURL, nil)
     resp, err := c.client.Do(req)
     if err != nil {
-        return map[string]types.StatsJSON{}, err
+        return nil, err
     }

     defer resp.Body.Close()
@@ -144,19 +144,14 @@ func (c *EcsClient) ContainerStats() (map[string]types.StatsJSON, error) {
         return nil, fmt.Errorf("%s returned HTTP status %s: %q", c.statsURL, resp.Status, body)
     }

-    statsMap, err := unmarshalStats(resp.Body)
-    if err != nil {
-        return map[string]types.StatsJSON{}, err
-    }
-
-    return statsMap, nil
+    return unmarshalStats(resp.Body)
 }

 // PollSync executes Task and ContainerStats in parallel. If both succeed, both structs are returned.
 // If either errors, a single error is returned.
-func PollSync(c Client) (*Task, map[string]types.StatsJSON, error) {
+func PollSync(c Client) (*Task, map[string]*types.StatsJSON, error) {
     var task *Task
-    var stats map[string]types.StatsJSON
+    var stats map[string]*types.StatsJSON
     var err error

     if stats, err = c.ContainerStats(); err != nil {
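Switching the stats map from `map[string]types.StatsJSON` to `map[string]*types.StatsJSON` means callers pass around pointers to the large decoded structs instead of copying them on every map read, and lets error paths return plain `nil` instead of allocating an empty map. A sketch of the difference with a stand-in `Stats` type (not Docker's real one):

```go
package main

import "fmt"

type Stats struct{ samples [4096]int64 } // stand-in for a large decoded struct

func main() {
	byValue := map[string]Stats{"a": {}}
	byPointer := map[string]*Stats{"a": {}} // {} is shorthand for &Stats{}

	s1 := byValue["a"]   // copies ~32 KiB out of the map
	s2 := byPointer["a"] // copies one pointer (8 bytes)

	fmt.Println(s1.samples[0], s2.samples[0])
}
```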
@@ -15,14 +15,14 @@ import (

 type pollMock struct {
     task  func() (*Task, error)
-    stats func() (map[string]types.StatsJSON, error)
+    stats func() (map[string]*types.StatsJSON, error)
 }

 func (p *pollMock) Task() (*Task, error) {
     return p.task()
 }

-func (p *pollMock) ContainerStats() (map[string]types.StatsJSON, error) {
+func (p *pollMock) ContainerStats() (map[string]*types.StatsJSON, error) {
     return p.stats()
 }
@@ -31,7 +31,7 @@ func TestEcsClient_PollSync(t *testing.T) {
         name    string
         mock    *pollMock
         want    *Task
-        want1   map[string]types.StatsJSON
+        want1   map[string]*types.StatsJSON
         wantErr bool
     }{
         {
@@ -40,7 +40,7 @@ func TestEcsClient_PollSync(t *testing.T) {
             task: func() (*Task, error) {
                 return &validMeta, nil
             },
-            stats: func() (map[string]types.StatsJSON, error) {
+            stats: func() (map[string]*types.StatsJSON, error) {
                 return validStats, nil
             },
         },
@@ -53,7 +53,7 @@ func TestEcsClient_PollSync(t *testing.T) {
             task: func() (*Task, error) {
                 return nil, errors.New("err")
             },
-            stats: func() (map[string]types.StatsJSON, error) {
+            stats: func() (map[string]*types.StatsJSON, error) {
                 return validStats, nil
             },
         },
@@ -65,7 +65,7 @@ func TestEcsClient_PollSync(t *testing.T) {
             task: func() (*Task, error) {
                 return &validMeta, nil
             },
-            stats: func() (map[string]types.StatsJSON, error) {
+            stats: func() (map[string]*types.StatsJSON, error) {
                 return nil, errors.New("err")
             },
         },
@@ -170,7 +170,7 @@ func TestEcsClient_ContainerStats(t *testing.T) {
     tests := []struct {
         name    string
         client  httpClient
-        want    map[string]types.StatsJSON
+        want    map[string]*types.StatsJSON
         wantErr bool
     }{
         {
@@ -192,7 +192,7 @@ func TestEcsClient_ContainerStats(t *testing.T) {
                     return nil, errors.New("err")
                 },
             },
-            want:    map[string]types.StatsJSON{},
+            want:    nil,
             wantErr: true,
         },
         {
@@ -205,7 +205,7 @@ func TestEcsClient_ContainerStats(t *testing.T) {
                     }, nil
                 },
             },
-            want:    map[string]types.StatsJSON{},
+            want:    nil,
             wantErr: true,
         },
         {
@@ -157,7 +157,8 @@ func (ecs *Ecs) accTask(task *Task, tags map[string]string, acc telegraf.Accumul
 }

 func (ecs *Ecs) accContainers(task *Task, taskTags map[string]string, acc telegraf.Accumulator) {
-    for _, c := range task.Containers {
+    for i := range task.Containers {
+        c := &task.Containers[i]
         if !ecs.containerNameFilter.Match(c.Name) {
             continue
         }
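This is the replacement pattern used throughout the commit: range over indices and take the element's address, so the loop variable is an 8-byte pointer rather than a per-iteration copy of the struct, and any writes through it (as in mergeTaskStats further down) land in the slice itself. A generic sketch:

```go
package main

import "fmt"

type container struct {
	Name string
	Meta [256]byte // padding to make the per-iteration copy worth avoiding
}

func main() {
	cs := make([]container, 3)

	// rangeValCopy-style fix: c points into the slice, nothing is copied,
	// and writes through c are visible after the loop.
	for i := range cs {
		c := &cs[i]
		c.Name = fmt.Sprintf("container-%d", i)
	}
	fmt.Println(cs[0].Name, cs[2].Name)
}
```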
@@ -20,7 +20,7 @@ var pauseStatsPreRead, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:39:59.933
 var nginxStatsRead, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:40:00.93733207Z")
 var nginxStatsPreRead, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:39:59.934291009Z")

-var validStats = map[string]types.StatsJSON{
+var validStats = map[string]*types.StatsJSON{
     pauseStatsKey: {
         Stats: types.Stats{
             Read: pauseStatsRead,
@@ -6,13 +6,14 @@ import (
     "time"

     "github.com/docker/docker/api/types"
+
     "github.com/influxdata/telegraf"
     "github.com/influxdata/telegraf/plugins/inputs/docker"
 )

-func parseContainerStats(c Container, acc telegraf.Accumulator, tags map[string]string) {
+func parseContainerStats(c *Container, acc telegraf.Accumulator, tags map[string]string) {
     id := c.ID
-    stats := c.Stats
+    stats := &c.Stats
     tm := stats.Read

     if tm.Before(time.Unix(0, 0)) {
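`stats := &c.Stats` takes the address of a field inside the now pointer-received Container instead of copying the embedded stats struct; the helper functions below then change to accept `*types.StatsJSON`. A sketch with stand-in types:

```go
package main

import "fmt"

type stats struct{ Reads, Writes int }

type container struct {
	ID    string
	Stats stats
}

// hugeParam-style fix: take a pointer, not a value.
func record(id string, s *stats) {
	fmt.Println(id, s.Reads, s.Writes)
}

func main() {
	c := &container{ID: "abc", Stats: stats{Reads: 1, Writes: 2}}
	s := &c.Stats // address of the field; the stats struct is not copied
	record(c.ID, s)
}
```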
@@ -26,7 +27,7 @@ func parseContainerStats(c Container, acc telegraf.Accumulator, tags map[string]
     blkstats(id, stats, acc, tags, tm)
 }

-func metastats(id string, c Container, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
+func metastats(id string, c *Container, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
     metafields := map[string]interface{}{
         "container_id": id,
         "docker_name":  c.DockerName,
@@ -44,7 +45,7 @@ func metastats(id string, c Container, acc telegraf.Accumulator, tags map[string
     acc.AddFields("ecs_container_meta", metafields, tags, tm)
 }

-func memstats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
+func memstats(id string, stats *types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
     memfields := map[string]interface{}{
         "container_id": id,
     }
@@ -101,7 +102,7 @@ func memstats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags m
     acc.AddFields("ecs_container_mem", memfields, tags, tm)
 }

-func cpustats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
+func cpustats(id string, stats *types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
     cpufields := map[string]interface{}{
         "usage_total":       stats.CPUStats.CPUUsage.TotalUsage,
         "usage_in_usermode": stats.CPUStats.CPUUsage.UsageInUsermode,
@@ -115,7 +116,7 @@ func cpustats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags m

     previousCPU := stats.PreCPUStats.CPUUsage.TotalUsage
     previousSystem := stats.PreCPUStats.SystemUsage
-    cpuPercent := docker.CalculateCPUPercentUnix(previousCPU, previousSystem, &stats)
+    cpuPercent := docker.CalculateCPUPercentUnix(previousCPU, previousSystem, stats)
     cpufields["usage_percent"] = cpuPercent

     cputags := copyTags(tags)
@@ -142,7 +143,7 @@ func cpustats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags m
     }
 }

-func netstats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
+func netstats(id string, stats *types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
     totalNetworkStatMap := make(map[string]interface{})
     for network, netstats := range stats.Networks {
         netfields := map[string]interface{}{
@@ -194,7 +195,7 @@ func netstats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags m
     }
 }

-func blkstats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
+func blkstats(id string, stats *types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) {
     blkioStats := stats.BlkioStats
     // Make a map of devices to their block io stats
     deviceStatMap := make(map[string]map[string]interface{})
@@ -15,7 +15,7 @@ func Test_metastats(t *testing.T) {
     }
     tm := time.Now()

-    metastats(nginxStatsKey, validMeta.Containers[1], &mockAcc, tags, tm)
+    metastats(nginxStatsKey, &validMeta.Containers[1], &mockAcc, tags, tm)
     mockAcc.AssertContainsTaggedFields(
         t,
         "ecs_container_meta",
@@ -54,15 +54,18 @@ func unmarshalTask(r io.Reader) (*Task, error) {
 }

 // docker parsers
-func unmarshalStats(r io.Reader) (map[string]types.StatsJSON, error) {
-    var statsMap map[string]types.StatsJSON
-    err := json.NewDecoder(r).Decode(&statsMap)
-    return statsMap, err
+func unmarshalStats(r io.Reader) (map[string]*types.StatsJSON, error) {
+    var statsMap map[string]*types.StatsJSON
+    if err := json.NewDecoder(r).Decode(&statsMap); err != nil {
+        return nil, err
+    }
+    return statsMap, nil
 }

 // interleaves Stats in to the Container objects in the Task
-func mergeTaskStats(task *Task, stats map[string]types.StatsJSON) {
-    for i, c := range task.Containers {
+func mergeTaskStats(task *Task, stats map[string]*types.StatsJSON) {
+    for i := range task.Containers {
+        c := &task.Containers[i]
         if strings.Trim(c.ID, " ") == "" {
             continue
         }
@@ -70,6 +73,6 @@ func mergeTaskStats(task *Task, stats map[string]types.StatsJSON) {
         if !ok {
             continue
         }
-        task.Containers[i].Stats = stat
+        c.Stats = *stat
     }
 }
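`encoding/json` decodes happily into a map whose values are pointers: each entry is allocated once by the decoder and handed around by reference afterwards, and the error path can return `nil` instead of a partial map. A sketch of the reworked helper with a stand-in `Stats` type rather than Docker's types.StatsJSON:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

// Stats stands in for docker's types.StatsJSON; only the shape matters here.
type Stats struct {
	Read string `json:"read"`
}

// Decode straight into a map of pointers; on error, return nil rather than
// a partially filled map.
func unmarshalStats(r io.Reader) (map[string]*Stats, error) {
	var statsMap map[string]*Stats
	if err := json.NewDecoder(r).Decode(&statsMap); err != nil {
		return nil, err
	}
	return statsMap, nil
}

func main() {
	m, err := unmarshalStats(strings.NewReader(`{"abc": {"read": "2018-11-19T15:40:00Z"}}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(m["abc"].Read)
}
```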
@@ -41,7 +41,7 @@ func Test_mergeTaskStats(t *testing.T) {

     mergeTaskStats(parsedMetadata, parsedStats)

-    for _, cont := range parsedMetadata.Containers {
-        require.Equal(t, validStats[cont.ID], cont.Stats)
+    for i := range parsedMetadata.Containers {
+        require.Equal(t, validStats[parsedMetadata.Containers[i].ID], &parsedMetadata.Containers[i].Stats)
     }
 }
@@ -14,12 +14,12 @@ func collectDaemonSets(ctx context.Context, acc telegraf.Accumulator, ki *Kubern
         acc.AddError(err)
         return
     }
-    for _, d := range list.Items {
-        ki.gatherDaemonSet(d, acc)
+    for i := range list.Items {
+        ki.gatherDaemonSet(&list.Items[i], acc)
     }
 }

-func (ki *KubernetesInventory) gatherDaemonSet(d v1.DaemonSet, acc telegraf.Accumulator) {
+func (ki *KubernetesInventory) gatherDaemonSet(d *v1.DaemonSet, acc telegraf.Accumulator) {
     fields := map[string]interface{}{
         "generation":               d.Generation,
         "current_number_scheduled": d.Status.CurrentNumberScheduled,
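The same two-line rewrite is applied to every collector in this plugin (deployments, nodes, persistent volumes, pods, services and stateful sets below): range over indices and pass `&list.Items[i]`. The gather helpers only read from the item, so sharing a pointer is safe here. A reduced sketch of the collect/gather shape, with made-up types:

```go
package main

import "fmt"

type daemonSet struct {
	Name       string
	Generation int64
}

type list struct{ Items []daemonSet }

// gather takes a pointer so each (potentially large) item is not copied
// per call.
func gather(d *daemonSet) {
	fmt.Println(d.Name, d.Generation)
}

func collect(l *list) {
	for i := range l.Items {
		gather(&l.Items[i])
	}
}

func main() {
	collect(&list{Items: []daemonSet{{"kube-proxy", 3}, {"fluentd", 7}}})
}
```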
@@ -5,12 +5,12 @@ import (
     "testing"
     "time"

+    "github.com/stretchr/testify/require"
     v1 "k8s.io/api/apps/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

     "github.com/influxdata/telegraf"
     "github.com/influxdata/telegraf/testutil"
-    "github.com/stretchr/testify/require"
 )

 func TestDaemonSet(t *testing.T) {
@@ -108,8 +108,9 @@ func TestDaemonSet(t *testing.T) {
             }
             require.NoError(t, ks.createSelectorFilters())
             acc := new(testutil.Accumulator)
-            for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items {
-                ks.gatherDaemonSet(dset, acc)
+            items := ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items
+            for i := range items {
+                ks.gatherDaemonSet(&items[i], acc)
             }

             err := acc.FirstError()
@@ -268,8 +269,9 @@ func TestDaemonSetSelectorFilter(t *testing.T) {
         ks.SelectorExclude = v.exclude
         require.NoError(t, ks.createSelectorFilters())
         acc := new(testutil.Accumulator)
-        for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items {
-            ks.gatherDaemonSet(dset, acc)
+        items := ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items
+        for i := range items {
+            ks.gatherDaemonSet(&items[i], acc)
         }

         // Grab selector tags
@@ -3,8 +3,9 @@ package kube_inventory
 import (
     "context"

-    "github.com/influxdata/telegraf"
     v1 "k8s.io/api/apps/v1"
+
+    "github.com/influxdata/telegraf"
 )

 func collectDeployments(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) {
@@ -13,12 +14,12 @@ func collectDeployments(ctx context.Context, acc telegraf.Accumulator, ki *Kuber
         acc.AddError(err)
         return
     }
-    for _, d := range list.Items {
-        ki.gatherDeployment(d, acc)
+    for i := range list.Items {
+        ki.gatherDeployment(&list.Items[i], acc)
     }
 }

-func (ki *KubernetesInventory) gatherDeployment(d v1.Deployment, acc telegraf.Accumulator) {
+func (ki *KubernetesInventory) gatherDeployment(d *v1.Deployment, acc telegraf.Accumulator) {
     fields := map[string]interface{}{
         "replicas_available":   d.Status.AvailableReplicas,
         "replicas_unavailable": d.Status.UnavailableReplicas,
@@ -5,13 +5,13 @@ import (
     "testing"
     "time"

+    "github.com/stretchr/testify/require"
     v1 "k8s.io/api/apps/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/intstr"

     "github.com/influxdata/telegraf"
     "github.com/influxdata/telegraf/testutil"
-    "github.com/stretchr/testify/require"
 )

 func TestDeployment(t *testing.T) {
@@ -113,8 +113,9 @@ func TestDeployment(t *testing.T) {
             }
             require.NoError(t, ks.createSelectorFilters())
             acc := new(testutil.Accumulator)
-            for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items {
-                ks.gatherDeployment(deployment, acc)
+            items := ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items
+            for i := range items {
+                ks.gatherDeployment(&items[i], acc)
             }

             err := acc.FirstError()
@@ -284,8 +285,9 @@ func TestDeploymentSelectorFilter(t *testing.T) {
         ks.SelectorExclude = v.exclude
         require.NoError(t, ks.createSelectorFilters())
         acc := new(testutil.Accumulator)
-        for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items {
-            ks.gatherDeployment(deployment, acc)
+        items := ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items
+        for i := range items {
+            ks.gatherDeployment(&items[i], acc)
         }

         // Grab selector tags
@@ -17,8 +17,8 @@ func collectNodes(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesI

     ki.gatherNodeCount(len(list.Items), acc)

-    for _, n := range list.Items {
-        ki.gatherNode(n, acc)
+    for i := range list.Items {
+        ki.gatherNode(&list.Items[i], acc)
     }
 }
@@ -29,7 +29,7 @@ func (ki *KubernetesInventory) gatherNodeCount(count int, acc telegraf.Accumulat
     acc.AddFields(nodeMeasurement, fields, tags)
 }

-func (ki *KubernetesInventory) gatherNode(n corev1.Node, acc telegraf.Accumulator) {
+func (ki *KubernetesInventory) gatherNode(n *corev1.Node, acc telegraf.Accumulator) {
     fields := map[string]interface{}{}
     tags := map[string]string{
         "node_name": n.Name,
@@ -4,13 +4,13 @@ import (
     "testing"
     "time"

+    "github.com/stretchr/testify/require"
     corev1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

     "github.com/influxdata/telegraf"
     "github.com/influxdata/telegraf/testutil"
-    "github.com/stretchr/testify/require"
 )

 func TestNode(t *testing.T) {
@@ -157,8 +157,9 @@ func TestNode(t *testing.T) {
                 client: cli,
             }
             acc := new(testutil.Accumulator)
-            for _, node := range ((v.handler.responseMap["/nodes/"]).(corev1.NodeList)).Items {
-                ks.gatherNode(node, acc)
+            items := ((v.handler.responseMap["/nodes/"]).(corev1.NodeList)).Items
+            for i := range items {
+                ks.gatherNode(&items[i], acc)
             }

             err := acc.FirstError()
@@ -15,12 +15,12 @@ func collectPersistentVolumes(ctx context.Context, acc telegraf.Accumulator, ki
         acc.AddError(err)
         return
     }
-    for _, pv := range list.Items {
-        ki.gatherPersistentVolume(pv, acc)
+    for i := range list.Items {
+        ki.gatherPersistentVolume(&list.Items[i], acc)
     }
 }

-func (ki *KubernetesInventory) gatherPersistentVolume(pv corev1.PersistentVolume, acc telegraf.Accumulator) {
+func (ki *KubernetesInventory) gatherPersistentVolume(pv *corev1.PersistentVolume, acc telegraf.Accumulator) {
     phaseType := 5
     switch strings.ToLower(string(pv.Status.Phase)) {
     case "bound":
@@ -4,12 +4,12 @@ import (
     "testing"
     "time"

+    "github.com/stretchr/testify/require"
     corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

     "github.com/influxdata/telegraf"
     "github.com/influxdata/telegraf/testutil"
-    "github.com/stretchr/testify/require"
 )

 func TestPersistentVolume(t *testing.T) {
@@ -81,8 +81,9 @@ func TestPersistentVolume(t *testing.T) {
                 client: cli,
             }
             acc := new(testutil.Accumulator)
-            for _, pv := range ((v.handler.responseMap["/persistentvolumes/"]).(*corev1.PersistentVolumeList)).Items {
-                ks.gatherPersistentVolume(pv, acc)
+            items := ((v.handler.responseMap["/persistentvolumes/"]).(*corev1.PersistentVolumeList)).Items
+            for i := range items {
+                ks.gatherPersistentVolume(&items[i], acc)
             }

             err := acc.FirstError()
@@ -25,12 +25,12 @@ func collectPods(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesIn
         acc.AddError(err)
         return
     }
-    for _, p := range listRef.Items {
-        ki.gatherPod(p, acc)
+    for i := range listRef.Items {
+        ki.gatherPod(&listRef.Items[i], acc)
     }
 }

-func (ki *KubernetesInventory) gatherPod(p corev1.Pod, acc telegraf.Accumulator) {
+func (ki *KubernetesInventory) gatherPod(p *corev1.Pod, acc telegraf.Accumulator) {
     creationTs := p.GetCreationTimestamp()
     if creationTs.IsZero() {
         return
@@ -50,7 +50,7 @@ func (ki *KubernetesInventory) gatherPod(p corev1.Pod, acc telegraf.Accumulator)
         }
     }

-func (ki *KubernetesInventory) gatherPodContainer(p corev1.Pod, cs corev1.ContainerStatus, c corev1.Container, acc telegraf.Accumulator) {
+func (ki *KubernetesInventory) gatherPodContainer(p *corev1.Pod, cs corev1.ContainerStatus, c corev1.Container, acc telegraf.Accumulator) {
     stateCode := 3
     stateReason := ""
     state := "unknown"
@@ -5,13 +5,13 @@ import (
     "testing"
    "time"

+    "github.com/stretchr/testify/require"
     corev1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

     "github.com/influxdata/telegraf"
     "github.com/influxdata/telegraf/testutil"
-    "github.com/stretchr/testify/require"
 )

 func TestPod(t *testing.T) {
@@ -447,8 +447,9 @@ func TestPod(t *testing.T) {
             }
             require.NoError(t, ks.createSelectorFilters())
             acc := new(testutil.Accumulator)
-            for _, pod := range ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items {
-                ks.gatherPod(pod, acc)
+            items := ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items
+            for i := range items {
+                ks.gatherPod(&items[i], acc)
             }

             err := acc.FirstError()
@@ -682,8 +683,9 @@ func TestPodSelectorFilter(t *testing.T) {
         ks.SelectorExclude = v.exclude
         require.NoError(t, ks.createSelectorFilters())
         acc := new(testutil.Accumulator)
-        for _, pod := range ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items {
-            ks.gatherPod(pod, acc)
+        items := ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items
+        for i := range items {
+            ks.gatherPod(&items[i], acc)
         }

         // Grab selector tags
@@ -992,8 +994,9 @@ func TestPodPendingContainers(t *testing.T) {
             }
             require.NoError(t, ks.createSelectorFilters())
             acc := new(testutil.Accumulator)
-            for _, pod := range ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items {
-                ks.gatherPod(pod, acc)
+            items := ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items
+            for i := range items {
+                ks.gatherPod(&items[i], acc)
             }

             err := acc.FirstError()
@@ -14,12 +14,12 @@ func collectServices(ctx context.Context, acc telegraf.Accumulator, ki *Kubernet
         acc.AddError(err)
         return
     }
-    for _, i := range list.Items {
-        ki.gatherService(i, acc)
+    for i := range list.Items {
+        ki.gatherService(&list.Items[i], acc)
     }
 }

-func (ki *KubernetesInventory) gatherService(s corev1.Service, acc telegraf.Accumulator) {
+func (ki *KubernetesInventory) gatherService(s *corev1.Service, acc telegraf.Accumulator) {
     creationTs := s.GetCreationTimestamp()
     if creationTs.IsZero() {
         return
@@ -5,13 +5,13 @@ import (
     "testing"
     "time"

+    "github.com/stretchr/testify/require"
     corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/intstr"

     "github.com/influxdata/telegraf"
     "github.com/influxdata/telegraf/testutil"
-    "github.com/stretchr/testify/require"
 )

 func TestService(t *testing.T) {
@@ -107,8 +107,9 @@ func TestService(t *testing.T) {
             ks.SelectorExclude = v.exclude
             require.NoError(t, ks.createSelectorFilters())
             acc := new(testutil.Accumulator)
-            for _, service := range ((v.handler.responseMap["/service/"]).(*corev1.ServiceList)).Items {
-                ks.gatherService(service, acc)
+            items := ((v.handler.responseMap["/service/"]).(*corev1.ServiceList)).Items
+            for i := range items {
+                ks.gatherService(&items[i], acc)
             }

             err := acc.FirstError()
@@ -266,8 +267,9 @@ func TestServiceSelectorFilter(t *testing.T) {
         ks.SelectorExclude = v.exclude
         require.NoError(t, ks.createSelectorFilters())
         acc := new(testutil.Accumulator)
-        for _, service := range ((v.handler.responseMap["/service/"]).(*corev1.ServiceList)).Items {
-            ks.gatherService(service, acc)
+        items := ((v.handler.responseMap["/service/"]).(*corev1.ServiceList)).Items
+        for i := range items {
+            ks.gatherService(&items[i], acc)
         }

         // Grab selector tags
@@ -14,12 +14,12 @@ func collectStatefulSets(ctx context.Context, acc telegraf.Accumulator, ki *Kube
         acc.AddError(err)
         return
     }
-    for _, s := range list.Items {
-        ki.gatherStatefulSet(s, acc)
+    for i := range list.Items {
+        ki.gatherStatefulSet(&list.Items[i], acc)
     }
 }

-func (ki *KubernetesInventory) gatherStatefulSet(s v1.StatefulSet, acc telegraf.Accumulator) {
+func (ki *KubernetesInventory) gatherStatefulSet(s *v1.StatefulSet, acc telegraf.Accumulator) {
     status := s.Status
     fields := map[string]interface{}{
         "created": s.GetCreationTimestamp().UnixNano(),
@@ -5,12 +5,12 @@ import (
     "testing"
     "time"

+    "github.com/stretchr/testify/require"
     v1 "k8s.io/api/apps/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

     "github.com/influxdata/telegraf"
     "github.com/influxdata/telegraf/testutil"
-    "github.com/stretchr/testify/require"
 )

 func TestStatefulSet(t *testing.T) {
@@ -210,8 +210,9 @@ func TestStatefulSet(t *testing.T) {
             }
             require.NoError(t, ks.createSelectorFilters())
             acc := &testutil.Accumulator{}
-            for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items {
-                ks.gatherStatefulSet(ss, acc)
+            items := ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items
+            for i := range items {
+                ks.gatherStatefulSet(&items[i], acc)
             }

             err := acc.FirstError()
@@ -367,8 +368,9 @@ func TestStatefulSetSelectorFilter(t *testing.T) {
         ks.SelectorExclude = v.exclude
         require.NoError(t, ks.createSelectorFilters())
         acc := new(testutil.Accumulator)
-        for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items {
-            ks.gatherStatefulSet(ss, acc)
+        items := ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items
+        for i := range items {
+            ks.gatherStatefulSet(&items[i], acc)
         }

         // Grab selector tags
@@ -127,8 +127,10 @@ func getNodeURLs(log telegraf.Logger) ([]string, error) {
     }

     nodeUrls := make([]string, 0, len(nodes.Items))
-    for _, n := range nodes.Items {
-        address := getNodeAddress(n)
+    for i := range nodes.Items {
+        n := &nodes.Items[i]
+
+        address := getNodeAddress(n.Status.Addresses)
         if address == "" {
             log.Warnf("Unable to node addresses for Node %q", n.Name)
             continue
@@ -140,10 +142,9 @@ func getNodeURLs(log telegraf.Logger) ([]string, error) {
     }

 // Prefer internal addresses, if none found, use ExternalIP
-func getNodeAddress(node v1.Node) string {
+func getNodeAddress(addresses []v1.NodeAddress) string {
     extAddresses := make([]string, 0)
-
-    for _, addr := range node.Status.Addresses {
+    for _, addr := range addresses {
         if addr.Type == v1.NodeInternalIP {
             return addr.Address
         }
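getNodeAddress shows the other way to satisfy hugeParam: narrow the parameter instead of taking a pointer. The function only needs the address list, so it now receives a `[]v1.NodeAddress` (a slice header, three words) rather than a whole `v1.Node` by value. A generic sketch of the idea, with made-up types:

```go
package main

import "fmt"

type address struct{ Type, Addr string }

type node struct {
	Name      string
	Labels    map[string]string
	Addresses []address
	// ... many more fields in the real v1.Node
}

// Narrowed parameter: only a slice header is copied, not the whole node,
// and the signature now states exactly what the function depends on.
func internalAddress(addresses []address) string {
	for _, a := range addresses {
		if a.Type == "InternalIP" {
			return a.Addr
		}
	}
	return ""
}

func main() {
	n := node{Name: "n1", Addresses: []address{{"InternalIP", "10.0.0.5"}}}
	fmt.Println(internalAddress(n.Addresses))
}
```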
@@ -14,7 +14,9 @@ func Parse(acc telegraf.Accumulator, buf []byte) error {
         return err
     }

-    for i, gpu := range s.GPU {
+    for i := range s.GPU {
+        gpu := &s.GPU[i]
+
         tags := map[string]string{
             "index": strconv.Itoa(i),
         }
@@ -22,7 +22,9 @@ func Parse(acc telegraf.Accumulator, buf []byte) error {
         }
     }

-    for i, gpu := range s.Gpu {
+    for i := range s.Gpu {
+        gpu := &s.Gpu[i]
+
         tags := map[string]string{
             "index": strconv.Itoa(i),
         }
@@ -645,7 +645,9 @@ func getDatacenters(ctx context.Context, e *Endpoint, resourceFilter *ResourceFi
         return nil, err
     }
     m := make(objectMap, len(resources))
-    for _, r := range resources {
+    for i := range resources {
+        r := &resources[i]
+
         m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{
             name: r.Name,
             ref:  r.ExtensibleManagedObject.Reference(),
@@ -667,7 +669,9 @@ func getClusters(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilte
     }
     cache := make(map[string]*types.ManagedObjectReference)
     m := make(objectMap, len(resources))
-    for _, r := range resources {
+    for i := range resources {
+        r := &resources[i]
+
         // Wrap in a function to make defer work correctly.
         err := func() error {
             // We're not interested in the immediate parent (a folder), but the data center.
@@ -716,7 +720,9 @@ func getResourcePools(ctx context.Context, e *Endpoint, resourceFilter *Resource
         return nil, err
     }
     m := make(objectMap)
-    for _, r := range resources {
+    for i := range resources {
+        r := &resources[i]
+
         m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{
             name: r.Name,
             ref:  r.ExtensibleManagedObject.Reference(),
@@ -745,7 +751,9 @@ func getHosts(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter)
         return nil, err
     }
     m := make(objectMap)
-    for _, r := range resources {
+    for i := range resources {
+        r := &resources[i]
+
         m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{
             name: r.Name,
             ref:  r.ExtensibleManagedObject.Reference(),
@@ -779,7 +787,9 @@ func getVMs(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (o
     if err != nil {
         return nil, err
     }
-    for _, r := range resources {
+    for i := range resources {
+        r := &resources[i]
+
         if r.Runtime.PowerState != "poweredOn" {
             continue
         }
@@ -872,7 +882,9 @@ func getDatastores(ctx context.Context, e *Endpoint, resourceFilter *ResourceFil
         return nil, err
     }
     m := make(objectMap)
-    for _, r := range resources {
+    for i := range resources {
+        r := &resources[i]
+
         lunID := ""
         if r.Info != nil {
             info := r.Info.GetDatastoreInfo()
@@ -255,7 +255,8 @@ func testLookupVM(ctx context.Context, t *testing.T, f *Finder, path string, exp
     if expectedName != "" {
         require.Equal(t, expectedName, vm[0].Name)
     }
-    for _, v := range vm {
+    for i := range vm {
+        v := &vm[i]
         require.Equal(t, poweredOn, v.Runtime.PowerState)
     }
 }
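To see the effect these checkers are chasing, here is a throwaway benchmark sketch (not part of the commit) comparing value and pointer-style iteration over a slice of 1 KiB structs; on typical hardware the value loop is measurably slower, though exact numbers vary:

```go
package bench

import "testing"

type item struct{ buf [1024]byte }

// sink keeps the compiler from eliminating the loop bodies.
var sink byte

func BenchmarkRangeValue(b *testing.B) {
	data := make([]item, 1024)
	for n := 0; n < b.N; n++ {
		for _, it := range data { // copies 1 KiB per element
			sink += it.buf[0]
		}
	}
}

func BenchmarkRangeIndex(b *testing.B) {
	data := make([]item, 1024)
	for n := 0; n < b.N; n++ {
		for i := range data { // reads the element in place
			sink += data[i].buf[0]
		}
	}
}
```

Save it as a `_test.go` file and run `go test -bench=.` to compare the two loops.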