chore: Fix linter findings for `revive:unused-receiver` in `plugins/inputs/[f-k]` (#16308)
parent c0b3dd489e
commit 6f80899e13
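revive's `unused-receiver` check flags methods that name a receiver without ever using it. Every hunk below applies the same mechanical fix, sketched here with lines taken from the first hunk: the receiver name is dropped where only the type is needed,

-func (f fakeFileSystem) open(name string) (file, error) {
+func (fakeFileSystem) open(name string) (file, error) {

or the method is demoted to a plain function where the receiver is not needed at all (e.g. `r.gatherTemps(s, acc)` becomes `gatherTemps(s, acc)`). Unused parameter names are removed the same way: `Gather(_ telegraf.Accumulator)` becomes `Gather(telegraf.Accumulator)`.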
@@ -40,7 +40,7 @@ func (f fakeFileInfo) ModTime() time.Time { return f.modtime }
 func (f fakeFileInfo) IsDir() bool { return f.isdir }
 func (f fakeFileInfo) Sys() interface{} { return f.sys }
 
-func (f fakeFileSystem) open(name string) (file, error) {
+func (fakeFileSystem) open(name string) (file, error) {
 	return nil, &os.PathError{Op: "Open", Path: name, Err: errors.New("not implemented by fake filesystem")}
 }

@@ -87,7 +87,7 @@ func (r *Fireboard) Gather(acc telegraf.Accumulator) error {
 	}
 	// Range over all devices, gathering stats. Returns early in case of any error.
 	for _, s := range stats {
-		r.gatherTemps(s, acc)
+		gatherTemps(s, acc)
 	}
 	return nil
 }

@@ -105,7 +105,7 @@ func scale(n int) string {
 }
 
 // Gathers stats from a single device, adding them to the accumulator
-func (r *Fireboard) gatherTemps(s fireboardStats, acc telegraf.Accumulator) {
+func gatherTemps(s fireboardStats, acc telegraf.Accumulator) {
 	// Construct lookup for scale values
 
 	for _, t := range s.LatestTemps {

@@ -314,7 +314,7 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error {
 	return nil
 }
 
-func (c *GNMI) Gather(_ telegraf.Accumulator) error {
+func (*GNMI) Gather(telegraf.Accumulator) error {
 	return nil
 }

@@ -51,15 +51,15 @@ type mockServer struct {
 	grpcServer *grpc.Server
 }
 
-func (s *mockServer) Capabilities(context.Context, *gnmi.CapabilityRequest) (*gnmi.CapabilityResponse, error) {
+func (*mockServer) Capabilities(context.Context, *gnmi.CapabilityRequest) (*gnmi.CapabilityResponse, error) {
 	return nil, nil
 }
 
-func (s *mockServer) Get(context.Context, *gnmi.GetRequest) (*gnmi.GetResponse, error) {
+func (*mockServer) Get(context.Context, *gnmi.GetRequest) (*gnmi.GetResponse, error) {
 	return nil, nil
 }
 
-func (s *mockServer) Set(context.Context, *gnmi.SetRequest) (*gnmi.SetResponse, error) {
+func (*mockServer) Set(context.Context, *gnmi.SetRequest) (*gnmi.SetResponse, error) {
 	return nil, nil
 }

@@ -89,7 +89,7 @@ func (s *tagStore) insert(subscription tagSubscription, path *pathInfo, values [
 			}
 		}
 	case "elements":
-		key, match := s.getElementsKeys(path, subscription.Elements)
+		key, match := getElementsKeys(path, subscription.Elements)
 		if !match || len(values) == 0 {
 			return nil
 		}

@@ -141,7 +141,7 @@ func (s *tagStore) lookup(path *pathInfo, metricTags map[string]string) map[stri
 
 	// Match elements
 	for _, requiredKeys := range s.elements.required {
-		key, match := s.getElementsKeys(path, requiredKeys)
+		key, match := getElementsKeys(path, requiredKeys)
 		if !match {
 			continue
 		}

@@ -153,7 +153,7 @@ func (s *tagStore) lookup(path *pathInfo, metricTags map[string]string) map[stri
 	return tags
 }
 
-func (s *tagStore) getElementsKeys(path *pathInfo, elements []string) (string, bool) {
+func getElementsKeys(path *pathInfo, elements []string) (string, bool) {
 	// Search for the required path elements and collect a ordered
 	// list of their values to in the form
 	// elementName1={keyA=valueA,keyB=valueB,...},...,elementNameN={keyY=valueY,keyZ=valueZ}

@@ -57,7 +57,7 @@ func (gcs *GCS) Init() error {
 	return gcs.setOffset()
 }
 
-func (gcs *GCS) SampleConfig() string {
+func (*GCS) SampleConfig() string {
 	return sampleConfig
 }

@@ -119,10 +119,10 @@ func (c *mockHTTPClient) makeRequest(req *http.Request) (*http.Response, error)
 	return &resp, nil
 }
 
-func (c *mockHTTPClient) setHTTPClient(_ *http.Client) {
+func (*mockHTTPClient) setHTTPClient(*http.Client) {
 }
 
-func (c *mockHTTPClient) httpClient() *http.Client {
+func (*mockHTTPClient) httpClient() *http.Client {
 	return nil
 }

@@ -17,9 +17,7 @@ import (
 	"github.com/influxdata/telegraf/testutil"
 )
 
-type statServer struct{}
-
-func (s statServer) serverSocket(l net.Listener) {
+func serverSocket(l net.Listener) {
 	for {
 		conn, err := l.Accept()
 		if err != nil {

@@ -151,8 +149,7 @@ func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) {
 		sockets[i] = sock
 		defer sock.Close() //nolint:revive,gocritic // done on purpose, closing will be executed properly
 
-		s := statServer{}
-		go s.serverSocket(sock)
+		go serverSocket(sock)
 	}
 
 	r := &HAProxy{

@@ -191,8 +188,7 @@ func TestHaproxyGeneratesMetricsUsingTcp(t *testing.T) {
 	}
 	defer l.Close()
 
-	s := statServer{}
-	go s.serverSocket(l)
+	go serverSocket(l)
 
 	r := &HAProxy{
 		Servers: []string{"tcp://" + l.Addr().String()},

@@ -25,7 +25,7 @@ func New() *hddtemp {
 }
 
 // Fetch gathers disks data from hddtemp daemon.
-func (h *hddtemp) Fetch(address string) ([]Disk, error) {
+func (*hddtemp) Fetch(address string) ([]Disk, error) {
 	var (
 		err  error
 		conn net.Conn

@@ -12,7 +12,7 @@ import (
 type mockFetcher struct {
 }
 
-func (h *mockFetcher) Fetch(_ string) ([]hddtemp.Disk, error) {
+func (*mockFetcher) Fetch(string) ([]hddtemp.Disk, error) {
 	return []hddtemp.Disk{
 		{
 			DeviceName: "Disk1",

@@ -86,7 +86,7 @@ func (h *HTTP) SetParserFunc(fn telegraf.ParserFunc) {
 	h.parserFunc = fn
 }
 
-func (h *HTTP) Start(_ telegraf.Accumulator) error {
+func (*HTTP) Start(telegraf.Accumulator) error {
 	return nil
 }

@@ -197,7 +197,7 @@ func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error {
 	return nil
 }
 
-func (h *HTTPListenerV2) Gather(_ telegraf.Accumulator) error {
+func (*HTTPListenerV2) Gather(telegraf.Accumulator) error {
 	return nil
 }

@@ -118,7 +118,7 @@ func (h *Hugepages) Gather(acc telegraf.Accumulator) error {
 
 // gatherStatsPerNode collects root hugepages statistics
 func (h *Hugepages) gatherRootStats(acc telegraf.Accumulator) error {
-	return h.gatherFromHugepagePath(acc, "hugepages_"+rootHugepages, h.rootHugepagePath, hugepagesMetricsRoot, nil)
+	return gatherFromHugepagePath(acc, "hugepages_"+rootHugepages, h.rootHugepagePath, hugepagesMetricsRoot, nil)
 }
 
 // gatherStatsPerNode collects hugepages statistics per NUMA node

@@ -144,7 +144,7 @@ func (h *Hugepages) gatherStatsPerNode(acc telegraf.Accumulator) error {
 			"node": nodeNumber,
 		}
 		hugepagesPath := filepath.Join(h.numaNodePath, nodeDir.Name(), "hugepages")
-		err = h.gatherFromHugepagePath(acc, "hugepages_"+perNodeHugepages, hugepagesPath, hugepagesMetricsPerNUMANode, perNodeTags)
+		err = gatherFromHugepagePath(acc, "hugepages_"+perNodeHugepages, hugepagesPath, hugepagesMetricsPerNUMANode, perNodeTags)
 		if err != nil {
 			return err
 		}

@@ -152,7 +152,7 @@ func (h *Hugepages) gatherStatsPerNode(acc telegraf.Accumulator) error {
 	return nil
 }
 
-func (h *Hugepages) gatherFromHugepagePath(acc telegraf.Accumulator, measurement, path string, fileFilter, defaultTags map[string]string) error {
+func gatherFromHugepagePath(acc telegraf.Accumulator, measurement, path string, fileFilter, defaultTags map[string]string) error {
 	// read metrics from: hugepages/hugepages-*/*
 	hugepagesDirs, err := os.ReadDir(path)
 	if err != nil {

@@ -121,7 +121,7 @@ func (i *Icinga2) Gather(acc telegraf.Accumulator) error {
 	}
 
 	result := resultObject{}
-	err = i.parseObjectResponse(resp, &result)
+	err = parseObjectResponse(resp, &result)
 	if err != nil {
 		return fmt.Errorf("could not parse object response: %w", err)
 	}

@@ -145,13 +145,13 @@ func (i *Icinga2) Gather(acc telegraf.Accumulator) error {
 
 		switch statusType {
 		case "ApiListener":
-			fields, err = i.parsePerfdataResponse(resp)
+			fields, err = parsePerfdataResponse(resp)
 		case "CIB":
-			fields, err = i.parseCIBResponse(resp)
+			fields, err = parseCIBResponse(resp)
 		case "IdoMysqlConnection":
-			fields, err = i.parsePerfdataResponse(resp)
+			fields, err = parsePerfdataResponse(resp)
 		case "IdoPgsqlConnection":
-			fields, err = i.parsePerfdataResponse(resp)
+			fields, err = parsePerfdataResponse(resp)
 		}
 
 		if err != nil {

@@ -233,7 +233,7 @@ func (i *Icinga2) icingaRequest(address string) (*http.Response, error) {
 	return resp, nil
 }
 
-func (i *Icinga2) parseObjectResponse(resp *http.Response, result *resultObject) error {
+func parseObjectResponse(resp *http.Response, result *resultObject) error {
 	err := json.NewDecoder(resp.Body).Decode(&result)
 	if err != nil {
 		return err

@@ -246,7 +246,7 @@ func (i *Icinga2) parseObjectResponse(resp *http.Response, result *resultObject)
 	return nil
 }
 
-func (i *Icinga2) parseCIBResponse(resp *http.Response) (map[string]interface{}, error) {
+func parseCIBResponse(resp *http.Response) (map[string]interface{}, error) {
 	result := resultCIB{}
 
 	err := json.NewDecoder(resp.Body).Decode(&result)

@@ -262,7 +262,7 @@ func (i *Icinga2) parseCIBResponse(resp *http.Response) (map[string]interface{},
 	return result.Results[0].Status, nil
 }
 
-func (i *Icinga2) parsePerfdataResponse(resp *http.Response) (map[string]interface{}, error) {
+func parsePerfdataResponse(resp *http.Response) (map[string]interface{}, error) {
 	result := resultPerfdata{}
 
 	err := json.NewDecoder(resp.Body).Decode(&result)

@@ -12,7 +12,7 @@ import (
 )
 
 // Gather statistics from our infiniband cards
-func (i *Infiniband) Gather(acc telegraf.Accumulator) error {
+func (*Infiniband) Gather(acc telegraf.Accumulator) error {
 	rdmaDevices := rdmamap.GetRdmaDeviceList()
 
 	if len(rdmaDevices) == 0 {

@@ -76,7 +76,7 @@ func (*InfluxDBListener) SampleConfig() string {
 	return sampleConfig
 }
 
-func (h *InfluxDBListener) Gather(_ telegraf.Accumulator) error {
+func (*InfluxDBListener) Gather(telegraf.Accumulator) error {
 	return nil
 }

@@ -120,7 +120,7 @@ func (h *InfluxDBV2Listener) Init() error {
 	return nil
 }
 
-func (h *InfluxDBV2Listener) Gather(_ telegraf.Accumulator) error {
+func (*InfluxDBV2Listener) Gather(telegraf.Accumulator) error {
 	return nil
 }

@@ -56,7 +56,7 @@ type Baseband struct {
 	sockConn *socketConnector
 }
 
-func (b *Baseband) SampleConfig() string {
+func (*Baseband) SampleConfig() string {
 	return sampleConfig
 }

@@ -135,7 +135,7 @@ func (lc *logConnector) readNumVFs() error {
 			continue
 		}
 
-		numVFs, err := lc.parseNumVFs(line)
+		numVFs, err := parseNumVFs(line)
 		if err != nil {
 			lc.numVFs = -1
 			return err

@@ -189,7 +189,7 @@ func (lc *logConnector) getMetric(offsetLine int, name string) (int, *logMetric,
 		return offsetLine, nil, err
 	}
 
-	operationName := lc.parseOperationName(line)
+	operationName := parseOperationName(line)
 	if len(operationName) == 0 {
 		return offsetLine, nil, errors.New("valid operation name wasn't found in log")
 	}

@@ -221,7 +221,7 @@ func (lc *logConnector) getMetric(offsetLine int, name string) (int, *logMetric,
 }
 
 // Example value = Thu Apr 13 13:28:40 2023:INFO:Device Status:: 2 VFs
-func (lc *logConnector) parseNumVFs(s string) (int, error) {
+func parseNumVFs(s string) (int, error) {
 	i := strings.LastIndex(s, deviceStatusStartPrefix)
 	if i == -1 {
 		return 0, errors.New("couldn't find device status prefix in line")

@@ -244,7 +244,7 @@ func (lc *logConnector) parseNumVFs(s string) (int, error) {
 // Parse Operation name
 // Example = Thu Apr 13 13:28:40 2023:INFO:5GUL counters: Code Blocks
 // Output: 5GUL
-func (lc *logConnector) parseOperationName(s string) string {
+func parseOperationName(s string) string {
 	i := strings.Index(s, infoLine)
 	if i >= 0 {
 		j := strings.Index(s[i:], countersLine)

@@ -240,11 +240,9 @@ func TestParseOperationName(t *testing.T) {
 		{"", ""},
 	}
 
-	logConnector := prepareLogConnMock()
-	require.NotNil(t, logConnector)
 	for _, tc := range testCases {
 		t.Run("expected "+tc.expected, func(t *testing.T) {
-			operationName := logConnector.parseOperationName(tc.input)
+			operationName := parseOperationName(tc.input)
 			require.Equal(t, tc.expected, operationName)
 		})
 	}

@@ -50,7 +50,7 @@ type IntelDLB struct {
 	maxInitMessageLength uint32
 }
 
-func (d *IntelDLB) SampleConfig() string {
+func (*IntelDLB) SampleConfig() string {
 	return sampleConfig
 }

@@ -56,7 +56,7 @@ type fileInfo struct {
 	pciBdf string // PCI Bus:Device.Function (BDF)
 }
 
-func (p *IntelPMT) SampleConfig() string {
+func (*IntelPMT) SampleConfig() string {
 	return sampleConfig
 }

@@ -547,9 +547,9 @@ type fakeFileInfo struct {
 	fileMode os.FileMode
 }
 
-func (f fakeFileInfo) Name() string { return "" }
-func (f fakeFileInfo) Size() int64 { return 0 }
-func (f fakeFileInfo) Mode() os.FileMode { return f.fileMode }
-func (f fakeFileInfo) ModTime() time.Time { return time.Time{} }
-func (f fakeFileInfo) IsDir() bool { return false }
-func (f fakeFileInfo) Sys() interface{} { return nil }
+func (fakeFileInfo) Name() string { return "" }
+func (fakeFileInfo) Size() int64 { return 0 }
+func (f fakeFileInfo) Mode() os.FileMode { return f.fileMode }
+func (fakeFileInfo) ModTime() time.Time { return time.Time{} }
+func (fakeFileInfo) IsDir() bool { return false }
+func (fakeFileInfo) Sys() interface{} { return nil }

@@ -33,7 +33,7 @@ type optGenerator struct{}
 
 // generate takes plugin configuration options and generates options needed
 // to gather requested metrics.
-func (g *optGenerator) generate(cfg optConfig) []ptel.Option {
+func (*optGenerator) generate(cfg optConfig) []ptel.Option {
 	opts := make([]ptel.Option, 0)
 	if len(cfg.includedCPUs) != 0 {
 		opts = append(opts, ptel.WithIncludedCPUs(cfg.includedCPUs))

@@ -100,7 +100,7 @@ func (r *IntelRDT) Start(acc telegraf.Accumulator) error {
 	return nil
 }
 
-func (r *IntelRDT) Gather(_ telegraf.Accumulator) error {
+func (*IntelRDT) Gather(telegraf.Accumulator) error {
 	return nil
 }

@@ -12,7 +12,7 @@ import (
 
 type mockProc struct{}
 
-func (m *mockProc) getAllProcesses() ([]process, error) {
+func (*mockProc) getAllProcesses() ([]process, error) {
 	procs := []process{
 		{Name: "process", PID: 1000},
 		{Name: "process2", PID: 1002},

@@ -19,7 +19,7 @@ func newProcessor() processesHandler {
 	return &processManager{}
 }
 
-func (p *processManager) getAllProcesses() ([]process, error) {
+func (*processManager) getAllProcesses() ([]process, error) {
 	allProcesses, err := procfs.AllProcs()
 	if err != nil {
 		return nil, err

@@ -179,7 +179,7 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server, sensor string) error {
 			return m.parseV1(acc, hostname, out, timestamp)
 		}
 	case "chassis_power_status":
-		return m.parseChassisPowerStatus(acc, hostname, out, timestamp)
+		return parseChassisPowerStatus(acc, hostname, out, timestamp)
 	case "dcmi_power_reading":
 		return m.parseDCMIPowerReading(acc, hostname, out, timestamp)
 	}

@@ -187,7 +187,7 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server, sensor string) error {
 	return fmt.Errorf("unknown sensor type %q", sensor)
 }
 
-func (m *Ipmi) parseChassisPowerStatus(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredAt time.Time) error {
+func parseChassisPowerStatus(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredAt time.Time) error {
 	// each line will look something like
 	// Chassis Power is on
 	// Chassis Power is off

@@ -820,14 +820,10 @@ func Test_parsePowerStatus(t *testing.T) {
 		},
 	}
 
-	ipmi := &Ipmi{
-		Log: testutil.Logger{},
-	}
-
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			var acc testutil.Accumulator
-			err := ipmi.parseChassisPowerStatus(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt)
+			err := parseChassisPowerStatus(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt)
 			require.NoError(t, err)
 			testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
 		})

@@ -38,7 +38,7 @@ func (*Ipset) SampleConfig() string {
 	return sampleConfig
 }
 
-func (i *Ipset) Init() error {
+func (*Ipset) Init() error {
 	_, err := exec.LookPath("ipset")
 	if err != nil {
 		return err

@@ -172,7 +172,7 @@ func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error {
 	return nil
 }
 
-func (m *OpenConfigTelemetry) Gather(_ telegraf.Accumulator) error {
+func (*OpenConfigTelemetry) Gather(telegraf.Accumulator) error {
 	return nil
 }

@@ -58,10 +58,7 @@ type openConfigTelemetryServer struct {
 	telemetry.UnimplementedOpenConfigTelemetryServer
 }
 
-func (s *openConfigTelemetryServer) TelemetrySubscribe(
-	req *telemetry.SubscriptionRequest,
-	stream telemetry.OpenConfigTelemetry_TelemetrySubscribeServer,
-) error {
+func (*openConfigTelemetryServer) TelemetrySubscribe(req *telemetry.SubscriptionRequest, stream telemetry.OpenConfigTelemetry_TelemetrySubscribeServer) error {
 	path := req.PathList[0].Path
 	switch path {
 	case "/sensor":

@@ -78,28 +75,28 @@ func (s *openConfigTelemetryServer) TelemetrySubscribe(
 	return nil
 }
 
-func (s *openConfigTelemetryServer) CancelTelemetrySubscription(
-	_ context.Context,
-	_ *telemetry.CancelSubscriptionRequest,
+func (*openConfigTelemetryServer) CancelTelemetrySubscription(
+	context.Context,
+	*telemetry.CancelSubscriptionRequest,
 ) (*telemetry.CancelSubscriptionReply, error) {
 	return nil, nil
 }
 
-func (s *openConfigTelemetryServer) GetTelemetrySubscriptions(
-	_ context.Context,
-	_ *telemetry.GetSubscriptionsRequest,
+func (*openConfigTelemetryServer) GetTelemetrySubscriptions(
+	context.Context,
+	*telemetry.GetSubscriptionsRequest,
 ) (*telemetry.GetSubscriptionsReply, error) {
 	return nil, nil
 }
 
-func (s *openConfigTelemetryServer) GetTelemetryOperationalState(
-	_ context.Context,
-	_ *telemetry.GetOperationalStateRequest,
+func (*openConfigTelemetryServer) GetTelemetryOperationalState(
+	context.Context,
+	*telemetry.GetOperationalStateRequest,
 ) (*telemetry.GetOperationalStateReply, error) {
 	return nil, nil
 }
 
-func (s *openConfigTelemetryServer) GetDataEncodings(_ context.Context, _ *telemetry.DataEncodingRequest) (*telemetry.DataEncodingReply, error) {
+func (*openConfigTelemetryServer) GetDataEncodings(context.Context, *telemetry.DataEncodingRequest) (*telemetry.DataEncodingReply, error) {
 	return nil, nil
 }

@@ -299,7 +299,7 @@ func (k *KafkaConsumer) Start(acc telegraf.Accumulator) error {
 	return nil
 }
 
-func (k *KafkaConsumer) Gather(_ telegraf.Accumulator) error {
+func (*KafkaConsumer) Gather(telegraf.Accumulator) error {
 	return nil
 }

@@ -240,53 +240,53 @@ type FakeConsumerGroupSession struct {
 	ctx context.Context
 }
 
-func (s *FakeConsumerGroupSession) Claims() map[string][]int32 {
+func (*FakeConsumerGroupSession) Claims() map[string][]int32 {
 	panic("not implemented")
 }
 
-func (s *FakeConsumerGroupSession) MemberID() string {
+func (*FakeConsumerGroupSession) MemberID() string {
 	panic("not implemented")
 }
 
-func (s *FakeConsumerGroupSession) GenerationID() int32 {
+func (*FakeConsumerGroupSession) GenerationID() int32 {
 	panic("not implemented")
 }
 
-func (s *FakeConsumerGroupSession) MarkOffset(_ string, _ int32, _ int64, _ string) {
+func (*FakeConsumerGroupSession) MarkOffset(string, int32, int64, string) {
 	panic("not implemented")
 }
 
-func (s *FakeConsumerGroupSession) ResetOffset(_ string, _ int32, _ int64, _ string) {
+func (*FakeConsumerGroupSession) ResetOffset(string, int32, int64, string) {
 	panic("not implemented")
 }
 
-func (s *FakeConsumerGroupSession) MarkMessage(_ *sarama.ConsumerMessage, _ string) {
+func (*FakeConsumerGroupSession) MarkMessage(*sarama.ConsumerMessage, string) {
 }
 
 func (s *FakeConsumerGroupSession) Context() context.Context {
 	return s.ctx
 }
 
-func (s *FakeConsumerGroupSession) Commit() {
+func (*FakeConsumerGroupSession) Commit() {
 }
 
 type FakeConsumerGroupClaim struct {
 	messages chan *sarama.ConsumerMessage
 }
 
-func (c *FakeConsumerGroupClaim) Topic() string {
+func (*FakeConsumerGroupClaim) Topic() string {
 	panic("not implemented")
 }
 
-func (c *FakeConsumerGroupClaim) Partition() int32 {
+func (*FakeConsumerGroupClaim) Partition() int32 {
 	panic("not implemented")
 }
 
-func (c *FakeConsumerGroupClaim) InitialOffset() int64 {
+func (*FakeConsumerGroupClaim) InitialOffset() int64 {
 	panic("not implemented")
 }
 
-func (c *FakeConsumerGroupClaim) HighWaterMarkOffset() int64 {
+func (*FakeConsumerGroupClaim) HighWaterMarkOffset() int64 {
 	panic("not implemented")
 }

@@ -68,12 +68,12 @@ func (k *Kernel) Init() error {
 }
 
 func (k *Kernel) Gather(acc telegraf.Accumulator) error {
-	data, err := k.getProcValueBytes(k.statFile)
+	data, err := getProcValueBytes(k.statFile)
 	if err != nil {
 		return err
 	}
 
-	entropyValue, err := k.getProcValueInt(k.entropyStatFile)
+	entropyValue, err := getProcValueInt(k.entropyStatFile)
 	if err != nil {
 		return err
 	}

@@ -137,7 +137,7 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error {
 	extraStats := []string{"general_profit"}
 
 	for _, f := range stats {
-		m, err := k.getProcValueInt(filepath.Join(k.ksmStatsDir, f))
+		m, err := getProcValueInt(filepath.Join(k.ksmStatsDir, f))
 		if err != nil {
 			return err
 		}

@@ -146,7 +146,7 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error {
 	}
 
 	for _, f := range extraStats {
-		m, err := k.getProcValueInt(filepath.Join(k.ksmStatsDir, f))
+		m, err := getProcValueInt(filepath.Join(k.ksmStatsDir, f))
 		if err != nil {
 			// if an extraStats metric doesn't exist in our kernel version, ignore it.
 			continue

@@ -166,7 +166,7 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error {
 	return nil
 }
 
-func (k *Kernel) getProcValueBytes(path string) ([]byte, error) {
+func getProcValueBytes(path string) ([]byte, error) {
 	if _, err := os.Stat(path); os.IsNotExist(err) {
 		return nil, fmt.Errorf("path %q does not exist", path)
 	} else if err != nil {

@@ -181,8 +181,8 @@ func (k *Kernel) getProcValueBytes(path string) ([]byte, error) {
 	return data, nil
 }
 
-func (k *Kernel) getProcValueInt(path string) (int64, error) {
-	data, err := k.getProcValueBytes(path)
+func getProcValueInt(path string) (int64, error) {
+	data, err := getProcValueBytes(path)
 	if err != nil {
 		return -1, err
 	}

@@ -14,23 +14,13 @@ import (
 )
 
 func TestGetProcValueInt(t *testing.T) {
-	k := Kernel{
-		statFile:        "testdata/stat_file_full",
-		entropyStatFile: "testdata/entropy_stat_file_full",
-	}
-
-	d, err := k.getProcValueInt(k.entropyStatFile)
+	d, err := getProcValueInt("testdata/entropy_stat_file_full")
 	require.NoError(t, err)
 	require.IsType(t, int64(1), d)
 }
 
 func TestGetProcValueByte(t *testing.T) {
-	k := Kernel{
-		statFile:        "testdata/stat_file_full",
-		entropyStatFile: "testdata/entropy_stat_file_full",
-	}
-
-	d, err := k.getProcValueBytes(k.entropyStatFile)
+	d, err := getProcValueBytes("testdata/entropy_stat_file_full")
 	require.NoError(t, err)
 	require.IsType(t, []byte("test"), d)
 }

@@ -101,7 +101,7 @@ func (*Kibana) SampleConfig() string {
 	return sampleConfig
 }
 
-func (k *Kibana) Start(_ telegraf.Accumulator) error {
+func (*Kibana) Start(telegraf.Accumulator) error {
 	return nil
 }

@@ -19,7 +19,7 @@ func collectSecrets(ctx context.Context, acc telegraf.Accumulator, ki *Kubernete
 		return
 	}
 	for _, i := range list.Items {
-		ki.gatherCertificates(i, acc)
+		gatherCertificates(i, acc)
 	}
 }

@@ -59,7 +59,7 @@ func getTags(cert *x509.Certificate) map[string]string {
 	return tags
 }
 
-func (ki *KubernetesInventory) gatherCertificates(r corev1.Secret, acc telegraf.Accumulator) {
+func gatherCertificates(r corev1.Secret, acc telegraf.Accumulator) {
 	now := time.Now()
 
 	for resourceName, val := range r.Data {

@@ -15,11 +15,11 @@ func collectEndpoints(ctx context.Context, acc telegraf.Accumulator, ki *Kuberne
 		return
 	}
 	for _, i := range list.Items {
-		ki.gatherEndpoint(i, acc)
+		gatherEndpoint(i, acc)
 	}
 }
 
-func (ki *KubernetesInventory) gatherEndpoint(e corev1.Endpoints, acc telegraf.Accumulator) {
+func gatherEndpoint(e corev1.Endpoints, acc telegraf.Accumulator) {
 	creationTs := e.GetCreationTimestamp()
 	if creationTs.IsZero() {
 		return

@@ -13,8 +13,6 @@ import (
 )
 
 func TestEndpoint(t *testing.T) {
-	cli := &client{}
-
 	now := time.Now()
 	now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
 

@@ -256,12 +254,9 @@ func TestEndpoint(t *testing.T) {
 	}
 
 	for _, v := range tests {
-		ks := &KubernetesInventory{
-			client: cli,
-		}
 		acc := new(testutil.Accumulator)
 		for _, endpoint := range ((v.handler.responseMap["/endpoints/"]).(*v1.EndpointsList)).Items {
-			ks.gatherEndpoint(endpoint, acc)
+			gatherEndpoint(endpoint, acc)
 		}
 
 		err := acc.FirstError()

@@ -15,11 +15,11 @@ func collectIngress(ctx context.Context, acc telegraf.Accumulator, ki *Kubernete
 		return
 	}
 	for _, i := range list.Items {
-		ki.gatherIngress(i, acc)
+		gatherIngress(i, acc)
 	}
 }
 
-func (ki *KubernetesInventory) gatherIngress(i netv1.Ingress, acc telegraf.Accumulator) {
+func gatherIngress(i netv1.Ingress, acc telegraf.Accumulator) {
 	creationTs := i.GetCreationTimestamp()
 	if creationTs.IsZero() {
 		return

@@ -13,8 +13,6 @@ import (
 )
 
 func TestIngress(t *testing.T) {
-	cli := &client{}
-
 	now := time.Now()
 	now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
 

@@ -219,12 +217,9 @@ func TestIngress(t *testing.T) {
 	}
 
 	for _, v := range tests {
-		ks := &KubernetesInventory{
-			client: cli,
-		}
 		acc := new(testutil.Accumulator)
 		for _, ingress := range ((v.handler.responseMap["/ingress/"]).(netv1.IngressList)).Items {
-			ks.gatherIngress(ingress, acc)
+			gatherIngress(ingress, acc)
 		}
 
 		err := acc.FirstError()

@@ -15,14 +15,14 @@ func collectNodes(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesI
 		return
 	}
 
-	ki.gatherNodeCount(len(list.Items), acc)
+	gatherNodeCount(len(list.Items), acc)
 
 	for i := range list.Items {
 		ki.gatherNode(&list.Items[i], acc)
 	}
 }
 
-func (ki *KubernetesInventory) gatherNodeCount(count int, acc telegraf.Accumulator) {
+func gatherNodeCount(count int, acc telegraf.Accumulator) {
 	fields := map[string]interface{}{"node_count": count}
 	tags := make(map[string]string)
 

@@ -173,7 +173,7 @@ func TestNode(t *testing.T) {
 
 		if v.name == "no nodes" {
 			nodeCount := len((v.handler.responseMap["/nodes/"]).(corev1.NodeList).Items)
-			ks.gatherNodeCount(nodeCount, acc)
+			gatherNodeCount(nodeCount, acc)
 		}
 		require.Len(t, acc.Metrics, len(v.output))
 		testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime())

@@ -16,11 +16,11 @@ func collectPersistentVolumes(ctx context.Context, acc telegraf.Accumulator, ki
 		return
 	}
 	for i := range list.Items {
-		ki.gatherPersistentVolume(&list.Items[i], acc)
+		gatherPersistentVolume(&list.Items[i], acc)
 	}
 }
 
-func (ki *KubernetesInventory) gatherPersistentVolume(pv *corev1.PersistentVolume, acc telegraf.Accumulator) {
+func gatherPersistentVolume(pv *corev1.PersistentVolume, acc telegraf.Accumulator) {
 	phaseType := 5
 	switch strings.ToLower(string(pv.Status.Phase)) {
 	case "bound":

@@ -13,7 +13,6 @@ import (
 )
 
 func TestPersistentVolume(t *testing.T) {
-	cli := &client{}
 	now := time.Now()
 	now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
 

@@ -77,13 +76,10 @@ func TestPersistentVolume(t *testing.T) {
 	}
 
 	for _, v := range tests {
-		ks := &KubernetesInventory{
-			client: cli,
-		}
 		acc := new(testutil.Accumulator)
 		items := ((v.handler.responseMap["/persistentvolumes/"]).(*corev1.PersistentVolumeList)).Items
 		for i := range items {
-			ks.gatherPersistentVolume(&items[i], acc)
+			gatherPersistentVolume(&items[i], acc)
 		}
 
 		err := acc.FirstError()