chore: Fix linter findings for `revive:unused-receiver` in `plugins/inputs/[a-e]` (#16263)

Paweł Żak 2024-12-13 18:26:34 +01:00 committed by GitHub
parent 2bd4559bc1
commit 516b8cfbd1
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
41 changed files with 168 additions and 185 deletions
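The revive `unused-receiver` rule flags methods whose receiver is never referenced in the body. The hunks below apply two fixes for it: helpers that never touch the receiver become package-level functions (getNodeInfo, gatherScores, makeDefaultTags, createPoints, ...), while methods that must keep their signature for an interface keep the method but drop the receiver name, and often the unused parameter names too (Gather, Stop, SampleConfig, mock clients). A minimal sketch of both patterns, using a hypothetical Example plugin type and parseStatus helper that are not part of this diff:

package example

import (
	"strings"

	"github.com/influxdata/telegraf"
)

// Example is a hypothetical plugin type used only to illustrate the two
// fix patterns applied throughout this commit.
type Example struct{}

// Pattern 1: a helper that never used its receiver becomes a package-level
// function (previously it would have been declared as a method on *Example).
func parseStatus(line string) (string, string) {
	// Split "key=value" into its parts; the found flag is ignored here.
	key, value, _ := strings.Cut(line, "=")
	return key, value
}

// Pattern 2: an interface-required method keeps its signature but drops the
// unused receiver and parameter names.
func (*Example) Gather(telegraf.Accumulator) error {
	return nil
}

Test code follows the same pattern: calls such as a.parseNodeInfo(...) become parseNodeInfo(...), and plugin instances that existed only to carry the receiver are deleted.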

View File

@ -121,11 +121,11 @@ func (a *Aerospike) gatherServer(acc telegraf.Accumulator, hostPort string) erro
nodes := c.GetNodes()
for _, n := range nodes {
nodeHost := n.GetHost().String()
stats, err := a.getNodeInfo(n, asInfoPolicy)
stats, err := getNodeInfo(n, asInfoPolicy)
if err != nil {
return err
}
a.parseNodeInfo(acc, stats, nodeHost, n.GetName())
parseNodeInfo(acc, stats, nodeHost, n.GetName())
namespaces, err := a.getNamespaces(n, asInfoPolicy)
if err != nil {
@ -135,12 +135,12 @@ func (a *Aerospike) gatherServer(acc telegraf.Accumulator, hostPort string) erro
if !a.DisableQueryNamespaces {
// Query Namespaces
for _, namespace := range namespaces {
stats, err = a.getNamespaceInfo(namespace, n, asInfoPolicy)
stats, err = getNamespaceInfo(namespace, n, asInfoPolicy)
if err != nil {
continue
}
a.parseNamespaceInfo(acc, stats, nodeHost, namespace, n.GetName())
parseNamespaceInfo(acc, stats, nodeHost, namespace, n.GetName())
if a.EnableTTLHistogram {
err = a.getTTLHistogram(acc, nodeHost, namespace, "", n, asInfoPolicy)
@ -162,12 +162,12 @@ func (a *Aerospike) gatherServer(acc telegraf.Accumulator, hostPort string) erro
if err == nil {
for _, namespaceSet := range namespaceSets {
namespace, set := splitNamespaceSet(namespaceSet)
stats, err := a.getSetInfo(namespaceSet, n, asInfoPolicy)
stats, err := getSetInfo(namespaceSet, n, asInfoPolicy)
if err != nil {
continue
}
a.parseSetInfo(acc, stats, nodeHost, namespaceSet, n.GetName())
parseSetInfo(acc, stats, nodeHost, namespaceSet, n.GetName())
if a.EnableTTLHistogram {
err = a.getTTLHistogram(acc, nodeHost, namespace, set, n, asInfoPolicy)
@ -189,7 +189,7 @@ func (a *Aerospike) gatherServer(acc telegraf.Accumulator, hostPort string) erro
return nil
}
func (a *Aerospike) getNodeInfo(n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) {
func getNodeInfo(n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) {
stats, err := n.RequestInfo(infoPolicy, "statistics")
if err != nil {
return nil, err
@ -198,7 +198,7 @@ func (a *Aerospike) getNodeInfo(n *as.Node, infoPolicy *as.InfoPolicy) (map[stri
return stats, nil
}
func (a *Aerospike) parseNodeInfo(acc telegraf.Accumulator, stats map[string]string, hostPort, nodeName string) {
func parseNodeInfo(acc telegraf.Accumulator, stats map[string]string, hostPort, nodeName string) {
nTags := map[string]string{
"aerospike_host": hostPort,
"node_name": nodeName,
@ -231,7 +231,7 @@ func (a *Aerospike) getNamespaces(n *as.Node, infoPolicy *as.InfoPolicy) ([]stri
return namespaces, nil
}
func (a *Aerospike) getNamespaceInfo(namespace string, n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) {
func getNamespaceInfo(namespace string, n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) {
stats, err := n.RequestInfo(infoPolicy, "namespace/"+namespace)
if err != nil {
return nil, err
@ -239,7 +239,8 @@ func (a *Aerospike) getNamespaceInfo(namespace string, n *as.Node, infoPolicy *a
return stats, err
}
func (a *Aerospike) parseNamespaceInfo(acc telegraf.Accumulator, stats map[string]string, hostPort, namespace, nodeName string) {
func parseNamespaceInfo(acc telegraf.Accumulator, stats map[string]string, hostPort, namespace, nodeName string) {
nTags := map[string]string{
"aerospike_host": hostPort,
"node_name": nodeName,
@ -296,7 +297,7 @@ func (a *Aerospike) getSets(n *as.Node, infoPolicy *as.InfoPolicy) ([]string, er
return namespaceSets, nil
}
func (a *Aerospike) getSetInfo(namespaceSet string, n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) {
func getSetInfo(namespaceSet string, n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) {
stats, err := n.RequestInfo(infoPolicy, "sets/"+namespaceSet)
if err != nil {
return nil, err
@ -304,7 +305,7 @@ func (a *Aerospike) getSetInfo(namespaceSet string, n *as.Node, infoPolicy *as.I
return stats, nil
}
func (a *Aerospike) parseSetInfo(acc telegraf.Accumulator, stats map[string]string, hostPort, namespaceSet, nodeName string) {
func parseSetInfo(acc telegraf.Accumulator, stats map[string]string, hostPort, namespaceSet, nodeName string) {
stat := strings.Split(
strings.TrimSuffix(
stats["sets/"+namespaceSet], ";"), ":")
@ -327,7 +328,7 @@ func (a *Aerospike) parseSetInfo(acc telegraf.Accumulator, stats map[string]stri
}
func (a *Aerospike) getTTLHistogram(acc telegraf.Accumulator, hostPort, namespace, set string, n *as.Node, infoPolicy *as.InfoPolicy) error {
stats, err := a.getHistogram(namespace, set, "ttl", n, infoPolicy)
stats, err := getHistogram(namespace, set, "ttl", n, infoPolicy)
if err != nil {
return err
}
@ -339,7 +340,7 @@ func (a *Aerospike) getTTLHistogram(acc telegraf.Accumulator, hostPort, namespac
}
func (a *Aerospike) getObjectSizeLinearHistogram(acc telegraf.Accumulator, hostPort, namespace, set string, n *as.Node, infoPolicy *as.InfoPolicy) error {
stats, err := a.getHistogram(namespace, set, "object-size-linear", n, infoPolicy)
stats, err := getHistogram(namespace, set, "object-size-linear", n, infoPolicy)
if err != nil {
return err
}
@ -350,7 +351,7 @@ func (a *Aerospike) getObjectSizeLinearHistogram(acc telegraf.Accumulator, hostP
return nil
}
func (a *Aerospike) getHistogram(namespace, set, histogramType string, n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) {
func getHistogram(namespace, set, histogramType string, n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) {
var queryArg string
if len(set) > 0 {
queryArg = fmt.Sprintf("histogram:type=%s;namespace=%v;set=%v", histogramType, namespace, set)

View File

@ -309,9 +309,6 @@ func TestDisableObjectSizeLinearHistogramIntegration(t *testing.T) {
}
func TestParseNodeInfo(t *testing.T) {
a := &Aerospike{}
var acc testutil.Accumulator
stats := map[string]string{
"statistics": "early_tsvc_from_proxy_error=0;cluster_principal=BB9020012AC4202;cluster_is_member=true",
}
@ -327,14 +324,12 @@ func TestParseNodeInfo(t *testing.T) {
"node_name": "TestNodeName",
}
a.parseNodeInfo(&acc, stats, "127.0.0.1:3000", "TestNodeName")
var acc testutil.Accumulator
parseNodeInfo(&acc, stats, "127.0.0.1:3000", "TestNodeName")
acc.AssertContainsTaggedFields(t, "aerospike_node", expectedFields, expectedTags)
}
func TestParseNamespaceInfo(t *testing.T) {
a := &Aerospike{}
var acc testutil.Accumulator
stats := map[string]string{
"namespace/test": "ns_cluster_size=1;effective_replication_factor=1;objects=2;tombstones=0;master_objects=2",
}
@ -353,15 +348,12 @@ func TestParseNamespaceInfo(t *testing.T) {
"namespace": "test",
}
a.parseNamespaceInfo(&acc, stats, "127.0.0.1:3000", "test", "TestNodeName")
var acc testutil.Accumulator
parseNamespaceInfo(&acc, stats, "127.0.0.1:3000", "test", "TestNodeName")
acc.AssertContainsTaggedFields(t, "aerospike_namespace", expectedFields, expectedTags)
}
func TestParseSetInfo(t *testing.T) {
a := &Aerospike{}
var acc testutil.Accumulator
stats := map[string]string{
"sets/test/foo": "objects=1:tombstones=0:memory_data_bytes=26;",
}
@ -377,7 +369,9 @@ func TestParseSetInfo(t *testing.T) {
"node_name": "TestNodeName",
"set": "test/foo",
}
a.parseSetInfo(&acc, stats, "127.0.0.1:3000", "test/foo", "TestNodeName")
var acc testutil.Accumulator
parseSetInfo(&acc, stats, "127.0.0.1:3000", "test/foo", "TestNodeName")
acc.AssertContainsTaggedFields(t, "aerospike_set", expectedFields, expectedTags)
}

View File

@ -26,7 +26,7 @@ const inputTitle = "inputs.aliyuncms"
type mockGatherAliyunCMSClient struct{}
func (m *mockGatherAliyunCMSClient) DescribeMetricList(request *cms.DescribeMetricListRequest) (*cms.DescribeMetricListResponse, error) {
func (*mockGatherAliyunCMSClient) DescribeMetricList(request *cms.DescribeMetricListRequest) (*cms.DescribeMetricListResponse, error) {
resp := new(cms.DescribeMetricListResponse)
// switch request.Metric {

View File

@ -131,7 +131,7 @@ func (rsmi *ROCmSMI) Gather(acc telegraf.Accumulator) error {
return gatherROCmSMI(data, acc)
}
func (rsmi *ROCmSMI) Stop() {}
func (*ROCmSMI) Stop() {}
func (rsmi *ROCmSMI) pollROCmSMI() ([]byte, error) {
// Construct and execute metrics query, there currently exist (ROCm v4.3.x) a "-a" option

View File

@ -64,11 +64,11 @@ type AMQPConsumer struct {
decoder internal.ContentDecoder
}
func (a *externalAuth) Mechanism() string {
func (*externalAuth) Mechanism() string {
return "EXTERNAL"
}
func (a *externalAuth) Response() string {
func (*externalAuth) Response() string {
return "\000"
}
@ -175,7 +175,7 @@ func (a *AMQPConsumer) Start(acc telegraf.Accumulator) error {
return nil
}
func (a *AMQPConsumer) Gather(_ telegraf.Accumulator) error {
func (*AMQPConsumer) Gather(_ telegraf.Accumulator) error {
return nil
}

View File

@ -120,7 +120,7 @@ func (n *Apache) gatherURL(addr *url.URL, acc telegraf.Accumulator) error {
switch key {
case "Scoreboard":
for field, value := range n.gatherScores(part) {
for field, value := range gatherScores(part) {
fields[field] = value
}
default:
@ -137,7 +137,7 @@ func (n *Apache) gatherURL(addr *url.URL, acc telegraf.Accumulator) error {
return nil
}
func (n *Apache) gatherScores(data string) map[string]interface{} {
func gatherScores(data string) map[string]interface{} {
var waiting, open = 0, 0
var s, r, w, k, d, c, l, g, i = 0, 0, 0, 0, 0, 0, 0, 0, 0

View File

@ -58,7 +58,7 @@ type azureClientsCreator interface {
//go:embed sample.conf
var sampleConfig string
func (am *AzureMonitor) SampleConfig() string {
func (*AzureMonitor) SampleConfig() string {
return sampleConfig
}
@ -170,7 +170,7 @@ func (am *AzureMonitor) setReceiver() error {
return err
}
func (acm *azureClientsManager) createAzureClients(
func (*azureClientsManager) createAzureClients(
subscriptionID, clientID, clientSecret, tenantID string,
clientOptions azcore.ClientOptions,
) (*receiver.AzureClients, error) {

View File

@ -27,7 +27,7 @@ type mockAzureMetricDefinitionsClient struct{}
type mockAzureMetricsClient struct{}
func (mam *mockAzureClientsManager) createAzureClients(_, _, _, _ string, _ azcore.ClientOptions) (*receiver.AzureClients, error) {
func (*mockAzureClientsManager) createAzureClients(_, _, _, _ string, _ azcore.ClientOptions) (*receiver.AzureClients, error) {
return &receiver.AzureClients{
Ctx: context.Background(),
ResourcesClient: &mockAzureResourcesClient{},
@ -36,7 +36,7 @@ func (mam *mockAzureClientsManager) createAzureClients(_, _, _, _ string, _ azco
}, nil
}
func (marc *mockAzureResourcesClient) List(_ context.Context, _ *armresources.ClientListOptions) ([]*armresources.ClientListResponse, error) {
func (*mockAzureResourcesClient) List(_ context.Context, _ *armresources.ClientListOptions) ([]*armresources.ClientListResponse, error) {
var responses []*armresources.ClientListResponse
file, err := os.ReadFile("testdata/json/azure_resources_response.json")
@ -59,7 +59,7 @@ func (marc *mockAzureResourcesClient) List(_ context.Context, _ *armresources.Cl
return responses, nil
}
func (marc *mockAzureResourcesClient) ListByResourceGroup(
func (*mockAzureResourcesClient) ListByResourceGroup(
_ context.Context,
resourceGroup string,
_ *armresources.ClientListByResourceGroupOptions) ([]*armresources.ClientListByResourceGroupResponse, error) {
@ -105,7 +105,7 @@ func (marc *mockAzureResourcesClient) ListByResourceGroup(
return nil, errors.New("resource group was not found")
}
func (mamdc *mockAzureMetricDefinitionsClient) List(
func (*mockAzureMetricDefinitionsClient) List(
_ context.Context,
resourceID string,
_ *armmonitor.MetricDefinitionsClientListOptions) (armmonitor.MetricDefinitionsClientListResponse, error) {
@ -146,7 +146,7 @@ func (mamdc *mockAzureMetricDefinitionsClient) List(
return armmonitor.MetricDefinitionsClientListResponse{}, errors.New("resource ID was not found")
}
func (mamc *mockAzureMetricsClient) List(
func (*mockAzureMetricsClient) List(
_ context.Context,
resourceID string,
_ *armmonitor.MetricsClientListOptions) (armmonitor.MetricsClientListResponse, error) {

View File

@ -53,7 +53,7 @@ func (b *Bcache) Gather(acc telegraf.Accumulator) error {
continue
}
}
if err := b.gatherBcache(bdev, acc); err != nil {
if err := gatherBcache(bdev, acc); err != nil {
return fmt.Errorf("gathering bcache failed: %w", err)
}
}
@ -97,7 +97,7 @@ func prettyToBytes(v string) uint64 {
return uint64(result)
}
func (b *Bcache) gatherBcache(bdev string, acc telegraf.Accumulator) error {
func gatherBcache(bdev string, acc telegraf.Accumulator) error {
tags := getTags(bdev)
metrics, err := filepath.Glob(bdev + "/stats_total/*")
if err != nil {

View File

@ -66,7 +66,7 @@ func (bond *Bond) Gather(acc telegraf.Accumulator) error {
if err != nil {
acc.AddError(err)
}
bond.gatherSysDetails(bondName, files, acc)
gatherSysDetails(bondName, files, acc)
}
}
return nil
@ -164,7 +164,7 @@ func (bond *Bond) readSysFiles(bondDir string) (sysFiles, error) {
return output, nil
}
func (bond *Bond) gatherSysDetails(bondName string, files sysFiles, acc telegraf.Accumulator) {
func gatherSysDetails(bondName string, files sysFiles, acc telegraf.Accumulator) {
var slaves []string
var adPortCount int

View File

@ -145,7 +145,7 @@ func TestGatherBondInterface(t *testing.T) {
acc = testutil.Accumulator{}
require.NoError(t, bond.gatherBondInterface("bondLACP", sampleTestLACP, &acc))
bond.gatherSysDetails("bondLACP", sysFiles{ModeFile: sampleSysMode, SlaveFile: sampleSysSlaves, ADPortsFile: sampleSysAdPorts}, &acc)
gatherSysDetails("bondLACP", sysFiles{ModeFile: sampleSysMode, SlaveFile: sampleSysSlaves, ADPortsFile: sampleSysAdPorts}, &acc)
acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"status": 1}, map[string]string{"bond": "bondLACP"})
acc.AssertContainsTaggedFields(
t,
@ -169,7 +169,7 @@ func TestGatherBondInterface(t *testing.T) {
acc = testutil.Accumulator{}
require.NoError(t, bond.gatherBondInterface("bondLACPUpDown", sampleTestLACPFirstUpSecondDown, &acc))
bond.gatherSysDetails("bondLACPUpDown", sysFiles{ModeFile: sampleSysMode, SlaveFile: sampleSysSlaves, ADPortsFile: sampleSysAdPorts}, &acc)
gatherSysDetails("bondLACPUpDown", sysFiles{ModeFile: sampleSysMode, SlaveFile: sampleSysSlaves, ADPortsFile: sampleSysAdPorts}, &acc)
acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"status": 1}, map[string]string{"bond": "bondLACPUpDown"})
acc.AssertContainsTaggedFields(
t,

View File

@ -289,14 +289,14 @@ func (b *Burrow) gatherTopics(guard chan struct{}, src *url.URL, cluster string,
return
}
b.genTopicMetrics(tr, cluster, topic, acc)
genTopicMetrics(tr, cluster, topic, acc)
}(topic)
}
wg.Wait()
}
func (b *Burrow) genTopicMetrics(r *apiResponse, cluster, topic string, acc telegraf.Accumulator) {
func genTopicMetrics(r *apiResponse, cluster, topic string, acc telegraf.Accumulator) {
for i, offset := range r.Offsets {
tags := map[string]string{
"cluster": cluster,
@ -346,7 +346,7 @@ func (b *Burrow) gatherGroups(guard chan struct{}, src *url.URL, cluster string,
return
}
b.genGroupStatusMetrics(gr, cluster, group, acc)
genGroupStatusMetrics(gr, cluster, group, acc)
b.genGroupLagMetrics(gr, cluster, group, acc)
}(group)
}
@ -354,7 +354,7 @@ func (b *Burrow) gatherGroups(guard chan struct{}, src *url.URL, cluster string,
wg.Wait()
}
func (b *Burrow) genGroupStatusMetrics(r *apiResponse, cluster, group string, acc telegraf.Accumulator) {
func genGroupStatusMetrics(r *apiResponse, cluster, group string, acc telegraf.Accumulator) {
partitionCount := r.Status.PartitionCount
if partitionCount == 0 {
partitionCount = len(r.Status.Partitions)

View File

@ -218,7 +218,7 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error {
return nil
}
func (c *CiscoTelemetryMDT) Gather(_ telegraf.Accumulator) error {
func (*CiscoTelemetryMDT) Gather(telegraf.Accumulator) error {
return nil
}
@ -541,7 +541,7 @@ func (c *CiscoTelemetryMDT) parseKeyField(tags map[string]string, field *telemet
}
}
func (c *CiscoTelemetryMDT) parseRib(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField,
func parseRib(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField,
encodingPath string, tags map[string]string, timestamp time.Time) {
// RIB
measurement := encodingPath
@ -574,7 +574,7 @@ func (c *CiscoTelemetryMDT) parseRib(grouper *metric.SeriesGrouper, field *telem
}
}
func (c *CiscoTelemetryMDT) parseMicroburst(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField,
func parseMicroburst(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField,
encodingPath string, tags map[string]string, timestamp time.Time) {
var nxMicro *telemetry.TelemetryField
var nxMicro1 *telemetry.TelemetryField
@ -623,12 +623,12 @@ func (c *CiscoTelemetryMDT) parseClassAttributeField(grouper *metric.SeriesGroup
isDme := strings.Contains(encodingPath, "sys/")
if encodingPath == "rib" {
// handle native data path rib
c.parseRib(grouper, field, encodingPath, tags, timestamp)
parseRib(grouper, field, encodingPath, tags, timestamp)
return
}
if encodingPath == "microburst" {
// dump microburst
c.parseMicroburst(grouper, field, encodingPath, tags, timestamp)
parseMicroburst(grouper, field, encodingPath, tags, timestamp)
return
}
if field == nil || !isDme || len(field.Fields) == 0 || len(field.Fields[0].Fields) == 0 || len(field.Fields[0].Fields[0].Fields) == 0 {

View File

@ -210,7 +210,7 @@ func (ch *ClickHouse) commonMetrics(acc telegraf.Accumulator, conn *connect, met
Value float64 `json:"value"`
}
tags := ch.makeDefaultTags(conn)
tags := makeDefaultTags(conn)
fields := make(map[string]interface{})
if commonMetricsIsFloat[metric] {
@ -241,7 +241,7 @@ func (ch *ClickHouse) zookeeper(acc telegraf.Accumulator, conn *connect) error {
if err := ch.execQuery(conn.url, systemZookeeperExistsSQL, &zkExists); err != nil {
return err
}
tags := ch.makeDefaultTags(conn)
tags := makeDefaultTags(conn)
if len(zkExists) > 0 && zkExists[0].ZkExists > 0 {
var zkRootNodes []struct {
@ -270,7 +270,7 @@ func (ch *ClickHouse) replicationQueue(acc telegraf.Accumulator, conn *connect)
return err
}
tags := ch.makeDefaultTags(conn)
tags := makeDefaultTags(conn)
if len(replicationQueueExists) > 0 && replicationQueueExists[0].ReplicationQueueExists > 0 {
var replicationTooManyTries []struct {
@ -301,7 +301,7 @@ func (ch *ClickHouse) detachedParts(acc telegraf.Accumulator, conn *connect) err
}
if len(detachedParts) > 0 {
tags := ch.makeDefaultTags(conn)
tags := makeDefaultTags(conn)
acc.AddFields("clickhouse_detached_parts",
map[string]interface{}{
"detached_parts": uint64(detachedParts[0].DetachedParts),
@ -323,7 +323,7 @@ func (ch *ClickHouse) dictionaries(acc telegraf.Accumulator, conn *connect) erro
}
for _, dict := range brokenDictionaries {
tags := ch.makeDefaultTags(conn)
tags := makeDefaultTags(conn)
isLoaded := uint64(1)
if dict.Status != "LOADED" {
@ -356,7 +356,7 @@ func (ch *ClickHouse) mutations(acc telegraf.Accumulator, conn *connect) error {
}
if len(mutationsStatus) > 0 {
tags := ch.makeDefaultTags(conn)
tags := makeDefaultTags(conn)
acc.AddFields("clickhouse_mutations",
map[string]interface{}{
@ -384,7 +384,7 @@ func (ch *ClickHouse) disks(acc telegraf.Accumulator, conn *connect) error {
}
for _, disk := range disksStatus {
tags := ch.makeDefaultTags(conn)
tags := makeDefaultTags(conn)
tags["name"] = disk.Name
tags["path"] = disk.Path
@ -413,7 +413,7 @@ func (ch *ClickHouse) processes(acc telegraf.Accumulator, conn *connect) error {
}
for _, process := range processesStats {
tags := ch.makeDefaultTags(conn)
tags := makeDefaultTags(conn)
tags["query_type"] = process.QueryType
acc.AddFields("clickhouse_processes",
@ -448,7 +448,7 @@ func (ch *ClickHouse) textLog(acc telegraf.Accumulator, conn *connect) error {
}
for _, textLogItem := range textLogLast10MinMessages {
tags := ch.makeDefaultTags(conn)
tags := makeDefaultTags(conn)
tags["level"] = textLogItem.Level
acc.AddFields("clickhouse_text_log",
map[string]interface{}{
@ -473,7 +473,7 @@ func (ch *ClickHouse) tables(acc telegraf.Accumulator, conn *connect) error {
if err := ch.execQuery(conn.url, systemPartsSQL, &parts); err != nil {
return err
}
tags := ch.makeDefaultTags(conn)
tags := makeDefaultTags(conn)
for _, part := range parts {
tags["table"] = part.Table
@ -490,7 +490,7 @@ func (ch *ClickHouse) tables(acc telegraf.Accumulator, conn *connect) error {
return nil
}
func (ch *ClickHouse) makeDefaultTags(conn *connect) map[string]string {
func makeDefaultTags(conn *connect) map[string]string {
tags := map[string]string{
"source": conn.Hostname,
}

View File

@ -152,7 +152,7 @@ func (ps *PubSub) Start(ac telegraf.Accumulator) error {
}
// Gather does nothing for this service input.
func (ps *PubSub) Gather(_ telegraf.Accumulator) error {
func (*PubSub) Gather(telegraf.Accumulator) error {
return nil
}

View File

@ -133,7 +133,7 @@ func (p *PubSubPush) Start(acc telegraf.Accumulator) error {
return nil
}
func (p *PubSubPush) Gather(_ telegraf.Accumulator) error {
func (*PubSubPush) Gather(telegraf.Accumulator) error {
return nil
}

View File

@ -219,7 +219,7 @@ func TestServeHTTP(t *testing.T) {
type testMetricMaker struct{}
func (tm *testMetricMaker) Name() string {
func (*testMetricMaker) Name() string {
return "TestPlugin"
}
@ -227,11 +227,11 @@ func (tm *testMetricMaker) LogName() string {
return tm.Name()
}
func (tm *testMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric {
func (*testMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric {
return metric
}
func (tm *testMetricMaker) Log() telegraf.Logger {
func (*testMetricMaker) Log() telegraf.Logger {
return logger.New("test", "test", "")
}

View File

@ -21,7 +21,7 @@ import (
type mockGatherCloudWatchClient struct{}
func (m *mockGatherCloudWatchClient) ListMetrics(
func (*mockGatherCloudWatchClient) ListMetrics(
_ context.Context,
params *cloudwatch.ListMetricsInput,
_ ...func(*cloudwatch.Options),
@ -56,7 +56,7 @@ func (m *mockGatherCloudWatchClient) ListMetrics(
return response, nil
}
func (m *mockGatherCloudWatchClient) GetMetricData(
func (*mockGatherCloudWatchClient) GetMetricData(
_ context.Context,
params *cloudwatch.GetMetricDataInput,
_ ...func(*cloudwatch.Options),
@ -307,10 +307,10 @@ func TestGather_MultipleNamespaces(t *testing.T) {
type mockSelectMetricsCloudWatchClient struct{}
func (m *mockSelectMetricsCloudWatchClient) ListMetrics(
_ context.Context,
_ *cloudwatch.ListMetricsInput,
_ ...func(*cloudwatch.Options),
func (*mockSelectMetricsCloudWatchClient) ListMetrics(
context.Context,
*cloudwatch.ListMetricsInput,
...func(*cloudwatch.Options),
) (*cloudwatch.ListMetricsOutput, error) {
metrics := make([]types.Metric, 0)
// 4 metrics are available
@ -358,10 +358,10 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(
return result, nil
}
func (m *mockSelectMetricsCloudWatchClient) GetMetricData(
_ context.Context,
_ *cloudwatch.GetMetricDataInput,
_ ...func(*cloudwatch.Options),
func (*mockSelectMetricsCloudWatchClient) GetMetricData(
context.Context,
*cloudwatch.GetMetricDataInput,
...func(*cloudwatch.Options),
) (*cloudwatch.GetMetricDataOutput, error) {
return nil, nil
}

View File

@ -149,7 +149,7 @@ func (cms *CloudWatchMetricStreams) Start(acc telegraf.Accumulator) error {
return nil
}
func (cms *CloudWatchMetricStreams) Gather(_ telegraf.Accumulator) error {
func (*CloudWatchMetricStreams) Gather(telegraf.Accumulator) error {
return nil
}

View File

@ -207,43 +207,43 @@ func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host stri
fields := make(map[string]interface{}, 31)
// CouchDB meta stats:
c.generateFields(fields, "couchdb_auth_cache_misses", stats.Couchdb.AuthCacheMisses)
c.generateFields(fields, "couchdb_database_writes", stats.Couchdb.DatabaseWrites)
c.generateFields(fields, "couchdb_open_databases", stats.Couchdb.OpenDatabases)
c.generateFields(fields, "couchdb_auth_cache_hits", stats.Couchdb.AuthCacheHits)
c.generateFields(fields, "couchdb_request_time", requestTime)
c.generateFields(fields, "couchdb_database_reads", stats.Couchdb.DatabaseReads)
c.generateFields(fields, "couchdb_open_os_files", stats.Couchdb.OpenOsFiles)
generateFields(fields, "couchdb_auth_cache_misses", stats.Couchdb.AuthCacheMisses)
generateFields(fields, "couchdb_database_writes", stats.Couchdb.DatabaseWrites)
generateFields(fields, "couchdb_open_databases", stats.Couchdb.OpenDatabases)
generateFields(fields, "couchdb_auth_cache_hits", stats.Couchdb.AuthCacheHits)
generateFields(fields, "couchdb_request_time", requestTime)
generateFields(fields, "couchdb_database_reads", stats.Couchdb.DatabaseReads)
generateFields(fields, "couchdb_open_os_files", stats.Couchdb.OpenOsFiles)
// http request methods stats:
c.generateFields(fields, "httpd_request_methods_put", httpdRequestMethodsPut)
c.generateFields(fields, "httpd_request_methods_get", httpdRequestMethodsGet)
c.generateFields(fields, "httpd_request_methods_copy", httpdRequestMethodsCopy)
c.generateFields(fields, "httpd_request_methods_delete", httpdRequestMethodsDelete)
c.generateFields(fields, "httpd_request_methods_post", httpdRequestMethodsPost)
c.generateFields(fields, "httpd_request_methods_head", httpdRequestMethodsHead)
generateFields(fields, "httpd_request_methods_put", httpdRequestMethodsPut)
generateFields(fields, "httpd_request_methods_get", httpdRequestMethodsGet)
generateFields(fields, "httpd_request_methods_copy", httpdRequestMethodsCopy)
generateFields(fields, "httpd_request_methods_delete", httpdRequestMethodsDelete)
generateFields(fields, "httpd_request_methods_post", httpdRequestMethodsPost)
generateFields(fields, "httpd_request_methods_head", httpdRequestMethodsHead)
// status code stats:
c.generateFields(fields, "httpd_status_codes_200", httpdStatusCodesStatus200)
c.generateFields(fields, "httpd_status_codes_201", httpdStatusCodesStatus201)
c.generateFields(fields, "httpd_status_codes_202", httpdStatusCodesStatus202)
c.generateFields(fields, "httpd_status_codes_301", httpdStatusCodesStatus301)
c.generateFields(fields, "httpd_status_codes_304", httpdStatusCodesStatus304)
c.generateFields(fields, "httpd_status_codes_400", httpdStatusCodesStatus400)
c.generateFields(fields, "httpd_status_codes_401", httpdStatusCodesStatus401)
c.generateFields(fields, "httpd_status_codes_403", httpdStatusCodesStatus403)
c.generateFields(fields, "httpd_status_codes_404", httpdStatusCodesStatus404)
c.generateFields(fields, "httpd_status_codes_405", httpdStatusCodesStatus405)
c.generateFields(fields, "httpd_status_codes_409", httpdStatusCodesStatus409)
c.generateFields(fields, "httpd_status_codes_412", httpdStatusCodesStatus412)
c.generateFields(fields, "httpd_status_codes_500", httpdStatusCodesStatus500)
generateFields(fields, "httpd_status_codes_200", httpdStatusCodesStatus200)
generateFields(fields, "httpd_status_codes_201", httpdStatusCodesStatus201)
generateFields(fields, "httpd_status_codes_202", httpdStatusCodesStatus202)
generateFields(fields, "httpd_status_codes_301", httpdStatusCodesStatus301)
generateFields(fields, "httpd_status_codes_304", httpdStatusCodesStatus304)
generateFields(fields, "httpd_status_codes_400", httpdStatusCodesStatus400)
generateFields(fields, "httpd_status_codes_401", httpdStatusCodesStatus401)
generateFields(fields, "httpd_status_codes_403", httpdStatusCodesStatus403)
generateFields(fields, "httpd_status_codes_404", httpdStatusCodesStatus404)
generateFields(fields, "httpd_status_codes_405", httpdStatusCodesStatus405)
generateFields(fields, "httpd_status_codes_409", httpdStatusCodesStatus409)
generateFields(fields, "httpd_status_codes_412", httpdStatusCodesStatus412)
generateFields(fields, "httpd_status_codes_500", httpdStatusCodesStatus500)
// httpd stats:
c.generateFields(fields, "httpd_clients_requesting_changes", stats.Httpd.ClientsRequestingChanges)
c.generateFields(fields, "httpd_temporary_view_reads", stats.Httpd.TemporaryViewReads)
c.generateFields(fields, "httpd_requests", stats.Httpd.Requests)
c.generateFields(fields, "httpd_bulk_requests", stats.Httpd.BulkRequests)
c.generateFields(fields, "httpd_view_reads", stats.Httpd.ViewReads)
generateFields(fields, "httpd_clients_requesting_changes", stats.Httpd.ClientsRequestingChanges)
generateFields(fields, "httpd_temporary_view_reads", stats.Httpd.TemporaryViewReads)
generateFields(fields, "httpd_requests", stats.Httpd.Requests)
generateFields(fields, "httpd_bulk_requests", stats.Httpd.BulkRequests)
generateFields(fields, "httpd_view_reads", stats.Httpd.ViewReads)
tags := map[string]string{
"server": host,
@ -252,7 +252,7 @@ func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host stri
return nil
}
func (c *CouchDB) generateFields(fields map[string]interface{}, prefix string, obj metaData) {
func generateFields(fields map[string]interface{}, prefix string, obj metaData) {
if obj.Value != nil {
fields[prefix+"_value"] = *obj.Value
}

View File

@ -61,7 +61,7 @@ func (s *CSGO) Gather(acc telegraf.Accumulator) error {
}
// Generate the metric and add it to the accumulator
m, err := s.parseResponse(addr, response, t)
m, err := parseResponse(addr, response, t)
if err != nil {
acc.AddError(err)
return
@ -74,7 +74,7 @@ func (s *CSGO) Gather(acc telegraf.Accumulator) error {
return nil
}
func (s *CSGO) parseResponse(addr, response string, t time.Time) (telegraf.Metric, error) {
func parseResponse(addr, response string, t time.Time) (telegraf.Metric, error) {
rows := strings.Split(response, "\n")
if len(rows) < 2 {
return nil, errors.New("bad response")

View File

@ -131,7 +131,7 @@ func (c *CtrlXDataLayer) Start(acc telegraf.Accumulator) error {
return nil
}
func (c *CtrlXDataLayer) Gather(_ telegraf.Accumulator) error {
func (*CtrlXDataLayer) Gather(telegraf.Accumulator) error {
// Metrics are sent to the accumulator asynchronously in worker thread. So nothing to do here.
return nil
}

View File

@ -133,7 +133,7 @@ func (c *clusterClient) setToken(token string) {
}
func (c *clusterClient) login(ctx context.Context, sa *serviceAccount) (*authToken, error) {
token, err := c.createLoginToken(sa)
token, err := createLoginToken(sa)
if err != nil {
return nil, err
}
@ -316,7 +316,7 @@ func (c *clusterClient) toURL(path string) string {
return clusterURL.String()
}
func (c *clusterClient) createLoginToken(sa *serviceAccount) (string, error) {
func createLoginToken(sa *serviceAccount) (string, error) {
token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims{
UID: sa.accountID,
RegisteredClaims: jwt.RegisteredClaims{

View File

@ -59,14 +59,14 @@ func (c *tokenCreds) token(_ context.Context, _ client) (string, error) {
return token, nil
}
func (c *tokenCreds) isExpired() bool {
func (*tokenCreds) isExpired() bool {
return true
}
func (c *nullCreds) token(_ context.Context, _ client) (string, error) {
func (*nullCreds) token(context.Context, client) (string, error) {
return "", nil
}
func (c *nullCreds) isExpired() bool {
func (*nullCreds) isExpired() bool {
return true
}

View File

@ -131,7 +131,7 @@ func (d *DCOS) gatherNode(ctx context.Context, acc telegraf.Accumulator, cluster
acc.AddError(err)
return
}
d.addNodeMetrics(acc, cluster, m)
addNodeMetrics(acc, cluster, m)
}()
d.gatherContainers(ctx, acc, cluster, node)
@ -160,7 +160,7 @@ func (d *DCOS) gatherContainers(ctx context.Context, acc telegraf.Accumulator, c
acc.AddError(err)
return
}
d.addContainerMetrics(acc, cluster, m)
addContainerMetrics(acc, cluster, m)
}(container.ID)
}
@ -177,14 +177,14 @@ func (d *DCOS) gatherContainers(ctx context.Context, acc telegraf.Accumulator, c
acc.AddError(err)
return
}
d.addAppMetrics(acc, cluster, m)
addAppMetrics(acc, cluster, m)
}(container.ID)
}
}
wg.Wait()
}
func (d *DCOS) createPoints(m *metrics) []*point {
func createPoints(m *metrics) []*point {
points := make(map[string]*point)
for _, dp := range m.Datapoints {
fieldKey := strings.ReplaceAll(dp.Name, ".", "_")
@ -244,10 +244,10 @@ func (d *DCOS) createPoints(m *metrics) []*point {
return results
}
func (d *DCOS) addMetrics(acc telegraf.Accumulator, cluster, mname string, m *metrics, tagDimensions []string) {
func addMetrics(acc telegraf.Accumulator, cluster, mname string, m *metrics, tagDimensions []string) {
tm := time.Now()
points := d.createPoints(m)
points := createPoints(m)
for _, p := range points {
tags := make(map[string]string)
@ -266,16 +266,16 @@ func (d *DCOS) addMetrics(acc telegraf.Accumulator, cluster, mname string, m *me
}
}
func (d *DCOS) addNodeMetrics(acc telegraf.Accumulator, cluster string, m *metrics) {
d.addMetrics(acc, cluster, "dcos_node", m, nodeDimensions)
func addNodeMetrics(acc telegraf.Accumulator, cluster string, m *metrics) {
addMetrics(acc, cluster, "dcos_node", m, nodeDimensions)
}
func (d *DCOS) addContainerMetrics(acc telegraf.Accumulator, cluster string, m *metrics) {
d.addMetrics(acc, cluster, "dcos_container", m, containerDimensions)
func addContainerMetrics(acc telegraf.Accumulator, cluster string, m *metrics) {
addMetrics(acc, cluster, "dcos_container", m, containerDimensions)
}
func (d *DCOS) addAppMetrics(acc telegraf.Accumulator, cluster string, m *metrics) {
d.addMetrics(acc, cluster, "dcos_app", m, appDimensions)
func addAppMetrics(acc telegraf.Accumulator, cluster string, m *metrics) {
addMetrics(acc, cluster, "dcos_app", m, appDimensions)
}
func (d *DCOS) initialize() error {

View File

@ -196,8 +196,7 @@ func TestAddNodeMetrics(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator
dcos := &DCOS{}
dcos.addNodeMetrics(&acc, "a", tt.metrics)
addNodeMetrics(&acc, "a", tt.metrics)
for i, ok := range tt.check(&acc) {
require.Truef(t, ok, "Index was not true: %d", i)
}
@ -267,8 +266,7 @@ func TestAddContainerMetrics(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator
dcos := &DCOS{}
dcos.addContainerMetrics(&acc, "a", tt.metrics)
addContainerMetrics(&acc, "a", tt.metrics)
for i, ok := range tt.check(&acc) {
require.Truef(t, ok, "Index was not true: %d", i)
}
@ -341,8 +339,7 @@ func TestAddAppMetrics(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator
dcos := &DCOS{}
dcos.addAppMetrics(&acc, "a", tt.metrics)
addAppMetrics(&acc, "a", tt.metrics)
for i, ok := range tt.check(&acc) {
require.Truef(t, ok, "Index was not true: %d", i)
}

View File

@ -128,7 +128,7 @@ func (d *DockerLogs) Init() error {
}
// Start is a noop which is required for a *DockerLogs to implement the telegraf.ServiceInput interface
func (d *DockerLogs) Start(telegraf.Accumulator) error {
func (*DockerLogs) Start(telegraf.Accumulator) error {
return nil
}

View File

@ -40,7 +40,7 @@ type response struct {
io.Reader
}
func (r *response) Close() error {
func (*response) Close() error {
return nil
}

View File

@ -56,7 +56,7 @@ func (d *Dovecot) Gather(acc telegraf.Accumulator) error {
wg.Add(1)
go func(s string, f string) {
defer wg.Done()
acc.AddError(d.gatherServer(s, acc, d.Type, f))
acc.AddError(gatherServer(s, acc, d.Type, f))
}(server, filter)
}
}
@ -65,7 +65,7 @@ func (d *Dovecot) Gather(acc telegraf.Accumulator) error {
return nil
}
func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype, filter string) error {
func gatherServer(addr string, acc telegraf.Accumulator, qtype, filter string) error {
var proto string
if strings.HasPrefix(addr, "/") {

View File

@ -68,7 +68,7 @@ func (ecs *Ecs) Gather(acc telegraf.Accumulator) error {
}
// accumulate metrics
ecs.accTask(task, taskTags, acc)
accTask(task, taskTags, acc)
ecs.accContainers(task, taskTags, acc)
return nil
@ -137,7 +137,7 @@ func resolveEndpoint(ecs *Ecs) {
ecs.metadataVersion = 2
}
func (ecs *Ecs) accTask(task *ecsTask, tags map[string]string, acc telegraf.Accumulator) {
func accTask(task *ecsTask, tags map[string]string, acc telegraf.Accumulator) {
taskFields := map[string]interface{}{
"desired_status": task.DesiredStatus,
"known_status": task.KnownStatus,

View File

@ -159,7 +159,7 @@ func (e *Elasticsearch) Init() error {
return nil
}
func (e *Elasticsearch) Start(_ telegraf.Accumulator) error {
func (*Elasticsearch) Start(telegraf.Accumulator) error {
return nil
}

View File

@ -89,7 +89,7 @@ func (e *ElasticsearchQuery) Init() error {
return nil
}
func (e *ElasticsearchQuery) Start(_ telegraf.Accumulator) error {
func (*ElasticsearchQuery) Start(telegraf.Accumulator) error {
return nil
}

View File

@ -269,15 +269,15 @@ func (c *commandEthtool) init() error {
return nil
}
func (c *commandEthtool) driverName(intf namespacedInterface) (driver string, err error) {
func (*commandEthtool) driverName(intf namespacedInterface) (driver string, err error) {
return intf.namespace.driverName(intf)
}
func (c *commandEthtool) stats(intf namespacedInterface) (stats map[string]uint64, err error) {
func (*commandEthtool) stats(intf namespacedInterface) (stats map[string]uint64, err error) {
return intf.namespace.stats(intf)
}
func (c *commandEthtool) get(intf namespacedInterface) (stats map[string]uint64, err error) {
func (*commandEthtool) get(intf namespacedInterface) (stats map[string]uint64, err error) {
return intf.namespace.get(intf)
}

View File

@ -35,19 +35,19 @@ func (n *namespaceMock) name() string {
return n.namespaceName
}
func (n *namespaceMock) interfaces() ([]namespacedInterface, error) {
func (*namespaceMock) interfaces() ([]namespacedInterface, error) {
return nil, errors.New("it is a test bug to invoke this function")
}
func (n *namespaceMock) driverName(_ namespacedInterface) (string, error) {
func (*namespaceMock) driverName(_ namespacedInterface) (string, error) {
return "", errors.New("it is a test bug to invoke this function")
}
func (n *namespaceMock) stats(_ namespacedInterface) (map[string]uint64, error) {
func (*namespaceMock) stats(_ namespacedInterface) (map[string]uint64, error) {
return nil, errors.New("it is a test bug to invoke this function")
}
func (n *namespaceMock) get(_ namespacedInterface) (map[string]uint64, error) {
func (*namespaceMock) get(_ namespacedInterface) (map[string]uint64, error) {
return nil, errors.New("it is a test bug to invoke this function")
}
@ -55,7 +55,7 @@ type commandEthtoolMock struct {
interfaceMap map[string]*interfaceMock
}
func (c *commandEthtoolMock) init() error {
func (*commandEthtoolMock) init() error {
// Not required for test mock
return nil
}

View File

@ -59,7 +59,7 @@ func (*Exec) SampleConfig() string {
return sampleConfig
}
func (e *Exec) Init() error {
func (*Exec) Init() error {
return nil
}
@ -121,7 +121,7 @@ func (e *Exec) Gather(acc telegraf.Accumulator) error {
return nil
}
func (c commandRunner) truncate(buf bytes.Buffer) bytes.Buffer {
func truncate(buf bytes.Buffer) bytes.Buffer {
// Limit the number of bytes.
didTruncate := false
if buf.Len() > maxStderrBytes {

View File

@ -302,10 +302,9 @@ func TestTruncate(t *testing.T) {
},
}
c := commandRunner{}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
res := c.truncate(*tt.bufF())
res := truncate(*tt.bufF())
require.Equal(t, tt.expF().Bytes(), res.Bytes())
})
}

View File

@ -44,7 +44,7 @@ func (c commandRunner) run(
out = removeWindowsCarriageReturns(out)
if stderr.Len() > 0 && !c.debug {
stderr = removeWindowsCarriageReturns(stderr)
stderr = c.truncate(stderr)
stderr = truncate(stderr)
}
return out.Bytes(), stderr.Bytes(), runErr

View File

@ -46,7 +46,7 @@ func (c commandRunner) run(
out = removeWindowsCarriageReturns(out)
if stderr.Len() > 0 && !c.debug {
stderr = removeWindowsCarriageReturns(stderr)
stderr = c.truncate(stderr)
stderr = truncate(stderr)
}
return out.Bytes(), stderr.Bytes(), runErr

View File

@ -362,7 +362,7 @@ func readChanWithTimeout(t *testing.T, metrics chan telegraf.Metric, timeout tim
type TestMetricMaker struct{}
func (tm *TestMetricMaker) Name() string {
func (*TestMetricMaker) Name() string {
return "TestPlugin"
}
@ -370,11 +370,11 @@ func (tm *TestMetricMaker) LogName() string {
return tm.Name()
}
func (tm *TestMetricMaker) MakeMetric(aMetric telegraf.Metric) telegraf.Metric {
func (*TestMetricMaker) MakeMetric(aMetric telegraf.Metric) telegraf.Metric {
return aMetric
}
func (tm *TestMetricMaker) Log() telegraf.Logger {
func (*TestMetricMaker) Log() telegraf.Logger {
return logger.New("TestPlugin", "test", "")
}

View File

@ -8,16 +8,16 @@ type inputShim struct {
}
// LogName satisfies the MetricMaker interface
func (i inputShim) LogName() string {
func (inputShim) LogName() string {
return ""
}
// MakeMetric satisfies the MetricMaker interface
func (i inputShim) MakeMetric(m telegraf.Metric) telegraf.Metric {
func (inputShim) MakeMetric(m telegraf.Metric) telegraf.Metric {
return m // don't need to do anything to it.
}
// Log satisfies the MetricMaker interface
func (i inputShim) Log() telegraf.Logger {
func (inputShim) Log() telegraf.Logger {
return nil
}

View File

@ -85,11 +85,7 @@ type testInput struct {
metricProcessed chan bool
}
func (i *testInput) SampleConfig() string {
return ""
}
func (i *testInput) Description() string {
func (*testInput) SampleConfig() string {
return ""
}
@ -105,11 +101,11 @@ func (i *testInput) Gather(acc telegraf.Accumulator) error {
return nil
}
func (i *testInput) Start(_ telegraf.Accumulator) error {
func (*testInput) Start(telegraf.Accumulator) error {
return nil
}
func (i *testInput) Stop() {
func (*testInput) Stop() {
}
func TestLoadConfig(t *testing.T) {
@ -137,15 +133,11 @@ type serviceInput struct {
SecretValue string `toml:"secret_value"`
}
func (i *serviceInput) SampleConfig() string {
func (*serviceInput) SampleConfig() string {
return ""
}
func (i *serviceInput) Description() string {
return ""
}
func (i *serviceInput) Gather(acc telegraf.Accumulator) error {
func (*serviceInput) Gather(acc telegraf.Accumulator) error {
acc.AddFields("measurement",
map[string]interface{}{
"field": 1,
@ -157,11 +149,11 @@ func (i *serviceInput) Gather(acc telegraf.Accumulator) error {
return nil
}
func (i *serviceInput) Start(_ telegraf.Accumulator) error {
func (*serviceInput) Start(telegraf.Accumulator) error {
return nil
}
func (i *serviceInput) Stop() {
func (*serviceInput) Stop() {
}
// we can get stuck if stdout gets clogged up and nobody's reading from it.