chore: Fix linter findings for `revive:unused-receiver` in `plugins/outputs` (#16338)
This commit is contained in:
parent f8e7aeceb0
commit 878646a2c4
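The revive `unused-receiver` check flags method receivers that are named but never referenced in the method body. Most hunks below simply drop the receiver name, which changes neither the method set nor the behaviour of the plugin. A minimal sketch of the pattern, using a hypothetical `output` type that is not part of this diff:

package main

import "fmt"

// output stands in for any plugin type; it is invented for this illustration.
type output struct{}

// Before the fix the receiver was named even though the body never uses it,
// which revive reports:
//
//     func (o *output) Close() error { return nil }

// After the fix the receiver is anonymous; behaviour and method set are unchanged.
func (*output) Close() error {
    return nil
}

func main() {
    var o output
    fmt.Println(o.Close()) // prints <nil>
}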
@@ -140,7 +140,7 @@ func (p *Point) setValue(v interface{}) error {
     return nil
 }

-func (a *Amon) Close() error {
+func (*Amon) Close() error {
     return nil
 }
@@ -32,11 +32,11 @@ const (

 type externalAuth struct{}

-func (a *externalAuth) Mechanism() string {
+func (*externalAuth) Mechanism() string {
     return "EXTERNAL"
 }

-func (a *externalAuth) Response() string {
+func (*externalAuth) Response() string {
     return "\000"
 }
@@ -7,6 +7,6 @@ import (
 type diagnosticsMessageSubscriber struct {
 }

-func (ms diagnosticsMessageSubscriber) Subscribe(handler appinsights.DiagnosticsMessageHandler) appinsights.DiagnosticsMessageListener {
+func (diagnosticsMessageSubscriber) Subscribe(handler appinsights.DiagnosticsMessageHandler) appinsights.DiagnosticsMessageListener {
     return appinsights.NewDiagnosticsMessageListener(handler)
 }
@@ -329,11 +329,11 @@ func (f *fakeIngestor) FromReader(_ context.Context, reader io.Reader, _ ...inge
     return &ingest.Result{}, nil
 }

-func (f *fakeIngestor) FromFile(_ context.Context, _ string, _ ...ingest.FileOption) (*ingest.Result, error) {
+func (*fakeIngestor) FromFile(context.Context, string, ...ingest.FileOption) (*ingest.Result, error) {
     return &ingest.Result{}, nil
 }

-func (f *fakeIngestor) Close() error {
+func (*fakeIngestor) Close() error {
     return nil
 }

@@ -351,7 +351,7 @@ func (m *mockIngestor) FromReader(_ context.Context, reader io.Reader, _ ...inge
     return &ingest.Result{}, nil
 }

-func (m *mockIngestor) FromFile(_ context.Context, _ string, _ ...ingest.FileOption) (*ingest.Result, error) {
+func (*mockIngestor) FromFile(context.Context, string, ...ingest.FileOption) (*ingest.Result, error) {
     return &ingest.Result{}, nil
 }

@@ -364,6 +364,6 @@ func (m *mockIngestor) Records() []string {
     return m.records
 }

-func (m *mockIngestor) Close() error {
+func (*mockIngestor) Close() error {
     return nil
 }
@@ -118,7 +118,7 @@ func (s *BigQuery) Write(metrics []telegraf.Metric) error {
         return s.writeCompact(metrics)
     }

-    groupedMetrics := s.groupByMetricName(metrics)
+    groupedMetrics := groupByMetricName(metrics)

     var wg sync.WaitGroup

@@ -155,7 +155,7 @@ func (s *BigQuery) writeCompact(metrics []telegraf.Metric) error {
     return inserter.Put(ctx, compactValues)
 }

-func (s *BigQuery) groupByMetricName(metrics []telegraf.Metric) map[string][]bigquery.ValueSaver {
+func groupByMetricName(metrics []telegraf.Metric) map[string][]bigquery.ValueSaver {
     groupedMetrics := make(map[string][]bigquery.ValueSaver)

     for _, m := range metrics {
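Where a method body does not use the receiver at all (not even its fields), the change goes one step further and turns the method into a package-level function — `groupByMetricName` above is one example, and `validateResponse`, `GetTagKeys`, `buildHistogram` and `HandleError` below follow the same pattern — with every call site updated to drop the receiver. A rough sketch of that refactor, using made-up names rather than the real BigQuery signature:

package main

import "fmt"

// Before: a method that never touches its receiver.
//
//     func (s *BigQuery) groupByName(names []string) map[string]int { ... }
//
// After: a plain function; callers change from s.groupByName(x) to groupByName(x).
func groupByName(names []string) map[string]int {
    grouped := make(map[string]int)
    for _, n := range names {
        grouped[n]++
    }
    return grouped
}

func main() {
    fmt.Println(groupByName([]string{"cpu", "mem", "cpu"})) // map[cpu:2 mem:1]
}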
@@ -76,7 +76,7 @@ func (c *Clarify) Init() error {
     return errors.New("no credentials provided")
 }

-func (c *Clarify) Connect() error {
+func (*Clarify) Connect() error {
     return nil
 }

@@ -172,7 +172,7 @@ func (c *Clarify) generateID(m telegraf.Metric, f *telegraf.Field) (string, erro
     return id, nil
 }

-func (c *Clarify) SampleConfig() string {
+func (*Clarify) SampleConfig() string {
     return sampleConfig
 }
@@ -103,7 +103,7 @@ func getTestResources(tT *testing.T, settings pubsub.PublishSettings, testM []te
     return ps, t, metrics
 }

-func (t *stubTopic) ID() string {
+func (*stubTopic) ID() string {
     return "test-topic"
 }
@@ -212,7 +212,7 @@ func (c *CloudWatchLogs) Connect() error {
 }

 // Close closes plugin connection with remote receiver
-func (c *CloudWatchLogs) Close() error {
+func (*CloudWatchLogs) Close() error {
     return nil
 }

@@ -28,7 +28,7 @@ func (c *mockCloudWatchLogs) Init(lsName string) {
     c.pushedLogEvents = make([]types.InputLogEvent, 0)
 }

-func (c *mockCloudWatchLogs) DescribeLogGroups(
+func (*mockCloudWatchLogs) DescribeLogGroups(
     context.Context,
     *cloudwatchlogs.DescribeLogGroupsInput,
     ...func(options *cloudwatchlogs.Options),

@@ -60,7 +60,7 @@ func (c *mockCloudWatchLogs) DescribeLogStreams(
     return output, nil
 }

-func (c *mockCloudWatchLogs) CreateLogStream(
+func (*mockCloudWatchLogs) CreateLogStream(
     context.Context,
     *cloudwatchlogs.CreateLogStreamInput,
     ...func(options *cloudwatchlogs.Options),
@@ -270,7 +270,7 @@ func (p *Point) setValue(v interface{}) error {
     return nil
 }

-func (d *Datadog) Close() error {
+func (*Datadog) Close() error {
     return nil
 }
@@ -17,9 +17,9 @@ func (*Discard) SampleConfig() string {
     return sampleConfig
 }

-func (d *Discard) Connect() error { return nil }
-func (d *Discard) Close() error { return nil }
-func (d *Discard) Write(_ []telegraf.Metric) error {
+func (*Discard) Connect() error { return nil }
+func (*Discard) Close() error { return nil }
+func (*Discard) Write([]telegraf.Metric) error {
     return nil
 }
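A related simplification is visible in the `discard` hunk above: once the receiver is anonymous, parameters that were already blank (`_`) are written as bare types, which Go permits since a parameter list may declare types without names. A tiny self-contained illustration — the `sink` and `metric` types are invented for the example and are not part of the diff:

package main

import "fmt"

type metric struct{ name string }

type sink struct{}

// Neither the receiver nor the slice is used, so both stay unnamed.
func (*sink) Write([]metric) error {
    return nil
}

func main() {
    var s sink
    fmt.Println(s.Write([]metric{{name: "cpu"}})) // prints <nil>
}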
@@ -52,7 +52,7 @@ func (*Dynatrace) SampleConfig() string {
 }

 // Connect Connects the Dynatrace output plugin to the Telegraf stream
-func (d *Dynatrace) Connect() error {
+func (*Dynatrace) Connect() error {
     return nil
 }

@@ -231,8 +231,8 @@ func init() {

 func (d *Dynatrace) getTypeOption(metric telegraf.Metric, field *telegraf.Field) dynatrace_metric.MetricOption {
     metricName := metric.Name() + "." + field.Key
-    if d.isCounterMetricsMatch(d.AddCounterMetrics, metricName) ||
-        d.isCounterMetricsPatternsMatch(d.AddCounterMetricsPatterns, metricName) {
+    if isCounterMetricsMatch(d.AddCounterMetrics, metricName) ||
+        isCounterMetricsPatternsMatch(d.AddCounterMetricsPatterns, metricName) {
         switch v := field.Value.(type) {
         case float64:
             return dynatrace_metric.WithFloatCounterValueDelta(v)

@@ -261,7 +261,7 @@ func (d *Dynatrace) getTypeOption(metric telegraf.Metric, field *telegraf.Field)
     return nil
 }

-func (d *Dynatrace) isCounterMetricsMatch(counterMetrics []string, metricName string) bool {
+func isCounterMetricsMatch(counterMetrics []string, metricName string) bool {
     for _, i := range counterMetrics {
         if i == metricName {
             return true

@@ -270,7 +270,7 @@ func (d *Dynatrace) isCounterMetricsMatch(counterMetrics []string, metricName st
     return false
 }

-func (d *Dynatrace) isCounterMetricsPatternsMatch(counterPatterns []string, metricName string) bool {
+func isCounterMetricsPatternsMatch(counterPatterns []string, metricName string) bool {
     for _, pattern := range counterPatterns {
         regex, err := regexp.Compile(pattern)
         if err == nil && regex.MatchString(metricName) {
@@ -836,7 +836,7 @@ type loggerStub struct {
     testutil.Logger
 }

-func (l loggerStub) Warnf(_ string, _ ...interface{}) {
+func (loggerStub) Warnf(string, ...interface{}) {
     warnfCalledTimes++
 }
@@ -245,8 +245,8 @@ func (a *Elasticsearch) Connect() error {
         }
     }

-    a.IndexName, a.tagKeys = a.GetTagKeys(a.IndexName)
-    a.pipelineName, a.pipelineTagKeys = a.GetTagKeys(a.UsePipeline)
+    a.IndexName, a.tagKeys = GetTagKeys(a.IndexName)
+    a.pipelineName, a.pipelineTagKeys = GetTagKeys(a.UsePipeline)

     return nil
 }

@@ -424,7 +424,7 @@ func (a *Elasticsearch) createNewTemplate(templatePattern string) (*bytes.Buffer
     return &tmpl, nil
 }

-func (a *Elasticsearch) GetTagKeys(indexName string) (string, []string) {
+func GetTagKeys(indexName string) (string, []string) {
     tagKeys := make([]string, 0)
     startTag := strings.Index(indexName, "{{")
@@ -403,11 +403,6 @@ func TestTemplateInvalidIndexPatternIntegration(t *testing.T) {
 }

 func TestGetTagKeys(t *testing.T) {
-    e := &Elasticsearch{
-        DefaultTagValue: "none",
-        Log: testutil.Logger{},
-    }
-
     tests := []struct {
         IndexName string
         ExpectedIndexName string

@@ -452,7 +447,7 @@ func TestGetTagKeys(t *testing.T) {
         },
     }
     for _, test := range tests {
-        indexName, tagKeys := e.GetTagKeys(test.IndexName)
+        indexName, tagKeys := GetTagKeys(test.IndexName)
         if indexName != test.ExpectedIndexName {
             t.Errorf("Expected indexname %s, got %s\n", test.ExpectedIndexName, indexName)
         }

@@ -553,7 +548,7 @@ func TestGetPipelineName(t *testing.T) {
         DefaultPipeline: "myDefaultPipeline",
         Log: testutil.Logger{},
     }
-    e.pipelineName, e.pipelineTagKeys = e.GetTagKeys(e.UsePipeline)
+    e.pipelineName, e.pipelineTagKeys = GetTagKeys(e.UsePipeline)

     tests := []struct {
         EventTime time.Time

@@ -591,7 +586,7 @@ func TestGetPipelineName(t *testing.T) {
     e = &Elasticsearch{
         Log: testutil.Logger{},
     }
-    e.pipelineName, e.pipelineTagKeys = e.GetTagKeys(e.UsePipeline)
+    e.pipelineName, e.pipelineTagKeys = GetTagKeys(e.UsePipeline)

     for _, test := range tests {
         pipelineName := e.getPipelineName(e.pipelineName, e.pipelineTagKeys, test.Tags)

@@ -669,7 +664,7 @@ func TestPipelineConfigs(t *testing.T) {

     for _, test := range tests {
         e := test.Elastic
-        e.pipelineName, e.pipelineTagKeys = e.GetTagKeys(e.UsePipeline)
+        e.pipelineName, e.pipelineTagKeys = GetTagKeys(e.UsePipeline)
         pipelineName := e.getPipelineName(e.pipelineName, e.pipelineTagKeys, test.Tags)
         require.Equal(t, test.Expected, pipelineName)
     }
@@ -86,7 +86,7 @@ func (e *EventHubs) Init() error {
     return nil
 }

-func (e *EventHubs) Connect() error {
+func (*EventHubs) Connect() error {
     return nil
 }
@@ -51,12 +51,12 @@ func (e *Exec) SetSerializer(serializer telegraf.Serializer) {
 }

 // Connect satisfies the Output interface.
-func (e *Exec) Connect() error {
+func (*Exec) Connect() error {
     return nil
 }

 // Close satisfies the Output interface.
-func (e *Exec) Close() error {
+func (*Exec) Close() error {
     return nil
 }

@@ -123,7 +123,7 @@ func (c *CommandRunner) Run(timeout time.Duration, command, environments []strin
     s = removeWindowsCarriageReturns(s)
     if s.Len() > 0 {
         if c.log.Level() < telegraf.Debug {
-            c.log.Errorf("Command error: %q", c.truncate(s))
+            c.log.Errorf("Command error: %q", truncate(s))
         } else {
             c.log.Debugf("Command error: %q", s)
         }

@@ -141,7 +141,7 @@ func (c *CommandRunner) Run(timeout time.Duration, command, environments []strin
     return nil
 }

-func (c *CommandRunner) truncate(buf bytes.Buffer) string {
+func truncate(buf bytes.Buffer) string {
     // Limit the number of bytes.
     didTruncate := false
     if buf.Len() > maxStderrBytes {
@@ -166,10 +166,10 @@ func TestTruncate(t *testing.T) {
             len: len("hola") + len("..."),
         },
     }
-    c := CommandRunner{}

     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            s := c.truncate(*tt.buf)
+            s := truncate(*tt.buf)
             require.Len(t, s, tt.len)
         })
     }
@@ -203,7 +203,7 @@ func (g *gelfUDP) getChunksize() int {
     return g.gelfConfig.MaxChunkSizeWan
 }

-func (g *gelfUDP) intToBytes(i int) ([]byte, error) {
+func (*gelfUDP) intToBytes(i int) ([]byte, error) {
     buf := new(bytes.Buffer)

     err := binary.Write(buf, binary.LittleEndian, int8(i))

@@ -214,7 +214,7 @@ func (g *gelfUDP) intToBytes(i int) ([]byte, error) {
     return buf.Bytes(), err
 }

-func (g *gelfUDP) compress(b []byte) (bytes.Buffer, error) {
+func (*gelfUDP) compress(b []byte) (bytes.Buffer, error) {
     var buf bytes.Buffer
     comp := zlib.NewWriter(&buf)
@@ -230,7 +230,7 @@ func (c *httpClient) CreateDatabase(ctx context.Context, database string) error
     }
     defer resp.Body.Close()

-    body, err := c.validateResponse(resp.Body)
+    body, err := validateResponse(resp.Body)

     // Check for poorly formatted response (can't be decoded)
     if err != nil {

@@ -363,7 +363,7 @@ func (c *httpClient) writeBatch(ctx context.Context, db, rp string, metrics []te
         return nil
     }

-    body, err := c.validateResponse(resp.Body)
+    body, err := validateResponse(resp.Body)

     // Check for poorly formatted response that can't be decoded
     if err != nil {

@@ -526,7 +526,7 @@ func (c *httpClient) addHeaders(req *http.Request) error {
     return nil
 }

-func (c *httpClient) validateResponse(response io.ReadCloser) (io.ReadCloser, error) {
+func validateResponse(response io.ReadCloser) (io.ReadCloser, error) {
     bodyBytes, err := io.ReadAll(response)
     if err != nil {
         return nil, err
@@ -80,7 +80,7 @@ func (c *udpClient) URL() string {
     return c.url.String()
 }

-func (c *udpClient) Database() string {
+func (*udpClient) Database() string {
     return ""
 }

@@ -118,7 +118,7 @@ func (c *udpClient) Write(ctx context.Context, metrics []telegraf.Metric) error
     return nil
 }

-func (c *udpClient) CreateDatabase(_ context.Context, _ string) error {
+func (*udpClient) CreateDatabase(_ context.Context, _ string) error {
     return nil
 }

@@ -141,5 +141,5 @@ func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
     return 0, nil, nil
 }

-func (c *udpClient) Close() {
+func (*udpClient) Close() {
 }
@@ -182,7 +182,7 @@ func (p *MockProducer) SendMessages(msgs []*sarama.ProducerMessage) error {
     return nil
 }

-func (p *MockProducer) Close() error {
+func (*MockProducer) Close() error {
     return nil
 }
@@ -81,7 +81,7 @@ func (k *KinesisOutput) Connect() error {
     return err
 }

-func (k *KinesisOutput) Close() error {
+func (*KinesisOutput) Close() error {
     return nil
 }
@@ -243,7 +243,7 @@ func (g *Gauge) setValue(v interface{}) error {
 }

 // Close is used to close the connection to librato Output
-func (l *Librato) Close() error {
+func (*Librato) Close() error {
     return nil
 }
@@ -94,7 +94,7 @@ func (l *Logzio) Write(metrics []telegraf.Metric) error {
     var buff bytes.Buffer
     gz := gzip.NewWriter(&buff)
     for _, metric := range metrics {
-        m := l.parseMetric(metric)
+        m := parseMetric(metric)

         serialized, err := json.Marshal(m)
         if err != nil {

@@ -151,7 +151,7 @@ func (l *Logzio) authURL() (string, error) {
     return fmt.Sprintf("%s/?token=%s", l.URL, token.TemporaryString()), nil
 }

-func (l *Logzio) parseMetric(metric telegraf.Metric) *Metric {
+func parseMetric(metric telegraf.Metric) *Metric {
     return &Metric{
         Metric: map[string]interface{}{
             metric.Name(): metric.Fields(),

@@ -43,9 +43,8 @@ func TestConnectWithDefaultToken(t *testing.T) {
 }

 func TestParseMetric(t *testing.T) {
-    l := &Logzio{}
     for _, tm := range testutil.MockMetrics() {
-        lm := l.parseMetric(tm)
+        lm := parseMetric(tm)
         require.Equal(t, tm.Fields(), lm.Metric[tm.Name()])
         require.Equal(t, logzioType, lm.Type)
         require.Equal(t, tm.Tags(), lm.Dimensions)
@@ -111,7 +111,7 @@ func (a *NebiusCloudMonitoring) Init() error {
 // Connect initializes the plugin and validates connectivity
 func (a *NebiusCloudMonitoring) Connect() error {
     a.Log.Debugf("Getting folder ID in %s", a.metadataFolderURL)
-    body, err := a.getResponseFromMetadata(a.client, a.metadataFolderURL)
+    body, err := getResponseFromMetadata(a.client, a.metadataFolderURL)
     if err != nil {
         return err
     }

@@ -166,7 +166,7 @@ func (a *NebiusCloudMonitoring) Write(metrics []telegraf.Metric) error {
     return a.send(body)
 }

-func (a *NebiusCloudMonitoring) getResponseFromMetadata(c *http.Client, metadataURL string) ([]byte, error) {
+func getResponseFromMetadata(c *http.Client, metadataURL string) ([]byte, error) {
     req, err := http.NewRequest("GET", metadataURL, nil)
     if err != nil {
         return nil, fmt.Errorf("error creating request: %w", err)

@@ -191,7 +191,7 @@ func (a *NebiusCloudMonitoring) getResponseFromMetadata(c *http.Client, metadata

 func (a *NebiusCloudMonitoring) getIAMTokenFromMetadata() (string, int, error) {
     a.Log.Debugf("Getting new IAM token in %s", a.metadataTokenURL)
-    body, err := a.getResponseFromMetadata(a.client, a.metadataTokenURL)
+    body, err := getResponseFromMetadata(a.client, a.metadataTokenURL)
     if err != nil {
         return "", 0, err
     }
@@ -238,7 +238,7 @@ func FloatToString(inputNum float64) string {
     return strconv.FormatFloat(inputNum, 'f', 6, 64)
 }

-func (o *OpenTSDB) Close() error {
+func (*OpenTSDB) Close() error {
     return nil
 }
@@ -63,7 +63,7 @@ func (p *Parquet) Init() error {
     return nil
 }

-func (p *Parquet) Connect() error {
+func (*Parquet) Connect() error {
     return nil
 }
@@ -74,7 +74,7 @@ type Postgresql struct {
     tagsJSONColumn utils.Column
 }

-func (p *Postgresql) SampleConfig() string {
+func (*Postgresql) SampleConfig() string {
     return sampleConfig
 }

@@ -419,7 +419,7 @@ func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, db dbh, tableS
     }

     if p.TagsAsForeignKeys {
-        if err = p.writeTagTable(ctx, db, tableSource); err != nil {
+        if err = writeTagTable(ctx, db, tableSource); err != nil {
             if p.ForeignTagConstraint {
                 return fmt.Errorf("writing to tag table %q: %w", tableSource.Name()+p.TagTableSuffix, err)
             }

@@ -437,7 +437,7 @@ func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, db dbh, tableS
     return nil
 }

-func (p *Postgresql) writeTagTable(ctx context.Context, db dbh, tableSource *TableSource) error {
+func writeTagTable(ctx context.Context, db dbh, tableSource *TableSource) error {
     ttsrc := NewTagTableSource(tableSource)

     // Check whether we have any tags to insert
@@ -193,7 +193,7 @@ func (tm *TableManager) EnsureStructure(
             col.Name = col.Name[:tm.ColumnNameLenLimit]
             missingCols[i] = col
         }
-        if tm.validateColumnName(col.Name) {
+        if validateColumnName(col.Name) {
             addColumns = append(addColumns, col)
             continue
         }

@@ -416,7 +416,7 @@ func (tm *TableManager) validateTableName(name string) bool {
     return len([]byte(name)) <= maxIdentifierLength
 }

-func (tm *TableManager) validateColumnName(name string) bool {
+func validateColumnName(name string) bool {
     return len([]byte(name)) <= maxIdentifierLength
 }

@@ -325,7 +325,7 @@ func (tsrc *TableSource) Values() ([]interface{}, error) {
     return tsrc.cursorValues, tsrc.cursorError
 }

-func (tsrc *TableSource) Err() error {
+func (*TableSource) Err() error {
     return nil
 }

@@ -431,6 +431,6 @@ func (ttsrc *TagTableSource) UpdateCache() {
     }
 }

-func (ttsrc *TagTableSource) Err() error {
+func (*TagTableSource) Err() error {
     return nil
 }
@@ -197,7 +197,7 @@ func (p *PrometheusClient) listenTCP(host string) (net.Listener, error) {
     return net.Listen("tcp", host)
 }

-func (p *PrometheusClient) listenVsock(host string) (net.Listener, error) {
+func listenVsock(host string) (net.Listener, error) {
     _, portStr, err := net.SplitHostPort(host)
     if err != nil {
         return nil, err

@@ -219,7 +219,7 @@ func (p *PrometheusClient) listen() (net.Listener, error) {
     case "", "tcp", "http":
         return p.listenTCP(u.Host)
     case "vsock":
-        return p.listenVsock(u.Host)
+        return listenVsock(u.Host)
     default:
         return p.listenTCP(u.Host)
     }

@@ -85,7 +85,7 @@ func NewCollector(expire time.Duration, stringsAsLabel, exportTimestamp bool, ty
     return c
 }

-func (c *Collector) Describe(ch chan<- *prometheus.Desc) {
+func (*Collector) Describe(ch chan<- *prometheus.Desc) {
     prometheus.NewGauge(prometheus.GaugeOpts{Name: "Dummy", Help: "Dummy"}).Describe(ch)
 }

@@ -56,7 +56,7 @@ func NewCollector(expire time.Duration, stringsAsLabel, exportTimestamp bool, ty
     }
 }

-func (c *Collector) Describe(_ chan<- *prometheus.Desc) {
+func (*Collector) Describe(_ chan<- *prometheus.Desc) {
     // Sending no descriptor at all marks the Collector as "unchecked",
     // i.e. no checks will be performed at registration time, and the
     // Collector may yield any Metric it sees fit in its Collect method.
@@ -65,11 +65,11 @@ func (r *RedisTimeSeries) Close() error {
     return r.client.Close()
 }

-func (r *RedisTimeSeries) Description() string {
+func (*RedisTimeSeries) Description() string {
     return "Plugin for sending metrics to RedisTimeSeries"
 }

-func (r *RedisTimeSeries) SampleConfig() string {
+func (*RedisTimeSeries) SampleConfig() string {
     return sampleConfig
 }
 func (r *RedisTimeSeries) Write(metrics []telegraf.Metric) error {
@@ -36,10 +36,10 @@ type errorsink struct {
     events []*event.Event
 }

-func (e *errorsink) AddDatapoints(_ context.Context, _ []*datapoint.Datapoint) error {
+func (*errorsink) AddDatapoints(context.Context, []*datapoint.Datapoint) error {
     return errors.New("not sending datapoints")
 }
-func (e *errorsink) AddEvents(_ context.Context, _ []*event.Event) error {
+func (*errorsink) AddEvents(context.Context, []*event.Event) error {
     return errors.New("not sending events")
 }
@@ -251,7 +251,7 @@ func (s *Stackdriver) sendBatch(batch []telegraf.Metric) error {
         }

         if m.Type() == telegraf.Histogram {
-            value, err := s.buildHistogram(m)
+            value, err := buildHistogram(m)
             if err != nil {
                 s.Log.Errorf("Unable to build distribution from metric %s: %s", m, err)
                 continue

@@ -563,7 +563,7 @@ func (s *Stackdriver) getStackdriverTypedValue(value interface{}) (*monitoringpb
     }
 }

-func (s *Stackdriver) buildHistogram(m telegraf.Metric) (*monitoringpb.TypedValue, error) {
+func buildHistogram(m telegraf.Metric) (*monitoringpb.TypedValue, error) {
     sumInter, ok := m.GetField("sum")
     if !ok {
         return nil, errors.New("no sum field present")

@@ -1113,10 +1113,6 @@ func TestGenerateHistogramName(t *testing.T) {
 }

 func TestBuildHistogram(t *testing.T) {
-    s := &Stackdriver{
-        MetricNameFormat: "official",
-        Log: testutil.Logger{},
-    }
     m := testutil.MustMetric(
         "http_server_duration",
         map[string]string{},

@@ -1132,7 +1128,7 @@ func TestBuildHistogram(t *testing.T) {
         },
         time.Unix(0, 0),
     )
-    value, err := s.buildHistogram(m)
+    value, err := buildHistogram(m)
     require.NoError(t, err)

     dist := value.GetDistributionValue()
@@ -88,7 +88,7 @@ func (q *STOMP) Write(metrics []telegraf.Metric) error {
     }
     return nil
 }
-func (q *STOMP) SampleConfig() string {
+func (*STOMP) SampleConfig() string {
     return sampleConfig
 }
 func (q *STOMP) Close() error {
@@ -108,7 +108,7 @@ func (s *SumoLogic) Connect() error {
     return nil
 }

-func (s *SumoLogic) Close() error {
+func (*SumoLogic) Close() error {
     return nil
 }
@@ -199,7 +199,7 @@ func (t *Timestream) Connect() error {
     return nil
 }

-func (t *Timestream) Close() error {
+func (*Timestream) Close() error {
     return nil
 }

@@ -43,7 +43,7 @@ type mockTimestreamClient struct {
     WriteRecordsRequestCount int
 }

-func (m *mockTimestreamClient) CreateTable(
+func (*mockTimestreamClient) CreateTable(
     context.Context,
     *timestreamwrite.CreateTableInput,
     ...func(*timestreamwrite.Options),

@@ -60,7 +60,7 @@ func (m *mockTimestreamClient) WriteRecords(
     return nil, nil
 }

-func (m *mockTimestreamClient) DescribeDatabase(
+func (*mockTimestreamClient) DescribeDatabase(
     context.Context,
     *timestreamwrite.DescribeDatabaseInput,
     ...func(*timestreamwrite.Options),

@@ -530,7 +530,7 @@ type mockTimestreamErrorClient struct {
     ErrorToReturnOnWriteRecords error
 }

-func (m *mockTimestreamErrorClient) CreateTable(
+func (*mockTimestreamErrorClient) CreateTable(
     context.Context,
     *timestreamwrite.CreateTableInput,
     ...func(*timestreamwrite.Options),

@@ -546,7 +546,7 @@ func (m *mockTimestreamErrorClient) WriteRecords(
     return nil, m.ErrorToReturnOnWriteRecords
 }

-func (m *mockTimestreamErrorClient) DescribeDatabase(
+func (*mockTimestreamErrorClient) DescribeDatabase(
     context.Context,
     *timestreamwrite.DescribeDatabaseInput,
     ...func(*timestreamwrite.Options),
@@ -144,7 +144,7 @@ func (w *Warp10) Write(metrics []telegraf.Metric) error {
     if w.PrintErrorBody {
         //nolint:errcheck // err can be ignored since it is just for logging
         body, _ := io.ReadAll(resp.Body)
-        return errors.New(w.WarpURL + ": " + w.HandleError(string(body), w.MaxStringErrorSize))
+        return errors.New(w.WarpURL + ": " + HandleError(string(body), w.MaxStringErrorSize))
     }

     if len(resp.Status) < w.MaxStringErrorSize {

@@ -236,7 +236,7 @@ func floatToString(inputNum float64) string {
 }

 // Close close
-func (w *Warp10) Close() error {
+func (*Warp10) Close() error {
     return nil
 }

@@ -255,7 +255,7 @@ func init() {
 }

 // HandleError read http error body and return a corresponding error
-func (w *Warp10) HandleError(body string, maxStringSize int) string {
+func HandleError(body string, maxStringSize int) string {
     if body == "" {
         return "Empty return"
     }

@@ -75,11 +75,6 @@ func TestWriteWarp10EncodedTags(t *testing.T) {
 }

 func TestHandleWarp10Error(t *testing.T) {
-    w := Warp10{
-        Prefix: "unit.test",
-        WarpURL: "http://localhost:8090",
-        Token: config.NewSecret([]byte("WRITE")),
-    }
     tests := [...]*ErrorTest{
         {
             Message: `

@@ -148,7 +143,7 @@ func TestHandleWarp10Error(t *testing.T) {
     }

     for _, handledError := range tests {
-        payload := w.HandleError(handledError.Message, 511)
+        payload := HandleError(handledError.Message, 511)
         require.Exactly(t, handledError.Expected, payload)
     }
 }
@@ -23,11 +23,11 @@ func newTestSerializer() *testSerializer {
     return &testSerializer{}
 }

-func (t testSerializer) Serialize(_ telegraf.Metric) ([]byte, error) {
+func (testSerializer) Serialize(_ telegraf.Metric) ([]byte, error) {
     return []byte("1"), nil
 }

-func (t testSerializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
+func (testSerializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
     return []byte(strconv.Itoa(len(metrics))), nil
 }
@@ -60,7 +60,7 @@ func (*Zabbix) SampleConfig() string {
 // Connect does nothing, Write() would initiate connection in each call.
 // Checking if Zabbix server is alive in this step does not allow Telegraf
 // to start if there is a temporal connection problem with the server.
-func (z *Zabbix) Connect() error {
+func (*Zabbix) Connect() error {
     return nil
 }

@@ -87,7 +87,7 @@ func (z *Zabbix) Init() error {
     return nil
 }

-func (z *Zabbix) Close() error {
+func (*Zabbix) Close() error {
     return nil
 }