chore: Review of //nolint comments (#12088)

Paweł Żak 2022-10-26 12:06:08 +02:00 committed by GitHub
parent 07d1a63460
commit 9d9eb403a6
48 changed files with 278 additions and 306 deletions

View File

@ -34,6 +34,8 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v3
with:
go-version: 1.19
- uses: actions/checkout@v3
- name: golangci-lint
uses: golangci/golangci-lint-action@v3

View File

@ -4,6 +4,7 @@ linters:
# - telegraflinter
- bodyclose
- dogsled
- errcheck
- goprintffuncname
- gosimple
- govet
@ -44,7 +45,7 @@ linters-settings:
- name: error-return
- name: error-strings
- name: errorf
# - name: flag-parameter #disable for now
# - name: flag-parameter #disable for now
- name: function-result-limit
arguments: [ 3 ]
- name: identical-branches
@ -69,7 +70,7 @@ linters-settings:
- name: unconditional-recursion
- name: unexported-naming
- name: unhandled-error
arguments: ["outputBuffer.Write", "fmt.Printf", "fmt.Println", "fmt.Print", "fmt.Fprintf", "fmt.Fprint", "fmt.Fprintln"]
arguments: [ "outputBuffer.Write", "fmt.Printf", "fmt.Println", "fmt.Print", "fmt.Fprintf", "fmt.Fprint", "fmt.Fprintln" ]
- name: unnecessary-stmt
- name: unreachable-code
# - name: unused-parameter
@ -129,5 +130,8 @@ issues:
linters:
- revive
- path: cmd/telegraf/(main|printer).go
text: "Error return value of `outputBuffer.Write` is not checked"
output:
format: tab
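
With errcheck now enabled above, unchecked error return values are reported unless they are handled or suppressed with a scoped directive. A minimal sketch, not part of this commit, of the two accepted forms used throughout the rest of this diff (the writer name is illustrative):

package example

import "io"

// write shows the pattern: either handle the error from Write, or suppress the
// errcheck finding explicitly with a justified //nolint directive.
func write(w io.Writer, b []byte) error {
	// flagged by errcheck (the error return of Write is silently discarded):
	//   w.Write(b)

	// handled explicitly:
	if _, err := w.Write(b); err != nil {
		return err
	}

	// or suppressed with a scoped, justified directive:
	w.Write(b) //nolint:errcheck // best-effort duplicate write; a failure is non-fatal here
	return nil
}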

View File

@ -423,7 +423,9 @@ func (a *Agent) testRunInputs(
}
wg.Wait()
internal.SleepContext(ctx, wait)
if err := internal.SleepContext(ctx, wait); err != nil {
log.Printf("E! [agent] SleepContext finished with: %v", err)
}
log.Printf("D! [agent] Stopping service inputs")
stopServiceInputs(unit.inputs)
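
For context, a minimal sketch under the assumption that internal.SleepContext waits for the given duration unless the context is cancelled first; that assumption is why the returned error is worth logging here instead of being dropped:

package example

import (
	"context"
	"time"
)

// sleepContext is an assumed, simplified equivalent of internal.SleepContext: it returns
// nil after the full wait, or the context's error if the context is cancelled earlier.
func sleepContext(ctx context.Context, d time.Duration) error {
	t := time.NewTimer(d)
	defer t.Stop()
	select {
	case <-t.C:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}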

View File

@ -3,7 +3,7 @@ package main
import (
"fmt"
"io"
"log" //nolint:revive
"log"
"os"
"sort"
"strings"
@ -118,6 +118,7 @@ func runApp(args []string, outputBuffer io.Writer, pprof Server, c TelegrafConfi
extraFlags := append(pluginFilterFlags, cliFlags()...)
// This function is used when Telegraf is run with only flags
action := func(cCtx *cli.Context) error {
err := logger.SetupLogging(logger.LogConfig{})
if err != nil {

View File

@ -2,7 +2,7 @@ package main
import (
"fmt"
"log" //nolint: revive
"log"
"net/http"
"strings"
)

View File

@ -4,7 +4,7 @@ import (
"context"
"errors"
"fmt"
"log" //nolint:revive
"log"
"os"
"os/signal"
"strings"

View File

@ -953,8 +953,7 @@ type MockupProcessorPluginParser struct {
func (m *MockupProcessorPluginParser) Start(_ telegraf.Accumulator) error {
return nil
}
func (m *MockupProcessorPluginParser) Stop() error {
return nil
func (m *MockupProcessorPluginParser) Stop() {
}
func (m *MockupProcessorPluginParser) SampleConfig() string {
return "Mockup test processor plugin with parser"
@ -978,8 +977,7 @@ type MockupProcessorPlugin struct{}
func (m *MockupProcessorPlugin) Start(_ telegraf.Accumulator) error {
return nil
}
func (m *MockupProcessorPlugin) Stop() error {
return nil
func (m *MockupProcessorPlugin) Stop() {
}
func (m *MockupProcessorPlugin) SampleConfig() string {
return "Mockup test processor plugin with parser"
@ -999,8 +997,7 @@ type MockupProcessorPluginParserOnly struct {
func (m *MockupProcessorPluginParserOnly) Start(_ telegraf.Accumulator) error {
return nil
}
func (m *MockupProcessorPluginParserOnly) Stop() error {
return nil
func (m *MockupProcessorPluginParserOnly) Stop() {
}
func (m *MockupProcessorPluginParserOnly) SampleConfig() string {
return "Mockup test processor plugin with parser"
@ -1023,8 +1020,7 @@ type MockupProcessorPluginParserFunc struct {
func (m *MockupProcessorPluginParserFunc) Start(_ telegraf.Accumulator) error {
return nil
}
func (m *MockupProcessorPluginParserFunc) Stop() error {
return nil
func (m *MockupProcessorPluginParserFunc) Stop() {
}
func (m *MockupProcessorPluginParserFunc) SampleConfig() string {
return "Mockup test processor plugin with parser"

View File

@ -2,7 +2,7 @@ package config
import (
"fmt"
"log" //nolint:revive // log is ok here as the logging facility is not set-up yet
"log"
"reflect"
"sort"
"strings"
@ -258,7 +258,7 @@ func (c *Config) PrintDeprecationList(plugins []PluginDeprecationInfo) {
for _, plugin := range plugins {
switch plugin.LogLevel {
case telegraf.Warn, telegraf.Error:
_, _ = fmt.Printf(
fmt.Printf(
" %-40s %-5s since %-5s removal in %-5s %s\n",
plugin.Name, plugin.LogLevel, plugin.info.Since, plugin.info.RemovalIn, plugin.info.Notice,
)
@ -269,7 +269,7 @@ func (c *Config) PrintDeprecationList(plugins []PluginDeprecationInfo) {
}
sort.Slice(plugin.Options, func(i, j int) bool { return plugin.Options[i].Name < plugin.Options[j].Name })
for _, option := range plugin.Options {
_, _ = fmt.Printf(
fmt.Printf(
" %-40s %-5s since %-5s removal in %-5s %s\n",
plugin.Name+"/"+option.Name, option.LogLevel, option.info.Since, option.info.RemovalIn, option.info.Notice,
)

View File

@ -74,7 +74,7 @@ func TestMain(m *testing.M) {
// cleanly.
func externalProcess() {
wait := make(chan int)
_, _ = fmt.Fprintln(os.Stdout, "started")
fmt.Fprintln(os.Stdout, "started")
<-wait
os.Exit(2)
os.Exit(2) //nolint:revive // os.Exit called intentionally
}

View File

@ -7,9 +7,10 @@ import (
"strings"
"sync"
"github.com/influxdata/telegraf"
"github.com/sleepinggenius2/gosmi"
"github.com/sleepinggenius2/gosmi/types"
"github.com/influxdata/telegraf"
)
// must init, append path for each directory, load module for every file
@ -44,10 +45,6 @@ func (*GosmiMibLoader) loadModule(path string) error {
return err
}
func ClearCache() {
cache = make(map[string]bool)
}
// will give all found folders to gosmi and load in all modules found in the folders
func LoadMibsFromPath(paths []string, log telegraf.Logger, loader MibLoader) error {
folders, err := walkPaths(paths, log)
@ -138,7 +135,7 @@ func walkPaths(paths []string, log telegraf.Logger) ([]string, error) {
return nil
})
if err != nil {
return folders, fmt.Errorf("Couldn't walk path %q: %v", mibPath, err)
return folders, fmt.Errorf("couldn't walk path %q: %v", mibPath, err)
}
}
return folders, nil
@ -179,7 +176,7 @@ func TrapLookup(oid string) (e MibEntry, err error) {
// The following is for snmp
func GetIndex(oidNum string, mibPrefix string, node gosmi.SmiNode) (col []string, tagOids map[string]struct{}, err error) {
func GetIndex(mibPrefix string, node gosmi.SmiNode) (col []string, tagOids map[string]struct{}, err error) {
// first attempt to get the table's tags
tagOids = map[string]struct{}{}
@ -211,7 +208,7 @@ func SnmpTranslateCall(oid string) (mibName string, oidNum string, oidText strin
return oid, oid, oid, oid, gosmi.SmiNode{}, err
}
if s[1] == "" {
return "", oid, oid, oid, gosmi.SmiNode{}, fmt.Errorf("cannot parse %v\n", oid)
return "", oid, oid, oid, gosmi.SmiNode{}, fmt.Errorf("cannot parse %v", oid)
}
// node becomes sysUpTime.0
node := s[1]

View File

@ -88,20 +88,20 @@ func groupID(seed maphash.Seed, measurement string, taglist []*telegraf.Tag, tm
var mh maphash.Hash
mh.SetSeed(seed)
mh.WriteString(measurement) //nolint:revive // all Write***() methods for hash in maphash.go returns nil err
mh.WriteByte(0) //nolint:revive // all Write***() methods for hash in maphash.go returns nil err
mh.WriteString(measurement) //nolint:errcheck,revive // all Write***() methods for hash in maphash.go returns nil err
mh.WriteByte(0) //nolint:errcheck,revive // all Write***() methods for hash in maphash.go returns nil err
for _, tag := range taglist {
mh.WriteString(tag.Key) //nolint:revive // all Write***() methods for hash in maphash.go returns nil err
mh.WriteByte(0) //nolint:revive // all Write***() methods for hash in maphash.go returns nil err
mh.WriteString(tag.Value) //nolint:revive // all Write***() methods for hash in maphash.go returns nil err
mh.WriteByte(0) //nolint:revive // all Write***() methods for hash in maphash.go returns nil err
mh.WriteString(tag.Key) //nolint:errcheck,revive // all Write***() methods for hash in maphash.go returns nil err
mh.WriteByte(0) //nolint:errcheck,revive // all Write***() methods for hash in maphash.go returns nil err
mh.WriteString(tag.Value) //nolint:errcheck,revive // all Write***() methods for hash in maphash.go returns nil err
mh.WriteByte(0) //nolint:errcheck,revive // all Write***() methods for hash in maphash.go returns nil err
}
mh.WriteByte(0) //nolint:revive // all Write***() methods for hash in maphash.go returns nil err
mh.WriteByte(0) //nolint:errcheck,revive // all Write***() methods for hash in maphash.go returns nil err
var tsBuf [8]byte
binary.BigEndian.PutUint64(tsBuf[:], uint64(tm.UnixNano()))
mh.Write(tsBuf[:]) //nolint:revive // all Write***() methods for hash in maphash.go returns nil err
mh.Write(tsBuf[:]) //nolint:errcheck,revive // all Write***() methods for hash in maphash.go returns nil err
return mh.Sum64()
}
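
A standalone sketch, not part of this commit, of why the errcheck suppression is safe here: hash/maphash documents that Write, WriteByte and WriteString always write fully and never fail, so the directive only silences a finding that cannot occur.

package main

import (
	"fmt"
	"hash/maphash"
)

func main() {
	var mh maphash.Hash
	mh.SetSeed(maphash.MakeSeed())
	mh.WriteString("cpu") //nolint:errcheck,revive // maphash Write* methods never fail
	mh.WriteByte(0)       //nolint:errcheck,revive // maphash Write* methods never fail
	fmt.Println(mh.Sum64())
}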

View File

@ -40,7 +40,7 @@ func BenchmarkRunningOutputAddWrite(b *testing.B) {
for n := 0; n < b.N; n++ {
ro.AddMetric(testutil.TestMetric(101, "metric1"))
ro.Write() //nolint: revive // skip checking err for benchmark tests
ro.Write() //nolint: errcheck,revive // skip checking err for benchmark tests
}
}
@ -56,7 +56,7 @@ func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) {
for n := 0; n < b.N; n++ {
ro.AddMetric(testutil.TestMetric(101, "metric1"))
if n%100 == 0 {
ro.Write() //nolint: revive // skip checking err for benchmark tests
ro.Write() //nolint: errcheck,revive // skip checking err for benchmark tests
}
}
}

View File

@ -5,11 +5,12 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/models"
"github.com/influxdata/telegraf/plugins/processors"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
// MockProcessor is a Processor with an overridable Apply implementation.
@ -56,7 +57,8 @@ func TestRunningProcessor_Init(t *testing.T) {
rp := &models.RunningProcessor{
Processor: processors.NewStreamingProcessorFromProcessor(&mock),
}
rp.Init()
err := rp.Init()
require.NoError(t, err)
require.True(t, mock.HasBeenInit)
}
@ -188,13 +190,15 @@ func TestRunningProcessor_Apply(t *testing.T) {
Processor: tt.args.Processor,
Config: tt.args.Config,
}
rp.Config.Filter.Compile()
err := rp.Config.Filter.Compile()
require.NoError(t, err)
acc := testutil.Accumulator{}
err := rp.Start(&acc)
err = rp.Start(&acc)
require.NoError(t, err)
for _, m := range tt.input {
rp.Add(m, &acc)
err = rp.Add(m, &acc)
require.NoError(t, err)
}
rp.Stop()

View File

@ -63,7 +63,9 @@ func (s *Shim) RunProcessor() error {
continue
}
s.Processor.Add(m, acc)
if err = s.Processor.Add(m, acc); err != nil {
fmt.Fprintf(s.stderr, "Failure during processing metric by processor: %v\b", err)
}
}
close(s.metricCh)

View File

@ -239,7 +239,9 @@ func asStarlarkValue(value interface{}) (starlark.Value, error) {
if err != nil {
return starlark.None, err
}
dict.SetKey(sKey, sValue)
if err = dict.SetKey(sKey, sValue); err != nil {
return starlark.None, err
}
}
return dict, nil
case reflect.Float32, reflect.Float64:
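
For context, a hedged sketch, not from this commit, of the checked SetKey pattern applied above: starlark.Dict.SetKey can legitimately fail (for example on an unhashable key or a frozen dict), so its error is now propagated instead of being ignored. The key and value below are illustrative.

package example

import "go.starlark.net/starlark"

// buildDict mirrors the pattern above with made-up contents.
func buildDict() (starlark.Value, error) {
	dict := starlark.NewDict(1)
	if err := dict.SetKey(starlark.String("host"), starlark.String("localhost")); err != nil {
		return starlark.None, err
	}
	return dict, nil
}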

View File

@ -13,9 +13,10 @@ import (
"time"
"github.com/docker/go-connections/nat"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
"github.com/testcontainers/testcontainers-go/wait"
"github.com/influxdata/telegraf/testutil"
)
func TestDovecotIntegration(t *testing.T) {
@ -53,9 +54,9 @@ func TestDovecotIntegration(t *testing.T) {
// Test type=global server=unix
addr := "/tmp/socket"
wait := make(chan int)
waitCh := make(chan int)
go func() {
defer close(wait)
defer close(waitCh)
la, err := net.ResolveUnixAddr("unix", addr)
require.NoError(t, err)
@ -65,7 +66,7 @@ func TestDovecotIntegration(t *testing.T) {
defer l.Close()
defer os.Remove(addr)
wait <- 0
waitCh <- 0
conn, err := l.Accept()
require.NoError(t, err)
defer conn.Close()
@ -80,7 +81,7 @@ func TestDovecotIntegration(t *testing.T) {
}()
// Wait for server to start
<-wait
<-waitCh
d := &Dovecot{Servers: []string{addr}, Type: "global"}
err := d.Gather(&acc)

View File

@ -126,7 +126,7 @@ func TestHelperProcess(_ *testing.T) {
os.Exit(0)
}
}
//nolint:errcheck,revive // Test will fail anyway
fmt.Fprint(os.Stdout, "invalid argument")
//nolint:revive // os.Exit called intentionally
os.Exit(1)

View File

@ -313,8 +313,7 @@ func (c *GNMI) subscribeGNMI(ctx context.Context, worker *Worker, tlscfg *tls.Co
}
func (c *GNMI) handleSubscribeResponse(worker *Worker, reply *gnmiLib.SubscribeResponse) {
switch response := reply.Response.(type) {
case *gnmiLib.SubscribeResponse_Update:
if response, ok := reply.Response.(*gnmiLib.SubscribeResponse_Update); ok {
c.handleSubscribeResponseUpdate(worker, response)
}
}
@ -359,7 +358,7 @@ func (c *GNMI) handleSubscribeResponseUpdate(worker *Worker, response *gnmiLib.S
}
aliasPath, fields := c.handleTelemetryField(update, tags, prefix)
if tagOnlyTags := worker.checkTags(fullPath, c.TagSubscriptions); tagOnlyTags != nil {
if tagOnlyTags := worker.checkTags(fullPath); tagOnlyTags != nil {
for k, v := range tagOnlyTags {
if alias, ok := c.internalAliases[k]; ok {
tags[alias] = fmt.Sprint(v)
@ -634,7 +633,7 @@ func (node *tagNode) retrieve(keys []*gnmiLib.PathElem, tagResults *tagResults)
}
}
func (w *Worker) checkTags(fullPath *gnmiLib.Path, subscriptions []TagSubscription) map[string]interface{} {
func (w *Worker) checkTags(fullPath *gnmiLib.Path) map[string]interface{} {
results := &tagResults{}
w.tagStore.retrieve(pathKeys(fullPath), results)
tags := make(map[string]interface{})

View File

@ -8,14 +8,14 @@ import (
"net/http/httptest"
"testing"
"github.com/influxdata/toml"
"github.com/influxdata/toml/ast"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
common "github.com/influxdata/telegraf/plugins/common/jolokia2"
"github.com/influxdata/telegraf/plugins/inputs/jolokia2_proxy"
"github.com/influxdata/telegraf/testutil"
"github.com/influxdata/toml"
"github.com/influxdata/toml/ast"
)
func TestJolokia2_ProxyTargets(t *testing.T) {
@ -145,7 +145,6 @@ func TestFillFields(t *testing.T) {
func setupServer(resp string) *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
// Ignore the returned error as the tests will fail anyway
fmt.Fprintln(w, resp)
}))
}

View File

@ -26,7 +26,6 @@ type statServer struct{}
func (s statServer) ServeHTTP(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", "text/plain")
w.Header().Set("Content-Length", fmt.Sprint(len(outputSample)))
// Ignore the returned error as the tests will fail anyway
fmt.Fprint(w, outputSample)
}

View File

@ -44,7 +44,6 @@ func TestMockExecCommand(_ *testing.T) {
cmdline := strings.Join(cmd, " ")
if cmdline == "systemctl show TestGather_systemdUnitPIDs" {
//nolint:errcheck,revive
fmt.Printf(`PIDFile=
GuessMainPID=yes
MainPID=11408
@ -55,7 +54,6 @@ ExecMainPID=11408
os.Exit(0)
}
//nolint:errcheck,revive
fmt.Printf("command not found\n")
//nolint:revive // error code is important for this "test"
os.Exit(1)

View File

@ -4,9 +4,10 @@ import (
"fmt"
"sync"
"github.com/sleepinggenius2/gosmi"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/snmp"
"github.com/sleepinggenius2/gosmi"
)
type gosmiTranslator struct {
@ -113,7 +114,7 @@ func (g *gosmiTranslator) SnmpTableCall(oid string) (mibName string, oidNum stri
mibPrefix := mibName + "::"
col, tagOids, err := snmp.GetIndex(oidNum, mibPrefix, node)
col, tagOids, err := snmp.GetIndex(mibPrefix, node)
for _, c := range col {
_, isTag := tagOids[mibPrefix+c]

View File

@ -7,8 +7,9 @@ import (
"strings"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/testutil"
)
func TestGatherStats(t *testing.T) {
@ -110,19 +111,15 @@ func createMockServer() *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if strings.Contains(r.URL.Path, "/solr/admin/cores") {
w.WriteHeader(http.StatusOK)
// Ignore the returned error as the test will fail anyway
fmt.Fprintln(w, statusResponse)
} else if strings.Contains(r.URL.Path, "solr/main/admin") {
w.WriteHeader(http.StatusOK)
// Ignore the returned error as the test will fail anyway
fmt.Fprintln(w, mBeansMainResponse)
} else if strings.Contains(r.URL.Path, "solr/core1/admin") {
w.WriteHeader(http.StatusOK)
// Ignore the returned error as the test will fail anyway
fmt.Fprintln(w, mBeansCore1Response)
} else {
w.WriteHeader(http.StatusNotFound)
// Ignore the returned error as the test will fail anyway
fmt.Fprintln(w, "nope")
}
}))
@ -133,19 +130,15 @@ func createMockNoCoreDataServer() *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if strings.Contains(r.URL.Path, "/solr/admin/cores") {
w.WriteHeader(http.StatusOK)
// Ignore the returned error as the test will fail anyway
fmt.Fprintln(w, statusResponse)
} else if strings.Contains(r.URL.Path, "solr/main/admin") {
w.WriteHeader(http.StatusOK)
// Ignore the returned error as the test will fail anyway
fmt.Fprintln(w, nodata)
} else if strings.Contains(r.URL.Path, "solr/core1/admin") {
w.WriteHeader(http.StatusOK)
// Ignore the returned error as the test will fail anyway
fmt.Fprintln(w, nodata)
} else {
w.WriteHeader(http.StatusNotFound)
// Ignore the returned error as the test will fail anyway
fmt.Fprintln(w, "nope")
}
}))
@ -155,19 +148,15 @@ func createMockSolr3Server() *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if strings.Contains(r.URL.Path, "/solr/admin/cores") {
w.WriteHeader(http.StatusOK)
// Ignore the returned error as the test will fail anyway
fmt.Fprintln(w, statusResponse)
} else if strings.Contains(r.URL.Path, "solr/main/admin") {
w.WriteHeader(http.StatusOK)
// Ignore the returned error as the test will fail anyway
fmt.Fprintln(w, mBeansSolr3MainResponse)
} else if strings.Contains(r.URL.Path, "solr/core1/admin") {
w.WriteHeader(http.StatusOK)
// Ignore the returned error as the test will fail anyway
fmt.Fprintln(w, mBeansSolr3MainResponse)
} else {
w.WriteHeader(http.StatusNotFound)
// Ignore the returned error as the test will fail anyway
fmt.Fprintln(w, "nope")
}
}))
@ -177,15 +166,12 @@ func createMockSolr7Server() *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if strings.Contains(r.URL.Path, "/solr/admin/cores") {
w.WriteHeader(http.StatusOK)
// Ignore the returned error as the test will fail anyway
fmt.Fprintln(w, statusResponse)
} else if strings.Contains(r.URL.Path, "solr/main/admin") {
w.WriteHeader(http.StatusOK)
// Ignore the returned error as the test will fail anyway
fmt.Fprintln(w, mBeansSolr7Response)
} else {
w.WriteHeader(http.StatusNotFound)
// Ignore the returned error as the test will fail anyway
fmt.Fprintln(w, "nope")
}
}))

View File

@ -24,7 +24,7 @@ import (
//go:embed sample.conf
var sampleConfig string
const magicIdleCount int = (-int(^uint(0) >> 1))
const magicIdleCount = -int(^uint(0) >> 1)
type Query struct {
Query string `toml:"query"`
@ -53,7 +53,7 @@ type Query struct {
fieldFilterString filter.Filter
}
func (q *Query) parse(ctx context.Context, acc telegraf.Accumulator, rows *dbsql.Rows, t time.Time) (int, error) {
func (q *Query) parse(acc telegraf.Accumulator, rows *dbsql.Rows, t time.Time) (int, error) {
columnNames, err := rows.Columns()
if err != nil {
return 0, err
@ -379,7 +379,7 @@ func (s *SQL) Start(_ telegraf.Accumulator) error {
for i, q := range s.Queries {
s.Log.Debugf("Preparing statement %q...", q.Query)
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.Timeout))
stmt, err := s.db.PrepareContext(ctx, q.Query) //nolint:sqlclosecheck // Closed in Stop()
stmt, err := s.db.PrepareContext(ctx, q.Query)
cancel()
if err != nil {
return fmt.Errorf("preparing query %q failed: %v", q.Query, err)
@ -456,7 +456,7 @@ func (s *SQL) executeQuery(ctx context.Context, acc telegraf.Accumulator, q Quer
if err != nil {
return err
}
rowCount, err := q.parse(ctx, acc, rows, tquery)
rowCount, err := q.parse(acc, rows, tquery)
s.Log.Debugf("Received %d rows and %d columns for query %q", rowCount, len(columnNames), q.Query)
return err

View File

@ -85,7 +85,6 @@ func formatUptime(uptime uint64) string {
if days > 1 {
s = "s"
}
// This will always succeed, so skip checking the error
fmt.Fprintf(w, "%d day%s, ", days, s)
}
@ -94,7 +93,6 @@ func formatUptime(uptime uint64) string {
hours %= 24
minutes %= 60
// This will always succeed, so skip checking the error
fmt.Fprintf(w, "%2d:%02d", hours, minutes)
// This will always succeed, so skip checking the error

View File

@ -159,7 +159,7 @@ func (q *AMQP) Write(metrics []telegraf.Metric) error {
if err != nil {
// If this is the first attempt to publish and the connection is
// closed, try to reconnect and retry once.
//nolint: revive // Simplifying if-else with early return will reduce clarity
if aerr, ok := err.(*amqp.Error); first && ok && aerr == amqp.ErrClosed {
q.client = nil
err := q.publish(key, body)

View File

@ -1,4 +1,3 @@
// nolint
package influxdb_test
import (
@ -16,12 +15,13 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/outputs/influxdb"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
func getHTTPURL() *url.URL {
@ -83,7 +83,8 @@ func TestHTTP_CreateDatabase(t *testing.T) {
queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
require.Equal(t, `CREATE DATABASE "xyzzy"`, r.FormValue("q"))
w.WriteHeader(http.StatusOK)
w.Write(successResponse)
_, err = w.Write(successResponse)
require.NoError(t, err)
},
},
{
@ -101,7 +102,8 @@ func TestHTTP_CreateDatabase(t *testing.T) {
require.Equal(t, "guy", username)
require.Equal(t, "smiley", password)
w.WriteHeader(http.StatusOK)
w.Write(successResponse)
_, err = w.Write(successResponse)
require.NoError(t, err)
},
},
{
@ -119,7 +121,8 @@ func TestHTTP_CreateDatabase(t *testing.T) {
require.Equal(t, r.Header.Get("A"), "B")
require.Equal(t, r.Header.Get("C"), "D")
w.WriteHeader(http.StatusOK)
w.Write(successResponse)
_, err = w.Write(successResponse)
require.NoError(t, err)
},
},
{
@ -136,7 +139,8 @@ func TestHTTP_CreateDatabase(t *testing.T) {
require.Equal(t, r.Header.Get("A"), "B")
require.Equal(t, r.Header.Get("C"), "D")
w.WriteHeader(http.StatusOK)
w.Write(successResponse)
_, err = w.Write(successResponse)
require.NoError(t, err)
},
},
{
@ -147,7 +151,8 @@ func TestHTTP_CreateDatabase(t *testing.T) {
queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
require.Equal(t, `CREATE DATABASE "telegraf"`, r.FormValue("q"))
w.WriteHeader(http.StatusOK)
w.Write(successResponse)
_, err = w.Write(successResponse)
require.NoError(t, err)
},
},
{
@ -159,7 +164,8 @@ func TestHTTP_CreateDatabase(t *testing.T) {
queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
require.Equal(t, `CREATE DATABASE "a \" b"`, r.FormValue("q"))
w.WriteHeader(http.StatusOK)
w.Write(successResponse)
_, err = w.Write(successResponse)
require.NoError(t, err)
},
},
{
@ -171,7 +177,8 @@ func TestHTTP_CreateDatabase(t *testing.T) {
queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
// Yes, 200 OK is the correct response...
w.WriteHeader(http.StatusOK)
w.Write([]byte(`{"results": [{"error": "invalid name", "statement_id": 0}]}`))
_, err = w.Write([]byte(`{"results": [{"error": "invalid name", "statement_id": 0}]}`))
require.NoError(t, err)
},
errFunc: func(t *testing.T, err error) {
expected := &influxdb.APIError{
@ -219,7 +226,8 @@ func TestHTTP_CreateDatabase(t *testing.T) {
},
queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte(`invalid response`))
_, err = w.Write([]byte(`invalid response`))
require.NoError(t, err)
},
errFunc: func(t *testing.T, err error) {
expected := &influxdb.APIError{
@ -393,7 +401,8 @@ func TestHTTP_Write(t *testing.T) {
},
queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte(`{"error": "write failed: hinted handoff queue not empty"}`))
_, err = w.Write([]byte(`{"error": "write failed: hinted handoff queue not empty"}`))
require.NoError(t, err)
},
errFunc: func(t *testing.T, err error) {
require.NoError(t, err)
@ -408,7 +417,8 @@ func TestHTTP_Write(t *testing.T) {
},
queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte(`{"error": "partial write: field type conflict:"}`))
_, err = w.Write([]byte(`{"error": "partial write: field type conflict:"}`))
require.NoError(t, err)
},
logFunc: func(t *testing.T, str string) {
require.Contains(t, str, "partial write")
@ -423,7 +433,8 @@ func TestHTTP_Write(t *testing.T) {
},
queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte(`{"error": "unable to parse 'cpu value': invalid field format"}`))
_, err = w.Write([]byte(`{"error": "unable to parse 'cpu value': invalid field format"}`))
require.NoError(t, err)
},
logFunc: func(t *testing.T, str string) {
require.Contains(t, str, "unable to parse")
@ -456,7 +467,8 @@ func TestHTTP_Write(t *testing.T) {
},
queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusServiceUnavailable)
w.Write([]byte(`{"error": "unknown error"}`))
_, err = w.Write([]byte(`{"error": "unknown error"}`))
require.NoError(t, err)
},
errFunc: func(t *testing.T, err error) {
expected := &influxdb.APIError{
@ -648,11 +660,13 @@ func TestHTTP_UnixSocket(t *testing.T) {
queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
require.Equal(t, `CREATE DATABASE "xyzzy"`, r.FormValue("q"))
w.WriteHeader(http.StatusOK)
w.Write(successResponse)
_, err = w.Write(successResponse)
require.NoError(t, err)
},
writeHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusNoContent)
w.Write(successResponse)
_, err = w.Write(successResponse)
require.NoError(t, err)
},
},
}
@ -692,7 +706,8 @@ func TestHTTP_WriteDatabaseTagWorksOnRetry(t *testing.T) {
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/write":
r.ParseForm()
err := r.ParseForm()
require.NoError(t, err)
require.Equal(t, r.Form["db"], []string{"foo"})
body, err := io.ReadAll(r.Body)
@ -1008,7 +1023,8 @@ func TestDBRPTagsCreateDatabaseNotCalledOnRetryAfterForbidden(t *testing.T) {
return
}
w.WriteHeader(http.StatusForbidden)
w.Write([]byte(`{"results": [{"error": "error authorizing query"}]}`))
_, err = w.Write([]byte(`{"results": [{"error": "error authorizing query"}]}`))
require.NoError(t, err)
default:
w.WriteHeader(http.StatusInternalServerError)
}
@ -1080,7 +1096,8 @@ func TestDBRPTagsCreateDatabaseCalledOnDatabaseNotFound(t *testing.T) {
return
}
w.WriteHeader(http.StatusForbidden)
w.Write([]byte(`{"results": [{"error": "error authorizing query"}]}`))
_, err = w.Write([]byte(`{"results": [{"error": "error authorizing query"}]}`))
require.NoError(t, err)
default:
w.WriteHeader(http.StatusInternalServerError)
}
@ -1089,7 +1106,8 @@ func TestDBRPTagsCreateDatabaseCalledOnDatabaseNotFound(t *testing.T) {
switch r.URL.Path {
case "/write":
w.WriteHeader(http.StatusNotFound)
w.Write([]byte(`{"error": "database not found: \"telegraf\""}`))
_, err = w.Write([]byte(`{"error": "database not found: \"telegraf\""}`))
require.NoError(t, err)
default:
w.WriteHeader(http.StatusInternalServerError)
}
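
A small self-contained sketch, with an assumed handler shape, of the pattern applied throughout this test file: the byte count from http.ResponseWriter.Write is still discarded, but the error now goes through require.NoError so a truncated test response fails loudly.

package example

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/stretchr/testify/require"
)

// newMockServer is illustrative; the real tests build their handlers per test case.
func newMockServer(t *testing.T) *httptest.Server {
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
		_, err := w.Write([]byte(`{"results": []}`))
		require.NoError(t, err)
	}))
}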

View File

@ -1,5 +1,3 @@
// nolint
//
//go:generate ../../../tools/readme_config_includer/generator
package influxdb
@ -154,13 +152,11 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
}
// retry control
// error so the write is retried
err := client.CreateDatabase(ctx, apiError.Database)
if err != nil {
i.Log.Errorf("When writing to [%s]: database %q not found and failed to recreate",
client.URL(), apiError.Database)
} else {
if err := client.CreateDatabase(ctx, apiError.Database); err == nil {
return errors.New("database created; retry write")
}
i.Log.Errorf("When writing to [%s]: database %q not found and failed to recreate",
client.URL(), apiError.Database)
default:
allErrorsAreDatabaseNotFoundErrors = false
}
@ -173,30 +169,30 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
return errors.New("could not write any address")
}
func (i *InfluxDB) udpClient(url *url.URL) (Client, error) {
config := &UDPConfig{
URL: url,
func (i *InfluxDB) udpClient(address *url.URL) (Client, error) {
udpConfig := &UDPConfig{
URL: address,
MaxPayloadSize: int(i.UDPPayload),
Serializer: i.newSerializer(),
Log: i.Log,
}
c, err := i.CreateUDPClientF(config)
c, err := i.CreateUDPClientF(udpConfig)
if err != nil {
return nil, fmt.Errorf("error creating UDP client [%s]: %v", url, err)
return nil, fmt.Errorf("error creating UDP client [%s]: %v", address, err)
}
return c, nil
}
func (i *InfluxDB) httpClient(ctx context.Context, url *url.URL, proxy *url.URL) (Client, error) {
func (i *InfluxDB) httpClient(ctx context.Context, address *url.URL, proxy *url.URL) (Client, error) {
tlsConfig, err := i.ClientConfig.TLSConfig()
if err != nil {
return nil, err
}
config := &HTTPConfig{
URL: url,
httpConfig := &HTTPConfig{
URL: address,
Timeout: time.Duration(i.Timeout),
TLSConfig: tlsConfig,
UserAgent: i.UserAgent,
@ -217,9 +213,9 @@ func (i *InfluxDB) httpClient(ctx context.Context, url *url.URL, proxy *url.URL)
Log: i.Log,
}
c, err := i.CreateHTTPClientF(config)
c, err := i.CreateHTTPClientF(httpConfig)
if err != nil {
return nil, fmt.Errorf("error creating HTTP client [%s]: %v", url, err)
return nil, fmt.Errorf("error creating HTTP client [%s]: %v", address, err)
}
if !i.SkipDatabaseCreation {

View File

@ -1,4 +1,3 @@
// nolint
package postgresql
// Copied from https://github.com/jackc/pgtype/blob/master/int8.go and tweaked for uint64
@ -34,11 +33,11 @@ import (
"encoding/json"
"errors"
"fmt"
. "github.com/jackc/pgtype"
"math"
"strconv"
"github.com/jackc/pgio"
"github.com/jackc/pgtype"
)
var errUndefined = errors.New("cannot encode status undefined")
@ -46,143 +45,130 @@ var errBadStatus = errors.New("invalid status")
type Uint8 struct {
Int uint64
Status Status
Status pgtype.Status
}
func (dst *Uint8) Set(src interface{}) error {
func (u *Uint8) Set(src interface{}) error {
if src == nil {
*dst = Uint8{Status: Null}
*u = Uint8{Status: pgtype.Null}
return nil
}
if value, ok := src.(interface{ Get() interface{} }); ok {
value2 := value.Get()
if value2 != value {
return dst.Set(value2)
return u.Set(value2)
}
}
switch value := src.(type) {
case int8:
*dst = Uint8{Int: uint64(value), Status: Present}
*u = Uint8{Int: uint64(value), Status: pgtype.Present}
case uint8:
*dst = Uint8{Int: uint64(value), Status: Present}
*u = Uint8{Int: uint64(value), Status: pgtype.Present}
case int16:
*dst = Uint8{Int: uint64(value), Status: Present}
*u = Uint8{Int: uint64(value), Status: pgtype.Present}
case uint16:
*dst = Uint8{Int: uint64(value), Status: Present}
*u = Uint8{Int: uint64(value), Status: pgtype.Present}
case int32:
*dst = Uint8{Int: uint64(value), Status: Present}
*u = Uint8{Int: uint64(value), Status: pgtype.Present}
case uint32:
*dst = Uint8{Int: uint64(value), Status: Present}
*u = Uint8{Int: uint64(value), Status: pgtype.Present}
case int64:
*dst = Uint8{Int: uint64(value), Status: Present}
*u = Uint8{Int: uint64(value), Status: pgtype.Present}
case uint64:
*dst = Uint8{Int: value, Status: Present}
*u = Uint8{Int: value, Status: pgtype.Present}
case int:
if value < 0 {
return fmt.Errorf("%d is less than maximum value for Uint8", value)
}
*dst = Uint8{Int: uint64(value), Status: Present}
*u = Uint8{Int: uint64(value), Status: pgtype.Present}
case uint:
if uint64(value) > math.MaxInt64 {
return fmt.Errorf("%d is greater than maximum value for Uint8", value)
}
*dst = Uint8{Int: uint64(value), Status: Present}
*u = Uint8{Int: uint64(value), Status: pgtype.Present}
case string:
num, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return err
}
*dst = Uint8{Int: num, Status: Present}
*u = Uint8{Int: num, Status: pgtype.Present}
case float32:
if value > math.MaxInt64 {
return fmt.Errorf("%f is greater than maximum value for Uint8", value)
}
*dst = Uint8{Int: uint64(value), Status: Present}
*u = Uint8{Int: uint64(value), Status: pgtype.Present}
case float64:
if value > math.MaxInt64 {
return fmt.Errorf("%f is greater than maximum value for Uint8", value)
}
*dst = Uint8{Int: uint64(value), Status: Present}
*u = Uint8{Int: uint64(value), Status: pgtype.Present}
case *int8:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
if value != nil {
return u.Set(*value)
}
*u = Uint8{Status: pgtype.Null}
case *uint8:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
if value != nil {
return u.Set(*value)
}
*u = Uint8{Status: pgtype.Null}
case *int16:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
if value != nil {
return u.Set(*value)
}
*u = Uint8{Status: pgtype.Null}
case *uint16:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
if value != nil {
return u.Set(*value)
}
*u = Uint8{Status: pgtype.Null}
case *int32:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
if value != nil {
return u.Set(*value)
}
*u = Uint8{Status: pgtype.Null}
case *uint32:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
if value != nil {
return u.Set(*value)
}
*u = Uint8{Status: pgtype.Null}
case *int64:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
if value != nil {
return u.Set(*value)
}
*u = Uint8{Status: pgtype.Null}
case *uint64:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
if value != nil {
return u.Set(*value)
}
*u = Uint8{Status: pgtype.Null}
case *int:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
if value != nil {
return u.Set(*value)
}
*u = Uint8{Status: pgtype.Null}
case *uint:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
if value != nil {
return u.Set(*value)
}
*u = Uint8{Status: pgtype.Null}
case *string:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
if value != nil {
return u.Set(*value)
}
*u = Uint8{Status: pgtype.Null}
case *float32:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
if value != nil {
return u.Set(*value)
}
*u = Uint8{Status: pgtype.Null}
case *float64:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
if value != nil {
return u.Set(*value)
}
*u = Uint8{Status: pgtype.Null}
default:
return fmt.Errorf("cannot convert %v to Uint8", value)
}
@ -190,58 +176,58 @@ func (dst *Uint8) Set(src interface{}) error {
return nil
}
func (dst Uint8) Get() interface{} {
switch dst.Status {
case Present:
return dst.Int
case Null:
func (u *Uint8) Get() interface{} {
switch u.Status {
case pgtype.Present:
return u.Int
case pgtype.Null:
return nil
default:
return dst.Status
return u.Status
}
}
func (src *Uint8) AssignTo(dst interface{}) error {
func (u *Uint8) AssignTo(dst interface{}) error {
switch v := dst.(type) {
case *int:
*v = int(src.Int)
*v = int(u.Int)
case *int8:
*v = int8(src.Int)
*v = int8(u.Int)
case *int16:
*v = int16(src.Int)
*v = int16(u.Int)
case *int32:
*v = int32(src.Int)
*v = int32(u.Int)
case *int64:
*v = int64(src.Int)
*v = int64(u.Int)
case *uint:
*v = uint(src.Int)
*v = uint(u.Int)
case *uint8:
*v = uint8(src.Int)
*v = uint8(u.Int)
case *uint16:
*v = uint16(src.Int)
*v = uint16(u.Int)
case *uint32:
*v = uint32(src.Int)
*v = uint32(u.Int)
case *uint64:
*v = src.Int
*v = u.Int
case *float32:
*v = float32(src.Int)
*v = float32(u.Int)
case *float64:
*v = float64(src.Int)
*v = float64(u.Int)
case *string:
*v = strconv.FormatUint(src.Int, 10)
*v = strconv.FormatUint(u.Int, 10)
case sql.Scanner:
return v.Scan(src.Int)
return v.Scan(u.Int)
case interface{ Set(interface{}) error }:
return v.Set(src.Int)
return v.Set(u.Int)
default:
return fmt.Errorf("cannot assign %v into %T", src.Int, dst)
return fmt.Errorf("cannot assign %v into %T", u.Int, dst)
}
return nil
}
func (dst *Uint8) DecodeText(ci *ConnInfo, src []byte) error {
func (u *Uint8) DecodeText(_ *pgtype.ConnInfo, src []byte) error {
if src == nil {
*dst = Uint8{Status: Null}
*u = Uint8{Status: pgtype.Null}
return nil
}
@ -250,13 +236,13 @@ func (dst *Uint8) DecodeText(ci *ConnInfo, src []byte) error {
return err
}
*dst = Uint8{Int: n, Status: Present}
*u = Uint8{Int: n, Status: pgtype.Present}
return nil
}
func (dst *Uint8) DecodeBinary(ci *ConnInfo, src []byte) error {
func (u *Uint8) DecodeBinary(_ *pgtype.ConnInfo, src []byte) error {
if src == nil {
*dst = Uint8{Status: Null}
*u = Uint8{Status: pgtype.Null}
return nil
}
@ -266,80 +252,80 @@ func (dst *Uint8) DecodeBinary(ci *ConnInfo, src []byte) error {
n := binary.BigEndian.Uint64(src)
*dst = Uint8{Int: n, Status: Present}
*u = Uint8{Int: n, Status: pgtype.Present}
return nil
}
func (src Uint8) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
switch src.Status {
case Null:
func (u *Uint8) EncodeText(_ *pgtype.ConnInfo, buf []byte) ([]byte, error) {
switch u.Status {
case pgtype.Null:
return nil, nil
case Undefined:
case pgtype.Undefined:
return nil, errUndefined
}
return append(buf, strconv.FormatUint(src.Int, 10)...), nil
return append(buf, strconv.FormatUint(u.Int, 10)...), nil
}
func (src Uint8) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
switch src.Status {
case Null:
func (u *Uint8) EncodeBinary(_ *pgtype.ConnInfo, buf []byte) ([]byte, error) {
switch u.Status {
case pgtype.Null:
return nil, nil
case Undefined:
case pgtype.Undefined:
return nil, errUndefined
}
return pgio.AppendUint64(buf, src.Int), nil
return pgio.AppendUint64(buf, u.Int), nil
}
// Scan implements the database/sql Scanner interface.
func (dst *Uint8) Scan(src interface{}) error {
func (u *Uint8) Scan(src interface{}) error {
if src == nil {
*dst = Uint8{Status: Null}
*u = Uint8{Status: pgtype.Null}
return nil
}
switch src := src.(type) {
case uint64:
*dst = Uint8{Int: src, Status: Present}
*u = Uint8{Int: src, Status: pgtype.Present}
return nil
case string:
return dst.DecodeText(nil, []byte(src))
return u.DecodeText(nil, []byte(src))
case []byte:
srcCopy := make([]byte, len(src))
copy(srcCopy, src)
return dst.DecodeText(nil, srcCopy)
return u.DecodeText(nil, srcCopy)
}
return fmt.Errorf("cannot scan %T", src)
}
// Value implements the database/sql/driver Valuer interface.
func (src Uint8) Value() (driver.Value, error) {
switch src.Status {
case Present:
return int64(src.Int), nil
case Null:
func (u *Uint8) Value() (driver.Value, error) {
switch u.Status {
case pgtype.Present:
return int64(u.Int), nil
case pgtype.Null:
return nil, nil
default:
return nil, errUndefined
}
}
func (src Uint8) MarshalJSON() ([]byte, error) {
switch src.Status {
case Present:
return []byte(strconv.FormatUint(src.Int, 10)), nil
case Null:
func (u *Uint8) MarshalJSON() ([]byte, error) {
switch u.Status {
case pgtype.Present:
return []byte(strconv.FormatUint(u.Int, 10)), nil
case pgtype.Null:
return []byte("null"), nil
case Undefined:
case pgtype.Undefined:
return nil, errUndefined
}
return nil, errBadStatus
}
func (dst *Uint8) UnmarshalJSON(b []byte) error {
func (u *Uint8) UnmarshalJSON(b []byte) error {
var n *uint64
err := json.Unmarshal(b, &n)
if err != nil {
@ -347,9 +333,9 @@ func (dst *Uint8) UnmarshalJSON(b []byte) error {
}
if n == nil {
*dst = Uint8{Status: Null}
*u = Uint8{Status: pgtype.Null}
} else {
*dst = Uint8{Int: *n, Status: Present}
*u = Uint8{Int: *n, Status: pgtype.Present}
}
return nil

View File

@ -161,7 +161,7 @@ func (p *Postgresql) Connect() error {
return nil
}
func (p *Postgresql) registerUint8(ctx context.Context, conn *pgx.Conn) error {
func (p *Postgresql) registerUint8(_ context.Context, conn *pgx.Conn) error {
if p.pguint8 == nil {
dt := pgtype.DataType{
// Use 'numeric' type for encoding/decoding across the wire
@ -341,7 +341,7 @@ func isTempError(err error) bool {
case "53": // Insufficient Resources
return true
case "57": // Operator Intervention
switch pgErr.Code { //nolint:revive
switch pgErr.Code {
case "57014": // query_cancelled
// This one is a bit of a mess. This code comes back when PGX cancels the query. Such as when PGX can't
// convert to the column's type. So even though the error was originally generated by PGX, we get the

View File

@ -8,8 +8,9 @@ import (
"testing"
"time"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/testutil"
)
func TestSqlite(t *testing.T) {
@ -68,7 +69,7 @@ func TestSqlite(t *testing.T) {
sql,
)
require.False(t, rows.Next())
require.NoError(t, rows.Close()) //nolint:sqlclosecheck
require.NoError(t, rows.Close())
// sqlite stores dates as strings. They may be in the local
// timezone. The test needs to parse them back into a time.Time to
@ -95,7 +96,7 @@ func TestSqlite(t *testing.T) {
require.Equal(t, int64(1234), d)
require.Equal(t, int64(2345), e)
require.False(t, rows.Next())
require.NoError(t, rows.Close()) //nolint:sqlclosecheck
require.NoError(t, rows.Close())
rows, err = db.Query("select timestamp, tag_three, string_one from metric_two")
require.NoError(t, err)
@ -110,7 +111,7 @@ func TestSqlite(t *testing.T) {
require.Equal(t, "tag3", g)
require.Equal(t, "string1", h)
require.False(t, rows.Next())
require.NoError(t, rows.Close()) //nolint:sqlclosecheck
require.NoError(t, rows.Close())
rows, err = db.Query(`select timestamp, "tag four", "string two" from "metric three"`)
require.NoError(t, err)
@ -125,5 +126,5 @@ func TestSqlite(t *testing.T) {
require.Equal(t, "tag4", j)
require.Equal(t, "string2", k)
require.False(t, rows.Next())
require.NoError(t, rows.Close()) //nolint:sqlclosecheck
require.NoError(t, rows.Close())
}

View File

@ -142,7 +142,7 @@ func mapHostname(metric telegraf.Metric, msg *rfc5424.SyslogMessage) {
func mapTimestamp(metric telegraf.Metric, msg *rfc5424.SyslogMessage) {
timestamp := metric.Time()
//nolint: revive // Need switch with only one case to handle `.(type)`
if value, ok := metric.GetField("timestamp"); ok {
if v, ok := value.(int64); ok {
timestamp = time.Unix(0, v).UTC()

View File

@ -137,12 +137,10 @@ func (r *AwsEc2Processor) Start(acc telegraf.Accumulator) error {
return nil
}
func (r *AwsEc2Processor) Stop() error {
if r.parallel == nil {
return errors.New("trying to stop unstarted AWS EC2 Processor")
func (r *AwsEc2Processor) Stop() {
if r.parallel != nil {
r.parallel.Stop()
}
r.parallel.Stop()
return nil
}
func (r *AwsEc2Processor) asyncAdd(metric telegraf.Metric) []telegraf.Metric {

View File

@ -100,9 +100,8 @@ func (e *Execd) Add(m telegraf.Metric, _ telegraf.Accumulator) error {
return nil
}
func (e *Execd) Stop() error {
func (e *Execd) Stop() {
e.process.Stop()
return nil
}
func (e *Execd) cmdReadOut(out io.Reader) {

View File

@ -53,7 +53,7 @@ func TestExternalProcessorWorks(t *testing.T) {
}
acc.Wait(1)
require.NoError(t, e.Stop())
e.Stop()
acc.Wait(9)
metrics := acc.GetTelegrafMetrics()
@ -116,7 +116,7 @@ func TestParseLinesWithNewLines(t *testing.T) {
require.NoError(t, e.Add(m, acc))
acc.Wait(1)
require.NoError(t, e.Stop())
e.Stop()
processedMetric := acc.GetTelegrafMetrics()[0]

View File

@ -172,9 +172,8 @@ func (d *IfName) Add(metric telegraf.Metric, _ telegraf.Accumulator) error {
return nil
}
func (d *IfName) Stop() error {
func (d *IfName) Stop() {
d.parallel.Stop()
return nil
}
// getMap gets the interface names map either from cache or from the SNMP

View File

@ -52,10 +52,9 @@ func (r *ReverseDNS) Start(acc telegraf.Accumulator) error {
return nil
}
func (r *ReverseDNS) Stop() error {
func (r *ReverseDNS) Stop() {
r.parallel.Stop()
r.reverseDNSCache.Stop()
return nil
}
func (r *ReverseDNS) Add(metric telegraf.Metric, _ telegraf.Accumulator) error {

View File

@ -40,8 +40,7 @@ func TestSimpleReverseLookupIntegration(t *testing.T) {
require.NoError(t, err)
err = dns.Add(m, acc)
require.NoError(t, err)
err = dns.Stop()
require.NoError(t, err)
dns.Stop()
// should be processed now.
require.Len(t, acc.GetTelegrafMetrics(), 1)

View File

@ -5,10 +5,11 @@ import (
_ "embed"
"fmt"
"go.starlark.net/starlark"
"github.com/influxdata/telegraf"
common "github.com/influxdata/telegraf/plugins/common/starlark"
"github.com/influxdata/telegraf/plugins/processors"
"go.starlark.net/starlark"
)
//go:embed sample.conf
@ -108,8 +109,7 @@ func (s *Starlark) Add(metric telegraf.Metric, acc telegraf.Accumulator) error {
return nil
}
func (s *Starlark) Stop() error {
return nil
func (s *Starlark) Stop() {
}
func containsMetric(metrics []telegraf.Metric, metric telegraf.Metric) bool {

View File

@ -232,9 +232,7 @@ def apply(metric):
}
}
err = plugin.Stop()
require.NoError(t, err)
plugin.Stop()
testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics())
})
}
@ -2576,9 +2574,7 @@ def apply(metric):
}
}
err = plugin.Stop()
require.NoError(t, err)
plugin.Stop()
testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics())
})
}
@ -2659,9 +2655,7 @@ def apply(metric):
require.NoError(t, err)
}
err = plugin.Stop()
require.NoError(t, err)
plugin.Stop()
testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics())
})
}
@ -2935,9 +2929,7 @@ func TestScript(t *testing.T) {
}
}
err = tt.plugin.Stop()
require.NoError(t, err)
tt.plugin.Stop()
testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics())
})
}
@ -3267,8 +3259,7 @@ def apply(metric):
}
}
err = plugin.Stop()
require.NoError(b, err)
plugin.Stop()
})
}
}
@ -3309,9 +3300,7 @@ func TestAllScriptTestData(t *testing.T) {
}
}
err = plugin.Stop()
require.NoError(t, err)
plugin.Stop()
testutil.RequireMetricsEqual(t, outputMetrics, acc.GetTelegrafMetrics(), testutil.SortMetrics())
})
return nil

View File

@ -36,8 +36,7 @@ func (sp *streamingProcessor) Add(m telegraf.Metric, acc telegraf.Accumulator) e
return nil
}
func (sp *streamingProcessor) Stop() error {
return nil
func (sp *streamingProcessor) Stop() {
}
// Make the streamingProcessor of type Initializer to be able

View File

@ -37,5 +37,5 @@ type StreamingProcessor interface {
// before returning from Stop().
// When stop returns, you should no longer be writing metrics to the
// accumulator.
Stop() error
Stop()
}
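
A minimal sketch, assuming the surrounding telegraf interfaces, of what this signature change means for implementations: a processor with nothing to report on shutdown now provides an empty Stop instead of returning a meaningless nil error.

package example

import "github.com/influxdata/telegraf"

// passthrough is an illustrative processor that forwards metrics unchanged.
type passthrough struct{}

func (*passthrough) SampleConfig() string { return "" }

func (*passthrough) Start(_ telegraf.Accumulator) error { return nil }

func (*passthrough) Add(m telegraf.Metric, acc telegraf.Accumulator) error {
	acc.AddMetric(m)
	return nil
}

// before this change: func (*passthrough) Stop() error { return nil }
func (*passthrough) Stop() {}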

View File

@ -5,7 +5,7 @@ package main
import (
"bytes"
"encoding/json"
"log" //nolint:revive
"log"
"os"
"os/exec"
"strings"

View File

@ -2,7 +2,7 @@ package testutil
import (
"fmt"
"log" //nolint
"log"
"github.com/influxdata/telegraf"
)

View File

@ -5,17 +5,16 @@ import (
"encoding/json"
"flag"
"fmt"
"log" //nolint:revive // We cannot use the Telegraf's logging here
"log"
"os"
"path/filepath"
"sort"
"strings"
"golang.org/x/mod/modfile"
"github.com/yuin/goldmark"
"github.com/yuin/goldmark/ast"
"github.com/yuin/goldmark/text"
"golang.org/x/mod/modfile"
)
//go:embed data/spdx_mapping.json
@ -45,7 +44,6 @@ func main() {
flag.Parse()
if help || flag.NArg() > 1 {
//nolint:revive // We cannot do anything about possible failures here
fmt.Fprintf(flag.CommandLine.Output(), "Usage of %s [options] [telegraf root dir]\n", os.Args[0])
fmt.Fprintf(flag.CommandLine.Output(), "Options:\n")
flag.PrintDefaults()

View File

@ -2,7 +2,7 @@ package main
import (
"bufio"
"log" //nolint:revive // We cannot use the Telegraf's logging here
"log"
"os"
"regexp"
"strings"

View File

@ -15,7 +15,7 @@ import (
"errors"
"fmt"
"io"
"log" //nolint:revive
"log"
"os"
"path/filepath"
"regexp"

View File

@ -3,7 +3,7 @@ package main
import (
"fmt"
"io"
"log" //nolint:revive
"log"
"net/http"
"os"
"regexp"