chore: Fix linter findings for errorlint (part5) (#12731)

Co-authored-by: Pawel Zak <Pawel Zak>
Paweł Żak 2023-03-01 22:18:35 +01:00 committed by GitHub
parent 3f3885a2b0
commit 5b2346dfa0
50 changed files with 186 additions and 181 deletions

View File

@ -50,7 +50,7 @@ func (f *Fail2ban) Init() error {
if f.path == "" { if f.path == "" {
path, err := exec.LookPath(cmd) path, err := exec.LookPath(cmd)
if err != nil { if err != nil {
return fmt.Errorf("looking up %q failed: %v", cmd, err) return fmt.Errorf("looking up %q failed: %w", cmd, err)
} }
f.path = path f.path = path
} }
@ -81,7 +81,7 @@ func (f *Fail2ban) Gather(acc telegraf.Accumulator) error {
cmd := execCommand(name, args...) cmd := execCommand(name, args...)
out, err := cmd.Output() out, err := cmd.Output()
if err != nil { if err != nil {
return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) return fmt.Errorf("failed to run command %q: %w - %s", strings.Join(cmd.Args, " "), err, string(out))
} }
lines := strings.Split(string(out), "\n") lines := strings.Split(string(out), "\n")
const targetString = "Jail list:" const targetString = "Jail list:"
@ -102,7 +102,7 @@ func (f *Fail2ban) Gather(acc telegraf.Accumulator) error {
cmd := execCommand(name, args...) cmd := execCommand(name, args...)
out, err := cmd.Output() out, err := cmd.Output()
if err != nil { if err != nil {
return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) return fmt.Errorf("failed to run command %q: %w - %s", strings.Join(cmd.Args, " "), err, string(out))
} }
lines := strings.Split(string(out), "\n") lines := strings.Split(string(out), "\n")
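The errorlint changes above switch fmt.Errorf from %v to %w so the original error stays in the unwrap chain instead of being flattened to text. A minimal, self-contained sketch of the difference (the config path and the loadConfig helper are made up for illustration):

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// Wrapping with %w keeps the underlying error in the chain, so callers can
// still match it with errors.Is; with %v only the message text would remain.
func loadConfig(path string) error {
	if _, err := os.Stat(path); err != nil {
		return fmt.Errorf("looking up %q failed: %w", path, err)
	}
	return nil
}

func main() {
	err := loadConfig("/nonexistent/fail2ban.conf")
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true, because of %w
}
```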

View File

@ -96,7 +96,7 @@ func (r *Fireboard) Gather(acc telegraf.Accumulator) error {
// Decode the response JSON into a new stats struct // Decode the response JSON into a new stats struct
var stats []fireboardStats var stats []fireboardStats
if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil { if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
return fmt.Errorf("unable to decode fireboard response: %s", err) return fmt.Errorf("unable to decode fireboard response: %w", err)
} }
// Range over all devices, gathering stats. Returns early in case of any error. // Range over all devices, gathering stats. Returns early in case of any error.
for _, s := range stats { for _, s := range stats {

View File

@ -4,6 +4,7 @@ package github
import ( import (
"context" "context"
_ "embed" _ "embed"
"errors"
"fmt" "fmt"
"net/http" "net/http"
"strings" "strings"
@ -148,10 +149,11 @@ func (g *GitHub) Gather(acc telegraf.Accumulator) error {
} }
func (g *GitHub) handleRateLimit(response *githubLib.Response, err error) { func (g *GitHub) handleRateLimit(response *githubLib.Response, err error) {
var rlErr *githubLib.RateLimitError
if err == nil { if err == nil {
g.RateLimit.Set(int64(response.Rate.Limit)) g.RateLimit.Set(int64(response.Rate.Limit))
g.RateRemaining.Set(int64(response.Rate.Remaining)) g.RateRemaining.Set(int64(response.Rate.Remaining))
} else if _, ok := err.(*githubLib.RateLimitError); ok { } else if errors.As(err, &rlErr) {
g.RateLimitErrors.Incr(1) g.RateLimitErrors.Incr(1)
} }
} }
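handleRateLimit now uses errors.As instead of a plain type assertion, so a rate-limit error is still recognized even if some layer has wrapped it. A small sketch with a stand-in error type (RateLimitError here is hypothetical, not the githubLib type):

```go
package main

import (
	"errors"
	"fmt"
)

// Hypothetical error type standing in for githubLib.RateLimitError.
type RateLimitError struct{ Remaining int }

func (e *RateLimitError) Error() string { return "rate limit exceeded" }

func main() {
	err := fmt.Errorf("gathering repositories failed: %w", &RateLimitError{Remaining: 0})

	// A type assertion only inspects the outermost error and misses the match.
	_, ok := err.(*RateLimitError)
	fmt.Println(ok) // false

	// errors.As walks the wrap chain and finds the typed error.
	var rlErr *RateLimitError
	fmt.Println(errors.As(err, &rlErr)) // true
}
```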

View File

@ -338,11 +338,11 @@ func (s *Subscription) buildAlias(aliases map[string]string) error {
longPath, _, err := handlePath(gnmiLongPath, nil, nil, "") longPath, _, err := handlePath(gnmiLongPath, nil, nil, "")
if err != nil { if err != nil {
return fmt.Errorf("handling long-path failed: %v", err) return fmt.Errorf("handling long-path failed: %w", err)
} }
shortPath, _, err := handlePath(gnmiShortPath, nil, nil, "") shortPath, _, err := handlePath(gnmiShortPath, nil, nil, "")
if err != nil { if err != nil {
return fmt.Errorf("handling short-path failed: %v", err) return fmt.Errorf("handling short-path failed: %w", err)
} }
// If the user didn't provide a measurement name, use last path element // If the user didn't provide a measurement name, use last path element

View File

@ -3,6 +3,7 @@ package gnmi
import ( import (
"context" "context"
"crypto/tls" "crypto/tls"
"errors"
"fmt" "fmt"
"io" "io"
"net" "net"
@ -62,7 +63,7 @@ func (h *handler) subscribeGNMI(ctx context.Context, acc telegraf.Accumulator, t
client, err := grpc.DialContext(ctx, h.address, opts...) client, err := grpc.DialContext(ctx, h.address, opts...)
if err != nil { if err != nil {
return fmt.Errorf("failed to dial: %v", err) return fmt.Errorf("failed to dial: %w", err)
} }
defer client.Close() defer client.Close()
@ -73,7 +74,7 @@ func (h *handler) subscribeGNMI(ctx context.Context, acc telegraf.Accumulator, t
// If io.EOF is returned, the stream may have ended and stream status // If io.EOF is returned, the stream may have ended and stream status
// can be determined by calling Recv. // can be determined by calling Recv.
if err := subscribeClient.Send(request); err != nil && err != io.EOF { if err := subscribeClient.Send(request); err != nil && !errors.Is(err, io.EOF) {
return fmt.Errorf("failed to send subscription request: %w", err) return fmt.Errorf("failed to send subscription request: %w", err)
} }
@ -82,7 +83,7 @@ func (h *handler) subscribeGNMI(ctx context.Context, acc telegraf.Accumulator, t
for ctx.Err() == nil { for ctx.Err() == nil {
var reply *gnmiLib.SubscribeResponse var reply *gnmiLib.SubscribeResponse
if reply, err = subscribeClient.Recv(); err != nil { if reply, err = subscribeClient.Recv(); err != nil {
if err != io.EOF && ctx.Err() == nil { if !errors.Is(err, io.EOF) && ctx.Err() == nil {
return fmt.Errorf("aborted gNMI subscription: %w", err) return fmt.Errorf("aborted gNMI subscription: %w", err)
} }
break break
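Both gNMI checks move from `== io.EOF` to errors.Is, which also matches an io.EOF that an intermediate layer has wrapped. A rough sketch of the read-loop shape, assuming a plain io.Reader rather than a gRPC subscribe stream:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// Readers may return io.EOF wrapped inside another error; == would miss that,
// while errors.Is still treats it as the normal end of the stream.
func drain(r io.Reader) error {
	buf := make([]byte, 8)
	for {
		if _, err := r.Read(buf); err != nil {
			if errors.Is(err, io.EOF) {
				return nil // normal end of stream
			}
			return fmt.Errorf("read failed: %w", err)
		}
	}
}

func main() {
	fmt.Println(drain(strings.NewReader("some gNMI payload"))) // <nil>
}
```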

View File

@ -144,11 +144,11 @@ func gnmiToFields(name string, updateVal *gnmiLib.TypedValue) (map[string]interf
fields[name] = value fields[name] = value
} else if jsondata != nil { } else if jsondata != nil {
if err := json.Unmarshal(jsondata, &value); err != nil { if err := json.Unmarshal(jsondata, &value); err != nil {
return nil, fmt.Errorf("failed to parse JSON value: %v", err) return nil, fmt.Errorf("failed to parse JSON value: %w", err)
} }
flattener := jsonparser.JSONFlattener{Fields: fields} flattener := jsonparser.JSONFlattener{Fields: fields}
if err := flattener.FullFlattenJSON(name, value, true, true); err != nil { if err := flattener.FullFlattenJSON(name, value, true, true); err != nil {
return nil, fmt.Errorf("failed to flatten JSON: %v", err) return nil, fmt.Errorf("failed to flatten JSON: %w", err)
} }
} }
return fields, nil return fields, nil

View File

@ -6,6 +6,7 @@ import (
"context" "context"
_ "embed" _ "embed"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"io" "io"
"os" "os"
@ -82,7 +83,7 @@ func (gcs *GCS) Gather(acc telegraf.Accumulator) error {
for { for {
attrs, err := it.Next() attrs, err := it.Next()
if err == iterator.Done { if errors.Is(err, iterator.Done) {
gcs.Log.Infof("Iterated all the keys") gcs.Log.Infof("Iterated all the keys")
break break
} }
@ -96,8 +97,8 @@ func (gcs *GCS) Gather(acc telegraf.Accumulator) error {
if !gcs.shoudIgnore(name) { if !gcs.shoudIgnore(name) {
if err := gcs.processMeasurementsInObject(name, bucket, acc); err != nil { if err := gcs.processMeasurementsInObject(name, bucket, acc); err != nil {
gcs.Log.Errorf("Could not process object: %v in bucket: %v", name, bucketName, err) gcs.Log.Errorf("Could not process object %q in bucket %q: %v", name, bucketName, err)
acc.AddError(fmt.Errorf("COULD NOT PROCESS OBJECT: %v IN BUCKET: %v", name, err)) acc.AddError(fmt.Errorf("COULD NOT PROCESS OBJECT %q IN BUCKET %q: %w", name, bucketName, err))
} }
} }
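In the old calls here the format verbs and arguments were out of step: the log line passed an extra argument, and the accumulated error put the cause where the bucket name belonged. A quick illustration with made-up object and bucket names:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	name, bucket := "metrics.json", "telegraf-bucket"
	cause := errors.New("permission denied")

	// Old shape: two verbs, but the second argument is the cause, so the
	// bucket name never appears and the error chain is lost.
	old := fmt.Errorf("COULD NOT PROCESS OBJECT: %v IN BUCKET: %v", name, cause)
	fmt.Println(old)

	// Fixed shape: one verb per value plus %w for the cause.
	fixed := fmt.Errorf("COULD NOT PROCESS OBJECT %q IN BUCKET %q: %w", name, bucket, cause)
	fmt.Println(fixed)
}
```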

View File

@ -4,6 +4,7 @@ package haproxy
import ( import (
_ "embed" _ "embed"
"encoding/csv" "encoding/csv"
"errors"
"fmt" "fmt"
"io" "io"
"net" "net"
@ -85,8 +86,7 @@ func (h *haproxy) Gather(acc telegraf.Accumulator) error {
} }
func (h *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) error { func (h *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) error {
var network string var network, address string
var address string
if strings.HasPrefix(addr, "tcp://") { if strings.HasPrefix(addr, "tcp://") {
network = "tcp" network = "tcp"
address = strings.TrimPrefix(addr, "tcp://") address = strings.TrimPrefix(addr, "tcp://")
@ -96,15 +96,13 @@ func (h *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) erro
} }
c, err := net.Dial(network, address) c, err := net.Dial(network, address)
if err != nil { if err != nil {
return fmt.Errorf("could not connect to '%s://%s': %s", network, address, err) return fmt.Errorf("could not connect to '%s://%s': %w", network, address, err)
} }
_, errw := c.Write([]byte("show stat\n")) _, errw := c.Write([]byte("show stat\n"))
if errw != nil { if errw != nil {
return fmt.Errorf("could not write to socket '%s://%s': %s", network, address, errw) return fmt.Errorf("could not write to socket '%s://%s': %w", network, address, errw)
} }
return h.importCsvResult(c, acc, address) return h.importCsvResult(c, acc, address)
@ -212,7 +210,7 @@ func (h *haproxy) importCsvResult(r io.Reader, acc telegraf.Accumulator, host st
for { for {
row, err := csvr.Read() row, err := csvr.Read()
if err == io.EOF { if errors.Is(err, io.EOF) {
break break
} }
if err != nil { if err != nil {
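The CSV import loop gets the same errors.Is(err, io.EOF) treatment. A compact sketch of that loop shape against an in-memory reader (countRows is an invented helper):

```go
package main

import (
	"encoding/csv"
	"errors"
	"fmt"
	"io"
	"strings"
)

// csv.Reader signals end of input with io.EOF; errorlint wants that compared
// via errors.Is rather than ==, so wrapped EOFs are handled too.
func countRows(r io.Reader) (int, error) {
	csvr := csv.NewReader(r)
	n := 0
	for {
		_, err := csvr.Read()
		if errors.Is(err, io.EOF) {
			return n, nil
		}
		if err != nil {
			return n, fmt.Errorf("reading csv failed: %w", err)
		}
		n++
	}
}

func main() {
	n, err := countRows(strings.NewReader("a,b\n1,2\n"))
	fmt.Println(n, err) // 2 <nil>
}
```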

View File

@ -75,7 +75,7 @@ func (h *HTTP) Gather(acc telegraf.Accumulator) error {
go func(url string) { go func(url string) {
defer wg.Done() defer wg.Done()
if err := h.gatherURL(acc, url); err != nil { if err := h.gatherURL(acc, url); err != nil {
acc.AddError(fmt.Errorf("[url=%s]: %s", url, err)) acc.AddError(fmt.Errorf("[url=%s]: %w", url, err))
} }
}(u) }(u)
} }
@ -157,17 +157,17 @@ func (h *HTTP) gatherURL(
b, err := io.ReadAll(resp.Body) b, err := io.ReadAll(resp.Body)
if err != nil { if err != nil {
return fmt.Errorf("reading body failed: %v", err) return fmt.Errorf("reading body failed: %w", err)
} }
// Instantiate a new parser for the new data to avoid trouble with stateful parsers // Instantiate a new parser for the new data to avoid trouble with stateful parsers
parser, err := h.parserFunc() parser, err := h.parserFunc()
if err != nil { if err != nil {
return fmt.Errorf("instantiating parser failed: %v", err) return fmt.Errorf("instantiating parser failed: %w", err)
} }
metrics, err := parser.Parse(b) metrics, err := parser.Parse(b)
if err != nil { if err != nil {
return fmt.Errorf("parsing metrics failed: %v", err) return fmt.Errorf("parsing metrics failed: %w", err)
} }
for _, metric := range metrics { for _, metric := range metrics {
@ -187,13 +187,13 @@ func (h *HTTP) setRequestAuth(request *http.Request) error {
username, err := h.Username.Get() username, err := h.Username.Get()
if err != nil { if err != nil {
return fmt.Errorf("getting username failed: %v", err) return fmt.Errorf("getting username failed: %w", err)
} }
defer config.ReleaseSecret(username) defer config.ReleaseSecret(username)
password, err := h.Password.Get() password, err := h.Password.Get()
if err != nil { if err != nil {
return fmt.Errorf("getting password failed: %v", err) return fmt.Errorf("getting password failed: %w", err)
} }
defer config.ReleaseSecret(password) defer config.ReleaseSecret(password)

View File

@ -65,9 +65,6 @@ type httpClient interface {
Do(req *http.Request) (*http.Response, error) Do(req *http.Request) (*http.Response, error)
} }
// ErrRedirectAttempted indicates that a redirect occurred
var ErrRedirectAttempted = errors.New("redirect")
// Set the proxy. A configured proxy overwrites the system wide proxy. // Set the proxy. A configured proxy overwrites the system wide proxy.
func getProxyFunc(httpProxy string) func(*http.Request) (*url.URL, error) { func getProxyFunc(httpProxy string) func(*http.Request) (*url.URL, error) {
if httpProxy == "" { if httpProxy == "" {
@ -157,27 +154,30 @@ func setResult(resultString string, fields map[string]interface{}, tags map[stri
} }
func setError(err error, fields map[string]interface{}, tags map[string]string) error { func setError(err error, fields map[string]interface{}, tags map[string]string) error {
if timeoutError, ok := err.(net.Error); ok && timeoutError.Timeout() { var timeoutError net.Error
if errors.As(err, &timeoutError) && timeoutError.Timeout() {
setResult("timeout", fields, tags) setResult("timeout", fields, tags)
return timeoutError return timeoutError
} }
urlErr, isURLErr := err.(*url.Error) var urlErr *url.Error
if !isURLErr { if !errors.As(err, &urlErr) {
return nil return nil
} }
opErr, isNetErr := (urlErr.Err).(*net.OpError) var opErr *net.OpError
if isNetErr { if errors.As(urlErr, &opErr) {
switch e := (opErr.Err).(type) { var dnsErr *net.DNSError
case *net.DNSError: var parseErr *net.ParseError
if errors.As(opErr, &dnsErr) {
setResult("dns_error", fields, tags) setResult("dns_error", fields, tags)
return e return dnsErr
case *net.ParseError: } else if errors.As(opErr, &parseErr) {
// Parse error has to do with parsing of IP addresses, so we // Parse error has to do with parsing of IP addresses, so we
// group it with address errors // group it with address errors
setResult("address_error", fields, tags) setResult("address_error", fields, tags)
return e return parseErr
} }
} }
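setError is rewritten from type assertions and a type switch into a chain of errors.As checks, which walk the whole wrap chain. A self-contained sketch of that unwrapping order, using hand-built url.Error/net.OpError/net.DNSError values instead of a real HTTP request:

```go
package main

import (
	"errors"
	"fmt"
	"net"
	"net/url"
)

// classify mirrors the check order above: timeout first, then DNS errors.
func classify(err error) string {
	var timeoutErr net.Error
	if errors.As(err, &timeoutErr) && timeoutErr.Timeout() {
		return "timeout"
	}
	var dnsErr *net.DNSError
	if errors.As(err, &dnsErr) {
		return "dns_error"
	}
	return "other"
}

func main() {
	// Hand-built error chain: url.Error -> net.OpError -> net.DNSError.
	err := &url.Error{
		Op:  "Get",
		URL: "http://example.invalid",
		Err: &net.OpError{Op: "dial", Err: &net.DNSError{Err: "no such host", Name: "example.invalid"}},
	}
	fmt.Println(classify(err)) // dns_error
}
```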
@ -339,7 +339,7 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
var err error var err error
h.compiledStringMatch, err = regexp.Compile(h.ResponseStringMatch) h.compiledStringMatch, err = regexp.Compile(h.ResponseStringMatch)
if err != nil { if err != nil {
return fmt.Errorf("failed to compile regular expression %s : %s", h.ResponseStringMatch, err) return fmt.Errorf("failed to compile regular expression %q: %w", h.ResponseStringMatch, err)
} }
} }
@ -401,12 +401,12 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
func (h *HTTPResponse) setRequestAuth(request *http.Request) error { func (h *HTTPResponse) setRequestAuth(request *http.Request) error {
username, err := h.Username.Get() username, err := h.Username.Get()
if err != nil { if err != nil {
return fmt.Errorf("getting username failed: %v", err) return fmt.Errorf("getting username failed: %w", err)
} }
defer config.ReleaseSecret(username) defer config.ReleaseSecret(username)
password, err := h.Password.Get() password, err := h.Password.Get()
if err != nil { if err != nil {
return fmt.Errorf("getting password failed: %v", err) return fmt.Errorf("getting password failed: %w", err)
} }
defer config.ReleaseSecret(password) defer config.ReleaseSecret(password)
if len(username) != 0 || len(password) != 0 { if len(username) != 0 || len(password) != 0 {

View File

@ -96,19 +96,19 @@ func (h *Hugepages) Init() error {
func (h *Hugepages) Gather(acc telegraf.Accumulator) error { func (h *Hugepages) Gather(acc telegraf.Accumulator) error {
if h.gatherRoot { if h.gatherRoot {
if err := h.gatherRootStats(acc); err != nil { if err := h.gatherRootStats(acc); err != nil {
return fmt.Errorf("gathering root stats failed: %v", err) return fmt.Errorf("gathering root stats failed: %w", err)
} }
} }
if h.gatherPerNode { if h.gatherPerNode {
if err := h.gatherStatsPerNode(acc); err != nil { if err := h.gatherStatsPerNode(acc); err != nil {
return fmt.Errorf("gathering per node stats failed: %v", err) return fmt.Errorf("gathering per node stats failed: %w", err)
} }
} }
if h.gatherMeminfo { if h.gatherMeminfo {
if err := h.gatherStatsFromMeminfo(acc); err != nil { if err := h.gatherStatsFromMeminfo(acc); err != nil {
return fmt.Errorf("gathering meminfo stats failed: %v", err) return fmt.Errorf("gathering meminfo stats failed: %w", err)
} }
} }
@ -160,7 +160,7 @@ func (h *Hugepages) gatherFromHugepagePath(
// read metrics from: hugepages/hugepages-*/* // read metrics from: hugepages/hugepages-*/*
hugepagesDirs, err := os.ReadDir(path) hugepagesDirs, err := os.ReadDir(path)
if err != nil { if err != nil {
return fmt.Errorf("reading root dir failed: %v", err) return fmt.Errorf("reading root dir failed: %w", err)
} }
for _, hugepagesDir := range hugepagesDirs { for _, hugepagesDir := range hugepagesDirs {
@ -177,7 +177,7 @@ func (h *Hugepages) gatherFromHugepagePath(
metricsPath := filepath.Join(path, hugepagesDir.Name()) metricsPath := filepath.Join(path, hugepagesDir.Name())
metricFiles, err := os.ReadDir(metricsPath) metricFiles, err := os.ReadDir(metricsPath)
if err != nil { if err != nil {
return fmt.Errorf("reading metric dir failed: %v", err) return fmt.Errorf("reading metric dir failed: %w", err)
} }
metrics := make(map[string]interface{}) metrics := make(map[string]interface{})

View File

@ -7,6 +7,7 @@ import (
"crypto/tls" "crypto/tls"
_ "embed" _ "embed"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"net" "net"
"net/http" "net/http"
@ -152,7 +153,7 @@ func (h *InfluxDBListener) Start(acc telegraf.Accumulator) error {
go func() { go func() {
err = h.server.Serve(h.listener) err = h.server.Serve(h.listener)
if err != http.ErrServerClosed { if !errors.Is(err, http.ErrServerClosed) {
h.Log.Infof("Error serving HTTP on %s", h.ServiceAddress) h.Log.Infof("Error serving HTTP on %s", h.ServiceAddress)
} }
}() }()
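Serve always returns a non-nil error, and after Close/Shutdown the expected value is http.ErrServerClosed; errors.Is keeps that check robust to wrapping. A small runnable sketch (the throwaway listener and the timing are illustrative only):

```go
package main

import (
	"errors"
	"fmt"
	"net"
	"net/http"
	"time"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	srv := &http.Server{Handler: http.NotFoundHandler()}

	go func() {
		time.Sleep(100 * time.Millisecond)
		_ = srv.Close() // makes Serve return http.ErrServerClosed
	}()

	if err := srv.Serve(ln); !errors.Is(err, http.ErrServerClosed) {
		fmt.Println("serve error:", err)
	} else {
		fmt.Println("server closed cleanly") // expected path
	}
}
```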
@ -286,7 +287,8 @@ func (h *InfluxDBListener) handleWriteInternalParser(res http.ResponseWriter, re
lastPos = pos lastPos = pos
// Continue parsing metrics even if some are malformed // Continue parsing metrics even if some are malformed
if parseErr, ok := err.(*influx.ParseError); ok { var parseErr *influx.ParseError
if errors.As(err, &parseErr) {
parseErrorCount++ parseErrorCount++
errStr := parseErr.Error() errStr := parseErr.Error()
if firstParseErrorStr == "" { if firstParseErrorStr == "" {
@ -309,7 +311,7 @@ func (h *InfluxDBListener) handleWriteInternalParser(res http.ResponseWriter, re
h.acc.AddMetric(m) h.acc.AddMetric(m)
} }
if err != influx.EOF { if !errors.Is(err, influx.EOF) {
h.Log.Debugf("Error parsing the request body: %v", err.Error()) h.Log.Debugf("Error parsing the request body: %v", err.Error())
if err := badRequest(res, err.Error()); err != nil { if err := badRequest(res, err.Error()); err != nil {
h.Log.Debugf("error in bad-request: %v", err) h.Log.Debugf("error in bad-request: %v", err)
@ -401,7 +403,8 @@ func (h *InfluxDBListener) handleWriteUpstreamParser(res http.ResponseWriter, re
m, err = parser.Next() m, err = parser.Next()
// Continue parsing metrics even if some are malformed // Continue parsing metrics even if some are malformed
if parseErr, ok := err.(*influx_upstream.ParseError); ok { var parseErr *influx_upstream.ParseError
if errors.As(err, &parseErr) {
parseErrorCount++ parseErrorCount++
errStr := parseErr.Error() errStr := parseErr.Error()
if firstParseErrorStr == "" { if firstParseErrorStr == "" {
@ -424,7 +427,7 @@ func (h *InfluxDBListener) handleWriteUpstreamParser(res http.ResponseWriter, re
h.acc.AddMetric(m) h.acc.AddMetric(m)
} }
if err != influx_upstream.ErrEOF { if !errors.Is(err, influx_upstream.ErrEOF) {
h.Log.Debugf("Error parsing the request body: %v", err.Error()) h.Log.Debugf("Error parsing the request body: %v", err.Error())
if err := badRequest(res, err.Error()); err != nil { if err := badRequest(res, err.Error()); err != nil {
h.Log.Debugf("error in bad-request: %v", err) h.Log.Debugf("error in bad-request: %v", err)

View File

@ -152,7 +152,7 @@ func (h *InfluxDBV2Listener) Start(acc telegraf.Accumulator) error {
go func() { go func() {
err = h.server.Serve(h.listener) err = h.server.Serve(h.listener)
if err != http.ErrServerClosed { if !errors.Is(err, http.ErrServerClosed) {
h.Log.Infof("Error serving HTTP on %s", h.ServiceAddress) h.Log.Infof("Error serving HTTP on %s", h.ServiceAddress)
} }
}() }()
@ -250,7 +250,7 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc {
if h.ParserType == "upstream" { if h.ParserType == "upstream" {
parser := influx_upstream.Parser{} parser := influx_upstream.Parser{}
err = parser.Init() err = parser.Init()
if err != ErrEOF && err != nil { if !errors.Is(err, ErrEOF) && err != nil {
h.Log.Debugf("Error initializing parser: %v", err.Error()) h.Log.Debugf("Error initializing parser: %v", err.Error())
return return
} }
@ -265,7 +265,7 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc {
} else { } else {
parser := influx.Parser{} parser := influx.Parser{}
err = parser.Init() err = parser.Init()
if err != ErrEOF && err != nil { if !errors.Is(err, ErrEOF) && err != nil {
h.Log.Debugf("Error initializing parser: %v", err.Error()) h.Log.Debugf("Error initializing parser: %v", err.Error())
return return
} }
@ -279,7 +279,7 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc {
metrics, err = parser.Parse(bytes) metrics, err = parser.Parse(bytes)
} }
if err != ErrEOF && err != nil { if !errors.Is(err, ErrEOF) && err != nil {
h.Log.Debugf("Error parsing the request body: %v", err.Error()) h.Log.Debugf("Error parsing the request body: %v", err.Error())
if err := badRequest(res, Invalid, err.Error()); err != nil { if err := badRequest(res, Invalid, err.Error()); err != nil {
h.Log.Debugf("error in bad-request: %v", err) h.Log.Debugf("error in bad-request: %v", err)

View File

@ -108,7 +108,7 @@ func (d *IntelDLB) Init() error {
func (d *IntelDLB) Gather(acc telegraf.Accumulator) error { func (d *IntelDLB) Gather(acc telegraf.Accumulator) error {
err := d.gatherMetricsFromSocket(acc) err := d.gatherMetricsFromSocket(acc)
if err != nil { if err != nil {
socketErr := fmt.Errorf("gathering metrics from socket by given commands failed: %v", err) socketErr := fmt.Errorf("gathering metrics from socket by given commands failed: %w", err)
if d.UnreachableSocketBehavior == "error" { if d.UnreachableSocketBehavior == "error" {
return socketErr return socketErr
} }
@ -117,7 +117,7 @@ func (d *IntelDLB) Gather(acc telegraf.Accumulator) error {
err = d.gatherRasMetrics(acc) err = d.gatherRasMetrics(acc)
if err != nil { if err != nil {
return fmt.Errorf("gathering RAS metrics failed: %v", err) return fmt.Errorf("gathering RAS metrics failed: %w", err)
} }
return nil return nil
@ -305,7 +305,7 @@ func (d *IntelDLB) setInitMessageLength() error {
buf := make([]byte, d.maxInitMessageLength) buf := make([]byte, d.maxInitMessageLength)
messageLength, err := d.connection.Read(buf) messageLength, err := d.connection.Read(buf)
if err != nil { if err != nil {
return d.closeSocketAndThrowError("custom", fmt.Errorf("failed to read InitMessage from socket - %v", err)) return d.closeSocketAndThrowError("custom", fmt.Errorf("failed to read InitMessage from socket: %w", err))
} }
if messageLength > len(buf) { if messageLength > len(buf) {
return d.closeSocketAndThrowError("custom", fmt.Errorf("socket reply length is bigger than default buffer length")) return d.closeSocketAndThrowError("custom", fmt.Errorf("socket reply length is bigger than default buffer length"))
@ -446,7 +446,7 @@ func checkSocketPath(path string) error {
} }
if err != nil { if err != nil {
return fmt.Errorf("cannot get system information of '%v' file: %v", path, err) return fmt.Errorf("cannot get system information of %q file: %w", path, err)
} }
if pathInfo.Mode()&os.ModeSocket != os.ModeSocket { if pathInfo.Mode()&os.ModeSocket != os.ModeSocket {

View File

@ -21,7 +21,7 @@ type rasReaderImpl struct {
func (rasReaderImpl) gatherPaths(pattern string) ([]string, error) { func (rasReaderImpl) gatherPaths(pattern string) ([]string, error) {
filePaths, err := filepath.Glob(pattern) filePaths, err := filepath.Glob(pattern)
if err != nil { if err != nil {
return nil, fmt.Errorf("glob failed for pattern: %s: %v", pattern, err) return nil, fmt.Errorf("glob failed for pattern %q: %w", pattern, err)
} }
if len(filePaths) == 0 { if len(filePaths) == 0 {

View File

@ -96,7 +96,7 @@ func (ea *iaEntitiesActivator) activateCoreEvents(entity *CoreEventEntity) error
if entity.PerfGroup { if entity.PerfGroup {
err := ea.activateCoreEventsGroup(entity) err := ea.activateCoreEventsGroup(entity)
if err != nil { if err != nil {
return fmt.Errorf("failed to activate core events group: %v", err) return fmt.Errorf("failed to activate core events group: %w", err)
} }
} else { } else {
for _, event := range entity.parsedEvents { for _, event := range entity.parsedEvents {
@ -169,7 +169,7 @@ func (ea *iaEntitiesActivator) activateCoreEventsGroup(entity *CoreEventEntity)
placements, err := ea.placementMaker.makeCorePlacements(entity.parsedCores, leader.Event) placements, err := ea.placementMaker.makeCorePlacements(entity.parsedCores, leader.Event)
if err != nil { if err != nil {
return fmt.Errorf("failed to make core placements: %v", err) return fmt.Errorf("failed to make core placements: %w", err)
} }
for _, plc := range placements { for _, plc := range placements {

View File

@ -46,7 +46,7 @@ func (cp *configParser) parseEntities(coreEntities []*CoreEventEntity, uncoreEnt
coreEntity.parsedCores, err = cp.parseCores(coreEntity.Cores) coreEntity.parsedCores, err = cp.parseCores(coreEntity.Cores)
if err != nil { if err != nil {
return fmt.Errorf("error during cores parsing: %v", err) return fmt.Errorf("error during cores parsing: %w", err)
} }
} }
@ -69,7 +69,7 @@ func (cp *configParser) parseEntities(coreEntities []*CoreEventEntity, uncoreEnt
uncoreEntity.parsedSockets, err = cp.parseSockets(uncoreEntity.Sockets) uncoreEntity.parsedSockets, err = cp.parseSockets(uncoreEntity.Sockets)
if err != nil { if err != nil {
return fmt.Errorf("error during sockets parsing: %v", err) return fmt.Errorf("error during sockets parsing: %w", err)
} }
} }
return nil return nil
@ -99,7 +99,7 @@ func (cp *configParser) parseCores(cores []string) ([]int, error) {
} }
cores, err := cp.sys.allCPUs() cores, err := cp.sys.allCPUs()
if err != nil { if err != nil {
return nil, fmt.Errorf("cannot obtain all cpus: %v", err) return nil, fmt.Errorf("cannot obtain all cpus: %w", err)
} }
return cores, nil return cores, nil
} }
@ -124,7 +124,7 @@ func (cp *configParser) parseSockets(sockets []string) ([]int, error) {
} }
sockets, err := cp.sys.allSockets() sockets, err := cp.sys.allSockets()
if err != nil { if err != nil {
return nil, fmt.Errorf("cannot obtain all sockets: %v", err) return nil, fmt.Errorf("cannot obtain all sockets: %w", err)
} }
return sockets, nil return sockets, nil
} }

View File

@ -128,7 +128,7 @@ func (*IntelPMU) SampleConfig() string {
func (i *IntelPMU) Init() error { func (i *IntelPMU) Init() error {
err := checkFiles(i.EventListPaths, i.fileInfo) err := checkFiles(i.EventListPaths, i.fileInfo)
if err != nil { if err != nil {
return fmt.Errorf("error during event definitions paths validation: %v", err) return fmt.Errorf("error during event definitions paths validation: %w", err)
} }
reader, err := newReader(i.EventListPaths) reader, err := newReader(i.EventListPaths)
@ -152,22 +152,22 @@ func (i *IntelPMU) initialization(parser entitiesParser, resolver entitiesResolv
err := parser.parseEntities(i.CoreEntities, i.UncoreEntities) err := parser.parseEntities(i.CoreEntities, i.UncoreEntities)
if err != nil { if err != nil {
return fmt.Errorf("error during parsing configuration sections: %v", err) return fmt.Errorf("error during parsing configuration sections: %w", err)
} }
err = resolver.resolveEntities(i.CoreEntities, i.UncoreEntities) err = resolver.resolveEntities(i.CoreEntities, i.UncoreEntities)
if err != nil { if err != nil {
return fmt.Errorf("error during events resolving: %v", err) return fmt.Errorf("error during events resolving: %w", err)
} }
err = i.checkFileDescriptors() err = i.checkFileDescriptors()
if err != nil { if err != nil {
return fmt.Errorf("error during file descriptors checking: %v", err) return fmt.Errorf("error during file descriptors checking: %w", err)
} }
err = activator.activateEntities(i.CoreEntities, i.UncoreEntities) err = activator.activateEntities(i.CoreEntities, i.UncoreEntities)
if err != nil { if err != nil {
return fmt.Errorf("error during events activation: %v", err) return fmt.Errorf("error during events activation: %w", err)
} }
return nil return nil
} }
@ -175,11 +175,11 @@ func (i *IntelPMU) initialization(parser entitiesParser, resolver entitiesResolv
func (i *IntelPMU) checkFileDescriptors() error { func (i *IntelPMU) checkFileDescriptors() error {
coreFd, err := estimateCoresFd(i.CoreEntities) coreFd, err := estimateCoresFd(i.CoreEntities)
if err != nil { if err != nil {
return fmt.Errorf("failed to estimate number of core events file descriptors: %v", err) return fmt.Errorf("failed to estimate number of core events file descriptors: %w", err)
} }
uncoreFd, err := estimateUncoreFd(i.UncoreEntities) uncoreFd, err := estimateUncoreFd(i.UncoreEntities)
if err != nil { if err != nil {
return fmt.Errorf("failed to estimate nubmer of uncore events file descriptors: %v", err) return fmt.Errorf("failed to estimate nubmer of uncore events file descriptors: %w", err)
} }
if coreFd > math.MaxUint64-uncoreFd { if coreFd > math.MaxUint64-uncoreFd {
return fmt.Errorf("requested number of file descriptors exceeds uint64") return fmt.Errorf("requested number of file descriptors exceeds uint64")
@ -213,7 +213,7 @@ func (i *IntelPMU) Gather(acc telegraf.Accumulator) error {
} }
coreMetrics, uncoreMetrics, err := i.entitiesReader.readEntities(i.CoreEntities, i.UncoreEntities) coreMetrics, uncoreMetrics, err := i.entitiesReader.readEntities(i.CoreEntities, i.UncoreEntities)
if err != nil { if err != nil {
return fmt.Errorf("failed to read entities events values: %v", err) return fmt.Errorf("failed to read entities events values: %w", err)
} }
for id, m := range coreMetrics { for id, m := range coreMetrics {
@ -275,7 +275,7 @@ func newReader(files []string) (*ia.JSONFilesReader, error) {
for _, file := range files { for _, file := range files {
err := reader.AddFiles(file) err := reader.AddFiles(file)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to add files to reader: %v", err) return nil, fmt.Errorf("failed to add files to reader: %w", err)
} }
} }
return reader, nil return reader, nil

View File

@ -29,7 +29,7 @@ func (e *iaEntitiesResolver) resolveEntities(coreEntities []*CoreEventEntity, un
if entity.allEvents { if entity.allEvents {
newEvents, _, err := e.resolveAllEvents() newEvents, _, err := e.resolveAllEvents()
if err != nil { if err != nil {
return fmt.Errorf("failed to resolve all events: %v", err) return fmt.Errorf("failed to resolve all events: %w", err)
} }
entity.parsedEvents = newEvents entity.parsedEvents = newEvents
continue continue
@ -55,7 +55,7 @@ func (e *iaEntitiesResolver) resolveEntities(coreEntities []*CoreEventEntity, un
if entity.allEvents { if entity.allEvents {
_, newEvents, err := e.resolveAllEvents() _, newEvents, err := e.resolveAllEvents()
if err != nil { if err != nil {
return fmt.Errorf("failed to resolve all events: %v", err) return fmt.Errorf("failed to resolve all events: %w", err)
} }
entity.parsedEvents = newEvents entity.parsedEvents = newEvents
continue continue
@ -84,8 +84,8 @@ func (e *iaEntitiesResolver) resolveAllEvents() (coreEvents []*eventWithQuals, u
perfEvents, err := e.transformer.Transform(e.reader, ia.NewNameMatcher()) perfEvents, err := e.transformer.Transform(e.reader, ia.NewNameMatcher())
if err != nil { if err != nil {
re, ok := err.(*ia.TransformationError) var re *ia.TransformationError
if !ok { if !errors.As(err, &re) {
return nil, nil, err return nil, nil, err
} }
if e.log != nil && re != nil { if e.log != nil && re != nil {
@ -131,7 +131,7 @@ func (e *iaEntitiesResolver) resolveEvent(name string, qualifiers []string) (ia.
matcher := ia.NewNameMatcher(name) matcher := ia.NewNameMatcher(name)
perfEvents, err := e.transformer.Transform(e.reader, matcher) perfEvents, err := e.transformer.Transform(e.reader, matcher)
if err != nil { if err != nil {
return custom, fmt.Errorf("failed to transform perf events: %v", err) return custom, fmt.Errorf("failed to transform perf events: %w", err)
} }
if len(perfEvents) < 1 { if len(perfEvents) < 1 {
return custom, fmt.Errorf("failed to resolve unknown event %q", name) return custom, fmt.Errorf("failed to resolve unknown event %q", name)

View File

@ -32,7 +32,7 @@ func (fs *fileServiceImpl) getCPUInfoStats() (map[string]*cpuInfo, error) {
path := "/proc/cpuinfo" path := "/proc/cpuinfo"
cpuInfoFile, err := os.Open(path) cpuInfoFile, err := os.Open(path)
if err != nil { if err != nil {
return nil, fmt.Errorf("error while reading %s, err: %v", path, err) return nil, fmt.Errorf("error while reading %q: %w", path, err)
} }
defer cpuInfoFile.Close() defer cpuInfoFile.Close()
@ -142,7 +142,7 @@ func (fs *fileServiceImpl) readFileAtOffsetToUint64(reader io.ReaderAt, offset i
_, err := reader.ReadAt(buffer, offset) _, err := reader.ReadAt(buffer, offset)
if err != nil { if err != nil {
return 0, fmt.Errorf("error on reading file at offset %d, err: %v", offset, err) return 0, fmt.Errorf("error on reading file at offset %d: %w", offset, err)
} }
return binary.LittleEndian.Uint64(buffer), nil return binary.LittleEndian.Uint64(buffer), nil

View File

@ -85,7 +85,7 @@ func (m *msrServiceImpl) retrieveCPUFrequencyForCore(core string) (float64, erro
} }
cpuFreqFile, err := os.Open(cpuFreqPath) cpuFreqFile, err := os.Open(cpuFreqPath)
if err != nil { if err != nil {
return 0, fmt.Errorf("error opening scaling_cur_freq file on path %s, err: %v", cpuFreqPath, err) return 0, fmt.Errorf("error opening scaling_cur_freq file on path %q: %w", cpuFreqPath, err)
} }
defer cpuFreqFile.Close() defer cpuFreqFile.Close()
@ -96,7 +96,7 @@ func (m *msrServiceImpl) retrieveCPUFrequencyForCore(core string) (float64, erro
func (m *msrServiceImpl) retrieveUncoreFrequency(socketID string, typeFreq string, kind string, die string) (float64, error) { func (m *msrServiceImpl) retrieveUncoreFrequency(socketID string, typeFreq string, kind string, die string) (float64, error) {
uncoreFreqPath, err := createUncoreFreqPath(socketID, typeFreq, kind, die) uncoreFreqPath, err := createUncoreFreqPath(socketID, typeFreq, kind, die)
if err != nil { if err != nil {
return 0, fmt.Errorf("unable to create uncore freq read path for socketID %s, and frequency type %s err: %v", socketID, typeFreq, err) return 0, fmt.Errorf("unable to create uncore freq read path for socketID %q, and frequency type %q: %w", socketID, typeFreq, err)
} }
err = checkFile(uncoreFreqPath) err = checkFile(uncoreFreqPath)
if err != nil { if err != nil {
@ -104,7 +104,7 @@ func (m *msrServiceImpl) retrieveUncoreFrequency(socketID string, typeFreq strin
} }
uncoreFreqFile, err := os.Open(uncoreFreqPath) uncoreFreqFile, err := os.Open(uncoreFreqPath)
if err != nil { if err != nil {
return 0, fmt.Errorf("error opening uncore frequncy file on %s, err: %v", uncoreFreqPath, err) return 0, fmt.Errorf("error opening uncore frequncy file on %q: %w", uncoreFreqPath, err)
} }
defer uncoreFreqFile.Close() defer uncoreFreqFile.Close()
@ -144,13 +144,13 @@ func (m *msrServiceImpl) openAndReadMsr(core string) error {
} }
msrFile, err := os.Open(path) msrFile, err := os.Open(path)
if err != nil { if err != nil {
return fmt.Errorf("error opening MSR file on path %s, err: %v", path, err) return fmt.Errorf("error opening MSR file on path %q: %w", path, err)
} }
defer msrFile.Close() defer msrFile.Close()
err = m.readDataFromMsr(core, msrFile) err = m.readDataFromMsr(core, msrFile)
if err != nil { if err != nil {
return fmt.Errorf("error reading data from MSR for core %s, err: %v", core, err) return fmt.Errorf("error reading data from MSR for core %q: %w", core, err)
} }
return nil return nil
} }
@ -163,7 +163,7 @@ func (m *msrServiceImpl) readSingleMsr(core string, msr string) (uint64, error)
} }
msrFile, err := os.Open(path) msrFile, err := os.Open(path)
if err != nil { if err != nil {
return 0, fmt.Errorf("error opening MSR file on path %s, err: %v", path, err) return 0, fmt.Errorf("error opening MSR file on path %q: %w", path, err)
} }
defer msrFile.Close() defer msrFile.Close()
@ -213,7 +213,7 @@ func (m *msrServiceImpl) readDataFromMsr(core string, reader io.ReaderAt) error
err := m.readValueFromFileAtOffset(ctx, ch, reader, off) err := m.readValueFromFileAtOffset(ctx, ch, reader, off)
if err != nil { if err != nil {
return fmt.Errorf("error reading MSR file, err: %v", err) return fmt.Errorf("error reading MSR file: %w", err)
} }
return nil return nil
@ -231,7 +231,7 @@ func (m *msrServiceImpl) readDataFromMsr(core string, reader io.ReaderAt) error
newTemp := <-msrOffsetsWithChannels[temperatureLocation] newTemp := <-msrOffsetsWithChannels[temperatureLocation]
if err := g.Wait(); err != nil { if err := g.Wait(); err != nil {
return fmt.Errorf("received error during reading MSR values in goroutines: %v", err) return fmt.Errorf("received error during reading MSR values in goroutines: %w", err)
} }
m.cpuCoresData[core].c3Delta = newC3 - m.cpuCoresData[core].c3 m.cpuCoresData[core].c3Delta = newC3 - m.cpuCoresData[core].c3

View File

@ -57,7 +57,7 @@ func (r *raplServiceImpl) retrieveAndCalculateData(socketID string) error {
} }
socketEnergyUjFile, err := os.Open(socketEnergyUjPath) socketEnergyUjFile, err := os.Open(socketEnergyUjPath)
if err != nil { if err != nil {
return fmt.Errorf("error opening socket energy_uj file on path %s, err: %v", socketEnergyUjPath, err) return fmt.Errorf("error opening socket energy_uj file on path %q: %w", socketEnergyUjPath, err)
} }
defer socketEnergyUjFile.Close() defer socketEnergyUjFile.Close()
@ -69,7 +69,7 @@ func (r *raplServiceImpl) retrieveAndCalculateData(socketID string) error {
} }
dramEnergyUjFile, err := os.Open(dramEnergyUjPath) dramEnergyUjFile, err := os.Open(dramEnergyUjPath)
if err != nil { if err != nil {
return fmt.Errorf("error opening dram energy_uj file on path %s, err: %v", dramEnergyUjPath, err) return fmt.Errorf("error opening dram energy_uj file on path %q: %w", dramEnergyUjPath, err)
} }
defer dramEnergyUjFile.Close() defer dramEnergyUjFile.Close()
@ -80,7 +80,7 @@ func (r *raplServiceImpl) retrieveAndCalculateData(socketID string) error {
} }
socketMaxEnergyUjFile, err := os.Open(socketMaxEnergyUjPath) socketMaxEnergyUjFile, err := os.Open(socketMaxEnergyUjPath)
if err != nil { if err != nil {
return fmt.Errorf("error opening socket max_energy_range_uj file on path %s, err: %v", socketMaxEnergyUjPath, err) return fmt.Errorf("error opening socket max_energy_range_uj file on path %q: %w", socketMaxEnergyUjPath, err)
} }
defer socketMaxEnergyUjFile.Close() defer socketMaxEnergyUjFile.Close()
@ -91,7 +91,7 @@ func (r *raplServiceImpl) retrieveAndCalculateData(socketID string) error {
} }
dramMaxEnergyUjFile, err := os.Open(dramMaxEnergyUjPath) dramMaxEnergyUjFile, err := os.Open(dramMaxEnergyUjPath)
if err != nil { if err != nil {
return fmt.Errorf("error opening dram max_energy_range_uj file on path %s, err: %v", dramMaxEnergyUjPath, err) return fmt.Errorf("error opening dram max_energy_range_uj file on path %q: %w", dramMaxEnergyUjPath, err)
} }
defer dramMaxEnergyUjFile.Close() defer dramMaxEnergyUjFile.Close()
@ -107,7 +107,7 @@ func (r *raplServiceImpl) getConstraintMaxPowerWatts(socketID string) (float64,
} }
socketMaxPowerFile, err := os.Open(socketMaxPowerPath) socketMaxPowerFile, err := os.Open(socketMaxPowerPath)
if err != nil { if err != nil {
return 0, fmt.Errorf("error opening constraint_0_max_power_uw file on path %s, err: %v", socketMaxPowerPath, err) return 0, fmt.Errorf("error opening constraint_0_max_power_uw file on path %q: %w", socketMaxPowerPath, err)
} }
defer socketMaxPowerFile.Close() defer socketMaxPowerFile.Close()
@ -186,7 +186,7 @@ func (r *raplServiceImpl) findDramFolder(raplFolders []string, socketID string)
read, err := r.fs.readFile(nameFilePath) read, err := r.fs.readFile(nameFilePath)
if err != nil { if err != nil {
if val := r.logOnce[nameFilePath]; val == nil || val.Error() != err.Error() { if val := r.logOnce[nameFilePath]; val == nil || val.Error() != err.Error() {
r.log.Errorf("error reading file on path: %s, err: %v", nameFilePath, err) r.log.Errorf("error reading file on path %q: %v", nameFilePath, err)
r.logOnce[nameFilePath] = err r.logOnce[nameFilePath] = err
} }
continue continue

View File

@ -7,6 +7,7 @@ import (
"bufio" "bufio"
"context" "context"
_ "embed" _ "embed"
"errors"
"fmt" "fmt"
"io" "io"
"os" "os"
@ -335,7 +336,7 @@ func shutDownPqos(pqos *exec.Cmd) error {
ctx, cancel := context.WithTimeout(context.Background(), timeout) ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel() defer cancel()
for { for {
if err := pqos.Process.Signal(syscall.Signal(0)); err == os.ErrProcessDone { if err := pqos.Process.Signal(syscall.Signal(0)); errors.Is(err, os.ErrProcessDone) {
return nil return nil
} else if ctx.Err() != nil { } else if ctx.Err() != nil {
break break
@ -347,7 +348,7 @@ func shutDownPqos(pqos *exec.Cmd) error {
// fixed in https://github.com/intel/intel-cmt-cat/issues/197 // fixed in https://github.com/intel/intel-cmt-cat/issues/197
err := pqos.Process.Kill() err := pqos.Process.Kill()
if err != nil { if err != nil {
return fmt.Errorf("failed to shut down pqos: %v", err) return fmt.Errorf("failed to shut down pqos: %w", err)
} }
} }
return nil return nil
@ -401,7 +402,6 @@ func validatePqosPath(pqosPath string) error {
func parseCoresConfig(cores []string) ([]string, error) { func parseCoresConfig(cores []string) ([]string, error) {
var allCores []int var allCores []int
configError := fmt.Errorf("wrong cores input config data format")
parsedCores := make([]string, 0, len(cores)) parsedCores := make([]string, 0, len(cores))
for _, singleCoreGroup := range cores { for _, singleCoreGroup := range cores {
@ -411,10 +411,10 @@ func parseCoresConfig(cores []string) ([]string, error) {
for _, coreStr := range separatedCores { for _, coreStr := range separatedCores {
actualCores, err := validateAndParseCores(coreStr) actualCores, err := validateAndParseCores(coreStr)
if err != nil { if err != nil {
return nil, fmt.Errorf("%v: %v", configError, err) return nil, fmt.Errorf("wrong cores input config data format: %w", err)
} }
if checkForDuplicates(allCores, actualCores) { if checkForDuplicates(allCores, actualCores) {
return nil, fmt.Errorf("%v: %v", configError, "core value cannot be duplicated") return nil, errors.New("wrong cores input config data format: core value cannot be duplicated")
} }
actualGroupOfCores = append(actualGroupOfCores, actualCores...) actualGroupOfCores = append(actualGroupOfCores, actualCores...)
allCores = append(allCores, actualGroupOfCores...) allCores = append(allCores, actualGroupOfCores...)
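The pre-built configError value is dropped: the constant message becomes errors.New, and the dynamic case wraps the underlying cause with %w. A sketch of that split, with a hypothetical errBadFormat sentinel and parseCore helper:

```go
package main

import (
	"errors"
	"fmt"
)

// Hypothetical sentinel used only for this sketch.
var errBadFormat = errors.New("wrong cores input config data format")

func parseCore(s string) error {
	if s == "" {
		// Static message: errors.New, no formatting needed.
		return errBadFormat
	}
	if s == "x" {
		// Dynamic cause: wrap it so callers can inspect the chain.
		return fmt.Errorf("%w: unparsable token %q", errBadFormat, s)
	}
	return nil
}

func main() {
	err := parseCore("x")
	fmt.Println(errors.Is(err, errBadFormat)) // true
}
```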

View File

@ -76,7 +76,7 @@ scan:
irqs = append(irqs, *irq) irqs = append(irqs, *irq)
} }
if scanner.Err() != nil { if scanner.Err() != nil {
return nil, fmt.Errorf("error scanning file: %s", scanner.Err()) return nil, fmt.Errorf("error scanning file: %w", scanner.Err())
} }
return irqs, nil return irqs, nil
} }
@ -116,7 +116,7 @@ func parseFile(file string) ([]IRQ, error) {
irqs, err := parseInterrupts(f) irqs, err := parseInterrupts(f)
if err != nil { if err != nil {
return nil, fmt.Errorf("parsing %s: %s", file, err) return nil, fmt.Errorf("parsing %q: %w", file, err)
} }
return irqs, nil return irqs, nil
} }

View File

@ -58,7 +58,7 @@ func (m *Ipmi) Init() error {
if m.Path == "" { if m.Path == "" {
path, err := exec.LookPath(cmd) path, err := exec.LookPath(cmd)
if err != nil { if err != nil {
return fmt.Errorf("looking up %q failed: %v", cmd, err) return fmt.Errorf("looking up %q failed: %w", cmd, err)
} }
m.Path = path m.Path = path
} }
@ -129,7 +129,7 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error {
cmd := execCommand(name, dumpOpts...) cmd := execCommand(name, dumpOpts...)
out, err := internal.CombinedOutputTimeout(cmd, time.Duration(m.Timeout)) out, err := internal.CombinedOutputTimeout(cmd, time.Duration(m.Timeout))
if err != nil { if err != nil {
return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(sanitizeIPMICmd(cmd.Args), " "), err, string(out)) return fmt.Errorf("failed to run command %q: %w - %s", strings.Join(sanitizeIPMICmd(cmd.Args), " "), err, string(out))
} }
} }
opts = append(opts, "-S") opts = append(opts, "-S")
@ -148,7 +148,7 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error {
out, err := internal.CombinedOutputTimeout(cmd, time.Duration(m.Timeout)) out, err := internal.CombinedOutputTimeout(cmd, time.Duration(m.Timeout))
timestamp := time.Now() timestamp := time.Now()
if err != nil { if err != nil {
return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(sanitizeIPMICmd(cmd.Args), " "), err, string(out)) return fmt.Errorf("failed to run command %q: %w - %s", strings.Join(sanitizeIPMICmd(cmd.Args), " "), err, string(out))
} }
if m.MetricVersion == 2 { if m.MetricVersion == 2 {
return m.parseV2(acc, hostname, out, timestamp) return m.parseV2(acc, hostname, out, timestamp)

View File

@ -111,7 +111,7 @@ func setList(timeout config.Duration, useSudo bool) (*bytes.Buffer, error) {
cmd.Stdout = &out cmd.Stdout = &out
err = internal.RunTimeout(cmd, time.Duration(timeout)) err = internal.RunTimeout(cmd, time.Duration(timeout))
if err != nil { if err != nil {
return &out, fmt.Errorf("error running ipset save: %s", err) return &out, fmt.Errorf("error running ipset save: %w", err)
} }
return &out, nil return &out, nil

View File

@ -35,7 +35,7 @@ func (i *IPVS) Gather(acc telegraf.Accumulator) error {
if i.handle == nil { if i.handle == nil {
h, err := ipvs.New("") // TODO: make the namespace configurable h, err := ipvs.New("") // TODO: make the namespace configurable
if err != nil { if err != nil {
return fmt.Errorf("unable to open IPVS handle: %v", err) return fmt.Errorf("unable to open IPVS handle: %w", err)
} }
i.handle = h i.handle = h
} }
@ -44,7 +44,7 @@ func (i *IPVS) Gather(acc telegraf.Accumulator) error {
if err != nil { if err != nil {
i.handle.Close() i.handle.Close()
i.handle = nil // trigger a reopen on next call to gather i.handle = nil // trigger a reopen on next call to gather
return fmt.Errorf("failed to list IPVS services: %v", err) return fmt.Errorf("failed to list IPVS services: %w", err)
} }
for _, s := range services { for _, s := range services {
fields := map[string]interface{}{ fields := map[string]interface{}{

View File

@ -84,7 +84,7 @@ func (j *Jenkins) Gather(acc telegraf.Accumulator) error {
func (j *Jenkins) newHTTPClient() (*http.Client, error) { func (j *Jenkins) newHTTPClient() (*http.Client, error) {
tlsCfg, err := j.ClientConfig.TLSConfig() tlsCfg, err := j.ClientConfig.TLSConfig()
if err != nil { if err != nil {
return nil, fmt.Errorf("error parse jenkins config[%s]: %v", j.URL, err) return nil, fmt.Errorf("error parse jenkins config %q: %w", j.URL, err)
} }
return &http.Client{ return &http.Client{
Transport: &http.Transport{ Transport: &http.Transport{
@ -118,11 +118,11 @@ func (j *Jenkins) initialize(client *http.Client) error {
// init filters // init filters
j.jobFilter, err = filter.NewIncludeExcludeFilter(j.JobInclude, j.JobExclude) j.jobFilter, err = filter.NewIncludeExcludeFilter(j.JobInclude, j.JobExclude)
if err != nil { if err != nil {
return fmt.Errorf("error compiling job filters[%s]: %v", j.URL, err) return fmt.Errorf("error compiling job filters %q: %w", j.URL, err)
} }
j.nodeFilter, err = filter.NewIncludeExcludeFilter(j.NodeInclude, j.NodeExclude) j.nodeFilter, err = filter.NewIncludeExcludeFilter(j.NodeInclude, j.NodeExclude)
if err != nil { if err != nil {
return fmt.Errorf("error compiling node filters[%s]: %v", j.URL, err) return fmt.Errorf("error compiling node filters %q: %w", j.URL, err)
} }
// init tcp pool with default value // init tcp pool with default value

View File

@ -91,7 +91,7 @@ func (j *Jolokia) doRequest(req *http.Request) ([]map[string]interface{}, error)
// Unmarshal json // Unmarshal json
var jsonOut []map[string]interface{} var jsonOut []map[string]interface{}
if err = json.Unmarshal(body, &jsonOut); err != nil { if err = json.Unmarshal(body, &jsonOut); err != nil {
return nil, fmt.Errorf("error decoding JSON response: %s: %s", err, body) return nil, fmt.Errorf("error decoding JSON response %q: %w", body, err)
} }
return jsonOut, nil return jsonOut, nil
@ -216,12 +216,12 @@ func (j *Jolokia) Gather(acc telegraf.Accumulator) error {
req, err := j.prepareRequest(server, metrics) req, err := j.prepareRequest(server, metrics)
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("unable to create request: %s", err)) acc.AddError(fmt.Errorf("unable to create request: %w", err))
continue continue
} }
out, err := j.doRequest(req) out, err := j.doRequest(req)
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("error performing request: %s", err)) acc.AddError(fmt.Errorf("error performing request: %w", err))
continue continue
} }

View File

@ -51,7 +51,7 @@ func (ja *JolokiaAgent) Gather(acc telegraf.Accumulator) error {
for _, url := range ja.URLs { for _, url := range ja.URLs {
client, err := ja.createClient(url) client, err := ja.createClient(url)
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("unable to create client for %s: %v", url, err)) acc.AddError(fmt.Errorf("unable to create client for %q: %w", url, err))
continue continue
} }
ja.clients = append(ja.clients, client) ja.clients = append(ja.clients, client)
@ -67,7 +67,7 @@ func (ja *JolokiaAgent) Gather(acc telegraf.Accumulator) error {
err := ja.gatherer.Gather(client, acc) err := ja.gatherer.Gather(client, acc)
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("unable to gather metrics for %s: %v", client.URL, err)) acc.AddError(fmt.Errorf("unable to gather metrics for %q: %w", client.URL, err))
} }
}(client) }(client)
} }

View File

@ -256,8 +256,7 @@ func (m *OpenConfigTelemetry) collectData(
rpcStatus, _ := status.FromError(err) rpcStatus, _ := status.FromError(err)
// If service is currently unavailable and may come back later, retry // If service is currently unavailable and may come back later, retry
if rpcStatus.Code() != codes.Unavailable { if rpcStatus.Code() != codes.Unavailable {
acc.AddError(fmt.Errorf("could not subscribe to %s: %v", grpcServer, acc.AddError(fmt.Errorf("could not subscribe to %q: %w", grpcServer, err))
err))
return return
} }
@ -274,7 +273,7 @@ func (m *OpenConfigTelemetry) collectData(
if err != nil { if err != nil {
// If we encounter error in the stream, break so we can retry // If we encounter error in the stream, break so we can retry
// the connection // the connection
acc.AddError(fmt.Errorf("failed to read from %s: %s", grpcServer, err)) acc.AddError(fmt.Errorf("failed to read from %q: %w", grpcServer, err))
break break
} }

View File

@ -114,7 +114,7 @@ func (k *Kafka) receiver() {
return return
case err := <-k.errs: case err := <-k.errs:
if err != nil { if err != nil {
k.acc.AddError(fmt.Errorf("consumer Error: %s", err)) k.acc.AddError(fmt.Errorf("consumer error: %w", err))
} }
case msg := <-k.in: case msg := <-k.in:
if k.MaxMessageLen != 0 && len(msg.Value) > k.MaxMessageLen { if k.MaxMessageLen != 0 && len(msg.Value) > k.MaxMessageLen {
@ -123,8 +123,7 @@ func (k *Kafka) receiver() {
} else { } else {
metrics, err := k.parser.Parse(msg.Value) metrics, err := k.parser.Parse(msg.Value)
if err != nil { if err != nil {
k.acc.AddError(fmt.Errorf("Message Parse Error\nmessage: %s\nerror: %s", k.acc.AddError(fmt.Errorf("error during parsing message %q: %w", string(msg.Value), err))
string(msg.Value), err.Error()))
} }
for _, metric := range metrics { for _, metric := range metrics {
k.acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time()) k.acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time())
@ -138,7 +137,7 @@ func (k *Kafka) receiver() {
err := k.Consumer.CommitUpto(msg) err := k.Consumer.CommitUpto(msg)
k.Unlock() k.Unlock()
if err != nil { if err != nil {
k.acc.AddError(fmt.Errorf("committing to consumer failed: %v", err)) k.acc.AddError(fmt.Errorf("committing to consumer failed: %w", err))
} }
} }
} }
@ -150,7 +149,7 @@ func (k *Kafka) Stop() {
defer k.Unlock() defer k.Unlock()
close(k.done) close(k.done)
if err := k.Consumer.Close(); err != nil { if err := k.Consumer.Close(); err != nil {
k.acc.AddError(fmt.Errorf("error closing consumer: %s", err.Error())) k.acc.AddError(fmt.Errorf("error closing consumer: %w", err))
} }
} }

View File

@ -49,7 +49,7 @@ func (k *Kapacitor) Gather(acc telegraf.Accumulator) error {
go func(url string) { go func(url string) {
defer wg.Done() defer wg.Done()
if err := k.gatherURL(acc, url); err != nil { if err := k.gatherURL(acc, url); err != nil {
acc.AddError(fmt.Errorf("[url=%s]: %s", url, err)) acc.AddError(fmt.Errorf("[url=%s]: %w", url, err))
} }
}(u) }(u)
} }

View File

@ -136,7 +136,7 @@ func (k *Kibana) Gather(acc telegraf.Accumulator) error {
go func(baseUrl string, acc telegraf.Accumulator) { go func(baseUrl string, acc telegraf.Accumulator) {
defer wg.Done() defer wg.Done()
if err := k.gatherKibanaStatus(baseUrl, acc); err != nil { if err := k.gatherKibanaStatus(baseUrl, acc); err != nil {
acc.AddError(fmt.Errorf("[url=%s]: %s", baseUrl, err)) acc.AddError(fmt.Errorf("[url=%s]: %w", baseUrl, err))
return return
} }
}(serv, acc) }(serv, acc)

View File

@ -265,7 +265,7 @@ func (k *Kubernetes) LoadJSON(url string, v interface{}) error {
req.Header.Add("Accept", "application/json") req.Header.Add("Accept", "application/json")
resp, err = k.RoundTripper.RoundTrip(req) resp, err = k.RoundTripper.RoundTrip(req)
if err != nil { if err != nil {
return fmt.Errorf("error making HTTP request to %s: %s", url, err) return fmt.Errorf("error making HTTP request to %q: %w", url, err)
} }
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
@ -274,7 +274,7 @@ func (k *Kubernetes) LoadJSON(url string, v interface{}) error {
err = json.NewDecoder(resp.Body).Decode(v) err = json.NewDecoder(resp.Body).Decode(v)
if err != nil { if err != nil {
return fmt.Errorf(`Error parsing response: %s`, err) return fmt.Errorf("error parsing response: %w", err)
} }
return nil return nil

View File

@ -227,7 +227,7 @@ func (l *LeoFS) gatherServer(
} }
fVal, err := strconv.ParseFloat(val, 64) fVal, err := strconv.ParseFloat(val, 64)
if err != nil { if err != nil {
return fmt.Errorf("Unable to parse the value:%s, err:%s", val, err) return fmt.Errorf("unable to parse the value %q: %w", val, err)
} }
fields[key] = fVal fields[key] = fVal
i++ i++

View File

@ -187,9 +187,9 @@ func (l *Libvirt) Gather(acc telegraf.Accumulator) error {
func handleError(err error, errMessage string, utils utils) error { func handleError(err error, errMessage string, utils utils) error {
if err != nil { if err != nil {
if chanErr := utils.Disconnect(); chanErr != nil { if chanErr := utils.Disconnect(); chanErr != nil {
return fmt.Errorf("%s: %v; error occurred when disconnecting: %v", errMessage, err, chanErr) return fmt.Errorf("%s: %w; error occurred when disconnecting: %w", errMessage, err, chanErr)
} }
return fmt.Errorf("%s: %v", errMessage, err) return fmt.Errorf("%s: %w", errMessage, err)
} }
return nil return nil
} }
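handleError now uses %w for both the original error and the disconnect error; since Go 1.20 fmt.Errorf accepts multiple %w verbs and errors.Is matches each wrapped value. A minimal sketch with hypothetical sentinels:

```go
package main

import (
	"errors"
	"fmt"
)

// Hypothetical sentinels for this sketch only.
var (
	errGather     = errors.New("gather failed")
	errDisconnect = errors.New("disconnect failed")
)

func main() {
	// Go 1.20+: fmt.Errorf may wrap more than one error with multiple %w verbs.
	err := fmt.Errorf("%s: %w; error occurred when disconnecting: %w",
		"collecting domain stats", errGather, errDisconnect)

	fmt.Println(errors.Is(err, errGather))     // true
	fmt.Println(errors.Is(err, errDisconnect)) // true
}
```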

View File

@ -5,6 +5,7 @@ package linux_cpu
import ( import (
_ "embed" _ "embed"
"errors"
"fmt" "fmt"
"io" "io"
"os" "os"
@ -187,7 +188,7 @@ func validatePath(propPath string) error {
} }
if err != nil { if err != nil {
return fmt.Errorf("cannot get system information for CPU property: [%s] - %v", propPath, err) return fmt.Errorf("cannot get system information for CPU property %q: %w", propPath, err)
} }
_ = f.Close() // File is not written to, closing should be safe _ = f.Close() // File is not written to, closing should be safe
@ -204,10 +205,10 @@ func readUintFromFile(propPath string) (uint64, error) {
buffer := make([]byte, 22) buffer := make([]byte, 22)
n, err := f.Read(buffer) n, err := f.Read(buffer)
if err != nil && err != io.EOF { if err != nil && !errors.Is(err, io.EOF) {
return 0, fmt.Errorf("error on reading file, err: %v", err) return 0, fmt.Errorf("error on reading file: %w", err)
} else if n == 0 { } else if n == 0 {
return 0, fmt.Errorf("error on reading file, file is empty") return 0, fmt.Errorf("error on reading file: file is empty")
} }
return strconv.ParseUint(string(buffer[:n-1]), 10, 64) return strconv.ParseUint(string(buffer[:n-1]), 10, 64)

View File

@ -124,7 +124,7 @@ func (*Logstash) SampleConfig() string {
func (logstash *Logstash) Init() error { func (logstash *Logstash) Init() error {
err := choice.CheckSlice(logstash.Collect, []string{"pipelines", "process", "jvm"}) err := choice.CheckSlice(logstash.Collect, []string{"pipelines", "process", "jvm"})
if err != nil { if err != nil {
return fmt.Errorf(`cannot verify "collect" setting: %v`, err) return fmt.Errorf(`cannot verify "collect" setting: %w`, err)
} }
return nil return nil
} }

View File

@ -62,7 +62,7 @@ func (lvm *LVM) gatherPhysicalVolumes(acc telegraf.Accumulator) error {
var report pvsReport var report pvsReport
err = json.Unmarshal(out, &report) err = json.Unmarshal(out, &report)
if err != nil { if err != nil {
return fmt.Errorf("failed to unmarshal physical volume JSON: %s", err) return fmt.Errorf("failed to unmarshal physical volume JSON: %w", err)
} }
if len(report.Report) > 0 { if len(report.Report) > 0 {
@ -116,7 +116,7 @@ func (lvm *LVM) gatherVolumeGroups(acc telegraf.Accumulator) error {
var report vgsReport var report vgsReport
err = json.Unmarshal(out, &report) err = json.Unmarshal(out, &report)
if err != nil { if err != nil {
return fmt.Errorf("failed to unmarshal vol group JSON: %s", err) return fmt.Errorf("failed to unmarshal vol group JSON: %w", err)
} }
if len(report.Report) > 0 { if len(report.Report) > 0 {
@ -179,7 +179,7 @@ func (lvm *LVM) gatherLogicalVolumes(acc telegraf.Accumulator) error {
var report lvsReport var report lvsReport
err = json.Unmarshal(out, &report) err = json.Unmarshal(out, &report)
if err != nil { if err != nil {
return fmt.Errorf("failed to unmarshal logical vol JSON: %s", err) return fmt.Errorf("failed to unmarshal logical vol JSON: %w", err)
} }
if len(report.Report) > 0 { if len(report.Report) > 0 {
@ -234,8 +234,7 @@ func (lvm *LVM) runCmd(cmd string, args []string) ([]byte, error) {
out, err := internal.StdOutputTimeout(execCmd, 5*time.Second) out, err := internal.StdOutputTimeout(execCmd, 5*time.Second)
if err != nil { if err != nil {
return nil, fmt.Errorf( return nil, fmt.Errorf(
"failed to run command %s: %s - %s", "failed to run command %s: %w - %s", strings.Join(execCmd.Args, " "), err, string(out),
strings.Join(execCmd.Args, " "), err, string(out),
) )
} }
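
The runCmd change above keeps the captured output as plain text while wrapping the exec error with %w, so callers can still inspect the failure type. A minimal sketch, assuming a Unix system where the `false` command exists; this is not the plugin's real invocation:

package main

import (
    "errors"
    "fmt"
    "os/exec"
    "strings"
)

func runCmd(name string, args ...string) ([]byte, error) {
    execCmd := exec.Command(name, args...)
    out, err := execCmd.Output()
    if err != nil {
        return nil, fmt.Errorf("failed to run command %s: %w - %s",
            strings.Join(execCmd.Args, " "), err, string(out))
    }
    return out, nil
}

func main() {
    _, err := runCmd("false")
    var exitErr *exec.ExitError
    fmt.Println(errors.As(err, &exitErr)) // true: the exit error survives the wrap
}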

View File

@ -133,7 +133,7 @@ func (c *Marklogic) Gather(accumulator telegraf.Accumulator) error {
go func(serv string) { go func(serv string) {
defer wg.Done() defer wg.Done()
if err := c.fetchAndInsertData(accumulator, serv); err != nil { if err := c.fetchAndInsertData(accumulator, serv); err != nil {
accumulator.AddError(fmt.Errorf("[host=%s]: %s", serv, err)) accumulator.AddError(fmt.Errorf("[host=%s]: %w", serv, err))
} }
}(serv) }(serv)
} }

View File

@ -31,7 +31,7 @@ func (ms *MemStats) Init() error {
func (ms *MemStats) Gather(acc telegraf.Accumulator) error { func (ms *MemStats) Gather(acc telegraf.Accumulator) error {
vm, err := ms.ps.VMStat() vm, err := ms.ps.VMStat()
if err != nil { if err != nil {
return fmt.Errorf("error getting virtual memory info: %s", err) return fmt.Errorf("error getting virtual memory info: %w", err)
} }
fields := map[string]interface{}{ fields := map[string]interface{}{

View File

@ -110,7 +110,7 @@ func (c *ConfigurationOriginal) initFields(fieldDefs []fieldDefinition) ([]field
for _, def := range fieldDefs { for _, def := range fieldDefs {
f, err := c.newFieldFromDefinition(def) f, err := c.newFieldFromDefinition(def)
if err != nil { if err != nil {
return nil, fmt.Errorf("initializing field %q failed: %v", def.Name, err) return nil, fmt.Errorf("initializing field %q failed: %w", def.Name, err)
} }
fields = append(fields, f) fields = append(fields, f)
} }

View File

@ -139,7 +139,7 @@ func (c *ConfigurationPerRequest) Check() error {
// Check for duplicate field definitions // Check for duplicate field definitions
id, err := c.fieldID(seed, def, f) id, err := c.fieldID(seed, def, f)
if err != nil { if err != nil {
return fmt.Errorf("cannot determine field id for %q: %v", f.Name, err) return fmt.Errorf("cannot determine field id for %q: %w", f.Name, err)
} }
if seenFields[id] { if seenFields[id] {
return fmt.Errorf("field %q duplicated in measurement %q (slave %d/%q)", f.Name, f.Measurement, def.SlaveID, def.RegisterType) return fmt.Errorf("field %q duplicated in measurement %q (slave %d/%q)", f.Name, f.Measurement, def.SlaveID, def.RegisterType)
@ -230,7 +230,7 @@ func (c *ConfigurationPerRequest) initFields(fieldDefs []requestFieldDefinition,
for _, def := range fieldDefs { for _, def := range fieldDefs {
f, err := c.newFieldFromDefinition(def, typed, byteOrder) f, err := c.newFieldFromDefinition(def, typed, byteOrder)
if err != nil { if err != nil {
return nil, fmt.Errorf("initializing field %q failed: %v", def.Name, err) return nil, fmt.Errorf("initializing field %q failed: %w", def.Name, err)
} }
fields = append(fields, f) fields = append(fields, f)
} }

View File

@ -3,6 +3,7 @@ package modbus
import ( import (
_ "embed" _ "embed"
"errors"
"fmt" "fmt"
"net" "net"
"net/url" "net/url"
@ -135,18 +136,18 @@ func (m *Modbus) Init() error {
// Check and process the configuration // Check and process the configuration
if err := cfg.Check(); err != nil { if err := cfg.Check(); err != nil {
return fmt.Errorf("configuration invalid: %v", err) return fmt.Errorf("configuration invalid: %w", err)
} }
r, err := cfg.Process() r, err := cfg.Process()
if err != nil { if err != nil {
return fmt.Errorf("cannot process configuration: %v", err) return fmt.Errorf("cannot process configuration: %w", err)
} }
m.requests = r m.requests = r
// Setup client // Setup client
if err := m.initClient(); err != nil { if err := m.initClient(); err != nil {
return fmt.Errorf("initializing client failed: %v", err) return fmt.Errorf("initializing client failed: %w", err)
} }
for slaveID, rqs := range m.requests { for slaveID, rqs := range m.requests {
var nHoldingRegs, nInputsRegs, nDiscreteRegs, nCoilRegs uint16 var nHoldingRegs, nInputsRegs, nDiscreteRegs, nCoilRegs uint16
@ -192,8 +193,8 @@ func (m *Modbus) Gather(acc telegraf.Accumulator) error {
m.Log.Debugf("Reading slave %d for %s...", slaveID, m.Controller) m.Log.Debugf("Reading slave %d for %s...", slaveID, m.Controller)
if err := m.readSlaveData(slaveID, requests); err != nil { if err := m.readSlaveData(slaveID, requests); err != nil {
acc.AddError(fmt.Errorf("slave %d: %w", slaveID, err)) acc.AddError(fmt.Errorf("slave %d: %w", slaveID, err))
mberr, ok := err.(*mb.Error) var mbErr *mb.Error
if !ok || mberr.ExceptionCode != mb.ExceptionCodeServerDeviceBusy { if !errors.As(err, &mbErr) || mbErr.ExceptionCode != mb.ExceptionCodeServerDeviceBusy {
m.Log.Debugf("Reconnecting to %s...", m.Controller) m.Log.Debugf("Reconnecting to %s...", m.Controller)
if err := m.disconnect(); err != nil { if err := m.disconnect(); err != nil {
return fmt.Errorf("disconnecting failed: %w", err) return fmt.Errorf("disconnecting failed: %w", err)
@ -337,8 +338,8 @@ func (m *Modbus) readSlaveData(slaveID byte, requests requestSet) error {
} }
// Exit in case a non-recoverable error occurred // Exit in case a non-recoverable error occurred
mberr, ok := err.(*mb.Error) var mbErr *mb.Error
if !ok || mberr.ExceptionCode != mb.ExceptionCodeServerDeviceBusy { if !errors.As(err, &mbErr) || mbErr.ExceptionCode != mb.ExceptionCodeServerDeviceBusy {
return err return err
} }
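
The two modbus hunks above replace type assertions on the error with errors.As, which also finds the target type when the error has been wrapped further up. A minimal sketch using *fs.PathError from the standard library as a stand-in for *mb.Error (function and path are made up):

package main

import (
    "errors"
    "fmt"
    "io/fs"
    "os"
)

func readDeviceData(path string) error {
    if _, err := os.Open(path); err != nil {
        return fmt.Errorf("device request failed: %w", err)
    }
    return nil
}

func main() {
    err := readDeviceData("/no/such/register-map")

    var pathErr *fs.PathError
    // A plain assertion err.(*fs.PathError) would report false here,
    // because the PathError sits one level down in the wrap chain.
    if errors.As(err, &pathErr) {
        fmt.Println("op:", pathErr.Op, "path:", pathErr.Path)
    }
}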

View File

@ -255,7 +255,7 @@ func (s *Server) gatherCollectionStats(colStatsDbs []string) (*ColStats, error)
}, },
}, colStatLine) }, colStatLine)
if err != nil { if err != nil {
s.authLog(fmt.Errorf("error getting col stats from %q: %v", colName, err)) s.authLog(fmt.Errorf("error getting col stats from %q: %w", colName, err))
continue continue
} }
collection := &Collection{ collection := &Collection{
@ -296,7 +296,7 @@ func (s *Server) gatherData(
if replSetStatus != nil { if replSetStatus != nil {
oplogStats, err = s.gatherOplogStats() oplogStats, err = s.gatherOplogStats()
if err != nil { if err != nil {
s.authLog(fmt.Errorf("Unable to get oplog stats: %v", err)) s.authLog(fmt.Errorf("unable to get oplog stats: %w", err))
} }
} }
@ -311,7 +311,7 @@ func (s *Server) gatherData(
shardStats, err := s.gatherShardConnPoolStats(serverStatus.Version) shardStats, err := s.gatherShardConnPoolStats(serverStatus.Version)
if err != nil { if err != nil {
s.authLog(fmt.Errorf("unable to gather shard connection pool stats: %s", err.Error())) s.authLog(fmt.Errorf("unable to gather shard connection pool stats: %w", err))
} }
var collectionStats *ColStats var collectionStats *ColStats

View File

@ -235,7 +235,7 @@ func (m *Monit) Gather(acc telegraf.Accumulator) error {
decoder := xml.NewDecoder(resp.Body) decoder := xml.NewDecoder(resp.Body)
decoder.CharsetReader = charset.NewReaderLabel decoder.CharsetReader = charset.NewReaderLabel
if err := decoder.Decode(&status); err != nil { if err := decoder.Decode(&status); err != nil {
return fmt.Errorf("error parsing input: %v", err) return fmt.Errorf("error parsing input: %w", err)
} }
tags := map[string]string{ tags := map[string]string{

View File

@ -579,8 +579,9 @@ func TestConnection(t *testing.T) {
err := r.Gather(&acc) err := r.Gather(&acc)
require.Error(t, err) require.Error(t, err)
_, ok := err.(*url.Error)
require.True(t, ok) var urlErr *url.Error
require.ErrorAs(t, err, &urlErr)
} }
func TestInvalidUsernameOrPassword(t *testing.T) { func TestInvalidUsernameOrPassword(t *testing.T) {
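
The test change above swaps the manual type assertion for testify's require.ErrorAs, which delegates to errors.As and therefore also passes when the *url.Error arrives wrapped. A minimal test sketch, assuming github.com/stretchr/testify is available; the unreachable address is a placeholder:

package example

import (
    "net/http"
    "net/url"
    "testing"

    "github.com/stretchr/testify/require"
)

func TestConnectionRefused(t *testing.T) {
    _, err := http.Get("http://127.0.0.1:1/") // nothing should be listening here
    require.Error(t, err)

    var urlErr *url.Error
    require.ErrorAs(t, err, &urlErr)
}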

View File

@ -193,15 +193,14 @@ func (m *MQTTConsumer) connect() error {
subscribeToken := m.client.SubscribeMultiple(topics, m.recvMessage) subscribeToken := m.client.SubscribeMultiple(topics, m.recvMessage)
subscribeToken.Wait() subscribeToken.Wait()
if subscribeToken.Error() != nil { if subscribeToken.Error() != nil {
m.acc.AddError(fmt.Errorf("subscription error: topics: %s: %v", m.acc.AddError(fmt.Errorf("subscription error: topics %q: %w", strings.Join(m.Topics[:], ","), subscribeToken.Error()))
strings.Join(m.Topics[:], ","), subscribeToken.Error()))
} }
return nil return nil
} }
func (m *MQTTConsumer) onConnectionLost(_ mqtt.Client, err error) { func (m *MQTTConsumer) onConnectionLost(_ mqtt.Client, err error) {
// Should already be disconnected, but make doubly sure // Should already be disconnected, but make doubly sure
m.client.Disconnect(5) m.client.Disconnect(5)
m.acc.AddError(fmt.Errorf("connection lost: %v", err)) m.acc.AddError(fmt.Errorf("connection lost: %w", err))
m.Log.Debugf("Disconnected %v", m.Servers) m.Log.Debugf("Disconnected %v", m.Servers)
m.state = Disconnected m.state = Disconnected
} }

View File

@ -4,6 +4,7 @@ package mysql
import ( import (
"database/sql" "database/sql"
_ "embed" _ "embed"
"errors"
"fmt" "fmt"
"strconv" "strconv"
"strings" "strings"
@ -93,7 +94,7 @@ func (m *Mysql) Init() error {
tlsid := "custom-" + tlsuuid.String() tlsid := "custom-" + tlsuuid.String()
tlsConfig, err := m.ClientConfig.TLSConfig() tlsConfig, err := m.ClientConfig.TLSConfig()
if err != nil { if err != nil {
return fmt.Errorf("registering TLS config: %s", err) return fmt.Errorf("registering TLS config: %w", err)
} }
if tlsConfig != nil { if tlsConfig != nil {
if err := mysql.RegisterTLSConfig(tlsid, tlsConfig); err != nil { if err := mysql.RegisterTLSConfig(tlsid, tlsConfig); err != nil {
@ -571,7 +572,7 @@ func (m *Mysql) gatherGlobalVariables(db *sql.DB, servtag string, acc telegraf.A
value, err := m.parseGlobalVariables(key, val) value, err := m.parseGlobalVariables(key, val)
if err != nil { if err != nil {
errString := fmt.Errorf("error parsing mysql global variable %q=%q: %v", key, string(val), err) errString := fmt.Errorf("error parsing mysql global variable %q=%q: %w", key, string(val), err)
if m.MetricVersion < 2 { if m.MetricVersion < 2 {
m.Log.Debug(errString) m.Log.Debug(errString)
} else { } else {
@ -668,7 +669,7 @@ func (m *Mysql) gatherSlaveStatuses(db *sql.DB, servtag string, acc telegraf.Acc
value, err := m.parseValueByDatabaseTypeName(colValue, col.DatabaseTypeName()) value, err := m.parseValueByDatabaseTypeName(colValue, col.DatabaseTypeName())
if err != nil { if err != nil {
errString := fmt.Errorf("error parsing mysql slave status %q=%q: %v", colName, string(colValue), err) errString := fmt.Errorf("error parsing mysql slave status %q=%q: %w", colName, string(colValue), err)
if m.MetricVersion < 2 { if m.MetricVersion < 2 {
m.Log.Debug(errString) m.Log.Debug(errString)
} else { } else {
@ -787,42 +788,42 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, servtag string, acc telegraf.Ac
case "Queries": case "Queries":
i, err := strconv.ParseInt(string(val), 10, 64) i, err := strconv.ParseInt(string(val), 10, 64)
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err)) acc.AddError(fmt.Errorf("error parsing mysql %q int value: %w", key, err))
} else { } else {
fields["queries"] = i fields["queries"] = i
} }
case "Questions": case "Questions":
i, err := strconv.ParseInt(string(val), 10, 64) i, err := strconv.ParseInt(string(val), 10, 64)
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err)) acc.AddError(fmt.Errorf("error parsing mysql %q int value: %w", key, err))
} else { } else {
fields["questions"] = i fields["questions"] = i
} }
case "Slow_queries": case "Slow_queries":
i, err := strconv.ParseInt(string(val), 10, 64) i, err := strconv.ParseInt(string(val), 10, 64)
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err)) acc.AddError(fmt.Errorf("error parsing mysql %q int value: %w", key, err))
} else { } else {
fields["slow_queries"] = i fields["slow_queries"] = i
} }
case "Connections": case "Connections":
i, err := strconv.ParseInt(string(val), 10, 64) i, err := strconv.ParseInt(string(val), 10, 64)
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err)) acc.AddError(fmt.Errorf("error parsing mysql %q int value: %w", key, err))
} else { } else {
fields["connections"] = i fields["connections"] = i
} }
case "Syncs": case "Syncs":
i, err := strconv.ParseInt(string(val), 10, 64) i, err := strconv.ParseInt(string(val), 10, 64)
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err)) acc.AddError(fmt.Errorf("error parsing mysql %q int value: %w", key, err))
} else { } else {
fields["syncs"] = i fields["syncs"] = i
} }
case "Uptime": case "Uptime":
i, err := strconv.ParseInt(string(val), 10, 64) i, err := strconv.ParseInt(string(val), 10, 64)
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err)) acc.AddError(fmt.Errorf("error parsing mysql %q int value: %w", key, err))
} else { } else {
fields["uptime"] = i fields["uptime"] = i
} }
@ -831,7 +832,7 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, servtag string, acc telegraf.Ac
key = strings.ToLower(key) key = strings.ToLower(key)
value, err := v2.ConvertGlobalStatus(key, val) value, err := v2.ConvertGlobalStatus(key, val)
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("error parsing mysql global status %q=%q: %v", key, string(val), err)) acc.AddError(fmt.Errorf("error parsing mysql global status %q=%q: %w", key, string(val), err))
} else { } else {
fields[key] = value fields[key] = value
} }
@ -1304,7 +1305,7 @@ func (m *Mysql) gatherInnoDBMetrics(db *sql.DB, servtag string, acc telegraf.Acc
key = strings.ToLower(key) key = strings.ToLower(key)
value, err := m.parseValueByDatabaseTypeName(val, "BIGINT") value, err := m.parseValueByDatabaseTypeName(val, "BIGINT")
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("error parsing mysql InnoDB metric %q=%q: %v", key, string(val), err)) acc.AddError(fmt.Errorf("error parsing mysql InnoDB metric %q=%q: %w", key, string(val), err))
continue continue
} }
@ -1469,7 +1470,7 @@ func (m *Mysql) gatherPerfTableLockWaits(db *sql.DB, servtag string, acc telegra
var tableName string var tableName string
err := db.QueryRow(perfSchemaTablesQuery, "table_lock_waits_summary_by_table").Scan(&tableName) err := db.QueryRow(perfSchemaTablesQuery, "table_lock_waits_summary_by_table").Scan(&tableName)
switch { switch {
case err == sql.ErrNoRows: case errors.Is(err, sql.ErrNoRows):
return nil return nil
case err != nil: case err != nil:
return err return err
@ -1694,7 +1695,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, servtag string, acc teleg
var ( var (
schemaName, digest, digestText string schemaName, digest, digestText string
count, queryTime, errors, warnings float64 count, queryTime, errs, warnings float64
rowsAffected, rowsSent, rowsExamined float64 rowsAffected, rowsSent, rowsExamined float64
tmpTables, tmpDiskTables float64 tmpTables, tmpDiskTables float64
sortMergePasses, sortRows float64 sortMergePasses, sortRows float64
@ -1708,7 +1709,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, servtag string, acc teleg
for rows.Next() { for rows.Next() {
err = rows.Scan( err = rows.Scan(
&schemaName, &digest, &digestText, &schemaName, &digest, &digestText,
&count, &queryTime, &errors, &warnings, &count, &queryTime, &errs, &warnings,
&rowsAffected, &rowsSent, &rowsExamined, &rowsAffected, &rowsSent, &rowsExamined,
&tmpTables, &tmpDiskTables, &tmpTables, &tmpDiskTables,
&sortMergePasses, &sortRows, &sortMergePasses, &sortRows,
@ -1725,7 +1726,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, servtag string, acc teleg
fields := map[string]interface{}{ fields := map[string]interface{}{
"events_statements_total": count, "events_statements_total": count,
"events_statements_seconds_total": queryTime / picoSeconds, "events_statements_seconds_total": queryTime / picoSeconds,
"events_statements_errors_total": errors, "events_statements_errors_total": errs,
"events_statements_warnings_total": warnings, "events_statements_warnings_total": warnings,
"events_statements_rows_affected_total": rowsAffected, "events_statements_rows_affected_total": rowsAffected,
"events_statements_rows_sent_total": rowsSent, "events_statements_rows_sent_total": rowsSent,