chore: Fix linter findings for errorlint (part5) (#12731)
Co-authored-by: Pawel Zak <Pawel Zak>
parent 3f3885a2b0
commit 5b2346dfa0
@@ -50,7 +50,7 @@ func (f *Fail2ban) Init() error {
 if f.path == "" {
 path, err := exec.LookPath(cmd)
 if err != nil {
-return fmt.Errorf("looking up %q failed: %v", cmd, err)
+return fmt.Errorf("looking up %q failed: %w", cmd, err)
 }
 f.path = path
 }
@@ -81,7 +81,7 @@ func (f *Fail2ban) Gather(acc telegraf.Accumulator) error {
 cmd := execCommand(name, args...)
 out, err := cmd.Output()
 if err != nil {
-return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
+return fmt.Errorf("failed to run command %q: %w - %s", strings.Join(cmd.Args, " "), err, string(out))
 }
 lines := strings.Split(string(out), "\n")
 const targetString = "Jail list:"
@@ -102,7 +102,7 @@ func (f *Fail2ban) Gather(acc telegraf.Accumulator) error {
 cmd := execCommand(name, args...)
 out, err := cmd.Output()
 if err != nil {
-return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
+return fmt.Errorf("failed to run command %q: %w - %s", strings.Join(cmd.Args, " "), err, string(out))
 }
 
 lines := strings.Split(string(out), "\n")

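Note (illustrative sketch, not part of the commit): the recurring change in this diff is swapping the `%v`/`%s` verbs for `%w` when an error is included in `fmt.Errorf`. Wrapping with `%w` keeps the original error reachable in the chain, so callers can still match it with `errors.Is`/`errors.As`; `%v` flattens it to text. The file path and helper below are made up for the demonstration.

```go
// Why errorlint prefers %w over %v: the wrapped cause stays inspectable.
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func openConfig(path string) error {
	if _, err := os.Open(path); err != nil {
		// %w preserves the underlying *fs.PathError / ENOENT in the chain.
		return fmt.Errorf("opening config %q failed: %w", path, err)
	}
	return nil
}

func main() {
	err := openConfig("/does/not/exist")
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true: the sentinel cause is still matchable

	var pathErr *fs.PathError
	fmt.Println(errors.As(err, &pathErr)) // true: the typed cause is still reachable
}
```
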
@@ -96,7 +96,7 @@ func (r *Fireboard) Gather(acc telegraf.Accumulator) error {
 // Decode the response JSON into a new stats struct
 var stats []fireboardStats
 if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
-return fmt.Errorf("unable to decode fireboard response: %s", err)
+return fmt.Errorf("unable to decode fireboard response: %w", err)
 }
 // Range over all devices, gathering stats. Returns early in case of any error.
 for _, s := range stats {

@@ -4,6 +4,7 @@ package github
 import (
 "context"
 _ "embed"
+"errors"
 "fmt"
 "net/http"
 "strings"
@@ -148,10 +149,11 @@ func (g *GitHub) Gather(acc telegraf.Accumulator) error {
 }
 
 func (g *GitHub) handleRateLimit(response *githubLib.Response, err error) {
+var rlErr *githubLib.RateLimitError
 if err == nil {
 g.RateLimit.Set(int64(response.Rate.Limit))
 g.RateRemaining.Set(int64(response.Rate.Remaining))
-} else if _, ok := err.(*githubLib.RateLimitError); ok {
+} else if errors.As(err, &rlErr) {
 g.RateLimitErrors.Incr(1)
 }
 }

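Note (illustrative sketch, not part of the commit): the second recurring pattern is replacing a type assertion on an error with `errors.As`, as in `handleRateLimit` above. A type assertion only matches the outermost error; `errors.As` walks the wrap chain. The `rateLimitError` type below is a stand-in for a library type such as `githubLib.RateLimitError`.

```go
// errors.As vs. a direct type assertion on a wrapped error.
package main

import (
	"errors"
	"fmt"
)

// rateLimitError stands in for a library error type; it is not the real one.
type rateLimitError struct{ msg string }

func (e *rateLimitError) Error() string { return e.msg }

func main() {
	err := fmt.Errorf("listing repositories: %w", &rateLimitError{msg: "API rate limit exceeded"})

	_, ok := err.(*rateLimitError) // false: the assertion only sees the wrapper
	fmt.Println(ok)

	var rlErr *rateLimitError
	fmt.Println(errors.As(err, &rlErr)) // true: errors.As unwraps until it finds a match
}
```
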
@@ -338,11 +338,11 @@ func (s *Subscription) buildAlias(aliases map[string]string) error {
 
 longPath, _, err := handlePath(gnmiLongPath, nil, nil, "")
 if err != nil {
-return fmt.Errorf("handling long-path failed: %v", err)
+return fmt.Errorf("handling long-path failed: %w", err)
 }
 shortPath, _, err := handlePath(gnmiShortPath, nil, nil, "")
 if err != nil {
-return fmt.Errorf("handling short-path failed: %v", err)
+return fmt.Errorf("handling short-path failed: %w", err)
 }
 
 // If the user didn't provide a measurement name, use last path element

@@ -3,6 +3,7 @@ package gnmi
 import (
 "context"
 "crypto/tls"
+"errors"
 "fmt"
 "io"
 "net"
@@ -62,7 +63,7 @@ func (h *handler) subscribeGNMI(ctx context.Context, acc telegraf.Accumulator, t
 
 client, err := grpc.DialContext(ctx, h.address, opts...)
 if err != nil {
-return fmt.Errorf("failed to dial: %v", err)
+return fmt.Errorf("failed to dial: %w", err)
 }
 defer client.Close()
 
@@ -73,7 +74,7 @@ func (h *handler) subscribeGNMI(ctx context.Context, acc telegraf.Accumulator, t
 
 // If io.EOF is returned, the stream may have ended and stream status
 // can be determined by calling Recv.
-if err := subscribeClient.Send(request); err != nil && err != io.EOF {
+if err := subscribeClient.Send(request); err != nil && !errors.Is(err, io.EOF) {
 return fmt.Errorf("failed to send subscription request: %w", err)
 }
 
@@ -82,7 +83,7 @@ func (h *handler) subscribeGNMI(ctx context.Context, acc telegraf.Accumulator, t
 for ctx.Err() == nil {
 var reply *gnmiLib.SubscribeResponse
 if reply, err = subscribeClient.Recv(); err != nil {
-if err != io.EOF && ctx.Err() == nil {
+if !errors.Is(err, io.EOF) && ctx.Err() == nil {
 return fmt.Errorf("aborted gNMI subscription: %w", err)
 }
 break

@@ -144,11 +144,11 @@ func gnmiToFields(name string, updateVal *gnmiLib.TypedValue) (map[string]interf
 fields[name] = value
 } else if jsondata != nil {
 if err := json.Unmarshal(jsondata, &value); err != nil {
-return nil, fmt.Errorf("failed to parse JSON value: %v", err)
+return nil, fmt.Errorf("failed to parse JSON value: %w", err)
 }
 flattener := jsonparser.JSONFlattener{Fields: fields}
 if err := flattener.FullFlattenJSON(name, value, true, true); err != nil {
-return nil, fmt.Errorf("failed to flatten JSON: %v", err)
+return nil, fmt.Errorf("failed to flatten JSON: %w", err)
 }
 }
 return fields, nil

@@ -6,6 +6,7 @@ import (
 "context"
 _ "embed"
 "encoding/json"
+"errors"
 "fmt"
 "io"
 "os"
@@ -82,7 +83,7 @@ func (gcs *GCS) Gather(acc telegraf.Accumulator) error {
 for {
 attrs, err := it.Next()
 
-if err == iterator.Done {
+if errors.Is(err, iterator.Done) {
 gcs.Log.Infof("Iterated all the keys")
 break
 }
@@ -96,8 +97,8 @@ func (gcs *GCS) Gather(acc telegraf.Accumulator) error {
 
 if !gcs.shoudIgnore(name) {
 if err := gcs.processMeasurementsInObject(name, bucket, acc); err != nil {
-gcs.Log.Errorf("Could not process object: %v in bucket: %v", name, bucketName, err)
-acc.AddError(fmt.Errorf("COULD NOT PROCESS OBJECT: %v IN BUCKET: %v", name, err))
+gcs.Log.Errorf("Could not process object %q in bucket %q: %v", name, bucketName, err)
+acc.AddError(fmt.Errorf("COULD NOT PROCESS OBJECT %q IN BUCKET %q: %w", name, bucketName, err))
 }
 }
 

@@ -4,6 +4,7 @@ package haproxy
 import (
 _ "embed"
 "encoding/csv"
+"errors"
 "fmt"
 "io"
 "net"
@@ -85,8 +86,7 @@ func (h *haproxy) Gather(acc telegraf.Accumulator) error {
 }
 
 func (h *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) error {
-var network string
-var address string
+var network, address string
 if strings.HasPrefix(addr, "tcp://") {
 network = "tcp"
 address = strings.TrimPrefix(addr, "tcp://")
@@ -96,15 +96,13 @@ func (h *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) erro
 }
 
 c, err := net.Dial(network, address)
-
 if err != nil {
-return fmt.Errorf("could not connect to '%s://%s': %s", network, address, err)
+return fmt.Errorf("could not connect to '%s://%s': %w", network, address, err)
 }
 
 _, errw := c.Write([]byte("show stat\n"))
-
 if errw != nil {
-return fmt.Errorf("could not write to socket '%s://%s': %s", network, address, errw)
+return fmt.Errorf("could not write to socket '%s://%s': %w", network, address, errw)
 }
 
 return h.importCsvResult(c, acc, address)
@@ -212,7 +210,7 @@ func (h *haproxy) importCsvResult(r io.Reader, acc telegraf.Accumulator, host st
 
 for {
 row, err := csvr.Read()
-if err == io.EOF {
+if errors.Is(err, io.EOF) {
 break
 }
 if err != nil {

@@ -75,7 +75,7 @@ func (h *HTTP) Gather(acc telegraf.Accumulator) error {
 go func(url string) {
 defer wg.Done()
 if err := h.gatherURL(acc, url); err != nil {
-acc.AddError(fmt.Errorf("[url=%s]: %s", url, err))
+acc.AddError(fmt.Errorf("[url=%s]: %w", url, err))
 }
 }(u)
 }
@@ -157,17 +157,17 @@ func (h *HTTP) gatherURL(
 
 b, err := io.ReadAll(resp.Body)
 if err != nil {
-return fmt.Errorf("reading body failed: %v", err)
+return fmt.Errorf("reading body failed: %w", err)
 }
 
 // Instantiate a new parser for the new data to avoid trouble with stateful parsers
 parser, err := h.parserFunc()
 if err != nil {
-return fmt.Errorf("instantiating parser failed: %v", err)
+return fmt.Errorf("instantiating parser failed: %w", err)
 }
 metrics, err := parser.Parse(b)
 if err != nil {
-return fmt.Errorf("parsing metrics failed: %v", err)
+return fmt.Errorf("parsing metrics failed: %w", err)
 }
 
 for _, metric := range metrics {
@@ -187,13 +187,13 @@ func (h *HTTP) setRequestAuth(request *http.Request) error {
 
 username, err := h.Username.Get()
 if err != nil {
-return fmt.Errorf("getting username failed: %v", err)
+return fmt.Errorf("getting username failed: %w", err)
 }
 defer config.ReleaseSecret(username)
 
 password, err := h.Password.Get()
 if err != nil {
-return fmt.Errorf("getting password failed: %v", err)
+return fmt.Errorf("getting password failed: %w", err)
 }
 defer config.ReleaseSecret(password)
 

@@ -65,9 +65,6 @@ type httpClient interface {
 Do(req *http.Request) (*http.Response, error)
 }
 
-// ErrRedirectAttempted indicates that a redirect occurred
-var ErrRedirectAttempted = errors.New("redirect")
-
 // Set the proxy. A configured proxy overwrites the system wide proxy.
 func getProxyFunc(httpProxy string) func(*http.Request) (*url.URL, error) {
 if httpProxy == "" {
@@ -157,27 +154,30 @@ func setResult(resultString string, fields map[string]interface{}, tags map[stri
 }
 
 func setError(err error, fields map[string]interface{}, tags map[string]string) error {
-if timeoutError, ok := err.(net.Error); ok && timeoutError.Timeout() {
+var timeoutError net.Error
+if errors.As(err, &timeoutError) && timeoutError.Timeout() {
 setResult("timeout", fields, tags)
 return timeoutError
 }
 
-urlErr, isURLErr := err.(*url.Error)
-if !isURLErr {
+var urlErr *url.Error
+if !errors.As(err, &urlErr) {
 return nil
 }
 
-opErr, isNetErr := (urlErr.Err).(*net.OpError)
-if isNetErr {
-switch e := (opErr.Err).(type) {
-case *net.DNSError:
+var opErr *net.OpError
+if errors.As(urlErr, &opErr) {
+var dnsErr *net.DNSError
+var parseErr *net.ParseError
+
+if errors.As(opErr, &dnsErr) {
 setResult("dns_error", fields, tags)
-return e
-case *net.ParseError:
+return dnsErr
+} else if errors.As(opErr, &parseErr) {
 // Parse error has to do with parsing of IP addresses, so we
 // group it with address errors
 setResult("address_error", fields, tags)
-return e
+return parseErr
 }
 }
 
@@ -339,7 +339,7 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
 var err error
 h.compiledStringMatch, err = regexp.Compile(h.ResponseStringMatch)
 if err != nil {
-return fmt.Errorf("failed to compile regular expression %s : %s", h.ResponseStringMatch, err)
+return fmt.Errorf("failed to compile regular expression %q: %w", h.ResponseStringMatch, err)
 }
 }
 
@@ -401,12 +401,12 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
 func (h *HTTPResponse) setRequestAuth(request *http.Request) error {
 username, err := h.Username.Get()
 if err != nil {
-return fmt.Errorf("getting username failed: %v", err)
+return fmt.Errorf("getting username failed: %w", err)
 }
 defer config.ReleaseSecret(username)
 password, err := h.Password.Get()
 if err != nil {
-return fmt.Errorf("getting password failed: %v", err)
+return fmt.Errorf("getting password failed: %w", err)
 }
 defer config.ReleaseSecret(password)
 if len(username) != 0 || len(password) != 0 {

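Note (illustrative sketch, not part of the commit): the rewritten `setError` above no longer unwraps `*url.Error` and `*net.OpError` by hand; `errors.As` finds each typed cause anywhere in the chain, so the checks stop depending on the exact nesting. The classifier below is a simplified stand-in for that logic, not the plugin's actual function.

```go
// Pulling typed causes out of an HTTP client error with errors.As.
package main

import (
	"errors"
	"fmt"
	"net"
	"net/http"
)

func classify(err error) string {
	var timeoutErr net.Error
	if errors.As(err, &timeoutErr) && timeoutErr.Timeout() {
		return "timeout"
	}
	var dnsErr *net.DNSError
	if errors.As(err, &dnsErr) {
		return "dns_error"
	}
	var parseErr *net.ParseError
	if errors.As(err, &parseErr) {
		return "address_error"
	}
	return "connection_failed"
}

func main() {
	// http.Get returns a *url.Error wrapping the lower-level network error.
	_, err := http.Get("http://no-such-host.invalid/")
	fmt.Println(classify(err)) // typically "dns_error"
}
```
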
@@ -96,19 +96,19 @@ func (h *Hugepages) Init() error {
 func (h *Hugepages) Gather(acc telegraf.Accumulator) error {
 if h.gatherRoot {
 if err := h.gatherRootStats(acc); err != nil {
-return fmt.Errorf("gathering root stats failed: %v", err)
+return fmt.Errorf("gathering root stats failed: %w", err)
 }
 }
 
 if h.gatherPerNode {
 if err := h.gatherStatsPerNode(acc); err != nil {
-return fmt.Errorf("gathering per node stats failed: %v", err)
+return fmt.Errorf("gathering per node stats failed: %w", err)
 }
 }
 
 if h.gatherMeminfo {
 if err := h.gatherStatsFromMeminfo(acc); err != nil {
-return fmt.Errorf("gathering meminfo stats failed: %v", err)
+return fmt.Errorf("gathering meminfo stats failed: %w", err)
 }
 }
 
@@ -160,7 +160,7 @@ func (h *Hugepages) gatherFromHugepagePath(
 // read metrics from: hugepages/hugepages-*/*
 hugepagesDirs, err := os.ReadDir(path)
 if err != nil {
-return fmt.Errorf("reading root dir failed: %v", err)
+return fmt.Errorf("reading root dir failed: %w", err)
 }
 
 for _, hugepagesDir := range hugepagesDirs {
@@ -177,7 +177,7 @@ func (h *Hugepages) gatherFromHugepagePath(
 metricsPath := filepath.Join(path, hugepagesDir.Name())
 metricFiles, err := os.ReadDir(metricsPath)
 if err != nil {
-return fmt.Errorf("reading metric dir failed: %v", err)
+return fmt.Errorf("reading metric dir failed: %w", err)
 }
 
 metrics := make(map[string]interface{})

@@ -7,6 +7,7 @@ import (
 "crypto/tls"
 _ "embed"
 "encoding/json"
+"errors"
 "fmt"
 "net"
 "net/http"
@@ -152,7 +153,7 @@ func (h *InfluxDBListener) Start(acc telegraf.Accumulator) error {
 
 go func() {
 err = h.server.Serve(h.listener)
-if err != http.ErrServerClosed {
+if !errors.Is(err, http.ErrServerClosed) {
 h.Log.Infof("Error serving HTTP on %s", h.ServiceAddress)
 }
 }()
@@ -286,7 +287,8 @@ func (h *InfluxDBListener) handleWriteInternalParser(res http.ResponseWriter, re
 lastPos = pos
 
 // Continue parsing metrics even if some are malformed
-if parseErr, ok := err.(*influx.ParseError); ok {
+var parseErr *influx.ParseError
+if errors.As(err, &parseErr) {
 parseErrorCount++
 errStr := parseErr.Error()
 if firstParseErrorStr == "" {
@@ -309,7 +311,7 @@ func (h *InfluxDBListener) handleWriteInternalParser(res http.ResponseWriter, re
 
 h.acc.AddMetric(m)
 }
-if err != influx.EOF {
+if !errors.Is(err, influx.EOF) {
 h.Log.Debugf("Error parsing the request body: %v", err.Error())
 if err := badRequest(res, err.Error()); err != nil {
 h.Log.Debugf("error in bad-request: %v", err)
@@ -401,7 +403,8 @@ func (h *InfluxDBListener) handleWriteUpstreamParser(res http.ResponseWriter, re
 m, err = parser.Next()
 
 // Continue parsing metrics even if some are malformed
-if parseErr, ok := err.(*influx_upstream.ParseError); ok {
+var parseErr *influx_upstream.ParseError
+if errors.As(err, &parseErr) {
 parseErrorCount++
 errStr := parseErr.Error()
 if firstParseErrorStr == "" {
@@ -424,7 +427,7 @@ func (h *InfluxDBListener) handleWriteUpstreamParser(res http.ResponseWriter, re
 
 h.acc.AddMetric(m)
 }
-if err != influx_upstream.ErrEOF {
+if !errors.Is(err, influx_upstream.ErrEOF) {
 h.Log.Debugf("Error parsing the request body: %v", err.Error())
 if err := badRequest(res, err.Error()); err != nil {
 h.Log.Debugf("error in bad-request: %v", err)

@@ -152,7 +152,7 @@ func (h *InfluxDBV2Listener) Start(acc telegraf.Accumulator) error {
 
 go func() {
 err = h.server.Serve(h.listener)
-if err != http.ErrServerClosed {
+if !errors.Is(err, http.ErrServerClosed) {
 h.Log.Infof("Error serving HTTP on %s", h.ServiceAddress)
 }
 }()
@@ -250,7 +250,7 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc {
 if h.ParserType == "upstream" {
 parser := influx_upstream.Parser{}
 err = parser.Init()
-if err != ErrEOF && err != nil {
+if !errors.Is(err, ErrEOF) && err != nil {
 h.Log.Debugf("Error initializing parser: %v", err.Error())
 return
 }
@@ -265,7 +265,7 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc {
 } else {
 parser := influx.Parser{}
 err = parser.Init()
-if err != ErrEOF && err != nil {
+if !errors.Is(err, ErrEOF) && err != nil {
 h.Log.Debugf("Error initializing parser: %v", err.Error())
 return
 }
@@ -279,7 +279,7 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc {
 metrics, err = parser.Parse(bytes)
 }
 
-if err != ErrEOF && err != nil {
+if !errors.Is(err, ErrEOF) && err != nil {
 h.Log.Debugf("Error parsing the request body: %v", err.Error())
 if err := badRequest(res, Invalid, err.Error()); err != nil {
 h.Log.Debugf("error in bad-request: %v", err)

@@ -108,7 +108,7 @@ func (d *IntelDLB) Init() error {
 func (d *IntelDLB) Gather(acc telegraf.Accumulator) error {
 err := d.gatherMetricsFromSocket(acc)
 if err != nil {
-socketErr := fmt.Errorf("gathering metrics from socket by given commands failed: %v", err)
+socketErr := fmt.Errorf("gathering metrics from socket by given commands failed: %w", err)
 if d.UnreachableSocketBehavior == "error" {
 return socketErr
 }
@@ -117,7 +117,7 @@ func (d *IntelDLB) Gather(acc telegraf.Accumulator) error {
 
 err = d.gatherRasMetrics(acc)
 if err != nil {
-return fmt.Errorf("gathering RAS metrics failed: %v", err)
+return fmt.Errorf("gathering RAS metrics failed: %w", err)
 }
 
 return nil
@@ -305,7 +305,7 @@ func (d *IntelDLB) setInitMessageLength() error {
 buf := make([]byte, d.maxInitMessageLength)
 messageLength, err := d.connection.Read(buf)
 if err != nil {
-return d.closeSocketAndThrowError("custom", fmt.Errorf("failed to read InitMessage from socket - %v", err))
+return d.closeSocketAndThrowError("custom", fmt.Errorf("failed to read InitMessage from socket: %w", err))
 }
 if messageLength > len(buf) {
 return d.closeSocketAndThrowError("custom", fmt.Errorf("socket reply length is bigger than default buffer length"))
@@ -446,7 +446,7 @@ func checkSocketPath(path string) error {
 }
 
 if err != nil {
-return fmt.Errorf("cannot get system information of '%v' file: %v", path, err)
+return fmt.Errorf("cannot get system information of %q file: %w", path, err)
 }
 
 if pathInfo.Mode()&os.ModeSocket != os.ModeSocket {

@@ -21,7 +21,7 @@ type rasReaderImpl struct {
 func (rasReaderImpl) gatherPaths(pattern string) ([]string, error) {
 filePaths, err := filepath.Glob(pattern)
 if err != nil {
-return nil, fmt.Errorf("glob failed for pattern: %s: %v", pattern, err)
+return nil, fmt.Errorf("glob failed for pattern %q: %w", pattern, err)
 }
 
 if len(filePaths) == 0 {

@@ -96,7 +96,7 @@ func (ea *iaEntitiesActivator) activateCoreEvents(entity *CoreEventEntity) error
 if entity.PerfGroup {
 err := ea.activateCoreEventsGroup(entity)
 if err != nil {
-return fmt.Errorf("failed to activate core events group: %v", err)
+return fmt.Errorf("failed to activate core events group: %w", err)
 }
 } else {
 for _, event := range entity.parsedEvents {
@@ -169,7 +169,7 @@ func (ea *iaEntitiesActivator) activateCoreEventsGroup(entity *CoreEventEntity)
 
 placements, err := ea.placementMaker.makeCorePlacements(entity.parsedCores, leader.Event)
 if err != nil {
-return fmt.Errorf("failed to make core placements: %v", err)
+return fmt.Errorf("failed to make core placements: %w", err)
 }
 
 for _, plc := range placements {

@@ -46,7 +46,7 @@ func (cp *configParser) parseEntities(coreEntities []*CoreEventEntity, uncoreEnt
 
 coreEntity.parsedCores, err = cp.parseCores(coreEntity.Cores)
 if err != nil {
-return fmt.Errorf("error during cores parsing: %v", err)
+return fmt.Errorf("error during cores parsing: %w", err)
 }
 }
 
@@ -69,7 +69,7 @@ func (cp *configParser) parseEntities(coreEntities []*CoreEventEntity, uncoreEnt
 
 uncoreEntity.parsedSockets, err = cp.parseSockets(uncoreEntity.Sockets)
 if err != nil {
-return fmt.Errorf("error during sockets parsing: %v", err)
+return fmt.Errorf("error during sockets parsing: %w", err)
 }
 }
 return nil
@@ -99,7 +99,7 @@ func (cp *configParser) parseCores(cores []string) ([]int, error) {
 }
 cores, err := cp.sys.allCPUs()
 if err != nil {
-return nil, fmt.Errorf("cannot obtain all cpus: %v", err)
+return nil, fmt.Errorf("cannot obtain all cpus: %w", err)
 }
 return cores, nil
 }
@@ -124,7 +124,7 @@ func (cp *configParser) parseSockets(sockets []string) ([]int, error) {
 }
 sockets, err := cp.sys.allSockets()
 if err != nil {
-return nil, fmt.Errorf("cannot obtain all sockets: %v", err)
+return nil, fmt.Errorf("cannot obtain all sockets: %w", err)
 }
 return sockets, nil
 }

@@ -128,7 +128,7 @@ func (*IntelPMU) SampleConfig() string {
 func (i *IntelPMU) Init() error {
 err := checkFiles(i.EventListPaths, i.fileInfo)
 if err != nil {
-return fmt.Errorf("error during event definitions paths validation: %v", err)
+return fmt.Errorf("error during event definitions paths validation: %w", err)
 }
 
 reader, err := newReader(i.EventListPaths)
@@ -152,22 +152,22 @@ func (i *IntelPMU) initialization(parser entitiesParser, resolver entitiesResolv
 
 err := parser.parseEntities(i.CoreEntities, i.UncoreEntities)
 if err != nil {
-return fmt.Errorf("error during parsing configuration sections: %v", err)
+return fmt.Errorf("error during parsing configuration sections: %w", err)
 }
 
 err = resolver.resolveEntities(i.CoreEntities, i.UncoreEntities)
 if err != nil {
-return fmt.Errorf("error during events resolving: %v", err)
+return fmt.Errorf("error during events resolving: %w", err)
 }
 
 err = i.checkFileDescriptors()
 if err != nil {
-return fmt.Errorf("error during file descriptors checking: %v", err)
+return fmt.Errorf("error during file descriptors checking: %w", err)
 }
 
 err = activator.activateEntities(i.CoreEntities, i.UncoreEntities)
 if err != nil {
-return fmt.Errorf("error during events activation: %v", err)
+return fmt.Errorf("error during events activation: %w", err)
 }
 return nil
 }
@@ -175,11 +175,11 @@ func (i *IntelPMU) initialization(parser entitiesParser, resolver entitiesResolv
 func (i *IntelPMU) checkFileDescriptors() error {
 coreFd, err := estimateCoresFd(i.CoreEntities)
 if err != nil {
-return fmt.Errorf("failed to estimate number of core events file descriptors: %v", err)
+return fmt.Errorf("failed to estimate number of core events file descriptors: %w", err)
 }
 uncoreFd, err := estimateUncoreFd(i.UncoreEntities)
 if err != nil {
-return fmt.Errorf("failed to estimate nubmer of uncore events file descriptors: %v", err)
+return fmt.Errorf("failed to estimate nubmer of uncore events file descriptors: %w", err)
 }
 if coreFd > math.MaxUint64-uncoreFd {
 return fmt.Errorf("requested number of file descriptors exceeds uint64")
@@ -213,7 +213,7 @@ func (i *IntelPMU) Gather(acc telegraf.Accumulator) error {
 }
 coreMetrics, uncoreMetrics, err := i.entitiesReader.readEntities(i.CoreEntities, i.UncoreEntities)
 if err != nil {
-return fmt.Errorf("failed to read entities events values: %v", err)
+return fmt.Errorf("failed to read entities events values: %w", err)
 }
 
 for id, m := range coreMetrics {
@@ -275,7 +275,7 @@ func newReader(files []string) (*ia.JSONFilesReader, error) {
 for _, file := range files {
 err := reader.AddFiles(file)
 if err != nil {
-return nil, fmt.Errorf("failed to add files to reader: %v", err)
+return nil, fmt.Errorf("failed to add files to reader: %w", err)
 }
 }
 return reader, nil

@@ -29,7 +29,7 @@ func (e *iaEntitiesResolver) resolveEntities(coreEntities []*CoreEventEntity, un
 if entity.allEvents {
 newEvents, _, err := e.resolveAllEvents()
 if err != nil {
-return fmt.Errorf("failed to resolve all events: %v", err)
+return fmt.Errorf("failed to resolve all events: %w", err)
 }
 entity.parsedEvents = newEvents
 continue
@@ -55,7 +55,7 @@ func (e *iaEntitiesResolver) resolveEntities(coreEntities []*CoreEventEntity, un
 if entity.allEvents {
 _, newEvents, err := e.resolveAllEvents()
 if err != nil {
-return fmt.Errorf("failed to resolve all events: %v", err)
+return fmt.Errorf("failed to resolve all events: %w", err)
 }
 entity.parsedEvents = newEvents
 continue
@@ -84,8 +84,8 @@ func (e *iaEntitiesResolver) resolveAllEvents() (coreEvents []*eventWithQuals, u
 
 perfEvents, err := e.transformer.Transform(e.reader, ia.NewNameMatcher())
 if err != nil {
-re, ok := err.(*ia.TransformationError)
-if !ok {
+var re *ia.TransformationError
+if !errors.As(err, &re) {
 return nil, nil, err
 }
 if e.log != nil && re != nil {
@@ -131,7 +131,7 @@ func (e *iaEntitiesResolver) resolveEvent(name string, qualifiers []string) (ia.
 matcher := ia.NewNameMatcher(name)
 perfEvents, err := e.transformer.Transform(e.reader, matcher)
 if err != nil {
-return custom, fmt.Errorf("failed to transform perf events: %v", err)
+return custom, fmt.Errorf("failed to transform perf events: %w", err)
 }
 if len(perfEvents) < 1 {
 return custom, fmt.Errorf("failed to resolve unknown event %q", name)

@@ -32,7 +32,7 @@ func (fs *fileServiceImpl) getCPUInfoStats() (map[string]*cpuInfo, error) {
 path := "/proc/cpuinfo"
 cpuInfoFile, err := os.Open(path)
 if err != nil {
-return nil, fmt.Errorf("error while reading %s, err: %v", path, err)
+return nil, fmt.Errorf("error while reading %q: %w", path, err)
 }
 defer cpuInfoFile.Close()
 
@@ -142,7 +142,7 @@ func (fs *fileServiceImpl) readFileAtOffsetToUint64(reader io.ReaderAt, offset i
 
 _, err := reader.ReadAt(buffer, offset)
 if err != nil {
-return 0, fmt.Errorf("error on reading file at offset %d, err: %v", offset, err)
+return 0, fmt.Errorf("error on reading file at offset %d: %w", offset, err)
 }
 
 return binary.LittleEndian.Uint64(buffer), nil

@@ -85,7 +85,7 @@ func (m *msrServiceImpl) retrieveCPUFrequencyForCore(core string) (float64, erro
 }
 cpuFreqFile, err := os.Open(cpuFreqPath)
 if err != nil {
-return 0, fmt.Errorf("error opening scaling_cur_freq file on path %s, err: %v", cpuFreqPath, err)
+return 0, fmt.Errorf("error opening scaling_cur_freq file on path %q: %w", cpuFreqPath, err)
 }
 defer cpuFreqFile.Close()
 
@@ -96,7 +96,7 @@ func (m *msrServiceImpl) retrieveCPUFrequencyForCore(core string) (float64, erro
 func (m *msrServiceImpl) retrieveUncoreFrequency(socketID string, typeFreq string, kind string, die string) (float64, error) {
 uncoreFreqPath, err := createUncoreFreqPath(socketID, typeFreq, kind, die)
 if err != nil {
-return 0, fmt.Errorf("unable to create uncore freq read path for socketID %s, and frequency type %s err: %v", socketID, typeFreq, err)
+return 0, fmt.Errorf("unable to create uncore freq read path for socketID %q, and frequency type %q: %w", socketID, typeFreq, err)
 }
 err = checkFile(uncoreFreqPath)
 if err != nil {
@@ -104,7 +104,7 @@ func (m *msrServiceImpl) retrieveUncoreFrequency(socketID string, typeFreq strin
 }
 uncoreFreqFile, err := os.Open(uncoreFreqPath)
 if err != nil {
-return 0, fmt.Errorf("error opening uncore frequncy file on %s, err: %v", uncoreFreqPath, err)
+return 0, fmt.Errorf("error opening uncore frequncy file on %q: %w", uncoreFreqPath, err)
 }
 defer uncoreFreqFile.Close()
 
@@ -144,13 +144,13 @@ func (m *msrServiceImpl) openAndReadMsr(core string) error {
 }
 msrFile, err := os.Open(path)
 if err != nil {
-return fmt.Errorf("error opening MSR file on path %s, err: %v", path, err)
+return fmt.Errorf("error opening MSR file on path %q: %w", path, err)
 }
 defer msrFile.Close()
 
 err = m.readDataFromMsr(core, msrFile)
 if err != nil {
-return fmt.Errorf("error reading data from MSR for core %s, err: %v", core, err)
+return fmt.Errorf("error reading data from MSR for core %q: %w", core, err)
 }
 return nil
 }
@@ -163,7 +163,7 @@ func (m *msrServiceImpl) readSingleMsr(core string, msr string) (uint64, error)
 }
 msrFile, err := os.Open(path)
 if err != nil {
-return 0, fmt.Errorf("error opening MSR file on path %s, err: %v", path, err)
+return 0, fmt.Errorf("error opening MSR file on path %q: %w", path, err)
 }
 defer msrFile.Close()
 
@@ -213,7 +213,7 @@ func (m *msrServiceImpl) readDataFromMsr(core string, reader io.ReaderAt) error
 
 err := m.readValueFromFileAtOffset(ctx, ch, reader, off)
 if err != nil {
-return fmt.Errorf("error reading MSR file, err: %v", err)
+return fmt.Errorf("error reading MSR file: %w", err)
 }
 
 return nil
@@ -231,7 +231,7 @@ func (m *msrServiceImpl) readDataFromMsr(core string, reader io.ReaderAt) error
 newTemp := <-msrOffsetsWithChannels[temperatureLocation]
 
 if err := g.Wait(); err != nil {
-return fmt.Errorf("received error during reading MSR values in goroutines: %v", err)
+return fmt.Errorf("received error during reading MSR values in goroutines: %w", err)
 }
 
 m.cpuCoresData[core].c3Delta = newC3 - m.cpuCoresData[core].c3

@@ -57,7 +57,7 @@ func (r *raplServiceImpl) retrieveAndCalculateData(socketID string) error {
 }
 socketEnergyUjFile, err := os.Open(socketEnergyUjPath)
 if err != nil {
-return fmt.Errorf("error opening socket energy_uj file on path %s, err: %v", socketEnergyUjPath, err)
+return fmt.Errorf("error opening socket energy_uj file on path %q: %w", socketEnergyUjPath, err)
 }
 defer socketEnergyUjFile.Close()
 
@@ -69,7 +69,7 @@ func (r *raplServiceImpl) retrieveAndCalculateData(socketID string) error {
 }
 dramEnergyUjFile, err := os.Open(dramEnergyUjPath)
 if err != nil {
-return fmt.Errorf("error opening dram energy_uj file on path %s, err: %v", dramEnergyUjPath, err)
+return fmt.Errorf("error opening dram energy_uj file on path %q: %w", dramEnergyUjPath, err)
 }
 defer dramEnergyUjFile.Close()
 
@@ -80,7 +80,7 @@ func (r *raplServiceImpl) retrieveAndCalculateData(socketID string) error {
 }
 socketMaxEnergyUjFile, err := os.Open(socketMaxEnergyUjPath)
 if err != nil {
-return fmt.Errorf("error opening socket max_energy_range_uj file on path %s, err: %v", socketMaxEnergyUjPath, err)
+return fmt.Errorf("error opening socket max_energy_range_uj file on path %q: %w", socketMaxEnergyUjPath, err)
 }
 defer socketMaxEnergyUjFile.Close()
 
@@ -91,7 +91,7 @@ func (r *raplServiceImpl) retrieveAndCalculateData(socketID string) error {
 }
 dramMaxEnergyUjFile, err := os.Open(dramMaxEnergyUjPath)
 if err != nil {
-return fmt.Errorf("error opening dram max_energy_range_uj file on path %s, err: %v", dramMaxEnergyUjPath, err)
+return fmt.Errorf("error opening dram max_energy_range_uj file on path %q: %w", dramMaxEnergyUjPath, err)
 }
 defer dramMaxEnergyUjFile.Close()
 
@@ -107,7 +107,7 @@ func (r *raplServiceImpl) getConstraintMaxPowerWatts(socketID string) (float64,
 }
 socketMaxPowerFile, err := os.Open(socketMaxPowerPath)
 if err != nil {
-return 0, fmt.Errorf("error opening constraint_0_max_power_uw file on path %s, err: %v", socketMaxPowerPath, err)
+return 0, fmt.Errorf("error opening constraint_0_max_power_uw file on path %q: %w", socketMaxPowerPath, err)
 }
 defer socketMaxPowerFile.Close()
 
@@ -186,7 +186,7 @@ func (r *raplServiceImpl) findDramFolder(raplFolders []string, socketID string)
 read, err := r.fs.readFile(nameFilePath)
 if err != nil {
 if val := r.logOnce[nameFilePath]; val == nil || val.Error() != err.Error() {
-r.log.Errorf("error reading file on path: %s, err: %v", nameFilePath, err)
+r.log.Errorf("error reading file on path %q: %v", nameFilePath, err)
 r.logOnce[nameFilePath] = err
 }
 continue

@@ -7,6 +7,7 @@ import (
 "bufio"
 "context"
 _ "embed"
+"errors"
 "fmt"
 "io"
 "os"
@@ -335,7 +336,7 @@ func shutDownPqos(pqos *exec.Cmd) error {
 ctx, cancel := context.WithTimeout(context.Background(), timeout)
 defer cancel()
 for {
-if err := pqos.Process.Signal(syscall.Signal(0)); err == os.ErrProcessDone {
+if err := pqos.Process.Signal(syscall.Signal(0)); errors.Is(err, os.ErrProcessDone) {
 return nil
 } else if ctx.Err() != nil {
 break
@@ -347,7 +348,7 @@ func shutDownPqos(pqos *exec.Cmd) error {
 // fixed in https://github.com/intel/intel-cmt-cat/issues/197
 err := pqos.Process.Kill()
 if err != nil {
-return fmt.Errorf("failed to shut down pqos: %v", err)
+return fmt.Errorf("failed to shut down pqos: %w", err)
 }
 }
 return nil
@@ -401,7 +402,6 @@ func validatePqosPath(pqosPath string) error {
 
 func parseCoresConfig(cores []string) ([]string, error) {
 var allCores []int
-configError := fmt.Errorf("wrong cores input config data format")
 
 parsedCores := make([]string, 0, len(cores))
 for _, singleCoreGroup := range cores {
@@ -411,10 +411,10 @@ func parseCoresConfig(cores []string) ([]string, error) {
 for _, coreStr := range separatedCores {
 actualCores, err := validateAndParseCores(coreStr)
 if err != nil {
-return nil, fmt.Errorf("%v: %v", configError, err)
+return nil, fmt.Errorf("wrong cores input config data format: %w", err)
 }
 if checkForDuplicates(allCores, actualCores) {
-return nil, fmt.Errorf("%v: %v", configError, "core value cannot be duplicated")
+return nil, errors.New("wrong cores input config data format: core value cannot be duplicated")
 }
 actualGroupOfCores = append(actualGroupOfCores, actualCores...)
 allCores = append(allCores, actualGroupOfCores...)

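Note (illustrative sketch, not part of the commit): the `parseCoresConfig` change above also drops a pre-built `fmt.Errorf` "error constant" in favour of inlining the message, using `errors.New` when nothing is interpolated and `fmt.Errorf` with `%w` when a cause is wrapped. The validation rules below are made up purely to show the two forms side by side.

```go
// errors.New for constant messages, fmt.Errorf only when formatting or wrapping.
package main

import (
	"errors"
	"fmt"
)

func validateCores(cores []int) error {
	if len(cores) == 0 {
		// No values to interpolate and nothing to wrap: errors.New is enough.
		return errors.New("wrong cores input config data format: core list is empty")
	}
	if cores[0] < 0 {
		// A value is interpolated, so fmt.Errorf is appropriate here.
		return fmt.Errorf("wrong cores input config data format: invalid core %d", cores[0])
	}
	return nil
}

func main() {
	fmt.Println(validateCores(nil))
	fmt.Println(validateCores([]int{-1}))
}
```
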
@@ -76,7 +76,7 @@ scan:
 irqs = append(irqs, *irq)
 }
 if scanner.Err() != nil {
-return nil, fmt.Errorf("error scanning file: %s", scanner.Err())
+return nil, fmt.Errorf("error scanning file: %w", scanner.Err())
 }
 return irqs, nil
 }
@@ -116,7 +116,7 @@ func parseFile(file string) ([]IRQ, error) {
 
 irqs, err := parseInterrupts(f)
 if err != nil {
-return nil, fmt.Errorf("parsing %s: %s", file, err)
+return nil, fmt.Errorf("parsing %q: %w", file, err)
 }
 return irqs, nil
 }

@@ -58,7 +58,7 @@ func (m *Ipmi) Init() error {
 if m.Path == "" {
 path, err := exec.LookPath(cmd)
 if err != nil {
-return fmt.Errorf("looking up %q failed: %v", cmd, err)
+return fmt.Errorf("looking up %q failed: %w", cmd, err)
 }
 m.Path = path
 }
@@ -129,7 +129,7 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error {
 cmd := execCommand(name, dumpOpts...)
 out, err := internal.CombinedOutputTimeout(cmd, time.Duration(m.Timeout))
 if err != nil {
-return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(sanitizeIPMICmd(cmd.Args), " "), err, string(out))
+return fmt.Errorf("failed to run command %q: %w - %s", strings.Join(sanitizeIPMICmd(cmd.Args), " "), err, string(out))
 }
 }
 opts = append(opts, "-S")
@@ -148,7 +148,7 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error {
 out, err := internal.CombinedOutputTimeout(cmd, time.Duration(m.Timeout))
 timestamp := time.Now()
 if err != nil {
-return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(sanitizeIPMICmd(cmd.Args), " "), err, string(out))
+return fmt.Errorf("failed to run command %q: %w - %s", strings.Join(sanitizeIPMICmd(cmd.Args), " "), err, string(out))
 }
 if m.MetricVersion == 2 {
 return m.parseV2(acc, hostname, out, timestamp)

@@ -111,7 +111,7 @@ func setList(timeout config.Duration, useSudo bool) (*bytes.Buffer, error) {
 cmd.Stdout = &out
 err = internal.RunTimeout(cmd, time.Duration(timeout))
 if err != nil {
-return &out, fmt.Errorf("error running ipset save: %s", err)
+return &out, fmt.Errorf("error running ipset save: %w", err)
 }
 
 return &out, nil

@@ -35,7 +35,7 @@ func (i *IPVS) Gather(acc telegraf.Accumulator) error {
 if i.handle == nil {
 h, err := ipvs.New("") // TODO: make the namespace configurable
 if err != nil {
-return fmt.Errorf("unable to open IPVS handle: %v", err)
+return fmt.Errorf("unable to open IPVS handle: %w", err)
 }
 i.handle = h
 }
@@ -44,7 +44,7 @@ func (i *IPVS) Gather(acc telegraf.Accumulator) error {
 if err != nil {
 i.handle.Close()
 i.handle = nil // trigger a reopen on next call to gather
-return fmt.Errorf("failed to list IPVS services: %v", err)
+return fmt.Errorf("failed to list IPVS services: %w", err)
 }
 for _, s := range services {
 fields := map[string]interface{}{

@@ -84,7 +84,7 @@ func (j *Jenkins) Gather(acc telegraf.Accumulator) error {
 func (j *Jenkins) newHTTPClient() (*http.Client, error) {
 tlsCfg, err := j.ClientConfig.TLSConfig()
 if err != nil {
-return nil, fmt.Errorf("error parse jenkins config[%s]: %v", j.URL, err)
+return nil, fmt.Errorf("error parse jenkins config %q: %w", j.URL, err)
 }
 return &http.Client{
 Transport: &http.Transport{
@@ -118,11 +118,11 @@ func (j *Jenkins) initialize(client *http.Client) error {
 // init filters
 j.jobFilter, err = filter.NewIncludeExcludeFilter(j.JobInclude, j.JobExclude)
 if err != nil {
-return fmt.Errorf("error compiling job filters[%s]: %v", j.URL, err)
+return fmt.Errorf("error compiling job filters %q: %w", j.URL, err)
 }
 j.nodeFilter, err = filter.NewIncludeExcludeFilter(j.NodeInclude, j.NodeExclude)
 if err != nil {
-return fmt.Errorf("error compiling node filters[%s]: %v", j.URL, err)
+return fmt.Errorf("error compiling node filters %q: %w", j.URL, err)
 }
 
 // init tcp pool with default value

@@ -91,7 +91,7 @@ func (j *Jolokia) doRequest(req *http.Request) ([]map[string]interface{}, error)
 // Unmarshal json
 var jsonOut []map[string]interface{}
 if err = json.Unmarshal(body, &jsonOut); err != nil {
-return nil, fmt.Errorf("error decoding JSON response: %s: %s", err, body)
+return nil, fmt.Errorf("error decoding JSON response %q: %w", body, err)
 }
 
 return jsonOut, nil
@@ -216,12 +216,12 @@ func (j *Jolokia) Gather(acc telegraf.Accumulator) error {
 
 req, err := j.prepareRequest(server, metrics)
 if err != nil {
-acc.AddError(fmt.Errorf("unable to create request: %s", err))
+acc.AddError(fmt.Errorf("unable to create request: %w", err))
 continue
 }
 out, err := j.doRequest(req)
 if err != nil {
-acc.AddError(fmt.Errorf("error performing request: %s", err))
+acc.AddError(fmt.Errorf("error performing request: %w", err))
 continue
 }
 

@@ -51,7 +51,7 @@ func (ja *JolokiaAgent) Gather(acc telegraf.Accumulator) error {
 for _, url := range ja.URLs {
 client, err := ja.createClient(url)
 if err != nil {
-acc.AddError(fmt.Errorf("unable to create client for %s: %v", url, err))
+acc.AddError(fmt.Errorf("unable to create client for %q: %w", url, err))
 continue
 }
 ja.clients = append(ja.clients, client)
@@ -67,7 +67,7 @@ func (ja *JolokiaAgent) Gather(acc telegraf.Accumulator) error {
 
 err := ja.gatherer.Gather(client, acc)
 if err != nil {
-acc.AddError(fmt.Errorf("unable to gather metrics for %s: %v", client.URL, err))
+acc.AddError(fmt.Errorf("unable to gather metrics for %q: %w", client.URL, err))
 }
 }(client)
 }

@@ -256,8 +256,7 @@ func (m *OpenConfigTelemetry) collectData(
 rpcStatus, _ := status.FromError(err)
 // If service is currently unavailable and may come back later, retry
 if rpcStatus.Code() != codes.Unavailable {
-acc.AddError(fmt.Errorf("could not subscribe to %s: %v", grpcServer,
-err))
+acc.AddError(fmt.Errorf("could not subscribe to %q: %w", grpcServer, err))
 return
 }
 
@@ -274,7 +273,7 @@ func (m *OpenConfigTelemetry) collectData(
 if err != nil {
 // If we encounter error in the stream, break so we can retry
 // the connection
-acc.AddError(fmt.Errorf("failed to read from %s: %s", grpcServer, err))
+acc.AddError(fmt.Errorf("failed to read from %q: %w", grpcServer, err))
 break
 }
 

@@ -114,7 +114,7 @@ func (k *Kafka) receiver() {
 return
 case err := <-k.errs:
 if err != nil {
-k.acc.AddError(fmt.Errorf("consumer Error: %s", err))
+k.acc.AddError(fmt.Errorf("consumer error: %w", err))
 }
 case msg := <-k.in:
 if k.MaxMessageLen != 0 && len(msg.Value) > k.MaxMessageLen {
@@ -123,8 +123,7 @@ func (k *Kafka) receiver() {
 } else {
 metrics, err := k.parser.Parse(msg.Value)
 if err != nil {
-k.acc.AddError(fmt.Errorf("Message Parse Error\nmessage: %s\nerror: %s",
-string(msg.Value), err.Error()))
+k.acc.AddError(fmt.Errorf("error during parsing message %q: %w", string(msg.Value), err))
 }
 for _, metric := range metrics {
 k.acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time())
@@ -138,7 +137,7 @@ func (k *Kafka) receiver() {
 err := k.Consumer.CommitUpto(msg)
 k.Unlock()
 if err != nil {
-k.acc.AddError(fmt.Errorf("committing to consumer failed: %v", err))
+k.acc.AddError(fmt.Errorf("committing to consumer failed: %w", err))
 }
 }
 }
@@ -150,7 +149,7 @@ func (k *Kafka) Stop() {
 defer k.Unlock()
 close(k.done)
 if err := k.Consumer.Close(); err != nil {
-k.acc.AddError(fmt.Errorf("error closing consumer: %s", err.Error()))
+k.acc.AddError(fmt.Errorf("error closing consumer: %w", err))
 }
 }
 

@@ -49,7 +49,7 @@ func (k *Kapacitor) Gather(acc telegraf.Accumulator) error {
 go func(url string) {
 defer wg.Done()
 if err := k.gatherURL(acc, url); err != nil {
-acc.AddError(fmt.Errorf("[url=%s]: %s", url, err))
+acc.AddError(fmt.Errorf("[url=%s]: %w", url, err))
 }
 }(u)
 }

@@ -136,7 +136,7 @@ func (k *Kibana) Gather(acc telegraf.Accumulator) error {
 go func(baseUrl string, acc telegraf.Accumulator) {
 defer wg.Done()
 if err := k.gatherKibanaStatus(baseUrl, acc); err != nil {
-acc.AddError(fmt.Errorf("[url=%s]: %s", baseUrl, err))
+acc.AddError(fmt.Errorf("[url=%s]: %w", baseUrl, err))
 return
 }
 }(serv, acc)

@@ -265,7 +265,7 @@ func (k *Kubernetes) LoadJSON(url string, v interface{}) error {
 req.Header.Add("Accept", "application/json")
 resp, err = k.RoundTripper.RoundTrip(req)
 if err != nil {
-return fmt.Errorf("error making HTTP request to %s: %s", url, err)
+return fmt.Errorf("error making HTTP request to %q: %w", url, err)
 }
 defer resp.Body.Close()
 if resp.StatusCode != http.StatusOK {
@@ -274,7 +274,7 @@ func (k *Kubernetes) LoadJSON(url string, v interface{}) error {
 
 err = json.NewDecoder(resp.Body).Decode(v)
 if err != nil {
-return fmt.Errorf(`Error parsing response: %s`, err)
+return fmt.Errorf("error parsing response: %w", err)
 }
 
 return nil

@@ -227,7 +227,7 @@ func (l *LeoFS) gatherServer(
 }
 fVal, err := strconv.ParseFloat(val, 64)
 if err != nil {
-return fmt.Errorf("Unable to parse the value:%s, err:%s", val, err)
+return fmt.Errorf("unable to parse the value %q: %w", val, err)
 }
 fields[key] = fVal
 i++

@@ -187,9 +187,9 @@ func (l *Libvirt) Gather(acc telegraf.Accumulator) error {
 func handleError(err error, errMessage string, utils utils) error {
 if err != nil {
 if chanErr := utils.Disconnect(); chanErr != nil {
-return fmt.Errorf("%s: %v; error occurred when disconnecting: %v", errMessage, err, chanErr)
+return fmt.Errorf("%s: %w; error occurred when disconnecting: %w", errMessage, err, chanErr)
 }
-return fmt.Errorf("%s: %v", errMessage, err)
+return fmt.Errorf("%s: %w", errMessage, err)
 }
 return nil
 }

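Note (illustrative sketch, not part of the commit): the libvirt `handleError` change above uses two `%w` verbs in a single `fmt.Errorf`, which is supported since Go 1.20; both wrapped causes remain matchable through the returned error. The sentinel names below are invented for the demonstration.

```go
// Multiple %w verbs (Go 1.20+): both causes stay in the error chain.
package main

import (
	"errors"
	"fmt"
)

var (
	errGather     = errors.New("gather failed")
	errDisconnect = errors.New("disconnect failed")
)

func main() {
	err := fmt.Errorf("collecting metrics: %w; error occurred when disconnecting: %w", errGather, errDisconnect)

	fmt.Println(errors.Is(err, errGather))     // true
	fmt.Println(errors.Is(err, errDisconnect)) // true
}
```
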
@@ -5,6 +5,7 @@ package linux_cpu
 
 import (
 _ "embed"
+"errors"
 "fmt"
 "io"
 "os"
@@ -187,7 +188,7 @@ func validatePath(propPath string) error {
 }
 
 if err != nil {
-return fmt.Errorf("cannot get system information for CPU property: [%s] - %v", propPath, err)
+return fmt.Errorf("cannot get system information for CPU property %q: %w", propPath, err)
 }
 
 _ = f.Close() // File is not written to, closing should be safe
@@ -204,10 +205,10 @@ func readUintFromFile(propPath string) (uint64, error) {
 buffer := make([]byte, 22)
 
 n, err := f.Read(buffer)
-if err != nil && err != io.EOF {
-return 0, fmt.Errorf("error on reading file, err: %v", err)
+if err != nil && !errors.Is(err, io.EOF) {
+return 0, fmt.Errorf("error on reading file: %w", err)
 } else if n == 0 {
-return 0, fmt.Errorf("error on reading file, file is empty")
+return 0, fmt.Errorf("error on reading file: file is empty")
 }
 
 return strconv.ParseUint(string(buffer[:n-1]), 10, 64)

@@ -124,7 +124,7 @@ func (*Logstash) SampleConfig() string {
 func (logstash *Logstash) Init() error {
 err := choice.CheckSlice(logstash.Collect, []string{"pipelines", "process", "jvm"})
 if err != nil {
-return fmt.Errorf(`cannot verify "collect" setting: %v`, err)
+return fmt.Errorf(`cannot verify "collect" setting: %w`, err)
 }
 return nil
 }

@@ -62,7 +62,7 @@ func (lvm *LVM) gatherPhysicalVolumes(acc telegraf.Accumulator) error {
 var report pvsReport
 err = json.Unmarshal(out, &report)
 if err != nil {
-return fmt.Errorf("failed to unmarshal physical volume JSON: %s", err)
+return fmt.Errorf("failed to unmarshal physical volume JSON: %w", err)
 }
 
 if len(report.Report) > 0 {
@@ -116,7 +116,7 @@ func (lvm *LVM) gatherVolumeGroups(acc telegraf.Accumulator) error {
 var report vgsReport
 err = json.Unmarshal(out, &report)
 if err != nil {
-return fmt.Errorf("failed to unmarshal vol group JSON: %s", err)
+return fmt.Errorf("failed to unmarshal vol group JSON: %w", err)
 }
 
 if len(report.Report) > 0 {
@@ -179,7 +179,7 @@ func (lvm *LVM) gatherLogicalVolumes(acc telegraf.Accumulator) error {
 var report lvsReport
 err = json.Unmarshal(out, &report)
 if err != nil {
-return fmt.Errorf("failed to unmarshal logical vol JSON: %s", err)
+return fmt.Errorf("failed to unmarshal logical vol JSON: %w", err)
 }
 
 if len(report.Report) > 0 {
@@ -234,8 +234,7 @@ func (lvm *LVM) runCmd(cmd string, args []string) ([]byte, error) {
 out, err := internal.StdOutputTimeout(execCmd, 5*time.Second)
 if err != nil {
 return nil, fmt.Errorf(
-"failed to run command %s: %s - %s",
-strings.Join(execCmd.Args, " "), err, string(out),
+"failed to run command %s: %w - %s", strings.Join(execCmd.Args, " "), err, string(out),
 )
 }
 

@@ -133,7 +133,7 @@ func (c *Marklogic) Gather(accumulator telegraf.Accumulator) error {
 go func(serv string) {
 defer wg.Done()
 if err := c.fetchAndInsertData(accumulator, serv); err != nil {
-accumulator.AddError(fmt.Errorf("[host=%s]: %s", serv, err))
+accumulator.AddError(fmt.Errorf("[host=%s]: %w", serv, err))
 }
 }(serv)
 }

@@ -31,7 +31,7 @@ func (ms *MemStats) Init() error {
 func (ms *MemStats) Gather(acc telegraf.Accumulator) error {
 vm, err := ms.ps.VMStat()
 if err != nil {
-return fmt.Errorf("error getting virtual memory info: %s", err)
+return fmt.Errorf("error getting virtual memory info: %w", err)
 }
 
 fields := map[string]interface{}{

@@ -110,7 +110,7 @@ func (c *ConfigurationOriginal) initFields(fieldDefs []fieldDefinition) ([]field
 for _, def := range fieldDefs {
 f, err := c.newFieldFromDefinition(def)
 if err != nil {
-return nil, fmt.Errorf("initializing field %q failed: %v", def.Name, err)
+return nil, fmt.Errorf("initializing field %q failed: %w", def.Name, err)
 }
 fields = append(fields, f)
 }
@@ -139,7 +139,7 @@ func (c *ConfigurationPerRequest) Check() error {
 // Check for duplicate field definitions
 id, err := c.fieldID(seed, def, f)
 if err != nil {
-return fmt.Errorf("cannot determine field id for %q: %v", f.Name, err)
+return fmt.Errorf("cannot determine field id for %q: %w", f.Name, err)
 }
 if seenFields[id] {
 return fmt.Errorf("field %q duplicated in measurement %q (slave %d/%q)", f.Name, f.Measurement, def.SlaveID, def.RegisterType)
@@ -230,7 +230,7 @@ func (c *ConfigurationPerRequest) initFields(fieldDefs []requestFieldDefinition,
 for _, def := range fieldDefs {
 f, err := c.newFieldFromDefinition(def, typed, byteOrder)
 if err != nil {
-return nil, fmt.Errorf("initializing field %q failed: %v", def.Name, err)
+return nil, fmt.Errorf("initializing field %q failed: %w", def.Name, err)
 }
 fields = append(fields, f)
 }

@@ -3,6 +3,7 @@ package modbus
 
 import (
 _ "embed"
+"errors"
 "fmt"
 "net"
 "net/url"
@@ -135,18 +136,18 @@ func (m *Modbus) Init() error {
 
 // Check and process the configuration
 if err := cfg.Check(); err != nil {
-return fmt.Errorf("configuration invalid: %v", err)
+return fmt.Errorf("configuration invalid: %w", err)
 }
 
 r, err := cfg.Process()
 if err != nil {
-return fmt.Errorf("cannot process configuration: %v", err)
+return fmt.Errorf("cannot process configuration: %w", err)
 }
 m.requests = r
 
 // Setup client
 if err := m.initClient(); err != nil {
-return fmt.Errorf("initializing client failed: %v", err)
+return fmt.Errorf("initializing client failed: %w", err)
 }
 for slaveID, rqs := range m.requests {
 var nHoldingRegs, nInputsRegs, nDiscreteRegs, nCoilRegs uint16
@@ -192,8 +193,8 @@ func (m *Modbus) Gather(acc telegraf.Accumulator) error {
 m.Log.Debugf("Reading slave %d for %s...", slaveID, m.Controller)
 if err := m.readSlaveData(slaveID, requests); err != nil {
 acc.AddError(fmt.Errorf("slave %d: %w", slaveID, err))
-mberr, ok := err.(*mb.Error)
-if !ok || mberr.ExceptionCode != mb.ExceptionCodeServerDeviceBusy {
+var mbErr *mb.Error
+if !errors.As(err, &mbErr) || mbErr.ExceptionCode != mb.ExceptionCodeServerDeviceBusy {
 m.Log.Debugf("Reconnecting to %s...", m.Controller)
 if err := m.disconnect(); err != nil {
 return fmt.Errorf("disconnecting failed: %w", err)
@@ -337,8 +338,8 @@ func (m *Modbus) readSlaveData(slaveID byte, requests requestSet) error {
 }
 
 // Exit in case a non-recoverable error occurred
-mberr, ok := err.(*mb.Error)
-if !ok || mberr.ExceptionCode != mb.ExceptionCodeServerDeviceBusy {
+var mbErr *mb.Error
+if !errors.As(err, &mbErr) || mbErr.ExceptionCode != mb.ExceptionCodeServerDeviceBusy {
 return err
 }
 

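Note (illustrative sketch, not part of the commit): in the modbus hunks above, `errors.As` does double duty: it matches the typed error and captures it, so fields such as the exception code can be inspected afterwards. The `modbusError` type below is a stand-in for a library type like `mb.Error`, with an invented exception constant.

```go
// errors.As matches and captures a typed error carrying data.
package main

import (
	"errors"
	"fmt"
)

// modbusError stands in for a library error type such as mb.Error.
type modbusError struct{ ExceptionCode byte }

func (e *modbusError) Error() string {
	return fmt.Sprintf("modbus exception 0x%02x", e.ExceptionCode)
}

const exceptionServerDeviceBusy byte = 0x06 // hypothetical constant for the sketch

func main() {
	err := fmt.Errorf("slave 1: %w", &modbusError{ExceptionCode: exceptionServerDeviceBusy})

	var mbErr *modbusError
	if errors.As(err, &mbErr) && mbErr.ExceptionCode == exceptionServerDeviceBusy {
		fmt.Println("device busy, retry later instead of reconnecting")
	}
}
```
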
@@ -255,7 +255,7 @@ func (s *Server) gatherCollectionStats(colStatsDbs []string) (*ColStats, error)
 },
 }, colStatLine)
 if err != nil {
-s.authLog(fmt.Errorf("error getting col stats from %q: %v", colName, err))
+s.authLog(fmt.Errorf("error getting col stats from %q: %w", colName, err))
 continue
 }
 collection := &Collection{
@@ -296,7 +296,7 @@ func (s *Server) gatherData(
 if replSetStatus != nil {
 oplogStats, err = s.gatherOplogStats()
 if err != nil {
-s.authLog(fmt.Errorf("Unable to get oplog stats: %v", err))
+s.authLog(fmt.Errorf("unable to get oplog stats: %w", err))
 }
 }
 
@@ -311,7 +311,7 @@ func (s *Server) gatherData(
 
 shardStats, err := s.gatherShardConnPoolStats(serverStatus.Version)
 if err != nil {
-s.authLog(fmt.Errorf("unable to gather shard connection pool stats: %s", err.Error()))
+s.authLog(fmt.Errorf("unable to gather shard connection pool stats: %w", err))
 }
 
 var collectionStats *ColStats

@@ -235,7 +235,7 @@ func (m *Monit) Gather(acc telegraf.Accumulator) error {
 decoder := xml.NewDecoder(resp.Body)
 decoder.CharsetReader = charset.NewReaderLabel
 if err := decoder.Decode(&status); err != nil {
-return fmt.Errorf("error parsing input: %v", err)
+return fmt.Errorf("error parsing input: %w", err)
 }
 
 tags := map[string]string{

@@ -579,8 +579,9 @@ func TestConnection(t *testing.T) {
 
 	err := r.Gather(&acc)
 	require.Error(t, err)
-	_, ok := err.(*url.Error)
-	require.True(t, ok)
+
+	var urlErr *url.Error
+	require.ErrorAs(t, err, &urlErr)
 }
 
 func TestInvalidUsernameOrPassword(t *testing.T) {
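Reviewer note: the test uses the matching testify helper. `require.ErrorAs` wraps `errors.As`, so the assertion also passes when the `*url.Error` is wrapped further up, and a failure prints the actual error instead of a bare boolean. A small sketch of the pattern, using an invented throwaway test server rather than the plugin's own fixture:

package example

import (
	"net/http"
	"net/http/httptest"
	"net/url"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestUnreachableServer(t *testing.T) {
	// Grab a local URL from a throwaway server, then close it so the request
	// below is guaranteed to fail at the transport level.
	srv := httptest.NewServer(http.NotFoundHandler())
	addr := srv.URL
	srv.Close()

	_, err := http.Get(addr)
	require.Error(t, err)

	// ErrorAs is errors.As plus a readable failure message, and it still
	// matches if the *url.Error is wrapped before reaching the caller.
	var urlErr *url.Error
	require.ErrorAs(t, err, &urlErr)
}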
@@ -193,15 +193,14 @@ func (m *MQTTConsumer) connect() error {
 	subscribeToken := m.client.SubscribeMultiple(topics, m.recvMessage)
 	subscribeToken.Wait()
 	if subscribeToken.Error() != nil {
-		m.acc.AddError(fmt.Errorf("subscription error: topics: %s: %v",
-			strings.Join(m.Topics[:], ","), subscribeToken.Error()))
+		m.acc.AddError(fmt.Errorf("subscription error: topics %q: %w", strings.Join(m.Topics[:], ","), subscribeToken.Error()))
 	}
 	return nil
 }
 func (m *MQTTConsumer) onConnectionLost(_ mqtt.Client, err error) {
 	// Should already be disconnected, but make doubly sure
 	m.client.Disconnect(5)
-	m.acc.AddError(fmt.Errorf("connection lost: %v", err))
+	m.acc.AddError(fmt.Errorf("connection lost: %w", err))
 	m.Log.Debugf("Disconnected %v", m.Servers)
 	m.state = Disconnected
 }
@@ -4,6 +4,7 @@ package mysql
 import (
 	"database/sql"
 	_ "embed"
+	"errors"
 	"fmt"
 	"strconv"
 	"strings"
@@ -93,7 +94,7 @@ func (m *Mysql) Init() error {
 		tlsid := "custom-" + tlsuuid.String()
 		tlsConfig, err := m.ClientConfig.TLSConfig()
 		if err != nil {
-			return fmt.Errorf("registering TLS config: %s", err)
+			return fmt.Errorf("registering TLS config: %w", err)
 		}
 		if tlsConfig != nil {
 			if err := mysql.RegisterTLSConfig(tlsid, tlsConfig); err != nil {
@@ -571,7 +572,7 @@ func (m *Mysql) gatherGlobalVariables(db *sql.DB, servtag string, acc telegraf.A
 
 		value, err := m.parseGlobalVariables(key, val)
 		if err != nil {
-			errString := fmt.Errorf("error parsing mysql global variable %q=%q: %v", key, string(val), err)
+			errString := fmt.Errorf("error parsing mysql global variable %q=%q: %w", key, string(val), err)
 			if m.MetricVersion < 2 {
 				m.Log.Debug(errString)
 			} else {
@@ -668,7 +669,7 @@ func (m *Mysql) gatherSlaveStatuses(db *sql.DB, servtag string, acc telegraf.Acc
 
 		value, err := m.parseValueByDatabaseTypeName(colValue, col.DatabaseTypeName())
 		if err != nil {
-			errString := fmt.Errorf("error parsing mysql slave status %q=%q: %v", colName, string(colValue), err)
+			errString := fmt.Errorf("error parsing mysql slave status %q=%q: %w", colName, string(colValue), err)
 			if m.MetricVersion < 2 {
 				m.Log.Debug(errString)
 			} else {
@@ -787,42 +788,42 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, servtag string, acc telegraf.Ac
 		case "Queries":
 			i, err := strconv.ParseInt(string(val), 10, 64)
 			if err != nil {
-				acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err))
+				acc.AddError(fmt.Errorf("error parsing mysql %q int value: %w", key, err))
 			} else {
 				fields["queries"] = i
 			}
 		case "Questions":
 			i, err := strconv.ParseInt(string(val), 10, 64)
 			if err != nil {
-				acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err))
+				acc.AddError(fmt.Errorf("error parsing mysql %q int value: %w", key, err))
 			} else {
 				fields["questions"] = i
 			}
 		case "Slow_queries":
 			i, err := strconv.ParseInt(string(val), 10, 64)
 			if err != nil {
-				acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err))
+				acc.AddError(fmt.Errorf("error parsing mysql %q int value: %w", key, err))
 			} else {
 				fields["slow_queries"] = i
 			}
 		case "Connections":
 			i, err := strconv.ParseInt(string(val), 10, 64)
 			if err != nil {
-				acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err))
+				acc.AddError(fmt.Errorf("error parsing mysql %q int value: %w", key, err))
 			} else {
 				fields["connections"] = i
 			}
 		case "Syncs":
 			i, err := strconv.ParseInt(string(val), 10, 64)
 			if err != nil {
-				acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err))
+				acc.AddError(fmt.Errorf("error parsing mysql %q int value: %w", key, err))
 			} else {
 				fields["syncs"] = i
 			}
 		case "Uptime":
 			i, err := strconv.ParseInt(string(val), 10, 64)
 			if err != nil {
-				acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err))
+				acc.AddError(fmt.Errorf("error parsing mysql %q int value: %w", key, err))
 			} else {
 				fields["uptime"] = i
 			}
@@ -831,7 +832,7 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, servtag string, acc telegraf.Ac
 			key = strings.ToLower(key)
 			value, err := v2.ConvertGlobalStatus(key, val)
 			if err != nil {
-				acc.AddError(fmt.Errorf("error parsing mysql global status %q=%q: %v", key, string(val), err))
+				acc.AddError(fmt.Errorf("error parsing mysql global status %q=%q: %w", key, string(val), err))
 			} else {
 				fields[key] = value
 			}
@@ -1304,7 +1305,7 @@ func (m *Mysql) gatherInnoDBMetrics(db *sql.DB, servtag string, acc telegraf.Acc
 		key = strings.ToLower(key)
 		value, err := m.parseValueByDatabaseTypeName(val, "BIGINT")
 		if err != nil {
-			acc.AddError(fmt.Errorf("error parsing mysql InnoDB metric %q=%q: %v", key, string(val), err))
+			acc.AddError(fmt.Errorf("error parsing mysql InnoDB metric %q=%q: %w", key, string(val), err))
 			continue
 		}
 
@@ -1469,7 +1470,7 @@ func (m *Mysql) gatherPerfTableLockWaits(db *sql.DB, servtag string, acc telegra
 	var tableName string
 	err := db.QueryRow(perfSchemaTablesQuery, "table_lock_waits_summary_by_table").Scan(&tableName)
 	switch {
-	case err == sql.ErrNoRows:
+	case errors.Is(err, sql.ErrNoRows):
 		return nil
 	case err != nil:
 		return err
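Reviewer note: for sentinel errors such as `sql.ErrNoRows`, errorlint prefers `errors.Is` over `==` for the same reason; a wrapped sentinel no longer compares equal directly. A minimal sketch:

package main

import (
	"database/sql"
	"errors"
	"fmt"
)

func main() {
	// Imagine a helper layer that wraps the sentinel before returning it.
	err := fmt.Errorf("querying performance_schema: %w", sql.ErrNoRows)

	fmt.Println(err == sql.ErrNoRows)          // false: == misses the wrapped sentinel
	fmt.Println(errors.Is(err, sql.ErrNoRows)) // true: errors.Is unwraps the chain
}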
@@ -1694,7 +1695,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, servtag string, acc teleg
 
 	var (
 		schemaName, digest, digestText string
-		count, queryTime, errors, warnings float64
+		count, queryTime, errs, warnings float64
 		rowsAffected, rowsSent, rowsExamined float64
 		tmpTables, tmpDiskTables float64
 		sortMergePasses, sortRows float64
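Reviewer note: renaming the `errors` counter to `errs` is more than cosmetic; a local variable named `errors` shadows the standard-library package inside that function, so calls like `errors.Is` would not compile in that scope. A tiny sketch with hypothetical names:

package main

import (
	"errors"
	"fmt"
	"io"
)

// scanRow is a hypothetical stand-in that fails with a wrapped io.EOF.
func scanRow() error { return fmt.Errorf("scan: %w", io.EOF) }

func main() {
	// If this counter were named "errors", it would shadow the errors package
	// here and the errors.Is call below would not compile.
	var errs float64

	if err := scanRow(); errors.Is(err, io.EOF) {
		errs++
	}
	fmt.Println("error rows:", errs)
}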
@@ -1708,7 +1709,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, servtag string, acc teleg
 	for rows.Next() {
 		err = rows.Scan(
 			&schemaName, &digest, &digestText,
-			&count, &queryTime, &errors, &warnings,
+			&count, &queryTime, &errs, &warnings,
 			&rowsAffected, &rowsSent, &rowsExamined,
 			&tmpTables, &tmpDiskTables,
 			&sortMergePasses, &sortRows,
@@ -1725,7 +1726,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, servtag string, acc teleg
 		fields := map[string]interface{}{
 			"events_statements_total": count,
 			"events_statements_seconds_total": queryTime / picoSeconds,
-			"events_statements_errors_total": errors,
+			"events_statements_errors_total": errs,
 			"events_statements_warnings_total": warnings,
 			"events_statements_rows_affected_total": rowsAffected,
 			"events_statements_rows_sent_total": rowsSent,