chore: Fix linter findings for errorlint (part7) (#12772)
Co-authored-by: Pawel Zak <Pawel Zak>
parent 5c234d8749
commit e2a510f157
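Two errorlint patterns recur in every hunk below: errors are now built with fmt.Errorf and the %w verb instead of %v or %s, so the original cause stays in the error chain, and errors are inspected with errors.Is / errors.As instead of == comparisons or type assertions, which would miss a wrapped cause. A minimal, self-contained sketch of why this matters (openConfig and the path are invented for illustration and are not part of the commit):

    package main

    import (
        "errors"
        "fmt"
        "io/fs"
        "os"
    )

    // openConfig wraps the underlying error with %w so callers can still
    // match it with errors.Is / errors.As.
    func openConfig(path string) (*os.File, error) {
        f, err := os.Open(path)
        if err != nil {
            return nil, fmt.Errorf("opening config %q failed: %w", path, err)
        }
        return f, nil
    }

    func main() {
        _, err := openConfig("/does/not/exist.conf")

        // errors.Is walks the wrap chain, so the sentinel is still found.
        fmt.Println(errors.Is(err, fs.ErrNotExist)) // true

        // errors.As extracts the concrete *fs.PathError despite the wrapping.
        var pathErr *fs.PathError
        fmt.Println(errors.As(err, &pathErr)) // true
    }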
@@ -4,6 +4,7 @@ package rabbitmq
 import (
 _ "embed"
 "encoding/json"
+"errors"
 "fmt"
 "io"
 "net/http"

@@ -381,7 +382,8 @@ func (r *RabbitMQ) requestJSON(u string, target interface{}) error {
 return err
 }
 if err := json.Unmarshal(buf, target); err != nil {
-if _, ok := err.(*json.UnmarshalTypeError); ok {
+var jsonErr *json.UnmarshalTypeError
+if errors.As(err, &jsonErr) {
 // Try to get the error reason from the response
 var errResponse ErrorResponse
 if json.Unmarshal(buf, &errResponse) == nil && errResponse.Error != "" {

@@ -390,7 +392,7 @@ func (r *RabbitMQ) requestJSON(u string, target interface{}) error {
 }
 }

-return fmt.Errorf("decoding answer from %q failed: %v", u, err)
+return fmt.Errorf("decoding answer from %q failed: %w", u, err)
 }

 return nil
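For reference, the json.UnmarshalTypeError handling that the rabbitmq hunk above switches to looks like this in isolation (the payload and target struct are invented for the example):

    package main

    import (
        "encoding/json"
        "errors"
        "fmt"
    )

    func main() {
        var target struct {
            Count int `json:"count"`
        }
        // "count" is a string here, so Unmarshal returns a *json.UnmarshalTypeError.
        err := json.Unmarshal([]byte(`{"count": "three"}`), &target)

        var typeErr *json.UnmarshalTypeError
        if errors.As(err, &typeErr) {
            fmt.Printf("field %q: cannot decode %s into %s\n", typeErr.Field, typeErr.Value, typeErr.Type)
        }
    }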
@@ -54,7 +54,7 @@ func (r *Raindrops) Gather(acc telegraf.Accumulator) error {
 func (r *Raindrops) gatherURL(addr *url.URL, acc telegraf.Accumulator) error {
 resp, err := r.httpClient.Get(addr.String())
 if err != nil {
-return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
+return fmt.Errorf("error making HTTP request to %q: %w", addr.String(), err)
 }
 defer resp.Body.Close()
 if resp.StatusCode != http.StatusOK {
@@ -186,7 +186,7 @@ func validateDbPath(dbPath string) error {
 }

 if err != nil {
-return fmt.Errorf("cannot get system information for db_path file: [%s] - %v", dbPath, err)
+return fmt.Errorf("cannot get system information for db_path file %q: %w", dbPath, err)
 }

 if mode := pathInfo.Mode(); !mode.IsRegular() {
@@ -199,7 +199,7 @@ func (r *Redfish) getData(address string, payload interface{}) error {

 err = json.Unmarshal(body, &payload)
 if err != nil {
-return fmt.Errorf("error parsing input: %v", err)
+return fmt.Errorf("error parsing input: %w", err)
 }

 return nil
@@ -238,7 +238,7 @@ func (r *Redis) connect() error {

 u, err := url.Parse(serv)
 if err != nil {
-return fmt.Errorf("unable to parse to address %q: %s", serv, err.Error())
+return fmt.Errorf("unable to parse to address %q: %w", serv, err)
 }

 username := ""

@@ -329,7 +329,7 @@ func (r *Redis) gatherCommandValues(client Client, acc telegraf.Accumulator) err
 val, err := client.Do(command.Type, command.Command...)
 if err != nil {
 if strings.Contains(err.Error(), "unexpected type=") {
-return fmt.Errorf("could not get command result: %s", err)
+return fmt.Errorf("could not get command result: %w", err)
 }

 return err
@@ -62,7 +62,7 @@ func (r *RedisSentinel) Init() error {
 for _, serv := range r.Servers {
 u, err := url.Parse(serv)
 if err != nil {
-return fmt.Errorf("unable to parse to address %q: %v", serv, err)
+return fmt.Errorf("unable to parse to address %q: %w", serv, err)
 }

 password := ""

@@ -136,7 +136,7 @@ func castFieldValue(value string, fieldType configFieldType) (interface{}, error
 }

 if err != nil {
-return nil, fmt.Errorf("casting value %v failed: %v", value, err)
+return nil, fmt.Errorf("casting value %q failed: %w", value, err)
 }

 return castedValue, nil
@@ -80,7 +80,7 @@ func (r *RethinkDB) gatherServer(server *Server, acc telegraf.Accumulator) error

 server.session, err = gorethink.Connect(connectOpts)
 if err != nil {
-return fmt.Errorf("unable to connect to RethinkDB, %s", err.Error())
+return fmt.Errorf("unable to connect to RethinkDB: %w", err)
 }
 defer server.session.Close()

@@ -22,23 +22,23 @@ type Server struct {

 func (s *Server) gatherData(acc telegraf.Accumulator) error {
 if err := s.getServerStatus(); err != nil {
-return fmt.Errorf("failed to get server_status, %s", err)
+return fmt.Errorf("failed to get server_status: %w", err)
 }

 if err := s.validateVersion(); err != nil {
-return fmt.Errorf("failed version validation, %s", err.Error())
+return fmt.Errorf("failed version validation: %w", err)
 }

 if err := s.addClusterStats(acc); err != nil {
-return fmt.Errorf("error adding cluster stats, %s", err.Error())
+return fmt.Errorf("error adding cluster stats: %w", err)
 }

 if err := s.addMemberStats(acc); err != nil {
-return fmt.Errorf("error adding member stats, %s", err.Error())
+return fmt.Errorf("error adding member stats: %w", err)
 }

 if err := s.addTablesStats(acc); err != nil {
-return fmt.Errorf("error adding table stats, %s", err.Error())
+return fmt.Errorf("error adding table stats: %w", err)
 }

 return nil

@@ -112,12 +112,12 @@ var ClusterTracking = []string{
 func (s *Server) addClusterStats(acc telegraf.Accumulator) error {
 cursor, err := gorethink.DB("rethinkdb").Table("stats").Get([]string{"cluster"}).Run(s.session)
 if err != nil {
-return fmt.Errorf("cluster stats query error, %s", err.Error())
+return fmt.Errorf("cluster stats query error: %w", err)
 }
 defer cursor.Close()
 var clusterStats stats
 if err := cursor.One(&clusterStats); err != nil {
-return fmt.Errorf("failure to parse cluster stats, %s", err.Error())
+return fmt.Errorf("failure to parse cluster stats: %w", err)
 }

 tags := s.getDefaultTags()

@@ -140,12 +140,12 @@ var MemberTracking = []string{
 func (s *Server) addMemberStats(acc telegraf.Accumulator) error {
 cursor, err := gorethink.DB("rethinkdb").Table("stats").Get([]string{"server", s.serverStatus.ID}).Run(s.session)
 if err != nil {
-return fmt.Errorf("member stats query error, %s", err.Error())
+return fmt.Errorf("member stats query error: %w", err)
 }
 defer cursor.Close()
 var memberStats stats
 if err := cursor.One(&memberStats); err != nil {
-return fmt.Errorf("failure to parse member stats, %s", err.Error())
+return fmt.Errorf("failure to parse member stats: %w", err)
 }

 tags := s.getDefaultTags()

@@ -164,7 +164,7 @@ var TableTracking = []string{
 func (s *Server) addTablesStats(acc telegraf.Accumulator) error {
 tablesCursor, err := gorethink.DB("rethinkdb").Table("table_status").Run(s.session)
 if err != nil {
-return fmt.Errorf("table stats query error, %s", err.Error())
+return fmt.Errorf("table stats query error: %w", err)
 }

 defer tablesCursor.Close()

@@ -187,13 +187,13 @@ func (s *Server) addTableStats(acc telegraf.Accumulator, table tableStatus) erro
 Get([]string{"table_server", table.ID, s.serverStatus.ID}).
 Run(s.session)
 if err != nil {
-return fmt.Errorf("table stats query error, %s", err.Error())
+return fmt.Errorf("table stats query error: %w", err)
 }
 defer cursor.Close()

 var ts tableStats
 if err := cursor.One(&ts); err != nil {
-return fmt.Errorf("failure to parse table stats, %s", err.Error())
+return fmt.Errorf("failure to parse table stats: %w", err)
 }

 tags := s.getDefaultTags()
@@ -108,7 +108,7 @@ func (r *Riak) gatherServer(s string, acc telegraf.Accumulator) error {
 // Parse the given URL to extract the server tag
 u, err := url.Parse(s)
 if err != nil {
-return fmt.Errorf("riak unable to parse given server url %s: %s", s, err)
+return fmt.Errorf("riak unable to parse given server URL %q: %w", s, err)
 }

 // Perform the GET request to the riak /stats endpoint

@@ -126,7 +126,7 @@ func (r *Riak) gatherServer(s string, acc telegraf.Accumulator) error {
 // Decode the response JSON into a new stats struct
 stats := &riakStats{}
 if err := json.NewDecoder(resp.Body).Decode(stats); err != nil {
-return fmt.Errorf("unable to decode riak response: %s", err)
+return fmt.Errorf("unable to decode riak response: %w", err)
 }

 // Build a map of tags
@@ -45,7 +45,7 @@ func (s *Sensors) Init() error {
 if s.path == "" {
 path, err := exec.LookPath(cmd)
 if err != nil {
-return fmt.Errorf("looking up %q failed: %v", cmd, err)
+return fmt.Errorf("looking up %q failed: %w", cmd, err)
 }
 s.path = path
 }

@@ -78,7 +78,7 @@ func (s *Sensors) parse(acc telegraf.Accumulator) error {
 cmd := execCommand(s.path, "-A", "-u")
 out, err := internal.StdOutputTimeout(cmd, time.Duration(s.Timeout))
 if err != nil {
-return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
+return fmt.Errorf("failed to run command %q: %w - %s", strings.Join(cmd.Args, " "), err, string(out))
 }
 lines := strings.Split(strings.TrimSpace(string(out)), "\n")
 for _, line := range lines {
@@ -2,12 +2,12 @@ package sflow

 import (
 "encoding/binary"
+"errors"
 "fmt"
 "io"

 "github.com/influxdata/telegraf"
 "github.com/influxdata/telegraf/plugins/inputs/sflow/binaryio"
-"github.com/pkg/errors"
 )

 type PacketDecoder struct {

@@ -39,7 +39,7 @@ func (d *PacketDecoder) Decode(r io.Reader) error {
 }
 d.onPacket(packet)
 }
-if err != nil && errors.Cause(err) == io.EOF {
+if err != nil && errors.Is(err, io.EOF) {
 return nil
 }
 return err

@@ -479,5 +479,8 @@ func (d *PacketDecoder) decodeUDPHeader(r io.Reader) (h UDPHeader, err error) {

 func read(r io.Reader, data interface{}, name string) error {
 err := binary.Read(r, binary.BigEndian, data)
-return errors.Wrapf(err, "failed to read %s", name)
+if err != nil {
+return fmt.Errorf("failed to read %q: %w", name, err)
+}
+return nil
 }
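The sflow decoder also drops github.com/pkg/errors entirely: errors.Wrapf becomes an explicit fmt.Errorf with %w, and errors.Cause(err) == io.EOF becomes errors.Is(err, io.EOF). A rough standalone sketch of the same migration (readHeader and isEOF are made-up names, not the plugin's API):

    package example

    import (
        "encoding/binary"
        "errors"
        "fmt"
        "io"
    )

    // Replaces errors.Wrapf(err, "failed to read %s", name).
    func readHeader(r io.Reader, data interface{}, name string) error {
        if err := binary.Read(r, binary.BigEndian, data); err != nil {
            return fmt.Errorf("failed to read %q: %w", name, err)
        }
        return nil
    }

    // Replaces errors.Cause(err) == io.EOF; errors.Is walks the %w chain.
    func isEOF(err error) bool {
        return errors.Is(err, io.EOF)
    }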
@@ -115,7 +115,7 @@ func (s *SFlow) read(acc telegraf.Accumulator, conn net.PacketConn) {

 func (s *SFlow) process(acc telegraf.Accumulator, buf []byte) {
 if err := s.decoder.Decode(bytes.NewBuffer(buf)); err != nil {
-acc.AddError(fmt.Errorf("unable to parse incoming packet: %s", err))
+acc.AddError(fmt.Errorf("unable to parse incoming packet: %w", err))
 }
 }

@@ -98,7 +98,7 @@ func (ss *SlabStats) runCmd(cmd string, args []string) ([]byte, error) {

 out, err := internal.StdOutputTimeout(execCmd, 5*time.Second)
 if err != nil {
-return nil, fmt.Errorf("failed to run command %s: %s - %v", execCmd.Args, err, out)
+return nil, fmt.Errorf("failed to run command %q: %w - %v", execCmd.Args, err, out)
 }

 return out, nil
@@ -4,6 +4,7 @@ package smart
 import (
 "bufio"
 _ "embed"
+"errors"
 "fmt"
 "os"
 "os/exec"

@@ -409,7 +410,7 @@ func (m *Smart) Init() error {
 if err != nil {
 m.PathSmartctl = ""
 //without smartctl, plugin will not be able to gather basic metrics
-return fmt.Errorf("smartctl not found: verify that smartctl is installed and it is in your PATH (or specified in config): %s", err.Error())
+return fmt.Errorf("smartctl not found: verify that smartctl is installed and it is in your PATH (or specified in config): %w", err)
 }

 err = validatePath(m.PathNVMe)

@@ -502,7 +503,7 @@ func distinguishNVMeDevices(userDevices []string, availableNVMeDevices []string)
 func (m *Smart) scanDevices(ignoreExcludes bool, scanArgs ...string) ([]string, error) {
 out, err := runCmd(m.Timeout, m.UseSudo, m.PathSmartctl, scanArgs...)
 if err != nil {
-return []string{}, fmt.Errorf("failed to run command '%s %s': %s - %s", m.PathSmartctl, scanArgs, err, string(out))
+return []string{}, fmt.Errorf("failed to run command '%s %s': %w - %s", m.PathSmartctl, scanArgs, err, string(out))
 }
 var devices []string
 for _, line := range strings.Split(string(out), "\n") {

@@ -666,7 +667,7 @@ func gatherIntelNVMeDisk(acc telegraf.Accumulator, timeout config.Duration, uses

 _, er := exitStatus(e)
 if er != nil {
-acc.AddError(fmt.Errorf("failed to run command '%s %s': %s - %s", nvme, strings.Join(args, " "), e, outStr))
+acc.AddError(fmt.Errorf("failed to run command '%s %s': %w - %s", nvme, strings.Join(args, " "), e, outStr))
 return
 }

@@ -735,7 +736,7 @@ func (m *Smart) gatherDisk(acc telegraf.Accumulator, device string, wg *sync.Wai
 // Ignore all exit statuses except if it is a command line parse error
 exitStatus, er := exitStatus(e)
 if er != nil {
-acc.AddError(fmt.Errorf("failed to run command '%s %s': %s - %s", m.PathSmartctl, strings.Join(args, " "), e, outStr))
+acc.AddError(fmt.Errorf("failed to run command '%s %s': %w - %s", m.PathSmartctl, strings.Join(args, " "), e, outStr))
 return
 }

@@ -882,8 +883,9 @@ func (m *Smart) gatherDisk(acc telegraf.Accumulator, device string, wg *sync.Wai
 // Command line parse errors are denoted by the exit code having the 0 bit set.
 // All other errors are drive/communication errors and should be ignored.
 func exitStatus(err error) (int, error) {
-if exiterr, ok := err.(*exec.ExitError); ok {
-if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+var exitErr *exec.ExitError
+if errors.As(err, &exitErr) {
+if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
 return status.ExitStatus(), nil
 }
 }
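The exitStatus change above is the idiomatic way to pull an *exec.ExitError out of a possibly wrapped chain. A self-contained sketch (the failing command is arbitrary and assumes a Unix-like system):

    package main

    import (
        "errors"
        "fmt"
        "os/exec"
    )

    func main() {
        // "false" exits non-zero, so Run returns an *exec.ExitError.
        err := exec.Command("false").Run()

        var exitErr *exec.ExitError
        if errors.As(err, &exitErr) {
            // ExitCode() is the portable accessor; the plugin goes through
            // syscall.WaitStatus instead, as shown in the hunk above.
            fmt.Println("exit code:", exitErr.ExitCode())
            return
        }
        fmt.Println("command did not start:", err)
    }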
@@ -46,8 +46,9 @@ func execCmd(arg0 string, args ...string) ([]byte, error) {

 out, err := execCommand(arg0, args...).Output()
 if err != nil {
-if err, ok := err.(*exec.ExitError); ok {
-return nil, fmt.Errorf("%s: %w", bytes.TrimRight(err.Stderr, "\r\n"), err)
+var exitErr *exec.ExitError
+if errors.As(err, &exitErr) {
+return nil, fmt.Errorf("%s: %w", bytes.TrimRight(exitErr.Stderr, "\r\n"), err)
 }
 return nil, err
 }

@@ -194,7 +195,8 @@ func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText strin
 out, err = execCmd("snmptranslate", "-Td", "-Ob", oid)
 } else {
 out, err = execCmd("snmptranslate", "-Td", "-Ob", "-m", "all", oid)
-if err, ok := err.(*exec.Error); ok && err.Err == exec.ErrNotFound {
+var execErr *exec.Error
+if errors.As(err, &execErr) && errors.Is(execErr, exec.ErrNotFound) {
 // Silently discard error if snmptranslate not found and we have a numeric OID.
 // Meaning we can get by without the lookup.
 return "", oid, oid, "", nil
@@ -488,7 +488,8 @@ func (t Table) Build(gs snmpConnection, walk bool, tr Translator) (*RTable, erro
 // Our callback always wraps errors in a walkError.
 // If this error isn't a walkError, we know it's not
 // from the callback
-if _, ok := err.(*walkError); !ok {
+var walkErr *walkError
+if !errors.As(err, &walkErr) {
 return nil, fmt.Errorf("performing bulk walk for field %s: %w", f.Name, err)
 }
 }
@@ -173,15 +173,15 @@ func (s *SnmpTrap) Start(acc telegraf.Accumulator) error {

 secname, err := s.SecName.Get()
 if err != nil {
-return fmt.Errorf("getting secname failed: %v", err)
+return fmt.Errorf("getting secname failed: %w", err)
 }
 privPasswd, err := s.PrivPassword.Get()
 if err != nil {
-return fmt.Errorf("getting secname failed: %v", err)
+return fmt.Errorf("getting secname failed: %w", err)
 }
 authPasswd, err := s.AuthPassword.Get()
 if err != nil {
-return fmt.Errorf("getting secname failed: %v", err)
+return fmt.Errorf("getting secname failed: %w", err)
 }
 s.listener.Params.SecurityParameters = &gosnmp.UsmSecurityParameters{
 UserName: string(secname),
@@ -68,7 +68,7 @@ func socketList(cmdName string, proto string, timeout config.Duration) (*bytes.B
 cmd.Stdout = &out
 err := internal.RunTimeout(cmd, time.Duration(timeout))
 if err != nil {
-return &out, fmt.Errorf("error running ss -in --%s: %v", proto, err)
+return &out, fmt.Errorf("error running ss -in --%s: %w", proto, err)
 }
 return &out, nil
 }
@@ -110,7 +110,7 @@ func (q *Query) parse(acc telegraf.Accumulator, rows *dbsql.Rows, t time.Time) (
 }
 if !skipParsing {
 if timestamp, err = internal.ParseTimestamp(q.TimeFormat, fieldvalue, ""); err != nil {
-return 0, fmt.Errorf("parsing time failed: %v", err)
+return 0, fmt.Errorf("parsing time failed: %w", err)
 }
 }
 }

@@ -118,7 +118,7 @@ func (q *Query) parse(acc telegraf.Accumulator, rows *dbsql.Rows, t time.Time) (
 if q.tagFilter.Match(name) {
 tagvalue, err := internal.ToString(columnData[i])
 if err != nil {
-return 0, fmt.Errorf("converting tag column %q failed: %v", name, err)
+return 0, fmt.Errorf("converting tag column %q failed: %w", name, err)
 }
 if v := strings.TrimSpace(tagvalue); v != "" {
 tags[name] = v

@@ -129,7 +129,7 @@ func (q *Query) parse(acc telegraf.Accumulator, rows *dbsql.Rows, t time.Time) (
 if q.fieldFilterFloat.Match(name) {
 v, err := internal.ToFloat64(columnData[i])
 if err != nil {
-return 0, fmt.Errorf("converting field column %q to float failed: %v", name, err)
+return 0, fmt.Errorf("converting field column %q to float failed: %w", name, err)
 }
 fields[name] = v
 continue

@@ -138,7 +138,7 @@ func (q *Query) parse(acc telegraf.Accumulator, rows *dbsql.Rows, t time.Time) (
 if q.fieldFilterInt.Match(name) {
 v, err := internal.ToInt64(columnData[i])
 if err != nil {
-return 0, fmt.Errorf("converting field column %q to int failed: %v", name, err)
+return 0, fmt.Errorf("converting field column %q to int failed: %w", name, err)
 }
 fields[name] = v
 continue

@@ -147,7 +147,7 @@ func (q *Query) parse(acc telegraf.Accumulator, rows *dbsql.Rows, t time.Time) (
 if q.fieldFilterUint.Match(name) {
 v, err := internal.ToUint64(columnData[i])
 if err != nil {
-return 0, fmt.Errorf("converting field column %q to uint failed: %v", name, err)
+return 0, fmt.Errorf("converting field column %q to uint failed: %w", name, err)
 }
 fields[name] = v
 continue

@@ -156,7 +156,7 @@ func (q *Query) parse(acc telegraf.Accumulator, rows *dbsql.Rows, t time.Time) (
 if q.fieldFilterBool.Match(name) {
 v, err := internal.ToBool(columnData[i])
 if err != nil {
-return 0, fmt.Errorf("converting field column %q to bool failed: %v", name, err)
+return 0, fmt.Errorf("converting field column %q to bool failed: %w", name, err)
 }
 fields[name] = v
 continue

@@ -165,7 +165,7 @@ func (q *Query) parse(acc telegraf.Accumulator, rows *dbsql.Rows, t time.Time) (
 if q.fieldFilterString.Match(name) {
 v, err := internal.ToString(columnData[i])
 if err != nil {
-return 0, fmt.Errorf("converting field column %q to string failed: %v", name, err)
+return 0, fmt.Errorf("converting field column %q to string failed: %w", name, err)
 }
 fields[name] = v
 continue
@@ -255,7 +255,7 @@ func (s *SQL) Init() error {
 if q.Script != "" {
 query, err := os.ReadFile(q.Script)
 if err != nil {
-return fmt.Errorf("reading script %q failed: %v", q.Script, err)
+return fmt.Errorf("reading script %q failed: %w", q.Script, err)
 }
 s.Queries[i].Query = string(query)
 }

@@ -268,45 +268,45 @@ func (s *SQL) Init() error {
 // Compile the tag-filter
 tagfilter, err := filter.NewIncludeExcludeFilterDefaults(q.TagColumnsInclude, q.TagColumnsExclude, false, false)
 if err != nil {
-return fmt.Errorf("creating tag filter failed: %v", err)
+return fmt.Errorf("creating tag filter failed: %w", err)
 }
 s.Queries[i].tagFilter = tagfilter

 // Compile the explicit type field-filter
 fieldfilterFloat, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsFloat, nil, false, false)
 if err != nil {
-return fmt.Errorf("creating field filter for float failed: %v", err)
+return fmt.Errorf("creating field filter for float failed: %w", err)
 }
 s.Queries[i].fieldFilterFloat = fieldfilterFloat

 fieldfilterInt, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsInt, nil, false, false)
 if err != nil {
-return fmt.Errorf("creating field filter for int failed: %v", err)
+return fmt.Errorf("creating field filter for int failed: %w", err)
 }
 s.Queries[i].fieldFilterInt = fieldfilterInt

 fieldfilterUint, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsUint, nil, false, false)
 if err != nil {
-return fmt.Errorf("creating field filter for uint failed: %v", err)
+return fmt.Errorf("creating field filter for uint failed: %w", err)
 }
 s.Queries[i].fieldFilterUint = fieldfilterUint

 fieldfilterBool, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsBool, nil, false, false)
 if err != nil {
-return fmt.Errorf("creating field filter for bool failed: %v", err)
+return fmt.Errorf("creating field filter for bool failed: %w", err)
 }
 s.Queries[i].fieldFilterBool = fieldfilterBool

 fieldfilterString, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsString, nil, false, false)
 if err != nil {
-return fmt.Errorf("creating field filter for string failed: %v", err)
+return fmt.Errorf("creating field filter for string failed: %w", err)
 }
 s.Queries[i].fieldFilterString = fieldfilterString

 // Compile the field-filter
 fieldfilter, err := filter.NewIncludeExcludeFilter(q.FieldColumnsInclude, q.FieldColumnsExclude)
 if err != nil {
-return fmt.Errorf("creating field filter failed: %v", err)
+return fmt.Errorf("creating field filter failed: %w", err)
 }
 s.Queries[i].fieldFilter = fieldfilter

@@ -360,7 +360,7 @@ func (s *SQL) Start(_ telegraf.Accumulator) error {
 // Connect to the database server
 dsn, err := s.Dsn.Get()
 if err != nil {
-return fmt.Errorf("getting DSN failed: %v", err)
+return fmt.Errorf("getting DSN failed: %w", err)
 }
 defer config.ReleaseSecret(dsn)
 s.Log.Debug("Connecting...")

@@ -381,7 +381,7 @@ func (s *SQL) Start(_ telegraf.Accumulator) error {
 err = s.db.PingContext(ctx)
 cancel()
 if err != nil {
-return fmt.Errorf("connecting to database failed: %v", err)
+return fmt.Errorf("connecting to database failed: %w", err)
 }

 // Prepare the statements

@@ -391,7 +391,7 @@ func (s *SQL) Start(_ telegraf.Accumulator) error {
 stmt, err := s.db.PrepareContext(ctx, q.Query)
 cancel()
 if err != nil {
-return fmt.Errorf("preparing query %q failed: %v", q.Query, err)
+return fmt.Errorf("preparing query %q failed: %w", q.Query, err)
 }
 s.Queries[i].statement = stmt
 }
@@ -277,7 +277,7 @@ func (s *SQLServer) Start(acc telegraf.Accumulator) error {
 // get token from in-memory cache variable or from Azure Active Directory
 tokenProvider, err := s.getTokenProvider()
 if err != nil {
-acc.AddError(fmt.Errorf("error creating AAD token provider for system assigned Azure managed identity : %s", err.Error()))
+acc.AddError(fmt.Errorf("error creating AAD token provider for system assigned Azure managed identity: %w", err))
 continue
 }

@@ -290,7 +290,7 @@ func (s *SQLServer) Start(acc telegraf.Accumulator) error {
 connector, err := mssql.NewAccessTokenConnector(string(dsn), tokenProvider)
 config.ReleaseSecret(dsn)
 if err != nil {
-acc.AddError(fmt.Errorf("error creating the SQL connector : %s", err.Error()))
+acc.AddError(fmt.Errorf("error creating the SQL connector: %w", err))
 continue
 }

@@ -326,9 +326,10 @@ func (s *SQLServer) gatherServer(pool *sql.DB, query Query, acc telegraf.Accumul
 serverName, databaseName := getConnectionIdentifiers(connectionString)

 // Error msg based on the format in SSMS. SQLErrorClass() is another term for severity/level: http://msdn.microsoft.com/en-us/library/dd304156.aspx
-if sqlerr, ok := err.(mssql.Error); ok {
+var sqlErr mssql.Error
+if errors.As(err, &sqlErr) {
 return fmt.Errorf("query %s failed for server: %s and database: %s with Msg %d, Level %d, State %d:, Line %d, Error: %w", query.ScriptName,
-serverName, databaseName, sqlerr.SQLErrorNumber(), sqlerr.SQLErrorClass(), sqlerr.SQLErrorState(), sqlerr.SQLErrorLineNo(), err)
+serverName, databaseName, sqlErr.SQLErrorNumber(), sqlErr.SQLErrorClass(), sqlErr.SQLErrorState(), sqlErr.SQLErrorLineNo(), err)
 }

 return fmt.Errorf("query %s failed for server: %s and database: %s with Error: %w", query.ScriptName, serverName, databaseName, err)
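Note that the errors.As target in the sqlserver hunk is a value type (mssql.Error), not a pointer; errors.As accepts either, as long as the target type implements error. A toy illustration with an invented value-type error standing in for mssql.Error:

    package main

    import (
        "errors"
        "fmt"
    )

    // queryError is a made-up value-type error for illustration only.
    type queryError struct {
        Number int
        Msg    string
    }

    func (e queryError) Error() string {
        return fmt.Sprintf("query error %d: %s", e.Number, e.Msg)
    }

    func main() {
        err := fmt.Errorf("running query failed: %w", queryError{Number: 208, Msg: "invalid object name"})

        var qe queryError
        if errors.As(err, &qe) {
            fmt.Println("number:", qe.Number) // 208
        }
    }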
@@ -147,7 +147,7 @@ func (smc *stackdriverMetricClient) ListMetricDescriptors(
 for {
 mdDesc, mdErr := mdResp.Next()
 if mdErr != nil {
-if mdErr != iterator.Done {
+if !errors.Is(mdErr, iterator.Done) {
 smc.log.Errorf("Failed iterating metric descriptor responses: %q: %v", req.String(), mdErr)
 }
 break

@@ -176,7 +176,7 @@ func (smc *stackdriverMetricClient) ListTimeSeries(
 for {
 tsDesc, tsErr := tsResp.Next()
 if tsErr != nil {
-if tsErr != iterator.Done {
+if !errors.Is(tsErr, iterator.Done) {
 smc.log.Errorf("Failed iterating time series responses: %q: %v", req.String(), tsErr)
 }
 break

@@ -408,7 +408,7 @@ func (s *Stackdriver) initializeStackdriverClient(ctx context.Context) error {
 if s.client == nil {
 client, err := monitoring.NewMetricClient(ctx)
 if err != nil {
-return fmt.Errorf("failed to create stackdriver monitoring client: %v", err)
+return fmt.Errorf("failed to create stackdriver monitoring client: %w", err)
 }

 tags := map[string]string{
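iterator.Done, io.EOF, errParsing and context.Canceled are sentinel error values; comparing them with == stops working as soon as anything wraps them, which is why the hunks above and below switch to errors.Is. A small sketch using io.EOF:

    package main

    import (
        "errors"
        "fmt"
        "io"
        "strings"
    )

    func main() {
        r := strings.NewReader("")
        buf := make([]byte, 8)

        _, err := r.Read(buf) // returns io.EOF on an empty reader
        wrapped := fmt.Errorf("reading stream failed: %w", err)

        fmt.Println(wrapped == io.EOF)          // false: the wrap hides the sentinel
        fmt.Println(errors.Is(wrapped, io.EOF)) // true: errors.Is unwraps it
    }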
@@ -5,6 +5,7 @@ import (
 "bufio"
 "bytes"
 _ "embed"
+"errors"
 "fmt"
 "net"
 "regexp"

@@ -14,8 +15,6 @@ import (
 "sync"
 "time"

-"github.com/pkg/errors"
-
 "github.com/influxdata/telegraf"
 "github.com/influxdata/telegraf/config"
 "github.com/influxdata/telegraf/internal"

@@ -519,7 +518,7 @@ func (s *Statsd) parser() error {
 }
 default:
 if err := s.parseStatsdLine(line); err != nil {
-if errors.Cause(err) != errParsing {
+if !errors.Is(err, errParsing) {
 // Ignore parsing errors but error out on
 // everything else...
 return err
@@ -62,20 +62,20 @@ func (s *Supervisor) Gather(acc telegraf.Accumulator) error {
 var rawProcessData []processInfo
 err := s.rpcClient.Call("supervisor.getAllProcessInfo", nil, &rawProcessData)
 if err != nil {
-return fmt.Errorf("failed to get processes info: %v", err)
+return fmt.Errorf("failed to get processes info: %w", err)
 }

 // API call to get information about instance status
 var status supervisorInfo
 err = s.rpcClient.Call("supervisor.getState", nil, &status)
 if err != nil {
-return fmt.Errorf("failed to get processes info: %v", err)
+return fmt.Errorf("failed to get processes info: %w", err)
 }

 // API call to get identification string
 err = s.rpcClient.Call("supervisor.getIdentification", nil, &status.Ident)
 if err != nil {
-return fmt.Errorf("failed to get instance identification: %v", err)
+return fmt.Errorf("failed to get instance identification: %w", err)
 }

 // Iterating through array of structs with processes info and adding fields to accumulator

@@ -90,7 +90,7 @@ func (s *Supervisor) Gather(acc telegraf.Accumulator) error {
 // Adding instance info fields to accumulator
 instanceTags, instanceFields, err := s.parseInstanceData(status)
 if err != nil {
-return fmt.Errorf("failed to parse instance data: %v", err)
+return fmt.Errorf("failed to parse instance data: %w", err)
 }
 acc.AddFields("supervisor_instance", instanceFields, instanceTags)
 return nil

@@ -113,7 +113,7 @@ func (s *Supervisor) parseProcessData(pInfo processInfo, status supervisorInfo)
 }
 splittedURL, err := beautifyServerString(s.Server)
 if err != nil {
-return nil, nil, fmt.Errorf("failed to parse server string: %v", err)
+return nil, nil, fmt.Errorf("failed to parse server string: %w", err)
 }
 tags["id"] = status.Ident
 tags["source"] = splittedURL[0]

@@ -125,7 +125,7 @@ func (s *Supervisor) parseProcessData(pInfo processInfo, status supervisorInfo)
 func (s *Supervisor) parseInstanceData(status supervisorInfo) (map[string]string, map[string]interface{}, error) {
 splittedURL, err := beautifyServerString(s.Server)
 if err != nil {
-return nil, nil, fmt.Errorf("failed to parse server string: %v", err)
+return nil, nil, fmt.Errorf("failed to parse server string: %w", err)
 }
 tags := map[string]string{}
 tags["id"] = status.Ident

@@ -144,12 +144,12 @@ func (s *Supervisor) Init() error {
 // Initializing XML-RPC client
 s.rpcClient, err = xmlrpc.NewClient(s.Server, nil)
 if err != nil {
-return fmt.Errorf("XML-RPC client initialization failed: %v", err)
+return fmt.Errorf("XML-RPC client initialization failed: %w", err)
 }
 // Setting filter for additional metrics
 s.fieldFilter, err = filter.NewIncludeExcludeFilter(s.MetricsInc, s.MetricsExc)
 if err != nil {
-return fmt.Errorf("metrics filter setup failed: %v", err)
+return fmt.Errorf("metrics filter setup failed: %w", err)
 }
 return nil
 }
@@ -6,6 +6,7 @@ import (
 "context"
 _ "embed"
 "encoding/json"
+"errors"
 "fmt"
 "io"
 "net"

@@ -116,7 +117,7 @@ func (s *Suricata) handleServerConnection(ctx context.Context, acc telegraf.Accu
 // we want to handle EOF as an opportunity to wait for a new
 // connection -- this could, for example, happen when Suricata is
 // restarted while Telegraf is running.
-if err != io.EOF {
+if !errors.Is(err, io.EOF) {
 acc.AddError(err)
 return
 }
@@ -24,7 +24,7 @@ func (*SwapStats) SampleConfig() string {
 func (ss *SwapStats) Gather(acc telegraf.Accumulator) error {
 swap, err := ss.ps.SwapStat()
 if err != nil {
-return fmt.Errorf("error getting swap memory info: %s", err)
+return fmt.Errorf("error getting swap memory info: %w", err)
 }

 fieldsG := map[string]interface{}{
@@ -1,14 +1,16 @@
 package syslog

 import (
+"errors"
 "fmt"
 "net"
 "testing"
 "time"

+"github.com/stretchr/testify/require"
+
 "github.com/influxdata/telegraf"
 "github.com/influxdata/telegraf/testutil"
-"github.com/stretchr/testify/require"
 )

 func timeMustParse(value string) time.Time {

@@ -94,8 +96,9 @@ func testRFC3164(t *testing.T, protocol string, address string, bestEffort bool)
 _, err = conn.Write(tc.data)
 conn.Close()
 if err != nil {
-if err, ok := err.(*net.OpError); ok {
-if err.Err.Error() == "write: message too long" {
+var opErr *net.OpError
+if errors.As(err, &opErr) {
+if opErr.Err.Error() == "write: message too long" {
 return
 }
 }
@@ -1,6 +1,7 @@
 package syslog

 import (
+"errors"
 "fmt"
 "net"
 "os"

@@ -258,8 +259,9 @@ func testRFC5426(t *testing.T, protocol string, address string, bestEffort bool)
 _, err = conn.Write(tc.data)
 conn.Close()
 if err != nil {
-if err, ok := err.(*net.OpError); ok {
-if err.Err.Error() == "write: message too long" {
+var opErr *net.OpError
+if errors.As(err, &opErr) {
+if opErr.Err.Error() == "write: message too long" {
 return
 }
 }
@@ -241,7 +241,7 @@ func (s *Syslog) listenStream(acc telegraf.Accumulator) {
 s.connectionsMu.Unlock()

 if err := s.setKeepAlive(tcpConn); err != nil {
-acc.AddError(fmt.Errorf("unable to configure keep alive (%s): %s", s.Address, err))
+acc.AddError(fmt.Errorf("unable to configure keep alive %q: %w", s.Address, err))
 }

 go s.handle(conn, acc)

@@ -274,7 +274,7 @@ func (s *Syslog) handle(conn net.Conn, acc telegraf.Accumulator) {
 s.store(*r, conn.RemoteAddr(), acc)
 if s.ReadTimeout != nil && time.Duration(*s.ReadTimeout) > 0 {
 if err := conn.SetReadDeadline(time.Now().Add(time.Duration(*s.ReadTimeout))); err != nil {
-acc.AddError(fmt.Errorf("setting read deadline failed: %v", err))
+acc.AddError(fmt.Errorf("setting read deadline failed: %w", err))
 }
 }
 }

@@ -301,7 +301,7 @@ func (s *Syslog) handle(conn net.Conn, acc telegraf.Accumulator) {

 if s.ReadTimeout != nil && time.Duration(*s.ReadTimeout) > 0 {
 if err := conn.SetReadDeadline(time.Now().Add(time.Duration(*s.ReadTimeout))); err != nil {
-acc.AddError(fmt.Errorf("setting read deadline failed: %v", err))
+acc.AddError(fmt.Errorf("setting read deadline failed: %w", err))
 }
 }
 }
@@ -7,6 +7,7 @@ import (
 "bufio"
 _ "embed"
 "encoding/csv"
+"errors"
 "fmt"
 "io"
 "os"

@@ -85,7 +86,7 @@ func (s *Sysstat) Init() error {
 if s.Sadf == "" {
 sadf, err := exec.LookPath(cmd)
 if err != nil {
-return fmt.Errorf("looking up %q failed: %v", cmd, err)
+return fmt.Errorf("looking up %q failed: %w", cmd, err)
 }
 s.Sadf = sadf
 }

@@ -113,7 +114,7 @@ func (s *Sysstat) Gather(acc telegraf.Accumulator) error {

 tmpfile, err := os.CreateTemp("", "sysstat-*")
 if err != nil {
-return fmt.Errorf("failed to create tmp file: %s", err)
+return fmt.Errorf("failed to create tmp file: %w", err)
 }
 defer os.Remove(tmpfile.Name())

@@ -159,7 +160,7 @@ func (s *Sysstat) collect(tempfile string) error {
 cmd := execCommand(s.Sadc, options...)
 out, err := internal.CombinedOutputTimeout(cmd, time.Second*time.Duration(collectInterval+parseInterval))
 if err != nil {
-return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
+return fmt.Errorf("failed to run command %q: %w - %q", strings.Join(cmd.Args, " "), err, string(out))
 }
 return nil
 }

@@ -218,7 +219,7 @@ func (s *Sysstat) parse(acc telegraf.Accumulator, option string, tmpfile string,
 m := make(map[string]groupData)
 for {
 record, err := csvReader.Read()
-if err == io.EOF {
+if errors.Is(err, io.EOF) {
 break
 }
 if err != nil {

@@ -272,8 +273,7 @@ func (s *Sysstat) parse(acc telegraf.Accumulator, option string, tmpfile string,
 }
 }
 if err := internal.WaitTimeout(cmd, time.Second*5); err != nil {
-return fmt.Errorf("command %s failed with %s",
-strings.Join(cmd.Args, " "), err)
+return fmt.Errorf("command %q failed with: %w", strings.Join(cmd.Args, " "), err)
 }
 return nil
 }
@@ -1,6 +1,7 @@
 package system

 import (
+"errors"
 "os"
 "path/filepath"
 "strings"

@@ -198,7 +199,7 @@ func (s *SystemPS) NetConntrack(perCPU bool) ([]net.ConntrackStat, error) {

 func (s *SystemPS) DiskIO(names []string) (map[string]disk.IOCountersStat, error) {
 m, err := disk.IOCounters(names...)
-if err == internal.ErrNotImplemented {
+if errors.Is(err, internal.ErrNotImplemented) {
 return nil, nil
 }

@@ -207,7 +207,7 @@ func setSystemctl(timeout config.Duration, unitType string, pattern string) (*by
 cmd.Stdout = &out
 err = internal.RunTimeout(cmd, time.Duration(timeout))
 if err != nil {
-return &out, fmt.Errorf("error running systemctl %s: %s", strings.Join(params, " "), err)
+return &out, fmt.Errorf("error running systemctl %q: %w", strings.Join(params, " "), err)
 }
 return &out, nil
 }
@@ -26,9 +26,9 @@ func (t *Temperature) Gather(acc telegraf.Accumulator) error {
 temps, err := t.ps.Temperature()
 if err != nil {
 if strings.Contains(err.Error(), "not implemented yet") {
-return fmt.Errorf("plugin is not supported on this platform: %v", err)
+return fmt.Errorf("plugin is not supported on this platform: %w", err)
 }
-return fmt.Errorf("error getting temperatures info: %s", err)
+return fmt.Errorf("error getting temperatures info: %w", err)
 }
 for _, temp := range temps {
 tags := map[string]string{
@@ -4,6 +4,7 @@ package tengine
 import (
 "bufio"
 _ "embed"
+"errors"
 "fmt"
 "io"
 "net"

@@ -123,7 +124,7 @@ func (n *Tengine) gatherURL(addr *url.URL, acc telegraf.Accumulator) error {
 var tengineStatus TengineStatus
 resp, err := n.client.Get(addr.String())
 if err != nil {
-return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
+return fmt.Errorf("error making HTTP request to %q: %w", addr.String(), err)
 }
 defer resp.Body.Close()
 if resp.StatusCode != http.StatusOK {

@@ -134,7 +135,7 @@ func (n *Tengine) gatherURL(addr *url.URL, acc telegraf.Accumulator) error {
 for {
 line, err := r.ReadString('\n')

-if err != nil || io.EOF == err {
+if err != nil || errors.Is(err, io.EOF) {
 break
 }
 lineSplit := strings.Split(strings.TrimSpace(line), ",")
@@ -3,6 +3,7 @@ package udp_listener

 import (
 _ "embed"
+"errors"
 "fmt"
 "net"
 "sync"

@@ -140,7 +141,7 @@ func (u *UDPListener) udpListen() error {
 if u.UDPBufferSize > 0 {
 err = u.listener.SetReadBuffer(u.UDPBufferSize) // if we want to move away from OS default
 if err != nil {
-return fmt.Errorf("failed to set UDP read buffer to %d: %s", u.UDPBufferSize, err)
+return fmt.Errorf("failed to set UDP read buffer to %d: %w", u.UDPBufferSize, err)
 }
 }

@@ -164,7 +165,8 @@ func (u *UDPListener) udpListenLoop() {

 n, _, err := u.listener.ReadFromUDP(buf)
 if err != nil {
-if err, ok := err.(net.Error); !ok || !err.Timeout() {
+var netErr net.Error
+if !errors.As(err, &netErr) || !netErr.Timeout() {
 u.Log.Error(err.Error())
 }
 continue
@@ -57,10 +57,10 @@ func unboundRunner(unbound Unbound) (*bytes.Buffer, error) {
 defer lookUpCancel()
 serverIps, err := resolver.LookupIPAddr(ctx, host)
 if err != nil {
-return nil, fmt.Errorf("error looking up ip for server: %s: %s", unbound.Server, err)
+return nil, fmt.Errorf("error looking up ip for server %q: %w", unbound.Server, err)
 }
 if len(serverIps) == 0 {
-return nil, fmt.Errorf("error no ip for server: %s: %s", unbound.Server, err)
+return nil, fmt.Errorf("error no ip for server %q: %w", unbound.Server, err)
 }
 server := serverIps[0].IP.String()
 if port != "" {

@@ -85,7 +85,7 @@ func unboundRunner(unbound Unbound) (*bytes.Buffer, error) {
 cmd.Stdout = &out
 err := internal.RunTimeout(cmd, time.Duration(unbound.Timeout))
 if err != nil {
-return &out, fmt.Errorf("error running unbound-control: %s (%s %v)", err, unbound.Binary, cmdArgs)
+return &out, fmt.Errorf("error running unbound-control %q %q: %w", unbound.Binary, cmdArgs, err)
 }

 return &out, nil

@@ -107,7 +107,7 @@ func (s *Unbound) Gather(acc telegraf.Accumulator) error {

 out, err := s.run(*s)
 if err != nil {
-return fmt.Errorf("error gathering metrics: %s", err)
+return fmt.Errorf("error gathering metrics: %w", err)
 }

 // Process values
@@ -6,11 +6,12 @@ import (
 "fmt"
 "strings"

+nut "github.com/robbiet480/go.nut"
+
 "github.com/influxdata/telegraf"
 "github.com/influxdata/telegraf/internal"
 "github.com/influxdata/telegraf/internal/choice"
 "github.com/influxdata/telegraf/plugins/inputs"
-nut "github.com/robbiet480/go.nut"
 )

 //go:embed sample.conf

@@ -113,7 +114,7 @@ func (u *Upsd) gatherUps(acc telegraf.Accumulator, name string, variables []nut.
 // Force expected float values to actually being float (e.g. if delivered as int)
 float, err := internal.ToFloat64(metrics[rawValue])
 if err != nil {
-acc.AddError(fmt.Errorf("converting %s=%v failed: %v", rawValue, metrics[rawValue], err))
+acc.AddError(fmt.Errorf("converting %s=%v failed: %w", rawValue, metrics[rawValue], err))
 continue
 }
 fields[key] = float

@@ -121,7 +122,7 @@ func (u *Upsd) gatherUps(acc telegraf.Accumulator, name string, variables []nut.

 val, err := internal.ToString(metrics["ups.firmware"])
 if err != nil {
-acc.AddError(fmt.Errorf("converting ups.firmware=%v failed: %v", metrics["ups.firmware"], err))
+acc.AddError(fmt.Errorf("converting ups.firmware=%q failed: %w", metrics["ups.firmware"], err))
 } else {
 fields["firmware"] = val
 }
@@ -92,7 +92,7 @@ func varnishRunner(cmdName string, useSudo bool, cmdArgs []string, timeout confi

 err := internal.RunTimeout(cmd, time.Duration(timeout))
 if err != nil {
-return &out, fmt.Errorf("error running %s %v - %s", cmdName, cmdArgs, err)
+return &out, fmt.Errorf("error running %q %q: %w", cmdName, cmdArgs, err)
 }

 return &out, nil

@@ -107,7 +107,7 @@ func (s *Varnish) Init() error {
 for _, re := range s.Regexps {
 compiled, err := regexp.Compile(re)
 if err != nil {
-return fmt.Errorf("error parsing regexp: %s", err)
+return fmt.Errorf("error parsing regexp: %w", err)
 }
 customRegexps = append(customRegexps, compiled)
 }

@@ -142,7 +142,7 @@ func (s *Varnish) Gather(acc telegraf.Accumulator) error {

 statOut, err := s.run(s.Binary, s.UseSudo, statsArgs, s.Timeout)
 if err != nil {
-return fmt.Errorf("error gathering metrics: %s", err)
+return fmt.Errorf("error gathering metrics: %w", err)
 }

 if s.MetricVersion == 2 {

@@ -151,11 +151,11 @@ func (s *Varnish) Gather(acc telegraf.Accumulator) error {
 if s.admRun != nil {
 admOut, err := s.admRun(s.AdmBinary, s.UseSudo, admArgs, s.Timeout)
 if err != nil {
-return fmt.Errorf("error gathering metrics: %s", err)
+return fmt.Errorf("error gathering metrics: %w", err)
 }
 activeVcl, err = getActiveVCLJson(admOut)
 if err != nil {
-return fmt.Errorf("error gathering metrics: %s", err)
+return fmt.Errorf("error gathering metrics: %w", err)
 }
 }
 return s.processMetricsV2(activeVcl, acc, statOut)

@@ -271,12 +271,12 @@ func (s *Varnish) processMetricsV2(activeVcl string, acc telegraf.Accumulator, o
 //parse bitmap value
 if flag == "b" {
 if metricValue, parseError = strconv.ParseUint(number.String(), 10, 64); parseError != nil {
-parseError = fmt.Errorf("%s value uint64 error: %s", fieldName, parseError)
+parseError = fmt.Errorf("%q value uint64 error: %w", fieldName, parseError)
 }
 } else if metricValue, parseError = number.Int64(); parseError != nil {
 //try parse float
 if metricValue, parseError = number.Float64(); parseError != nil {
-parseError = fmt.Errorf("stat %s value %v is not valid number: %s", fieldName, value, parseError)
+parseError = fmt.Errorf("stat %q value %q is not valid number: %w", fieldName, value, parseError)
 }
 }
 } else {
@@ -64,14 +64,14 @@ func (n *Vault) Init() error {
 if n.TokenFile != "" {
 token, err := os.ReadFile(n.TokenFile)
 if err != nil {
-return fmt.Errorf("reading file failed: %v", err)
+return fmt.Errorf("reading file failed: %w", err)
 }
 n.Token = strings.TrimSpace(string(token))
 }

 tlsCfg, err := n.ClientConfig.TLSConfig()
 if err != nil {
-return fmt.Errorf("setting up TLS configuration failed: %v", err)
+return fmt.Errorf("setting up TLS configuration failed: %w", err)
 }

 n.roundTripper = &http.Transport{

@@ -104,7 +104,7 @@ func (n *Vault) loadJSON(url string) (*SysMetrics, error) {

 resp, err := n.roundTripper.RoundTrip(req)
 if err != nil {
-return nil, fmt.Errorf("error making HTTP request to %s: %s", url, err)
+return nil, fmt.Errorf("error making HTTP request to %q: %w", url, err)
 }
 defer resp.Body.Close()

@@ -115,7 +115,7 @@ func (n *Vault) loadJSON(url string) (*SysMetrics, error) {
 var metrics SysMetrics
 err = json.NewDecoder(resp.Body).Decode(&metrics)
 if err != nil {
-return nil, fmt.Errorf("error parsing json response: %s", err)
+return nil, fmt.Errorf("error parsing json response: %w", err)
 }

 return &metrics, nil

@@ -125,7 +125,7 @@ func (n *Vault) loadJSON(url string) (*SysMetrics, error) {
 func buildVaultMetrics(acc telegraf.Accumulator, sysMetrics *SysMetrics) error {
 t, err := time.Parse(timeLayout, sysMetrics.Timestamp)
 if err != nil {
-return fmt.Errorf("error parsing time: %s", err)
+return fmt.Errorf("error parsing time: %w", err)
 }

 for _, counters := range sysMetrics.Counters {

@@ -133,7 +133,7 @@ func buildVaultMetrics(acc telegraf.Accumulator, sysMetrics *SysMetrics) error {
 for key, val := range counters.baseInfo.Labels {
 convertedVal, err := internal.ToString(val)
 if err != nil {
-return fmt.Errorf("converting counter %s=%v failed: %v", key, val, err)
+return fmt.Errorf("converting counter %s=%v failed: %w", key, val, err)
 }
 tags[key] = convertedVal
 }

@@ -155,7 +155,7 @@ func buildVaultMetrics(acc telegraf.Accumulator, sysMetrics *SysMetrics) error {
 for key, val := range gauges.baseInfo.Labels {
 convertedVal, err := internal.ToString(val)
 if err != nil {
-return fmt.Errorf("converting gauges %s=%v failed: %v", key, val, err)
+return fmt.Errorf("converting gauges %s=%v failed: %w", key, val, err)
 }
 tags[key] = convertedVal
 }

@@ -172,7 +172,7 @@ func buildVaultMetrics(acc telegraf.Accumulator, sysMetrics *SysMetrics) error {
 for key, val := range summary.baseInfo.Labels {
 convertedVal, err := internal.ToString(val)
 if err != nil {
-return fmt.Errorf("converting summary %s=%v failed: %v", key, val, err)
+return fmt.Errorf("converting summary %s=%v failed: %w", key, val, err)
 }
 tags[key] = convertedVal
 }
@@ -281,7 +281,7 @@ func (e *Endpoint) startDiscovery(ctx context.Context) {
 select {
 case <-e.discoveryTicker.C:
 err := e.discover(ctx)
-if err != nil && err != context.Canceled {
+if err != nil && !errors.Is(err, context.Canceled) {
 e.log.Errorf("Discovery for %s: %s", e.URL.Host, err.Error())
 }
 case <-ctx.Done():

@@ -295,7 +295,7 @@ func (e *Endpoint) startDiscovery(ctx context.Context) {

 func (e *Endpoint) initalDiscovery(ctx context.Context) {
 err := e.discover(ctx)
-if err != nil && err != context.Canceled {
+if err != nil && !errors.Is(err, context.Canceled) {
 e.log.Errorf("Discovery for %s: %s", e.URL.Host, err.Error())
 }
 e.startDiscovery(ctx)

@@ -997,7 +997,7 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim
 len(bucket.MetricId), len(res.metrics)-metricIdx, res.name, e.URL.Host, len(res.objects))

 // Don't send work items if the context has been cancelled.
-if ctx.Err() == context.Canceled {
+if errors.Is(ctx.Err(), context.Canceled) {
 return
 }
@@ -4,6 +4,7 @@ package vsphere
 import (
 "context"
 _ "embed"
+"errors"
 "sync"
 "time"

@@ -136,7 +137,7 @@ func (v *VSphere) Gather(acc telegraf.Accumulator) error {
 go func(endpoint *Endpoint) {
 defer wg.Done()
 err := endpoint.Collect(context.Background(), acc)
-if err == context.Canceled {
+if errors.Is(err, context.Canceled) {
 // No need to signal errors if we were merely canceled.
 err = nil
 }
@@ -92,13 +92,13 @@ func (wb *Webhooks) Start(acc telegraf.Accumulator) error {

 ln, err := net.Listen("tcp", wb.ServiceAddress)
 if err != nil {
-return fmt.Errorf("error starting server: %v", err)
+return fmt.Errorf("error starting server: %w", err)
 }

 go func() {
 if err := wb.srv.Serve(ln); err != nil {
 if err != http.ErrServerClosed {
-acc.AddError(fmt.Errorf("error listening: %v", err))
+acc.AddError(fmt.Errorf("error listening: %w", err))
 }
 }
 }()
@@ -53,7 +53,7 @@ func (wg *Wireguard) Init() error {
 func (wg *Wireguard) Gather(acc telegraf.Accumulator) error {
 devices, err := wg.enumerateDevices()
 if err != nil {
-return fmt.Errorf("error enumerating Wireguard devices: %v", err)
+return fmt.Errorf("error enumerating Wireguard devices: %w", err)
 }

 for _, device := range devices {
@@ -281,7 +281,7 @@ func (c *X509Cert) sourcesToURLs() error {
 source = reDriveLetter.ReplaceAllString(source, "$1")
 g, err := globpath.Compile(source)
 if err != nil {
-return fmt.Errorf("could not compile glob %v: %v", source, err)
+return fmt.Errorf("could not compile glob %q: %w", source, err)
 }
 c.globpaths = append(c.globpaths, g)
 } else {

@@ -290,7 +290,7 @@ func (c *X509Cert) sourcesToURLs() error {
 }
 u, err := url.Parse(source)
 if err != nil {
-return fmt.Errorf("failed to parse cert location - %s", err.Error())
+return fmt.Errorf("failed to parse cert location: %w", err)
 }
 c.locations = append(c.locations, u)
 }

@@ -430,7 +430,7 @@ func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certifica
 defer smtpConn.Text.EndResponse(id)
 _, _, err = smtpConn.Text.ReadResponse(220)
 if err != nil {
-return nil, nil, fmt.Errorf("did not get 220 after STARTTLS: %s", err.Error())
+return nil, nil, fmt.Errorf("did not get 220 after STARTTLS: %w", err)
 }

 tlsConn := tls.Client(ipConn, downloadTLSCfg)
@@ -133,7 +133,7 @@ func (z *Zipkin) Listen(ln net.Listener, acc telegraf.Accumulator) {
 // This interferes with telegraf's internal data collection,
 // by making it appear as if a serious error occurred.
 if err != http.ErrServerClosed {
-acc.AddError(fmt.Errorf("error listening: %v", err))
+acc.AddError(fmt.Errorf("error listening: %w", err))
 }
 }
 }
@@ -654,7 +654,7 @@ func postThriftData(datafile, address, contentType string) error {
 client := &http.Client{}
 resp, err := client.Do(req)
 if err != nil {
-return fmt.Errorf("HTTP POST request to zipkin endpoint %s failed %v", address, err)
+return fmt.Errorf("HTTP POST request to zipkin endpoint %q failed: %w", address, err)
 }

 defer resp.Body.Close()