chore(linters): Replace 'fmt.Errorf' with 'errors.New' wherever possible (#14698)
This commit is contained in:
parent  a7f0b06bfe
commit  ae7fbc5082
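For readers skimming the diff below: the rule applied throughout the commit is that an error message which is a plain constant string is built with `errors.New`, while `fmt.Errorf` is kept wherever the message actually formats values or wraps another error. A minimal sketch of that pattern (hypothetical `validate` function, not code from this commit):

```go
package main

import (
	"errors"
	"fmt"
)

// validate illustrates the lint rule driving this commit.
func validate(name string) error {
	if name == "" {
		// Constant message: errors.New replaces fmt.Errorf("name is empty").
		return errors.New("name is empty")
	}
	if len(name) > 64 {
		// Formatting directives are used, so fmt.Errorf stays.
		return fmt.Errorf("name %q is too long (%d characters)", name, len(name))
	}
	return nil
}

func main() {
	fmt.Println(validate(""))
}
```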
@@ -2,7 +2,7 @@ package agent

 import (
     "bytes"
-    "fmt"
+    "errors"
     "log"
     "os"
     "testing"
@@ -55,9 +55,9 @@ func TestAccAddError(t *testing.T) {
     defer close(metrics)
     a := NewAccumulator(&TestMetricMaker{}, metrics)

-    a.AddError(fmt.Errorf("foo"))
-    a.AddError(fmt.Errorf("bar"))
-    a.AddError(fmt.Errorf("baz"))
+    a.AddError(errors.New("foo"))
+    a.AddError(errors.New("bar"))
+    a.AddError(errors.New("baz"))

     errs := bytes.Split(errBuf.Bytes(), []byte{'\n'})
     require.Len(t, errs, 4) // 4 because of trailing newline
@@ -503,7 +503,7 @@ func (c *Config) LoadConfigData(data []byte) error {
     if val, ok := tbl.Fields["agent"]; ok {
         subTable, ok := val.(*ast.Table)
         if !ok {
-            return fmt.Errorf("invalid configuration, error parsing agent table")
+            return errors.New("invalid configuration, error parsing agent table")
         }
         if err = c.toml.UnmarshalTable(subTable, c.Agent); err != nil {
             return fmt.Errorf("error parsing [agent]: %w", err)
@@ -794,7 +794,7 @@ func (c *Config) addAggregator(name string, table *ast.Table) error {
     // Handle removed, deprecated plugins
     if di, deprecated := aggregators.Deprecations[name]; deprecated {
         printHistoricPluginDeprecationNotice("aggregators", name, di)
-        return fmt.Errorf("plugin deprecated")
+        return errors.New("plugin deprecated")
     }
     return fmt.Errorf("undefined but requested aggregator: %s", name)
 }
@@ -980,7 +980,7 @@ func (c *Config) addProcessor(name string, table *ast.Table) error {
     // Handle removed, deprecated plugins
     if di, deprecated := processors.Deprecations[name]; deprecated {
         printHistoricPluginDeprecationNotice("processors", name, di)
-        return fmt.Errorf("plugin deprecated")
+        return errors.New("plugin deprecated")
     }
     return fmt.Errorf("undefined but requested processor: %s", name)
 }
@@ -1109,7 +1109,7 @@ func (c *Config) addOutput(name string, table *ast.Table) error {
     // Handle removed, deprecated plugins
     if di, deprecated := outputs.Deprecations[name]; deprecated {
         printHistoricPluginDeprecationNotice("outputs", name, di)
-        return fmt.Errorf("plugin deprecated")
+        return errors.New("plugin deprecated")
     }
     return fmt.Errorf("undefined but requested output: %s", name)
 }
@@ -1191,7 +1191,7 @@ func (c *Config) addInput(name string, table *ast.Table) error {
     // Handle removed, deprecated plugins
     if di, deprecated := inputs.Deprecations[name]; deprecated {
         printHistoricPluginDeprecationNotice("inputs", name, di)
-        return fmt.Errorf("plugin deprecated")
+        return errors.New("plugin deprecated")
     }

     return fmt.Errorf("undefined but requested input: %s", name)
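Note that calls which interpolate values (`%s`) or wrap an underlying error (`%w`) are intentionally left as `fmt.Errorf` in the hunks above. A small sketch of why the `%w` case must stay, using hypothetical names rather than the actual Telegraf config types:

```go
package main

import (
	"errors"
	"fmt"
)

var errDecode = errors.New("unexpected key")

// loadAgentSection mirrors the pattern kept above: the static message becomes
// errors.New, while the wrapping call keeps fmt.Errorf so callers can still
// match the cause with errors.Is / errors.As.
func loadAgentSection(ok bool, decode func() error) error {
	if !ok {
		return errors.New("invalid configuration, error parsing agent table")
	}
	if err := decode(); err != nil {
		return fmt.Errorf("error parsing [agent]: %w", err)
	}
	return nil
}

func main() {
	err := loadAgentSection(true, func() error { return errDecode })
	fmt.Println(errors.Is(err, errDecode)) // true, thanks to %w
}
```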
@@ -1,6 +1,7 @@
 package config

 import (
+    "errors"
     "fmt"
     "log"
     "reflect"
@@ -171,7 +172,7 @@ func (c *Config) printUserDeprecation(category, name string, plugin interface{})
     models.PrintPluginDeprecationNotice(info.LogLevel, info.Name, info.info)

     if info.LogLevel == telegraf.Error {
-        return fmt.Errorf("plugin deprecated")
+        return errors.New("plugin deprecated")
     }

     // Print deprecated options
@@ -1,6 +1,7 @@
 package snmp

 import (
+    "errors"
     "fmt"
     "os"
     "path/filepath"
@@ -271,7 +272,7 @@ func SnmpTranslateCall(oid string) (mibName string, oidNum string, oidText strin
     oidText = out.RenderQualified()
     i := strings.Index(oidText, "::")
     if i == -1 {
-        return "", oid, oid, oid, out, fmt.Errorf("not found")
+        return "", oid, oid, oid, out, errors.New("not found")
     }
     mibName = oidText[:i]
     oidText = oidText[i+2:] + end
@@ -1,6 +1,7 @@
 package snmp

 import (
+    "errors"
     "fmt"
     "net/url"
     "strconv"
@@ -46,7 +47,7 @@ func NewWrapper(s ClientConfig) (GosnmpWrapper, error) {
     case 1:
         gs.Version = gosnmp.Version1
     default:
-        return GosnmpWrapper{}, fmt.Errorf("invalid version")
+        return GosnmpWrapper{}, errors.New("invalid version")
     }

     if s.Version < 3 {
@@ -74,7 +75,7 @@ func NewWrapper(s ClientConfig) (GosnmpWrapper, error) {
     case "authpriv":
         gs.MsgFlags = gosnmp.AuthPriv
     default:
-        return GosnmpWrapper{}, fmt.Errorf("invalid secLevel")
+        return GosnmpWrapper{}, errors.New("invalid secLevel")
     }

     sp.UserName = s.SecName
@@ -95,7 +96,7 @@ func NewWrapper(s ClientConfig) (GosnmpWrapper, error) {
     case "":
         sp.AuthenticationProtocol = gosnmp.NoAuth
     default:
-        return GosnmpWrapper{}, fmt.Errorf("invalid authProtocol")
+        return GosnmpWrapper{}, errors.New("invalid authProtocol")
     }

     sp.AuthenticationPassphrase = s.AuthPassword
@@ -116,7 +117,7 @@ func NewWrapper(s ClientConfig) (GosnmpWrapper, error) {
     case "":
         sp.PrivacyProtocol = gosnmp.NoPriv
     default:
-        return GosnmpWrapper{}, fmt.Errorf("invalid privProtocol")
+        return GosnmpWrapper{}, errors.New("invalid privProtocol")
     }

     sp.PrivacyPassphrase = s.PrivPassword
@@ -1,7 +1,7 @@
 package syslog

 import (
-    "fmt"
+    "errors"
     "strings"
 )

@@ -50,7 +50,7 @@ func (f *Framing) UnmarshalText(data []byte) error {
         return nil
     }
     *f = -1
-    return fmt.Errorf("unknown framing")
+    return errors.New("unknown framing")
 }

 // MarshalText implements encoding.TextMarshaller
@@ -59,5 +59,5 @@ func (f Framing) MarshalText() ([]byte, error) {
     if s != "" {
         return []byte(s), nil
     }
-    return nil, fmt.Errorf("unknown framing")
+    return nil, errors.New("unknown framing")
 }
@@ -1,7 +1,7 @@
 package models

 import (
-    "fmt"
+    "errors"
     "sync"
     "testing"
     "time"
@@ -532,7 +532,7 @@ func (m *mockOutput) Write(metrics []telegraf.Metric) error {
     m.Lock()
     defer m.Unlock()
     if m.failWrite {
-        return fmt.Errorf("failed write")
+        return errors.New("failed write")
     }

     if m.metrics == nil {
@@ -572,7 +572,7 @@ func (m *perfOutput) SampleConfig() string {

 func (m *perfOutput) Write(_ []telegraf.Metric) error {
     if m.failWrite {
-        return fmt.Errorf("failed write")
+        return errors.New("failed write")
     }
     return nil
 }
@@ -2,7 +2,7 @@ package cookie

 import (
     "context"
-    "fmt"
+    "errors"
     "io"
     "net/http"
     "net/http/httptest"
@@ -12,9 +12,10 @@ import (

     clockutil "github.com/benbjohnson/clock"
     "github.com/google/go-cmp/cmp"
+    "github.com/stretchr/testify/require"

     "github.com/influxdata/telegraf/config"
     "github.com/influxdata/telegraf/testutil"
-    "github.com/stretchr/testify/require"
 )

 const (
@@ -189,7 +190,7 @@ func TestAuthConfig_Start(t *testing.T) {
                 renewal:  renewal,
                 endpoint: authEndpointWithBasicAuth,
             },
-            wantErr:           fmt.Errorf("cookie auth renewal received status code: 401 (Unauthorized) []"),
+            wantErr:           errors.New("cookie auth renewal received status code: 401 (Unauthorized) []"),
             firstAuthCount:    0,
             lastAuthCount:     0,
             firstHTTPResponse: http.StatusForbidden,
@@ -220,7 +221,7 @@ func TestAuthConfig_Start(t *testing.T) {
                 renewal:  renewal,
                 endpoint: authEndpointWithBody,
             },
-            wantErr:           fmt.Errorf("cookie auth renewal received status code: 401 (Unauthorized) []"),
+            wantErr:           errors.New("cookie auth renewal received status code: 401 (Unauthorized) []"),
             firstAuthCount:    0,
             lastAuthCount:     0,
             firstHTTPResponse: http.StatusForbidden,
@@ -1,12 +1,13 @@
 package kafka

 import (
-    "fmt"
+    "errors"
     "math"
     "strings"
     "time"

     "github.com/IBM/sarama"

     "github.com/influxdata/telegraf"
     tgConf "github.com/influxdata/telegraf/config"
     "github.com/influxdata/telegraf/plugins/common/tls"
@@ -141,7 +142,7 @@ func (k *Config) SetConfig(config *sarama.Config, log telegraf.Logger) error {

     switch strings.ToLower(k.MetadataRetryType) {
     default:
-        return fmt.Errorf("invalid metadata retry type")
+        return errors.New("invalid metadata retry type")
     case "exponential":
         if k.MetadataRetryBackoff == 0 {
             k.MetadataRetryBackoff = tgConf.Duration(250 * time.Millisecond)
@@ -2,6 +2,7 @@ package opcua

 import (
     "context"
+    "errors"
     "fmt"
     "log" //nolint:depguard // just for debug
     "net/url"
@@ -67,12 +68,12 @@ func (o *OpcUAClientConfig) validateOptionalFields() error {

 func (o *OpcUAClientConfig) validateEndpoint() error {
     if o.Endpoint == "" {
-        return fmt.Errorf("endpoint url is empty")
+        return errors.New("endpoint url is empty")
     }

     _, err := url.Parse(o.Endpoint)
     if err != nil {
-        return fmt.Errorf("endpoint url is invalid")
+        return errors.New("endpoint url is invalid")
     }

     switch o.SecurityPolicy {
@@ -224,7 +225,7 @@ func (o *OpcUAClient) Disconnect(ctx context.Context) error {
         o.Client = nil
         return err
     default:
-        return fmt.Errorf("invalid controller")
+        return errors.New("invalid controller")
     }
 }

@@ -95,7 +95,7 @@ type InputClientConfig struct {

 func (o *InputClientConfig) Validate() error {
     if o.MetricName == "" {
-        return fmt.Errorf("metric name is empty")
+        return errors.New("metric name is empty")
     }

     err := choice.Check(string(o.Timestamp), []string{"", "gather", "server", "source"})
@@ -278,11 +278,11 @@ func validateNodeToAdd(existing map[metricParts]struct{}, nmm *NodeMetricMapping
     }

     if len(nmm.Tag.Namespace) == 0 {
-        return fmt.Errorf("empty node namespace not allowed")
+        return errors.New("empty node namespace not allowed")
     }

     if len(nmm.Tag.Identifier) == 0 {
-        return fmt.Errorf("empty node identifier not allowed")
+        return errors.New("empty node identifier not allowed")
     }

     mp := newMP(nmm)
@@ -2,7 +2,6 @@ package input

 import (
     "errors"
-    "fmt"
     "testing"
     "time"

@@ -39,9 +38,9 @@ func TestTagsSliceToMap_dupeKey(t *testing.T) {

 func TestTagsSliceToMap_empty(t *testing.T) {
     _, err := tagsSliceToMap([][]string{{"foo", ""}})
-    require.Equal(t, fmt.Errorf("tag 1 has empty value"), err)
+    require.Equal(t, errors.New("tag 1 has empty value"), err)
     _, err = tagsSliceToMap([][]string{{"", "bar"}})
-    require.Equal(t, fmt.Errorf("tag 1 has empty name"), err)
+    require.Equal(t, errors.New("tag 1 has empty name"), err)
 }

 func TestValidateOPCTags(t *testing.T) {
@@ -91,7 +90,7 @@ func TestValidateOPCTags(t *testing.T) {
                 },
             },
         },
-        fmt.Errorf("tag 1 has empty value"),
+        errors.New("tag 1 has empty value"),
     },
     {
         "empty tag name not allowed",
@@ -105,7 +104,7 @@ func TestValidateOPCTags(t *testing.T) {
                 },
             },
         },
-        fmt.Errorf("tag 1 has empty name"),
+        errors.New("tag 1 has empty name"),
     },
     {
         "different metric tag names",
@@ -370,7 +369,7 @@ func TestValidateNodeToAdd(t *testing.T) {
             }, map[string]string{})
             return nmm
         }(),
-        err: fmt.Errorf("empty node namespace not allowed"),
+        err: errors.New("empty node namespace not allowed"),
     },
     {
         name: "empty identifier type not allowed",
@@ -8,6 +8,7 @@ import (
     "crypto/x509"
     "crypto/x509/pkix"
     "encoding/pem"
+    "errors"
     "fmt"
     "math/big"
     "net"
@@ -34,7 +35,7 @@ func generateCert(host string, rsaBits int, certFile, keyFile string, dur time.D
     dir, _ := newTempDir()

     if len(host) == 0 {
-        return "", "", fmt.Errorf("missing required host parameter")
+        return "", "", errors.New("missing required host parameter")
     }
     if rsaBits == 0 {
         rsaBits = 2048
@@ -181,7 +182,7 @@ func (o *OpcUAClient) generateClientOpts(endpoints []*ua.EndpointDescription) ([
     } else {
         pk, ok := c.PrivateKey.(*rsa.PrivateKey)
         if !ok {
-            return nil, fmt.Errorf("invalid private key")
+            return nil, errors.New("invalid private key")
         }
         cert = c.Certificate[0]
         opts = append(opts, opcua.PrivateKey(pk), opcua.Certificate(cert))
@@ -276,7 +277,7 @@ func (o *OpcUAClient) generateClientOpts(endpoints []*ua.EndpointDescription) ([
     }

     if serverEndpoint == nil { // Didn't find an endpoint with matching policy and mode.
-        return nil, fmt.Errorf("unable to find suitable server endpoint with selected sec-policy and sec-mode")
+        return nil, errors.New("unable to find suitable server endpoint with selected sec-policy and sec-mode")
     }

     secPolicy = serverEndpoint.SecurityPolicyURI
@@ -2,6 +2,7 @@ package shim

 import (
     "context"
+    "errors"
     "fmt"
     "io"
     "os"
@@ -90,7 +91,7 @@ func (s *Shim) Run(pollInterval time.Duration) error {
             return fmt.Errorf("RunOutput error: %w", err)
         }
     } else {
-        return fmt.Errorf("nothing to run")
+        return errors.New("nothing to run")
     }

     return nil
@@ -2,7 +2,6 @@ package starlark

 import (
     "errors"
-    "fmt"
     "reflect"
     "strings"

@@ -97,7 +96,7 @@ func (d FieldDict) Get(key starlark.Value) (v starlark.Value, found bool, err er
 // using x[k]=v syntax, like a dictionary.
 func (d FieldDict) SetKey(k, v starlark.Value) error {
     if d.fieldIterCount > 0 {
-        return fmt.Errorf("cannot insert during iteration")
+        return errors.New("cannot insert during iteration")
     }

     key, ok := k.(starlark.String)
@@ -131,7 +130,7 @@ func (d FieldDict) Items() []starlark.Tuple {

 func (d FieldDict) Clear() error {
     if d.fieldIterCount > 0 {
-        return fmt.Errorf("cannot delete during iteration")
+        return errors.New("cannot delete during iteration")
     }

     keys := make([]string, 0, len(d.metric.FieldList()))
@@ -147,7 +146,7 @@ func (d FieldDict) Clear() error {

 func (d FieldDict) PopItem() (starlark.Value, error) {
     if d.fieldIterCount > 0 {
-        return nil, fmt.Errorf("cannot delete during iteration")
+        return nil, errors.New("cannot delete during iteration")
     }

     if len(d.metric.FieldList()) == 0 {
@@ -163,7 +162,7 @@ func (d FieldDict) PopItem() (starlark.Value, error) {
     sk := starlark.String(k)
     sv, err := asStarlarkValue(v)
     if err != nil {
-        return nil, fmt.Errorf("could not convert to starlark value")
+        return nil, errors.New("could not convert to starlark value")
     }

     return starlark.Tuple{sk, sv}, nil
@@ -171,7 +170,7 @@ func (d FieldDict) PopItem() (starlark.Value, error) {

 func (d FieldDict) Delete(k starlark.Value) (v starlark.Value, found bool, err error) {
     if d.fieldIterCount > 0 {
-        return nil, false, fmt.Errorf("cannot delete during iteration")
+        return nil, false, errors.New("cannot delete during iteration")
     }

     if key, ok := k.(starlark.String); ok {
@@ -97,7 +97,7 @@ func (m *Metric) Attr(name string) (starlark.Value, error) {

 // SetField implements the starlark.HasSetField interface.
 func (m *Metric) SetField(name string, value starlark.Value) error {
     if m.frozen {
-        return fmt.Errorf("cannot modify frozen metric")
+        return errors.New("cannot modify frozen metric")
     }

     switch name {
@@ -2,7 +2,6 @@ package starlark

 import (
     "errors"
-    "fmt"
     "strings"

     "go.starlark.net/starlark"
@@ -91,7 +90,7 @@ func (d TagDict) Get(key starlark.Value) (v starlark.Value, found bool, err erro
 // using x[k]=v syntax, like a dictionary.
 func (d TagDict) SetKey(k, v starlark.Value) error {
     if d.tagIterCount > 0 {
-        return fmt.Errorf("cannot insert during iteration")
+        return errors.New("cannot insert during iteration")
     }

     key, ok := k.(starlark.String)
@@ -122,7 +121,7 @@ func (d TagDict) Items() []starlark.Tuple {

 func (d TagDict) Clear() error {
     if d.tagIterCount > 0 {
-        return fmt.Errorf("cannot delete during iteration")
+        return errors.New("cannot delete during iteration")
     }

     keys := make([]string, 0, len(d.metric.TagList()))
@@ -138,7 +137,7 @@ func (d TagDict) Clear() error {

 func (d TagDict) PopItem() (v starlark.Value, err error) {
     if d.tagIterCount > 0 {
-        return nil, fmt.Errorf("cannot delete during iteration")
+        return nil, errors.New("cannot delete during iteration")
     }

     for _, tag := range d.metric.TagList() {
@@ -157,7 +156,7 @@ func (d TagDict) PopItem() (v starlark.Value, err error) {

 func (d TagDict) Delete(k starlark.Value) (v starlark.Value, found bool, err error) {
     if d.tagIterCount > 0 {
-        return nil, false, fmt.Errorf("cannot delete during iteration")
+        return nil, false, errors.New("cannot delete during iteration")
     }

     if key, ok := k.(starlark.String); ok {
@@ -10,8 +10,9 @@ import (
     "os"
     "strings"

-    "github.com/influxdata/telegraf/internal/choice"
     "github.com/youmark/pkcs8"
+
+    "github.com/influxdata/telegraf/internal/choice"
 )

 const TLSMinVersionDefault = tls.VersionTLS12
@@ -254,7 +255,7 @@ func loadCertificate(config *tls.Config, certFile, keyFile, privateKeyPassphrase
     } else if keyPEMBlock.Headers["Proc-Type"] == "4,ENCRYPTED" {
         // The key is an encrypted private key with the DEK-Info header.
         // This is currently unsupported because of the deprecation of x509.IsEncryptedPEMBlock and x509.DecryptPEMBlock.
-        return fmt.Errorf("password-protected keys in pkcs#1 format are not supported")
+        return errors.New("password-protected keys in pkcs#1 format are not supported")
     } else {
         cert, err = tls.X509KeyPair(certBytes, keyBytes)
         if err != nil {
@@ -3,16 +3,18 @@ package azure_monitor
 import (
     "context"
     "encoding/json"
+    "errors"
     "fmt"
     "os"
     "testing"

     "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/monitor/armmonitor"
     "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
-    "github.com/influxdata/telegraf/testutil"
     "github.com/influxdata/toml"
     receiver "github.com/logzio/azure-monitor-metrics-receiver"
     "github.com/stretchr/testify/require"
+
+    "github.com/influxdata/telegraf/testutil"
 )

 type mockAzureClientsManager struct{}
@@ -98,7 +100,7 @@ func (marc *mockAzureResourcesClient) ListByResourceGroup(
         return responses, nil
     }

-    return nil, fmt.Errorf("resource group was not found")
+    return nil, errors.New("resource group was not found")
 }

 func (mamdc *mockAzureMetricDefinitionsClient) List(
@@ -139,7 +141,7 @@ func (mamdc *mockAzureMetricDefinitionsClient) List(
         }, nil
     }

-    return armmonitor.MetricDefinitionsClientListResponse{}, fmt.Errorf("resource ID was not found")
+    return armmonitor.MetricDefinitionsClientListResponse{}, errors.New("resource ID was not found")
 }

 func (mamc *mockAzureMetricsClient) List(
@@ -192,7 +194,7 @@ func (mamc *mockAzureMetricsClient) List(
         }, nil
     }

-    return armmonitor.MetricsClientListResponse{}, fmt.Errorf("resource ID was not found")
+    return armmonitor.MetricsClientListResponse{}, errors.New("resource ID was not found")
 }

 func TestInit_ResourceTargetsOnly(t *testing.T) {
@@ -304,7 +304,7 @@ func (c *CiscoTelemetryMDT) handleTCPClient(conn net.Conn) error {
         if err != nil {
             return err
         }
-        return fmt.Errorf("TCP dialout premature EOF")
+        return errors.New("TCP dialout premature EOF")
     }

     c.handleTelemetry(payload.Bytes())
@@ -752,7 +752,7 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie
     if len(rn) > 0 {
         tags[prefix] = rn
     } else if !dn { // Check for distinguished name being present
-        c.acc.AddError(fmt.Errorf("NX-OS decoding failed: missing dn field"))
+        c.acc.AddError(errors.New("NX-OS decoding failed: missing dn field"))
         return
     }

@@ -5,6 +5,7 @@ import (
     "context"
     _ "embed"
     "encoding/base64"
+    "errors"
     "fmt"
     "sync"
     "time"
@@ -309,11 +310,11 @@ func (ps *PubSub) getGCPSubscription(subID string) (subscription, error) {

 func (ps *PubSub) Init() error {
     if ps.Subscription == "" {
-        return fmt.Errorf(`"subscription" is required`)
+        return errors.New(`"subscription" is required`)
     }

     if ps.Project == "" {
-        return fmt.Errorf(`"project" is required`)
+        return errors.New(`"project" is required`)
     }

     switch ps.ContentEncoding {
@@ -4,7 +4,7 @@ package cloud_pubsub_push

 import (
     "context"
-    "fmt"
+    "errors"
     "io"
     "net/http"
     "net/http/httptest"
@@ -256,7 +256,7 @@ func (*testOutput) SampleConfig() string {

 func (t *testOutput) Write(_ []telegraf.Metric) error {
     if t.failWrite {
-        return fmt.Errorf("failed write")
+        return errors.New("failed write")
     }
     return nil
 }
@@ -5,6 +5,7 @@ package conntrack

 import (
     _ "embed"
+    "errors"
     "fmt"
     "os"
     "path/filepath"
@@ -110,7 +111,7 @@ func (c *Conntrack) Gather(acc telegraf.Accumulator) error {
     }

     if len(stats) == 0 {
-        acc.AddError(fmt.Errorf("conntrack input failed to collect stats"))
+        acc.AddError(errors.New("conntrack input failed to collect stats"))
     }

     cpuTag := "all"
@@ -146,8 +147,7 @@ func (c *Conntrack) Gather(acc telegraf.Accumulator) error {
     }

     if len(fields) == 0 {
-        return fmt.Errorf("Conntrack input failed to collect metrics. " +
-            "Is the conntrack kernel module loaded?")
+        return errors.New("conntrack input failed to collect metrics, make sure that the kernel module is loaded")
     }

     acc.AddFields(inputName, fields, nil)
@@ -31,8 +31,7 @@ func TestNoFilesFound(t *testing.T) {
     acc := &testutil.Accumulator{}
     err := c.Gather(acc)

-    require.EqualError(t, err, "Conntrack input failed to collect metrics. "+
-        "Is the conntrack kernel module loaded?")
+    require.EqualError(t, err, "conntrack input failed to collect metrics, make sure that the kernel module is loaded")
 }

 func TestDefaultsUsed(t *testing.T) {
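The conntrack change above also rewords the error text, so the matching test assertion is updated in the same commit. A tiny hedged sketch of how such string-level assertions are kept in sync (hypothetical function and package, not Telegraf code):

```go
package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

// gatherStub stands in for a Gather implementation returning a static error.
func gatherStub() error {
	return errors.New("conntrack input failed to collect metrics, make sure that the kernel module is loaded")
}

// TestGatherStub shows why the test literal must change together with the
// error message: require.EqualError compares the exact string.
func TestGatherStub(t *testing.T) {
	require.EqualError(t, gatherStub(), "conntrack input failed to collect metrics, make sure that the kernel module is loaded")
}
```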
@@ -3,6 +3,7 @@ package cpu

 import (
     _ "embed"
+    "errors"
     "fmt"
     "time"

@@ -92,7 +93,7 @@ func (c *CPUStats) Gather(acc telegraf.Accumulator) error {
         totalDelta := total - lastTotal

         if totalDelta < 0 {
-            err = fmt.Errorf("current total CPU time is less than previous total CPU time")
+            err = errors.New("current total CPU time is less than previous total CPU time")
             break
         }

@@ -3,12 +3,11 @@
 package dmcache

 import (
+    "errors"
     "os/exec"
     "strconv"
     "strings"

-    "errors"
-
     "github.com/influxdata/telegraf"
 )

@@ -4,6 +4,7 @@ package dpdk

 import (
     "encoding/json"
+    "errors"
     "fmt"
     "math/rand"
     "net"
@@ -524,7 +525,7 @@ func Test_getCommandsAndParamsCombinations(t *testing.T) {
     t.Run("when ethdev commands are enabled but params fetching command returns error then error should be logged in accumulator", func(t *testing.T) {
         mockConn, dpdk, mockAcc := prepareEnvironment()
         defer mockConn.AssertExpectations(t)
-        simulateResponse(mockConn, `{notAJson}`, fmt.Errorf("some error"))
+        simulateResponse(mockConn, `{notAJson}`, errors.New("some error"))

         dpdk.DeviceTypes = []string{"ethdev"}
         dpdk.ethdevCommands = []string{"/ethdev/stats", "/ethdev/xstats"}
@@ -4,6 +4,7 @@ package elasticsearch_query
 import (
     "context"
     _ "embed"
+    "errors"
     "fmt"
     "net/http"
     "strconv"
@@ -63,7 +64,7 @@ func (*ElasticsearchQuery) SampleConfig() string {
 // Init the plugin.
 func (e *ElasticsearchQuery) Init() error {
     if e.URLs == nil {
-        return fmt.Errorf("elasticsearch urls is not defined")
+        return errors.New("elasticsearch urls is not defined")
     }

     err := e.connectToES()
@@ -77,10 +78,10 @@ func (e *ElasticsearchQuery) Init() error {

     for i, agg := range e.Aggregations {
         if agg.MeasurementName == "" {
-            return fmt.Errorf("field 'measurement_name' is not set")
+            return errors.New("field 'measurement_name' is not set")
         }
         if agg.DateField == "" {
-            return fmt.Errorf("field 'date_field' is not set")
+            return errors.New("field 'date_field' is not set")
         }
         err = e.initAggregation(ctx, agg, i)
         if err != nil {
@@ -160,7 +161,7 @@ func (e *ElasticsearchQuery) connectToES() error {

     // quit if ES version is not supported
     if len(esVersionSplit) == 0 {
-        return fmt.Errorf("elasticsearch version check failed")
+        return errors.New("elasticsearch version check failed")
     }

     i, err := strconv.Atoi(esVersionSplit[0])
@@ -4,6 +4,7 @@ package example
 import (
     "crypto/rand"
     _ "embed"
+    "errors"
     "fmt"
     "math"
     "math/big"
@@ -51,7 +52,7 @@ func (*Example) SampleConfig() string {
 func (m *Example) Init() error {
     // Check your options according to your requirements
     if m.DeviceName == "" {
-        return fmt.Errorf("device name cannot be empty")
+        return errors.New("device name cannot be empty")
     }

     // Set your defaults.
@@ -85,7 +86,7 @@ func (m *Example) Init() error {
 func (m *Example) Gather(acc telegraf.Accumulator) error {
     // Imagine some completely arbitrary error occurring here
     if m.NumberFields > 10 {
-        return fmt.Errorf("too many fields")
+        return errors.New("too many fields")
     }

     // For illustration, we gather three metrics in one go
@@ -95,7 +96,7 @@ func (m *Example) Gather(acc telegraf.Accumulator) error {
         // all later metrics. Simply accumulate errors in this case
         // and ignore the metric.
         if m.EnableRandomVariable && m.DeviceName == "flappy" && run > 1 {
-            acc.AddError(fmt.Errorf("too many runs for random values"))
+            acc.AddError(errors.New("too many runs for random values"))
             continue
         }

@@ -7,7 +7,7 @@ package exec

 import (
     "bytes"
-    "fmt"
+    "errors"
     "runtime"
     "testing"
     "time"
@@ -133,7 +133,7 @@ func TestCommandError(t *testing.T) {
     require.NoError(t, parser.Init())
     e := &Exec{
         Log:      testutil.Logger{},
-        runner:   newRunnerMock(nil, nil, fmt.Errorf("exit status code 1")),
+        runner:   newRunnerMock(nil, nil, errors.New("exit status code 1")),
         Commands: []string{"badcommand"},
         parser:   parser,
     }
@@ -3,6 +3,7 @@ package fibaro

 import (
     _ "embed"
+    "errors"
     "fmt"
     "io"
     "net/http"
@@ -71,7 +72,7 @@ func (f *Fibaro) Init() error {
         f.DeviceType = "HC2"
     case "HC2", "HC3":
     default:
-        return fmt.Errorf("invalid option for device type")
+        return errors.New("invalid option for device type")
     }

     return nil
@@ -4,6 +4,7 @@ package fireboard
 import (
     _ "embed"
     "encoding/json"
+    "errors"
     "fmt"
     "net/http"
     "strconv"
@@ -57,7 +58,7 @@ func (*Fireboard) SampleConfig() string {
 // Init the things
 func (r *Fireboard) Init() error {
     if len(r.AuthToken) == 0 {
-        return fmt.Errorf("you must specify an authToken")
+        return errors.New("you must specify an authToken")
     }
     if len(r.URL) == 0 {
         r.URL = "https://fireboard.io/api/v1/devices.json"
@@ -4,6 +4,7 @@ package fluentd
 import (
     _ "embed"
     "encoding/json"
+    "errors"
     "fmt"
     "io"
     "net/http"
@@ -63,7 +64,7 @@ func parse(data []byte) (datapointArray []pluginData, err error) {
     var endpointData endpointInfo

     if err = json.Unmarshal(data, &endpointData); err != nil {
-        err = fmt.Errorf("processing JSON structure")
+        err = errors.New("processing JSON structure")
         return nil, err
     }

@@ -110,13 +111,13 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error {
     }

     if resp.StatusCode != http.StatusOK {
-        return fmt.Errorf("http status ok not met")
+        return errors.New("http status ok not met")
     }

     dataPoints, err := parse(body)

     if err != nil {
-        return fmt.Errorf("problem with parsing")
+        return errors.New("problem with parsing")
     }

     // Go through all plugins one by one
@@ -4,6 +4,7 @@ package gnmi
 import (
     "context"
     _ "embed"
+    "errors"
     "fmt"
     "strings"
     "sync"
@@ -98,7 +99,7 @@ func (*GNMI) SampleConfig() string {
 func (c *GNMI) Init() error {
     // Check options
     if time.Duration(c.Redial) <= 0 {
-        return fmt.Errorf("redial duration must be positive")
+        return errors.New("redial duration must be positive")
     }

     // Check vendor_specific options configured by user
@@ -144,7 +145,7 @@ func (c *GNMI) Init() error {
             return err
         }
         if c.TagSubscriptions[idx].TagOnly != c.TagSubscriptions[0].TagOnly {
-            return fmt.Errorf("do not mix legacy tag_only subscriptions and tag subscriptions")
+            return errors.New("do not mix legacy tag_only subscriptions and tag subscriptions")
         }
         switch c.TagSubscriptions[idx].Match {
         case "":
@@ -157,7 +158,7 @@ func (c *GNMI) Init() error {
         case "name":
         case "elements":
             if len(c.TagSubscriptions[idx].Elements) == 0 {
-                return fmt.Errorf("tag_subscription must have at least one element")
+                return errors.New("tag_subscription must have at least one element")
             }
         default:
             return fmt.Errorf("unknown match type %q for tag-subscription %q", c.TagSubscriptions[idx].Match, c.TagSubscriptions[idx].Name)
@@ -4,7 +4,6 @@ import (
     "context"
     "encoding/json"
     "errors"
-    "fmt"
     "net"
     "os"
     "path/filepath"
@@ -75,7 +74,7 @@ func TestWaitError(t *testing.T) {
     grpcServer := grpc.NewServer()
     gnmiServer := &MockServer{
         SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error {
-            return fmt.Errorf("testerror")
+            return errors.New("testerror")
         },
         GRPCServer: grpcServer,
     }
@@ -238,7 +238,7 @@ func (gcs *GCS) setUpDefaultClient() error {

 func (gcs *GCS) setOffset() error {
     if gcs.client == nil {
-        return fmt.Errorf("CANNOT SET OFFSET IF CLIENT IS NOT SET")
+        return errors.New("CANNOT SET OFFSET IF CLIENT IS NOT SET")
     }

     if gcs.OffsetKey != "" {
@@ -204,7 +204,7 @@ func (h *haproxy) importCsvResult(r io.Reader, acc telegraf.Accumulator, host st
         return err
     }
     if len(headers[0]) <= 2 || headers[0][:2] != "# " {
-        return fmt.Errorf("did not receive standard haproxy headers")
+        return errors.New("did not receive standard haproxy headers")
     }
     headers[0] = headers[0][2:]

@@ -5,6 +5,7 @@ package http
 import (
     "context"
     _ "embed"
+    "errors"
     "fmt"
     "io"
     "net/http"
@@ -54,14 +55,14 @@ func (*HTTP) SampleConfig() string {
 func (h *HTTP) Init() error {
     // For backward compatibility
     if h.TokenFile != "" && h.BearerToken != "" && h.TokenFile != h.BearerToken {
-        return fmt.Errorf("conflicting settings for 'bearer_token' and 'token_file'")
+        return errors.New("conflicting settings for 'bearer_token' and 'token_file'")
     } else if h.TokenFile == "" && h.BearerToken != "" {
         h.TokenFile = h.BearerToken
     }

     // We cannot use multiple sources for tokens
     if h.TokenFile != "" && !h.Token.Empty() {
-        return fmt.Errorf("either use 'token_file' or 'token' not both")
+        return errors.New("either use 'token_file' or 'token' not both")
     }

     // Create the client
@@ -6,6 +6,7 @@ package hugepages
 import (
     "bytes"
     _ "embed"
+    "errors"
     "fmt"
     "os"
     "path/filepath"
@@ -258,7 +259,7 @@ func (h *Hugepages) parseHugepagesConfig() error {

     // empty array
     if len(h.Types) == 0 {
-        return fmt.Errorf("plugin was configured with nothing to read")
+        return errors.New("plugin was configured with nothing to read")
     }

     for _, hugepagesType := range h.Types {
@@ -3,10 +3,11 @@
 package infiniband

 import (
-    "fmt"
+    "errors"
     "strconv"

     "github.com/Mellanox/rdmamap"
+
     "github.com/influxdata/telegraf"
 )

@@ -15,7 +16,7 @@ func (i *Infiniband) Gather(acc telegraf.Accumulator) error {
     rdmaDevices := rdmamap.GetRdmaDeviceList()

     if len(rdmaDevices) == 0 {
-        return fmt.Errorf("no InfiniBand devices found in /sys/class/infiniband/")
+        return errors.New("no InfiniBand devices found in /sys/class/infiniband/")
     }

     for _, dev := range rdmaDevices {
@@ -63,12 +63,12 @@ func (b *Baseband) SampleConfig() string {
 // Init performs one time setup of the plugin
 func (b *Baseband) Init() error {
     if b.SocketAccessTimeout < 0 {
-        return fmt.Errorf("socket_access_timeout should be positive number or equal to 0 (to disable timeouts)")
+        return errors.New("socket_access_timeout should be positive number or equal to 0 (to disable timeouts)")
     }

     waitForTelemetryDuration := time.Duration(b.WaitForTelemetryTimeout)
     if waitForTelemetryDuration < 50*time.Millisecond {
-        return fmt.Errorf("wait_for_telemetry_timeout should be equal or larger than 50ms")
+        return errors.New("wait_for_telemetry_timeout should be equal or larger than 50ms")
     }

     // Filling default values
@@ -144,13 +144,13 @@ func (lc *logConnector) readNumVFs() error {
         return nil
     }

-    return fmt.Errorf("numVFs data wasn't found in the log file")
+    return errors.New("numVFs data wasn't found in the log file")
 }

 // Find a line which contains a substring in the log file
 func (lc *logConnector) getSubstringLine(offsetLine int, substring string) (int, string, error) {
     if len(substring) == 0 {
-        return 0, "", fmt.Errorf("substring is empty")
+        return 0, "", errors.New("substring is empty")
     }

     for i := offsetLine; i < len(lc.lines); i++ {
@@ -208,13 +208,13 @@ func (lc *logConnector) getMetric(offsetLine int, name string) (int, *logMetric,

     dataRaw := strings.TrimSpace(infoData[1])
     if len(dataRaw) == 0 {
-        return offsetLine, nil, fmt.Errorf("the content of the log file is incorrect, metric's data is incorrect")
+        return offsetLine, nil, errors.New("the content of the log file is incorrect, metric's data is incorrect")
     }

     data := strings.Split(dataRaw, " ")
     for i := range data {
         if len(data[i]) == 0 {
-            return offsetLine, nil, fmt.Errorf("the content of the log file is incorrect, metric's data is empty")
+            return offsetLine, nil, errors.New("the content of the log file is incorrect, metric's data is empty")
         }
     }
     return i + 2, &logMetric{operationName: operationName, data: data}, nil
@@ -224,18 +224,18 @@ func (lc *logConnector) getMetric(offsetLine int, name string) (int, *logMetric,
 func (lc *logConnector) parseNumVFs(s string) (int, error) {
     i := strings.LastIndex(s, deviceStatusStartPrefix)
     if i == -1 {
-        return 0, fmt.Errorf("couldn't find device status prefix in line")
+        return 0, errors.New("couldn't find device status prefix in line")
     }

     j := strings.Index(s[i:], deviceStatusEndPrefix)
     if j == -1 {
-        return 0, fmt.Errorf("couldn't find device end prefix in line")
+        return 0, errors.New("couldn't find device end prefix in line")
     }

     startIndex := i + len(deviceStatusStartPrefix) + 1
     endIndex := i + j - 1
     if len(s) < startIndex || startIndex >= endIndex {
-        return 0, fmt.Errorf("incorrect format of the line")
+        return 0, errors.New("incorrect format of the line")
     }

     return strconv.Atoi(s[startIndex:endIndex])
@@ -3,6 +3,7 @@
 package intel_baseband

 import (
+    "errors"
     "fmt"
     "net"
     "time"
@@ -50,7 +51,7 @@ func (sc *socketConnector) sendCommandToSocket(c byte) error {

 func (sc *socketConnector) writeCommandToSocket(c byte) error {
     if sc.connection == nil {
-        return fmt.Errorf("connection had not been established before")
+        return errors.New("connection had not been established before")
     }
     var err error
     if sc.accessTimeout == 0 {
@@ -3,7 +3,7 @@
 package intel_baseband

 import (
-    "fmt"
+    "errors"
     "testing"
     "time"

@@ -36,7 +36,7 @@ func TestWriteCommandToSocket(t *testing.T) {

     t.Run("handling timeout setting error", func(t *testing.T) {
         conn := &mocks.Conn{}
-        conn.On("SetWriteDeadline", mock.Anything).Return(fmt.Errorf("deadline set error"))
+        conn.On("SetWriteDeadline", mock.Anything).Return(errors.New("deadline set error"))
         connector := socketConnector{connection: conn}

         err := connector.writeCommandToSocket(0x00)
@@ -50,7 +50,7 @@ func TestWriteCommandToSocket(t *testing.T) {
     t.Run("handling net.Write error", func(t *testing.T) {
         var unsupportedCommand byte = 0x99
         conn := &mocks.Conn{}
-        conn.On("Write", []byte{unsupportedCommand, 0x00}).Return(0, fmt.Errorf("unsupported command"))
+        conn.On("Write", []byte{unsupportedCommand, 0x00}).Return(0, errors.New("unsupported command"))
         conn.On("SetWriteDeadline", mock.Anything).Return(nil)
         connector := socketConnector{connection: conn}

|
@ -74,7 +74,7 @@ func logMetricDataToValue(data string) (int, error) {
}

if value < 0 {
return 0, fmt.Errorf("metric can't be negative")
return 0, errors.New("metric can't be negative")
}

return value, nil

@ -4,7 +4,6 @@ package intel_baseband

import (
"errors"
"fmt"
"testing"

"github.com/stretchr/testify/require"

@ -164,8 +163,8 @@ func TestLogMetricDataToValue(t *testing.T) {
{"00", 0, nil},
{"5", 5, nil},
{"-010", 0, errors.New("metric can't be negative")},
{"", 0, fmt.Errorf("invalid syntax")},
{"", 0, errors.New("invalid syntax")},
{"0Nax10", 0, fmt.Errorf("invalid syntax")},
{"0Nax10", 0, errors.New("invalid syntax")},
}

t.Run("check correct returned values", func(t *testing.T) {
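Swapping the constructor inside expected-value tables like the one above only works because these tests compare errors by their message, not by identity. The standalone sketch below (not Telegraf code) shows the distinction: two errors.New values with the same text are distinct values, so identity checks fail, while message comparisons are indifferent to which constructor produced the error.

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	a := errors.New("invalid syntax")
	b := errors.New("invalid syntax")
	c := fmt.Errorf("invalid syntax") // no verbs, no %w: equivalent to errors.New

	fmt.Println(a == b)                 // false: each call allocates a new value
	fmt.Println(errors.Is(a, b))        // false: errors.Is matches identity, not text
	fmt.Println(a.Error() == c.Error()) // true: message comparison is constructor-agnostic
}
```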
@ -7,6 +7,7 @@ package intel_dlb
import (
_ "embed"
"encoding/json"
"errors"
"fmt"
"net"
"os"

@ -308,7 +309,7 @@ func (d *IntelDLB) setInitMessageLength() error {
return d.closeSocketAndThrowError("custom", fmt.Errorf("failed to read InitMessage from socket: %w", err))
}
if messageLength > len(buf) {
return d.closeSocketAndThrowError("custom", fmt.Errorf("socket reply length is bigger than default buffer length"))
return d.closeSocketAndThrowError("custom", errors.New("socket reply length is bigger than default buffer length"))
}

var initMsg initMessage

@ -339,7 +340,7 @@ func (d *IntelDLB) writeReadSocketMessage(messageToWrite string) (int, []byte, e
}

if replyMsgLen == 0 {
return 0, nil, d.closeSocketAndThrowError("message", fmt.Errorf("message length is empty"))
return 0, nil, d.closeSocketAndThrowError("message", errors.New("message length is empty"))
}

return replyMsgLen, socketReply, nil

@ -347,13 +348,13 @@ func (d *IntelDLB) writeReadSocketMessage(messageToWrite string) (int, []byte, e

func (d *IntelDLB) parseJSON(replyMsgLen int, socketReply []byte, parsedDeviceInfo interface{}) error {
if len(socketReply) == 0 {
return d.closeSocketAndThrowError("json", fmt.Errorf("socket reply is empty"))
return d.closeSocketAndThrowError("json", errors.New("socket reply is empty"))
}
if replyMsgLen > len(socketReply) {
return d.closeSocketAndThrowError("json", fmt.Errorf("socket reply length is bigger than it should be"))
return d.closeSocketAndThrowError("json", errors.New("socket reply length is bigger than it should be"))
}
if replyMsgLen == 0 {
return d.closeSocketAndThrowError("json", fmt.Errorf("socket reply message is empty"))
return d.closeSocketAndThrowError("json", errors.New("socket reply message is empty"))
}
// Assign reply to variable, e.g.: {"/eventdev/dev_list": [0, 1]}
jsonDeviceIndexes := socketReply[:replyMsgLen]

@ -405,7 +406,7 @@ func (d *IntelDLB) closeSocketAndThrowError(errType string, err error) error {

func (d *IntelDLB) checkAndAddDLBDevice() error {
if d.rasReader == nil {
return fmt.Errorf("rasreader was not initialized")
return errors.New("rasreader was not initialized")
}
filePaths, err := d.rasReader.gatherPaths(dlbDeviceIDLocation)
if err != nil {
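The rule this commit applies is visible in the intel_dlb hunks above: the call that wraps an underlying error with %w keeps fmt.Errorf, while every constant message switches to errors.New. A minimal sketch of that rule follows; readConfig and its messages are invented for illustration and are not part of the Telegraf tree.

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// A fixed message needs no formatting, so errors.New is enough; hoisting it to a
// package-level sentinel also lets callers match it with errors.Is.
var errEmptyPath = errors.New("config path is empty")

// readConfig is a hypothetical helper showing where each constructor applies.
func readConfig(path string) ([]byte, error) {
	if path == "" {
		return nil, errEmptyPath
	}
	buf, err := os.ReadFile(path)
	if err != nil {
		// Interpolating values or wrapping with %w still requires fmt.Errorf.
		return nil, fmt.Errorf("reading %q: %w", path, err)
	}
	return buf, nil
}

func main() {
	if _, err := readConfig(""); errors.Is(err, errEmptyPath) {
		fmt.Println("caught:", err)
	}
}
```

Beyond skipping a pointless pass through the format parser, the sentinel form gives callers a stable value to test against instead of matching message strings.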
@ -5,6 +5,7 @@ package intel_dlb

import (
"encoding/json"
"errors"
"fmt"
"net"
"os"

@ -132,7 +133,7 @@ func TestDLB_Init(t *testing.T) {
Log: testutil.Logger{},
}
const emptyPath = ""
fileMock.On("gatherPaths", mock.Anything).Return([]string{emptyPath}, fmt.Errorf("can't find device folder")).Once()
fileMock.On("gatherPaths", mock.Anything).Return([]string{emptyPath}, errors.New("can't find device folder")).Once()
err := dlb.Init()
require.Error(t, err)
require.Contains(t, err.Error(), "can't find device folder")

@ -147,7 +148,7 @@ func TestDLB_writeReadSocketMessage(t *testing.T) {
connection: mockConn,
Log: testutil.Logger{},
}
mockConn.On("Write", []byte{}).Return(0, fmt.Errorf("write error")).Once().
mockConn.On("Write", []byte{}).Return(0, errors.New("write error")).Once().
On("Close").Return(nil).Once()

_, _, err := dlb.writeReadSocketMessage("")

@ -163,7 +164,7 @@ func TestDLB_writeReadSocketMessage(t *testing.T) {
connection: mockConn,
Log: testutil.Logger{},
}
simulateResponse(mockConn, "", fmt.Errorf("read error"))
simulateResponse(mockConn, "", errors.New("read error"))

_, _, err := dlb.writeReadSocketMessage("")
@ -584,7 +585,7 @@ func TestDLB_processCommandResult(t *testing.T) {
mockConn.On("Read", mock.Anything).Run(func(arg mock.Arguments) {
elem := arg.Get(0).([]byte)
copy(elem, response)
}).Return(len(response), fmt.Errorf("read error")).Once()
}).Return(len(response), errors.New("read error")).Once()
mockConn.On("Close").Return(nil)

err := dlb.gatherMetricsFromSocket(mockAcc)

@ -652,7 +653,7 @@ func Test_checkAndAddDLBDevice(t *testing.T) {
rasReader: fileMock,
Log: testutil.Logger{},
}
fileMock.On("gatherPaths", mock.AnythingOfType("string")).Return(nil, fmt.Errorf("can't find device folder")).Once()
fileMock.On("gatherPaths", mock.AnythingOfType("string")).Return(nil, errors.New("can't find device folder")).Once()

err := dlb.checkAndAddDLBDevice()

@ -670,7 +671,7 @@ func Test_checkAndAddDLBDevice(t *testing.T) {
}
const globPath = "/sys/devices/pci0000:00/0000:00:00.0/device"
fileMock.On("gatherPaths", mock.Anything).Return([]string{globPath}, nil).Once().
On("readFromFile", mock.Anything).Return([]byte("0x2710"), fmt.Errorf("read error while getting device folders")).Once()
On("readFromFile", mock.Anything).Return([]byte("0x2710"), errors.New("read error while getting device folders")).Once()

err := dlb.checkAndAddDLBDevice()

@ -701,7 +702,7 @@ func Test_checkAndAddDLBDevice(t *testing.T) {
}
const globPath = "/sys/devices/pci0000:00/0000:00:00.0/device"
fileMock.On("gatherPaths", mock.Anything).Return([]string{globPath}, nil).Once().
On("readFromFile", mock.Anything).Return([]byte("0x2710"), fmt.Errorf("read error while getting device folders")).Once()
On("readFromFile", mock.Anything).Return([]byte("0x2710"), errors.New("read error while getting device folders")).Once()

err := dlb.checkAndAddDLBDevice()

@ -778,7 +779,7 @@ func Test_readRasMetrics(t *testing.T) {
err error
errMsg string
}{
{"error when reading fails", []byte(aerCorrectableData), fmt.Errorf("read error"), "read error"},
{"error when reading fails", []byte(aerCorrectableData), errors.New("read error"), "read error"},
{"error when empty data is given", []byte(""), nil, "no value to parse"},
{"error when trying to split empty data", []byte("x1 x2"), nil, "failed to parse value"},
}
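The test hunks above all share one shape: a testify mock is told to return a constant error exactly once, the code under test is called, and the assertion checks the message. The sketch below reproduces that flow with a hand-written mock; pathGatherer and mockGatherer are illustrative stand-ins rather than types from the Telegraf tree, and the code would live in a _test.go file.

```go
package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

// pathGatherer stands in for the rasReader-style dependency mocked above.
type pathGatherer interface {
	gatherPaths(glob string) ([]string, error)
}

// mockGatherer is a hand-written testify mock implementing pathGatherer.
type mockGatherer struct{ mock.Mock }

func (m *mockGatherer) gatherPaths(glob string) ([]string, error) {
	args := m.Called(glob)
	paths, _ := args.Get(0).([]string)
	return paths, args.Error(1)
}

func TestGatherPathsError(t *testing.T) {
	g := &mockGatherer{}
	// Same shape as the hunks above: a constant error, stubbed exactly once.
	g.On("gatherPaths", mock.Anything).Return(nil, errors.New("can't find device folder")).Once()

	_, err := g.gatherPaths("/sys/devices/*")
	require.Error(t, err)
	require.Contains(t, err.Error(), "can't find device folder")
	g.AssertExpectations(t)
}
```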
@ -384,7 +384,7 @@ func eval(eq string, params map[string]interface{}) (interface{}, error) {
|
||||||
// gval doesn't support hexadecimals
|
// gval doesn't support hexadecimals
|
||||||
eq = hexToDecRegex.ReplaceAllStringFunc(eq, hexToDec)
|
eq = hexToDecRegex.ReplaceAllStringFunc(eq, hexToDec)
|
||||||
if eq == "" {
|
if eq == "" {
|
||||||
return nil, fmt.Errorf("error during hex to decimal conversion")
|
return nil, errors.New("error during hex to decimal conversion")
|
||||||
}
|
}
|
||||||
result, err := gval.Evaluate(eq, params)
|
result, err := gval.Evaluate(eq, params)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
||||||
|
|
@ -88,10 +88,10 @@ func (ea *iaEntitiesActivator) activateEntities(coreEntities []*CoreEventEntity,
|
||||||
|
|
||||||
func (ea *iaEntitiesActivator) activateCoreEvents(entity *CoreEventEntity) error {
|
func (ea *iaEntitiesActivator) activateCoreEvents(entity *CoreEventEntity) error {
|
||||||
if entity == nil {
|
if entity == nil {
|
||||||
return fmt.Errorf("core events entity is nil")
|
return errors.New("core events entity is nil")
|
||||||
}
|
}
|
||||||
if ea.placementMaker == nil {
|
if ea.placementMaker == nil {
|
||||||
return fmt.Errorf("placement maker is nil")
|
return errors.New("placement maker is nil")
|
||||||
}
|
}
|
||||||
if entity.PerfGroup {
|
if entity.PerfGroup {
|
||||||
err := ea.activateCoreEventsGroup(entity)
|
err := ea.activateCoreEventsGroup(entity)
|
||||||
|
|
@ -101,7 +101,7 @@ func (ea *iaEntitiesActivator) activateCoreEvents(entity *CoreEventEntity) error
|
||||||
} else {
|
} else {
|
||||||
for _, event := range entity.parsedEvents {
|
for _, event := range entity.parsedEvents {
|
||||||
if event == nil {
|
if event == nil {
|
||||||
return fmt.Errorf("core parsed event is nil")
|
return errors.New("core parsed event is nil")
|
||||||
}
|
}
|
||||||
placements, err := ea.placementMaker.makeCorePlacements(entity.parsedCores, event.custom.Event)
|
placements, err := ea.placementMaker.makeCorePlacements(entity.parsedCores, event.custom.Event)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
@ -119,14 +119,14 @@ func (ea *iaEntitiesActivator) activateCoreEvents(entity *CoreEventEntity) error
|
||||||
|
|
||||||
func (ea *iaEntitiesActivator) activateUncoreEvents(entity *UncoreEventEntity) error {
|
func (ea *iaEntitiesActivator) activateUncoreEvents(entity *UncoreEventEntity) error {
|
||||||
if entity == nil {
|
if entity == nil {
|
||||||
return fmt.Errorf("uncore events entity is nil")
|
return errors.New("uncore events entity is nil")
|
||||||
}
|
}
|
||||||
if ea.perfActivator == nil || ea.placementMaker == nil {
|
if ea.perfActivator == nil || ea.placementMaker == nil {
|
||||||
return fmt.Errorf("events activator or placement maker is nil")
|
return errors.New("events activator or placement maker is nil")
|
||||||
}
|
}
|
||||||
for _, event := range entity.parsedEvents {
|
for _, event := range entity.parsedEvents {
|
||||||
if event == nil {
|
if event == nil {
|
||||||
return fmt.Errorf("uncore parsed event is nil")
|
return errors.New("uncore parsed event is nil")
|
||||||
}
|
}
|
||||||
perfEvent := event.custom.Event
|
perfEvent := event.custom.Event
|
||||||
if perfEvent == nil {
|
if perfEvent == nil {
|
||||||
|
|
@ -152,16 +152,16 @@ func (ea *iaEntitiesActivator) activateUncoreEvents(entity *UncoreEventEntity) e
|
||||||
|
|
||||||
func (ea *iaEntitiesActivator) activateCoreEventsGroup(entity *CoreEventEntity) error {
|
func (ea *iaEntitiesActivator) activateCoreEventsGroup(entity *CoreEventEntity) error {
|
||||||
if ea.perfActivator == nil || ea.placementMaker == nil {
|
if ea.perfActivator == nil || ea.placementMaker == nil {
|
||||||
return fmt.Errorf("missing perf activator or placement maker")
|
return errors.New("missing perf activator or placement maker")
|
||||||
}
|
}
|
||||||
if entity == nil || len(entity.parsedEvents) < 1 {
|
if entity == nil || len(entity.parsedEvents) < 1 {
|
||||||
return fmt.Errorf("missing parsed events")
|
return errors.New("missing parsed events")
|
||||||
}
|
}
|
||||||
|
|
||||||
events := make([]ia.CustomizableEvent, 0, len(entity.parsedEvents))
|
events := make([]ia.CustomizableEvent, 0, len(entity.parsedEvents))
|
||||||
for _, event := range entity.parsedEvents {
|
for _, event := range entity.parsedEvents {
|
||||||
if event == nil {
|
if event == nil {
|
||||||
return fmt.Errorf("core event is nil")
|
return errors.New("core event is nil")
|
||||||
}
|
}
|
||||||
events = append(events, event.custom)
|
events = append(events, event.custom)
|
||||||
}
|
}
|
||||||
|
|
@ -184,10 +184,10 @@ func (ea *iaEntitiesActivator) activateCoreEventsGroup(entity *CoreEventEntity)
|
||||||
|
|
||||||
func (ea *iaEntitiesActivator) activateEventForPlacements(event *eventWithQuals, placements []ia.PlacementProvider) ([]*ia.ActiveEvent, error) {
|
func (ea *iaEntitiesActivator) activateEventForPlacements(event *eventWithQuals, placements []ia.PlacementProvider) ([]*ia.ActiveEvent, error) {
|
||||||
if event == nil {
|
if event == nil {
|
||||||
return nil, fmt.Errorf("core event is nil")
|
return nil, errors.New("core event is nil")
|
||||||
}
|
}
|
||||||
if ea.perfActivator == nil {
|
if ea.perfActivator == nil {
|
||||||
return nil, fmt.Errorf("missing perf activator")
|
return nil, errors.New("missing perf activator")
|
||||||
}
|
}
|
||||||
|
|
||||||
activeEvents := make([]*ia.ActiveEvent, 0, len(placements))
|
activeEvents := make([]*ia.ActiveEvent, 0, len(placements))
|
||||||
|
|
|
||||||
|
|
@ -58,7 +58,7 @@ func TestActivateEntities(t *testing.T) {
|
||||||
func TestActivateUncoreEvents(t *testing.T) {
|
func TestActivateUncoreEvents(t *testing.T) {
|
||||||
mActivator := &mockEventsActivator{}
|
mActivator := &mockEventsActivator{}
|
||||||
mMaker := &mockPlacementMaker{}
|
mMaker := &mockPlacementMaker{}
|
||||||
errMock := fmt.Errorf("error mock")
|
errMock := errors.New("error mock")
|
||||||
|
|
||||||
t.Run("entity is nil", func(t *testing.T) {
|
t.Run("entity is nil", func(t *testing.T) {
|
||||||
mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator}
|
mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator}
|
||||||
|
|
@ -155,7 +155,7 @@ func TestActivateUncoreEvents(t *testing.T) {
|
||||||
func TestActivateCoreEvents(t *testing.T) {
|
func TestActivateCoreEvents(t *testing.T) {
|
||||||
mMaker := &mockPlacementMaker{}
|
mMaker := &mockPlacementMaker{}
|
||||||
mActivator := &mockEventsActivator{}
|
mActivator := &mockEventsActivator{}
|
||||||
errMock := fmt.Errorf("error mock")
|
errMock := errors.New("error mock")
|
||||||
|
|
||||||
t.Run("entity is nil", func(t *testing.T) {
|
t.Run("entity is nil", func(t *testing.T) {
|
||||||
mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator}
|
mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator}
|
||||||
|
|
|
||||||
|
|
@ -3,6 +3,7 @@
|
||||||
package intel_pmu
|
package intel_pmu
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
@ -24,12 +25,12 @@ type configParser struct {
|
||||||
|
|
||||||
func (cp *configParser) parseEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) (err error) {
|
func (cp *configParser) parseEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) (err error) {
|
||||||
if len(coreEntities) == 0 && len(uncoreEntities) == 0 {
|
if len(coreEntities) == 0 && len(uncoreEntities) == 0 {
|
||||||
return fmt.Errorf("neither core nor uncore entities configured")
|
return errors.New("neither core nor uncore entities configured")
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, coreEntity := range coreEntities {
|
for _, coreEntity := range coreEntities {
|
||||||
if coreEntity == nil {
|
if coreEntity == nil {
|
||||||
return fmt.Errorf("core entity is nil")
|
return errors.New("core entity is nil")
|
||||||
}
|
}
|
||||||
if coreEntity.Events == nil {
|
if coreEntity.Events == nil {
|
||||||
if cp.log != nil {
|
if cp.log != nil {
|
||||||
|
|
@ -39,7 +40,7 @@ func (cp *configParser) parseEntities(coreEntities []*CoreEventEntity, uncoreEnt
|
||||||
} else {
|
} else {
|
||||||
events := cp.parseEvents(coreEntity.Events)
|
events := cp.parseEvents(coreEntity.Events)
|
||||||
if events == nil {
|
if events == nil {
|
||||||
return fmt.Errorf("an empty list of core events was provided")
|
return errors.New("an empty list of core events was provided")
|
||||||
}
|
}
|
||||||
coreEntity.parsedEvents = events
|
coreEntity.parsedEvents = events
|
||||||
}
|
}
|
||||||
|
|
@ -52,7 +53,7 @@ func (cp *configParser) parseEntities(coreEntities []*CoreEventEntity, uncoreEnt
|
||||||
|
|
||||||
for _, uncoreEntity := range uncoreEntities {
|
for _, uncoreEntity := range uncoreEntities {
|
||||||
if uncoreEntity == nil {
|
if uncoreEntity == nil {
|
||||||
return fmt.Errorf("uncore entity is nil")
|
return errors.New("uncore entity is nil")
|
||||||
}
|
}
|
||||||
if uncoreEntity.Events == nil {
|
if uncoreEntity.Events == nil {
|
||||||
if cp.log != nil {
|
if cp.log != nil {
|
||||||
|
|
@ -62,7 +63,7 @@ func (cp *configParser) parseEntities(coreEntities []*CoreEventEntity, uncoreEnt
|
||||||
} else {
|
} else {
|
||||||
events := cp.parseEvents(uncoreEntity.Events)
|
events := cp.parseEvents(uncoreEntity.Events)
|
||||||
if events == nil {
|
if events == nil {
|
||||||
return fmt.Errorf("an empty list of uncore events was provided")
|
return errors.New("an empty list of uncore events was provided")
|
||||||
}
|
}
|
||||||
uncoreEntity.parsedEvents = events
|
uncoreEntity.parsedEvents = events
|
||||||
}
|
}
|
||||||
|
|
@ -95,7 +96,7 @@ func (cp *configParser) parseCores(cores []string) ([]int, error) {
|
||||||
cp.log.Debug("all possible cores will be configured")
|
cp.log.Debug("all possible cores will be configured")
|
||||||
}
|
}
|
||||||
if cp.sys == nil {
|
if cp.sys == nil {
|
||||||
return nil, fmt.Errorf("system info provider is nil")
|
return nil, errors.New("system info provider is nil")
|
||||||
}
|
}
|
||||||
cores, err := cp.sys.allCPUs()
|
cores, err := cp.sys.allCPUs()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
@ -104,7 +105,7 @@ func (cp *configParser) parseCores(cores []string) ([]int, error) {
|
||||||
return cores, nil
|
return cores, nil
|
||||||
}
|
}
|
||||||
if len(cores) == 0 {
|
if len(cores) == 0 {
|
||||||
return nil, fmt.Errorf("an empty list of cores was provided")
|
return nil, errors.New("an empty list of cores was provided")
|
||||||
}
|
}
|
||||||
|
|
||||||
result, err := cp.parseIntRanges(cores)
|
result, err := cp.parseIntRanges(cores)
|
||||||
|
|
@ -120,7 +121,7 @@ func (cp *configParser) parseSockets(sockets []string) ([]int, error) {
|
||||||
cp.log.Debug("all possible sockets will be configured")
|
cp.log.Debug("all possible sockets will be configured")
|
||||||
}
|
}
|
||||||
if cp.sys == nil {
|
if cp.sys == nil {
|
||||||
return nil, fmt.Errorf("system info provider is nil")
|
return nil, errors.New("system info provider is nil")
|
||||||
}
|
}
|
||||||
sockets, err := cp.sys.allSockets()
|
sockets, err := cp.sys.allSockets()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
@ -129,7 +130,7 @@ func (cp *configParser) parseSockets(sockets []string) ([]int, error) {
|
||||||
return sockets, nil
|
return sockets, nil
|
||||||
}
|
}
|
||||||
if len(sockets) == 0 {
|
if len(sockets) == 0 {
|
||||||
return nil, fmt.Errorf("an empty list of sockets was provided")
|
return nil, errors.New("an empty list of sockets was provided")
|
||||||
}
|
}
|
||||||
|
|
||||||
result, err := cp.parseIntRanges(sockets)
|
result, err := cp.parseIntRanges(sockets)
|
||||||
|
|
|
||||||
|
|
@ -148,7 +148,7 @@ func (i *IntelPMU) Init() error {
|
||||||
|
|
||||||
func (i *IntelPMU) initialization(parser entitiesParser, resolver entitiesResolver, activator entitiesActivator) error {
|
func (i *IntelPMU) initialization(parser entitiesParser, resolver entitiesResolver, activator entitiesActivator) error {
|
||||||
if parser == nil || resolver == nil || activator == nil {
|
if parser == nil || resolver == nil || activator == nil {
|
||||||
return fmt.Errorf("entities parser and/or resolver and/or activator is nil")
|
return errors.New("entities parser and/or resolver and/or activator is nil")
|
||||||
}
|
}
|
||||||
|
|
||||||
err := parser.parseEntities(i.CoreEntities, i.UncoreEntities)
|
err := parser.parseEntities(i.CoreEntities, i.UncoreEntities)
|
||||||
|
|
@ -183,7 +183,7 @@ func (i *IntelPMU) checkFileDescriptors() error {
|
||||||
return fmt.Errorf("failed to estimate number of uncore events file descriptors: %w", err)
|
return fmt.Errorf("failed to estimate number of uncore events file descriptors: %w", err)
|
||||||
}
|
}
|
||||||
if coreFd > math.MaxUint64-uncoreFd {
|
if coreFd > math.MaxUint64-uncoreFd {
|
||||||
return fmt.Errorf("requested number of file descriptors exceeds uint64")
|
return errors.New("requested number of file descriptors exceeds uint64")
|
||||||
}
|
}
|
||||||
allFd := coreFd + uncoreFd
|
allFd := coreFd + uncoreFd
|
||||||
|
|
||||||
|
|
@ -210,7 +210,7 @@ func (i *IntelPMU) checkFileDescriptors() error {
|
||||||
|
|
||||||
func (i *IntelPMU) Gather(acc telegraf.Accumulator) error {
|
func (i *IntelPMU) Gather(acc telegraf.Accumulator) error {
|
||||||
if i.entitiesReader == nil {
|
if i.entitiesReader == nil {
|
||||||
return fmt.Errorf("entities reader is nil")
|
return errors.New("entities reader is nil")
|
||||||
}
|
}
|
||||||
coreMetrics, uncoreMetrics, err := i.entitiesReader.readEntities(i.CoreEntities, i.UncoreEntities)
|
coreMetrics, uncoreMetrics, err := i.entitiesReader.readEntities(i.CoreEntities, i.UncoreEntities)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
@ -342,7 +342,7 @@ func multiplyAndAdd(factorA uint64, factorB uint64, sum uint64) (uint64, error)
|
||||||
|
|
||||||
func readMaxFD(reader fileInfoProvider) (uint64, error) {
|
func readMaxFD(reader fileInfoProvider) (uint64, error) {
|
||||||
if reader == nil {
|
if reader == nil {
|
||||||
return 0, fmt.Errorf("file reader is nil")
|
return 0, errors.New("file reader is nil")
|
||||||
}
|
}
|
||||||
buf, err := reader.readFile(fileMaxPath)
|
buf, err := reader.readFile(fileMaxPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
@ -358,10 +358,10 @@ func readMaxFD(reader fileInfoProvider) (uint64, error) {
|
||||||
func checkFiles(paths []string, fileInfo fileInfoProvider) error {
|
func checkFiles(paths []string, fileInfo fileInfoProvider) error {
|
||||||
// No event definition JSON locations present
|
// No event definition JSON locations present
|
||||||
if len(paths) == 0 {
|
if len(paths) == 0 {
|
||||||
return fmt.Errorf("no paths were given")
|
return errors.New("no paths were given")
|
||||||
}
|
}
|
||||||
if fileInfo == nil {
|
if fileInfo == nil {
|
||||||
return fmt.Errorf("file info provider is nil")
|
return errors.New("file info provider is nil")
|
||||||
}
|
}
|
||||||
// Wrong files
|
// Wrong files
|
||||||
for _, path := range paths {
|
for _, path := range paths {
|
||||||
|
|
|
||||||
|
|
@ -10,9 +10,10 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf/testutil"
|
|
||||||
ia "github.com/intel/iaevents"
|
ia "github.com/intel/iaevents"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf/testutil"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestInitialization(t *testing.T) {
|
func TestInitialization(t *testing.T) {
|
||||||
|
|
@ -135,7 +136,7 @@ func TestGather(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("error while reading entities", func(t *testing.T) {
|
t.Run("error while reading entities", func(t *testing.T) {
|
||||||
errMock := fmt.Errorf("houston we have a problem")
|
errMock := errors.New("houston we have a problem")
|
||||||
mEntitiesValuesReader.On("readEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).
|
mEntitiesValuesReader.On("readEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).
|
||||||
Return(nil, nil, errMock).Once()
|
Return(nil, nil, errMock).Once()
|
||||||
|
|
||||||
|
|
@ -440,7 +441,7 @@ func TestReadMaxFD(t *testing.T) {
|
||||||
maxFD uint64
|
maxFD uint64
|
||||||
failMsg string
|
failMsg string
|
||||||
}{
|
}{
|
||||||
{"read file error", fmt.Errorf("mock error"), nil, 0, openErrorMsg},
|
{"read file error", errors.New("mock error"), nil, 0, openErrorMsg},
|
||||||
{"file content parse error", nil, []byte("wrong format"), 0, parseErrorMsg},
|
{"file content parse error", nil, []byte("wrong format"), 0, parseErrorMsg},
|
||||||
{"negative value reading", nil, []byte("-10000"), 0, parseErrorMsg},
|
{"negative value reading", nil, []byte("-10000"), 0, parseErrorMsg},
|
||||||
{"max uint exceeded", nil, []byte("18446744073709551616"), 0, parseErrorMsg},
|
{"max uint exceeded", nil, []byte("18446744073709551616"), 0, parseErrorMsg},
|
||||||
|
|
|
||||||
|
|
@ -3,6 +3,7 @@
|
||||||
package intel_pmu
|
package intel_pmu
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
|
@ -88,10 +89,10 @@ func (ie *iaEntitiesValuesReader) readEntities(coreEntities []*CoreEventEntity,
|
||||||
|
|
||||||
func (ie *iaEntitiesValuesReader) readCoreEvents(entity *CoreEventEntity) ([]coreMetric, error) {
|
func (ie *iaEntitiesValuesReader) readCoreEvents(entity *CoreEventEntity) ([]coreMetric, error) {
|
||||||
if ie.eventReader == nil || ie.timer == nil {
|
if ie.eventReader == nil || ie.timer == nil {
|
||||||
return nil, fmt.Errorf("event values reader or timer is nil")
|
return nil, errors.New("event values reader or timer is nil")
|
||||||
}
|
}
|
||||||
if entity == nil {
|
if entity == nil {
|
||||||
return nil, fmt.Errorf("entity is nil")
|
return nil, errors.New("entity is nil")
|
||||||
}
|
}
|
||||||
metrics := make([]coreMetric, len(entity.activeEvents))
|
metrics := make([]coreMetric, len(entity.activeEvents))
|
||||||
errGroup := errgroup.Group{}
|
errGroup := errgroup.Group{}
|
||||||
|
|
@ -101,7 +102,7 @@ func (ie *iaEntitiesValuesReader) readCoreEvents(entity *CoreEventEntity) ([]cor
|
||||||
actualEvent := event
|
actualEvent := event
|
||||||
|
|
||||||
if event == nil || event.PerfEvent == nil {
|
if event == nil || event.PerfEvent == nil {
|
||||||
return nil, fmt.Errorf("active event or corresponding perf event is nil")
|
return nil, errors.New("active event or corresponding perf event is nil")
|
||||||
}
|
}
|
||||||
|
|
||||||
errGroup.Go(func() error {
|
errGroup.Go(func() error {
|
||||||
|
|
@ -130,7 +131,7 @@ func (ie *iaEntitiesValuesReader) readCoreEvents(entity *CoreEventEntity) ([]cor
|
||||||
|
|
||||||
func (ie *iaEntitiesValuesReader) readUncoreEvents(entity *UncoreEventEntity) ([]uncoreMetric, error) {
|
func (ie *iaEntitiesValuesReader) readUncoreEvents(entity *UncoreEventEntity) ([]uncoreMetric, error) {
|
||||||
if entity == nil {
|
if entity == nil {
|
||||||
return nil, fmt.Errorf("entity is nil")
|
return nil, errors.New("entity is nil")
|
||||||
}
|
}
|
||||||
var uncoreMetrics []uncoreMetric
|
var uncoreMetrics []uncoreMetric
|
||||||
|
|
||||||
|
|
@ -158,10 +159,10 @@ func (ie *iaEntitiesValuesReader) readUncoreEvents(entity *UncoreEventEntity) ([
|
||||||
|
|
||||||
func (ie *iaEntitiesValuesReader) readMultiEventSeparately(multiEvent multiEvent) ([]uncoreMetric, error) {
|
func (ie *iaEntitiesValuesReader) readMultiEventSeparately(multiEvent multiEvent) ([]uncoreMetric, error) {
|
||||||
if ie.eventReader == nil || ie.timer == nil {
|
if ie.eventReader == nil || ie.timer == nil {
|
||||||
return nil, fmt.Errorf("event values reader or timer is nil")
|
return nil, errors.New("event values reader or timer is nil")
|
||||||
}
|
}
|
||||||
if len(multiEvent.activeEvents) < 1 || multiEvent.perfEvent == nil {
|
if len(multiEvent.activeEvents) < 1 || multiEvent.perfEvent == nil {
|
||||||
return nil, fmt.Errorf("no active events or perf event is nil")
|
return nil, errors.New("no active events or perf event is nil")
|
||||||
}
|
}
|
||||||
activeEvents := multiEvent.activeEvents
|
activeEvents := multiEvent.activeEvents
|
||||||
perfEvent := multiEvent.perfEvent
|
perfEvent := multiEvent.perfEvent
|
||||||
|
|
@ -199,10 +200,10 @@ func (ie *iaEntitiesValuesReader) readMultiEventSeparately(multiEvent multiEvent
|
||||||
|
|
||||||
func (ie *iaEntitiesValuesReader) readMultiEventAgg(multiEvent multiEvent) (uncoreMetric, error) {
|
func (ie *iaEntitiesValuesReader) readMultiEventAgg(multiEvent multiEvent) (uncoreMetric, error) {
|
||||||
if ie.eventReader == nil || ie.timer == nil {
|
if ie.eventReader == nil || ie.timer == nil {
|
||||||
return uncoreMetric{}, fmt.Errorf("event values reader or timer is nil")
|
return uncoreMetric{}, errors.New("event values reader or timer is nil")
|
||||||
}
|
}
|
||||||
if len(multiEvent.activeEvents) < 1 || multiEvent.perfEvent == nil {
|
if len(multiEvent.activeEvents) < 1 || multiEvent.perfEvent == nil {
|
||||||
return uncoreMetric{}, fmt.Errorf("no active events or perf event is nil")
|
return uncoreMetric{}, errors.New("no active events or perf event is nil")
|
||||||
}
|
}
|
||||||
activeEvents := multiEvent.activeEvents
|
activeEvents := multiEvent.activeEvents
|
||||||
perfEvent := multiEvent.perfEvent
|
perfEvent := multiEvent.perfEvent
|
||||||
|
|
|
||||||
|
|
@ -3,6 +3,7 @@
|
||||||
package intel_pmu
|
package intel_pmu
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
@ -61,7 +62,7 @@ func TestReadCoreEvents(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("reading failed", func(t *testing.T) {
|
t.Run("reading failed", func(t *testing.T) {
|
||||||
errMock := fmt.Errorf("mock error")
|
errMock := errors.New("mock error")
|
||||||
event := &ia.ActiveEvent{PerfEvent: &ia.PerfEvent{Name: "event1"}}
|
event := &ia.ActiveEvent{PerfEvent: &ia.PerfEvent{Name: "event1"}}
|
||||||
|
|
||||||
entity := &CoreEventEntity{}
|
entity := &CoreEventEntity{}
|
||||||
|
|
@ -138,7 +139,7 @@ func TestReadMultiEventSeparately(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("reading failed", func(t *testing.T) {
|
t.Run("reading failed", func(t *testing.T) {
|
||||||
errMock := fmt.Errorf("mock error")
|
errMock := errors.New("mock error")
|
||||||
perfEvent := &ia.PerfEvent{Name: "event"}
|
perfEvent := &ia.PerfEvent{Name: "event"}
|
||||||
|
|
||||||
event := &ia.ActiveEvent{PerfEvent: perfEvent}
|
event := &ia.ActiveEvent{PerfEvent: perfEvent}
|
||||||
|
|
@ -190,7 +191,7 @@ func TestReadMultiEventAgg(t *testing.T) {
|
||||||
mReader := &mockValuesReader{}
|
mReader := &mockValuesReader{}
|
||||||
mTimer := &moonClock{}
|
mTimer := &moonClock{}
|
||||||
mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer}
|
mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer}
|
||||||
errMock := fmt.Errorf("mock error")
|
errMock := errors.New("mock error")
|
||||||
|
|
||||||
t.Run("event reader is nil", func(t *testing.T) {
|
t.Run("event reader is nil", func(t *testing.T) {
|
||||||
event := multiEvent{}
|
event := multiEvent{}
|
||||||
|
|
@ -294,7 +295,7 @@ func TestReadMultiEventAgg(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestReadUncoreEvents(t *testing.T) {
|
func TestReadUncoreEvents(t *testing.T) {
|
||||||
errMock := fmt.Errorf("mock error")
|
errMock := errors.New("mock error")
|
||||||
|
|
||||||
t.Run("entity is nil", func(t *testing.T) {
|
t.Run("entity is nil", func(t *testing.T) {
|
||||||
metrics, err := (&iaEntitiesValuesReader{}).readUncoreEvents(nil)
|
metrics, err := (&iaEntitiesValuesReader{}).readUncoreEvents(nil)
|
||||||
|
|
|
||||||
|
|
@ -7,8 +7,9 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
|
||||||
ia "github.com/intel/iaevents"
|
ia "github.com/intel/iaevents"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
)
|
)
|
||||||
|
|
||||||
type entitiesResolver interface {
|
type entitiesResolver interface {
|
||||||
|
|
@ -24,7 +25,7 @@ type iaEntitiesResolver struct {
|
||||||
func (e *iaEntitiesResolver) resolveEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error {
|
func (e *iaEntitiesResolver) resolveEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error {
|
||||||
for _, entity := range coreEntities {
|
for _, entity := range coreEntities {
|
||||||
if entity == nil {
|
if entity == nil {
|
||||||
return fmt.Errorf("core entity is nil")
|
return errors.New("core entity is nil")
|
||||||
}
|
}
|
||||||
if entity.allEvents {
|
if entity.allEvents {
|
||||||
newEvents, _, err := e.resolveAllEvents()
|
newEvents, _, err := e.resolveAllEvents()
|
||||||
|
|
@ -36,7 +37,7 @@ func (e *iaEntitiesResolver) resolveEntities(coreEntities []*CoreEventEntity, un
|
||||||
}
|
}
|
||||||
for _, event := range entity.parsedEvents {
|
for _, event := range entity.parsedEvents {
|
||||||
if event == nil {
|
if event == nil {
|
||||||
return fmt.Errorf("parsed core event is nil")
|
return errors.New("parsed core event is nil")
|
||||||
}
|
}
|
||||||
customEvent, err := e.resolveEvent(event.name, event.qualifiers)
|
customEvent, err := e.resolveEvent(event.name, event.qualifiers)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
@ -50,7 +51,7 @@ func (e *iaEntitiesResolver) resolveEntities(coreEntities []*CoreEventEntity, un
|
||||||
}
|
}
|
||||||
for _, entity := range uncoreEntities {
|
for _, entity := range uncoreEntities {
|
||||||
if entity == nil {
|
if entity == nil {
|
||||||
return fmt.Errorf("uncore entity is nil")
|
return errors.New("uncore entity is nil")
|
||||||
}
|
}
|
||||||
if entity.allEvents {
|
if entity.allEvents {
|
||||||
_, newEvents, err := e.resolveAllEvents()
|
_, newEvents, err := e.resolveAllEvents()
|
||||||
|
|
@ -62,7 +63,7 @@ func (e *iaEntitiesResolver) resolveEntities(coreEntities []*CoreEventEntity, un
|
||||||
}
|
}
|
||||||
for _, event := range entity.parsedEvents {
|
for _, event := range entity.parsedEvents {
|
||||||
if event == nil {
|
if event == nil {
|
||||||
return fmt.Errorf("parsed uncore event is nil")
|
return errors.New("parsed uncore event is nil")
|
||||||
}
|
}
|
||||||
customEvent, err := e.resolveEvent(event.name, event.qualifiers)
|
customEvent, err := e.resolveEvent(event.name, event.qualifiers)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
||||||
|
|
@ -115,10 +115,10 @@ func (r *IntelRDT) Initialize() error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if len(r.Cores) != 0 && len(r.Processes) != 0 {
|
if len(r.Cores) != 0 && len(r.Processes) != 0 {
|
||||||
return fmt.Errorf("monitoring start error, process and core tracking can not be done simultaneously")
|
return errors.New("monitoring start error, process and core tracking can not be done simultaneously")
|
||||||
}
|
}
|
||||||
if len(r.Cores) == 0 && len(r.Processes) == 0 {
|
if len(r.Cores) == 0 && len(r.Processes) == 0 {
|
||||||
return fmt.Errorf("monitoring start error, at least one of cores or processes must be provided in config")
|
return errors.New("monitoring start error, at least one of cores or processes must be provided in config")
|
||||||
}
|
}
|
||||||
if r.SamplingInterval == 0 {
|
if r.SamplingInterval == 0 {
|
||||||
r.SamplingInterval = defaultSamplingInterval
|
r.SamplingInterval = defaultSamplingInterval
|
||||||
|
|
@ -206,7 +206,7 @@ func (r *IntelRDT) associateProcessesWithPIDs(providedProcesses []string) (map[s
|
||||||
|
|
||||||
availableProcesses, err := r.Processor.getAllProcesses()
|
availableProcesses, err := r.Processor.getAllProcesses()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot gather information of all available processes")
|
return nil, errors.New("cannot gather information of all available processes")
|
||||||
}
|
}
|
||||||
for _, availableProcess := range availableProcesses {
|
for _, availableProcess := range availableProcesses {
|
||||||
if choice.Contains(availableProcess.Name, providedProcesses) {
|
if choice.Contains(availableProcess.Name, providedProcesses) {
|
||||||
|
|
@ -388,14 +388,14 @@ func createArgsForGroups(coresOrPIDs []string) string {
|
||||||
|
|
||||||
func validatePqosPath(pqosPath string) error {
|
func validatePqosPath(pqosPath string) error {
|
||||||
if len(pqosPath) == 0 {
|
if len(pqosPath) == 0 {
|
||||||
return fmt.Errorf("monitoring start error, can not find pqos executable")
|
return errors.New("monitoring start error, can not find pqos executable")
|
||||||
}
|
}
|
||||||
pathInfo, err := os.Stat(pqosPath)
|
pathInfo, err := os.Stat(pqosPath)
|
||||||
if os.IsNotExist(err) {
|
if os.IsNotExist(err) {
|
||||||
return fmt.Errorf("monitoring start error, provided pqos path not exist")
|
return errors.New("monitoring start error, provided pqos path not exist")
|
||||||
}
|
}
|
||||||
if mode := pathInfo.Mode(); !mode.IsRegular() {
|
if mode := pathInfo.Mode(); !mode.IsRegular() {
|
||||||
return fmt.Errorf("monitoring start error, provided pqos path does not point to a regular file")
|
return errors.New("monitoring start error, provided pqos path does not point to a regular file")
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
@ -431,7 +431,7 @@ func validateAndParseCores(coreStr string) ([]int, error) {
|
||||||
rangeValues := strings.Split(coreStr, "-")
|
rangeValues := strings.Split(coreStr, "-")
|
||||||
|
|
||||||
if len(rangeValues) != 2 {
|
if len(rangeValues) != 2 {
|
||||||
return nil, fmt.Errorf("more than two values in range")
|
return nil, errors.New("more than two values in range")
|
||||||
}
|
}
|
||||||
|
|
||||||
startValue, err := strconv.Atoi(rangeValues[0])
|
startValue, err := strconv.Atoi(rangeValues[0])
|
||||||
|
|
@ -444,7 +444,7 @@ func validateAndParseCores(coreStr string) ([]int, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if startValue > stopValue {
|
if startValue > stopValue {
|
||||||
return nil, fmt.Errorf("first value cannot be higher than second")
|
return nil, errors.New("first value cannot be higher than second")
|
||||||
}
|
}
|
||||||
|
|
||||||
rangeOfCores := makeRange(startValue, stopValue)
|
rangeOfCores := makeRange(startValue, stopValue)
|
||||||
|
|
@ -464,7 +464,7 @@ func findPIDsInMeasurement(measurements string) (string, error) {
|
||||||
var insideQuoteRegex = regexp.MustCompile(`"(.*?)"`)
|
var insideQuoteRegex = regexp.MustCompile(`"(.*?)"`)
|
||||||
pidsMatch := insideQuoteRegex.FindStringSubmatch(measurements)
|
pidsMatch := insideQuoteRegex.FindStringSubmatch(measurements)
|
||||||
if len(pidsMatch) < 2 {
|
if len(pidsMatch) < 2 {
|
||||||
return "", fmt.Errorf("cannot find PIDs in measurement line")
|
return "", errors.New("cannot find PIDs in measurement line")
|
||||||
}
|
}
|
||||||
pids := pidsMatch[1]
|
pids := pidsMatch[1]
|
||||||
return pids, nil
|
return pids, nil
|
||||||
|
|
@ -489,7 +489,7 @@ func splitCSVLineIntoValues(line string) (splitCSVLine, error) {
|
||||||
|
|
||||||
func validateInterval(interval int32) error {
|
func validateInterval(interval int32) error {
|
||||||
if interval < 0 {
|
if interval < 0 {
|
||||||
return fmt.Errorf("interval cannot be lower than 0")
|
return errors.New("interval cannot be lower than 0")
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -4,6 +4,7 @@ package internet_speed
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
_ "embed"
|
_ "embed"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
"os"
|
"os"
|
||||||
|
|
@ -142,7 +143,7 @@ func (is *InternetSpeed) findClosestServer() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(is.servers) < 1 {
|
if len(is.servers) < 1 {
|
||||||
return fmt.Errorf("no servers found")
|
return errors.New("no servers found")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Return the first match or the server with the lowest latency
|
// Return the first match or the server with the lowest latency
|
||||||
|
|
@ -168,7 +169,7 @@ func (is *InternetSpeed) findClosestServer() error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return fmt.Errorf("no server set: filter excluded all servers or no available server found")
|
return errors.New("no server set: filter excluded all servers or no available server found")
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
|
|
||||||
|
|
@ -5,6 +5,7 @@ import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"bytes"
|
"bytes"
|
||||||
_ "embed"
|
_ "embed"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
|
|
@ -77,7 +78,7 @@ func (m *Ipmi) Init() error {
|
||||||
// Gather is the main execution function for the plugin
|
// Gather is the main execution function for the plugin
|
||||||
func (m *Ipmi) Gather(acc telegraf.Accumulator) error {
|
func (m *Ipmi) Gather(acc telegraf.Accumulator) error {
|
||||||
if len(m.Path) == 0 {
|
if len(m.Path) == 0 {
|
||||||
return fmt.Errorf("ipmitool not found: verify that ipmitool is installed and that ipmitool is in your PATH")
|
return errors.New("ipmitool not found: verify that ipmitool is installed and that ipmitool is in your PATH")
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(m.Servers) > 0 {
|
if len(m.Servers) > 0 {
|
||||||
|
|
|
||||||
|
|
@ -3,7 +3,6 @@ package ipset
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
|
||||||
"reflect"
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
|
@ -40,7 +39,7 @@ func TestIpset(t *testing.T) {
|
||||||
value: `create hash:net family inet hashsize 1024 maxelem 65536 counters
|
value: `create hash:net family inet hashsize 1024 maxelem 65536 counters
|
||||||
add myset 4.5.6.7 packets 123 bytes
|
add myset 4.5.6.7 packets 123 bytes
|
||||||
`,
|
`,
|
||||||
err: fmt.Errorf("error parsing line (expected at least 7 fields): \t\t\t\tadd myset 4.5.6.7 packets 123 bytes"),
|
err: errors.New("error parsing line (expected at least 7 fields): \t\t\t\tadd myset 4.5.6.7 packets 123 bytes"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "Non-empty sets, counters, no comment",
|
name: "Non-empty sets, counters, no comment",
|
||||||
|
|
|
||||||
|
|
@ -4,6 +4,7 @@ package jenkins
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
_ "embed"
|
_ "embed"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
|
@ -147,7 +148,7 @@ func (j *Jenkins) initialize(client *http.Client) error {
|
||||||
func (j *Jenkins) gatherNodeData(n node, acc telegraf.Accumulator) error {
|
func (j *Jenkins) gatherNodeData(n node, acc telegraf.Accumulator) error {
|
||||||
tags := map[string]string{}
|
tags := map[string]string{}
|
||||||
if n.DisplayName == "" {
|
if n.DisplayName == "" {
|
||||||
return fmt.Errorf("error empty node name")
|
return errors.New("error empty node name")
|
||||||
}
|
}
|
||||||
|
|
||||||
tags["node_name"] = n.DisplayName
|
tags["node_name"] = n.DisplayName
|
||||||
|
|
|
||||||
|
|
@ -5,6 +5,7 @@ import (
|
||||||
"bytes"
|
"bytes"
|
||||||
_ "embed"
|
_ "embed"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
@ -235,14 +236,14 @@ func (j *Jolokia) Gather(acc telegraf.Accumulator) error {
|
||||||
server.Host, server.Port, metrics[i].Mbean, metrics[i].Attribute, status))
|
server.Host, server.Port, metrics[i].Mbean, metrics[i].Attribute, status))
|
||||||
continue
|
continue
|
||||||
} else if !ok {
|
} else if !ok {
|
||||||
acc.AddError(fmt.Errorf("missing status in response body"))
|
acc.AddError(errors.New("missing status in response body"))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if values, ok := resp["value"]; ok {
|
if values, ok := resp["value"]; ok {
|
||||||
j.extractValues(metrics[i].Name, values, fields)
|
j.extractValues(metrics[i].Name, values, fields)
|
||||||
} else {
|
} else {
|
||||||
acc.AddError(fmt.Errorf("missing key 'value' in output response"))
|
acc.AddError(errors.New("missing key 'value' in output response"))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,7 +1,6 @@
|
||||||
package jolokia
|
package jolokia
|
||||||
|
|
||||||
import (
|
import (
|
||||||
_ "fmt"
|
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
|
||||||
|
|
@ -4,6 +4,7 @@ package jti_openconfig_telemetry
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
_ "embed"
|
_ "embed"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
|
@ -368,7 +369,7 @@ func (m *OpenConfigTelemetry) authenticate(ctx context.Context, server string, g
|
||||||
func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error {
|
func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error {
|
||||||
// Build sensors config
|
// Build sensors config
|
||||||
if m.splitSensorConfig() == 0 {
|
if m.splitSensorConfig() == 0 {
|
||||||
return fmt.Errorf("no valid sensor configuration available")
|
return errors.New("no valid sensor configuration available")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parse TLS config
|
// Parse TLS config
|
||||||
|
|
|
||||||
|
|
@ -7,6 +7,7 @@ import (
|
||||||
"compress/zlib"
|
"compress/zlib"
|
||||||
"context"
|
"context"
|
||||||
_ "embed"
|
_ "embed"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"math/big"
|
"math/big"
|
||||||
|
|
@ -261,7 +262,7 @@ func (k *KinesisConsumer) GetCheckpoint(streamName, shardID string) (string, err
|
||||||
// Set wraps the checkpoint's SetCheckpoint function (called by consumer library)
|
// Set wraps the checkpoint's SetCheckpoint function (called by consumer library)
|
||||||
func (k *KinesisConsumer) SetCheckpoint(streamName, shardID, sequenceNumber string) error {
|
func (k *KinesisConsumer) SetCheckpoint(streamName, shardID, sequenceNumber string) error {
|
||||||
if sequenceNumber == "" {
|
if sequenceNumber == "" {
|
||||||
return fmt.Errorf("sequence number should not be empty")
|
return errors.New("sequence number should not be empty")
|
||||||
}
|
}
|
||||||
|
|
||||||
k.checkpointTex.Lock()
|
k.checkpointTex.Lock()
|
||||||
|
|
|
||||||
|
|
@ -4,6 +4,7 @@ package leofs
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
_ "embed"
|
_ "embed"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
@ -206,7 +207,7 @@ func (l *LeoFS) gatherServer(
|
||||||
defer internal.WaitTimeout(cmd, time.Second*5) //nolint:errcheck // ignore the returned error as we cannot do anything about it anyway
|
defer internal.WaitTimeout(cmd, time.Second*5) //nolint:errcheck // ignore the returned error as we cannot do anything about it anyway
|
||||||
scanner := bufio.NewScanner(stdout)
|
scanner := bufio.NewScanner(stdout)
|
||||||
if !scanner.Scan() {
|
if !scanner.Scan() {
|
||||||
return fmt.Errorf("unable to retrieve the node name")
|
return errors.New("unable to retrieve the node name")
|
||||||
}
|
}
|
||||||
nodeName, err := retrieveTokenAfterColon(scanner.Text())
|
nodeName, err := retrieveTokenAfterColon(scanner.Text())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
||||||
|
|
@ -3,13 +3,13 @@ package libvirt
|
||||||
|
|
||||||
import (
|
import (
|
||||||
_ "embed"
|
_ "embed"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"golang.org/x/sync/errgroup"
|
|
||||||
|
|
||||||
golibvirt "github.com/digitalocean/go-libvirt"
|
golibvirt "github.com/digitalocean/go-libvirt"
|
||||||
libvirtutils "github.com/thomasklein94/packer-plugin-libvirt/libvirt-utils"
|
libvirtutils "github.com/thomasklein94/packer-plugin-libvirt/libvirt-utils"
|
||||||
|
"golang.org/x/sync/errgroup"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
"github.com/influxdata/telegraf/plugins/inputs"
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
|
|
@ -84,7 +84,7 @@ func (l *Libvirt) Init() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
if !l.isThereAnythingToGather() {
|
if !l.isThereAnythingToGather() {
|
||||||
return fmt.Errorf("all configuration options are empty or invalid. Did not find anything to gather")
|
return errors.New("all configuration options are empty or invalid. Did not find anything to gather")
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
|
|
||||||
|
|
@ -1,7 +1,7 @@
|
||||||
package libvirt
|
package libvirt
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"errors"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
|
@ -68,7 +68,7 @@ func TestLibvirt_Gather(t *testing.T) {
|
||||||
Log: testutil.Logger{},
|
Log: testutil.Logger{},
|
||||||
utils: &mockLibvirtUtils,
|
utils: &mockLibvirtUtils,
|
||||||
}
|
}
|
||||||
mockLibvirtUtils.On("EnsureConnected", mock.Anything).Return(fmt.Errorf("failed to connect")).Once()
|
mockLibvirtUtils.On("EnsureConnected", mock.Anything).Return(errors.New("failed to connect")).Once()
|
||||||
err := l.Gather(&acc)
|
err := l.Gather(&acc)
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
require.Contains(t, err.Error(), "failed to connect")
|
require.Contains(t, err.Error(), "failed to connect")
|
||||||
|
|
@ -84,7 +84,7 @@ func TestLibvirt_Gather(t *testing.T) {
|
||||||
StatisticsGroups: []string{"state"},
|
StatisticsGroups: []string{"state"},
|
||||||
}
|
}
|
||||||
mockLibvirtUtils.On("EnsureConnected", mock.Anything).Return(nil).Once().
|
mockLibvirtUtils.On("EnsureConnected", mock.Anything).Return(nil).Once().
|
||||||
On("GatherAllDomains", mock.Anything).Return(nil, fmt.Errorf("gather domain error")).Once().
|
On("GatherAllDomains", mock.Anything).Return(nil, errors.New("gather domain error")).Once().
|
||||||
On("Disconnect").Return(nil).Once()
|
On("Disconnect").Return(nil).Once()
|
||||||
|
|
||||||
err := l.Gather(&acc)
|
err := l.Gather(&acc)
|
||||||
|
|
@ -120,7 +120,7 @@ func TestLibvirt_Gather(t *testing.T) {
|
||||||
mockLibvirtUtils.On("EnsureConnected", mock.Anything).Return(nil).Once().
|
mockLibvirtUtils.On("EnsureConnected", mock.Anything).Return(nil).Once().
|
||||||
On("GatherAllDomains", mock.Anything).Return(domains, nil).Once().
|
On("GatherAllDomains", mock.Anything).Return(domains, nil).Once().
|
||||||
On("GatherStatsForDomains", mock.Anything, mock.Anything).
|
On("GatherStatsForDomains", mock.Anything, mock.Anything).
|
||||||
Return(nil, fmt.Errorf("gathering metric by number error")).Once().
|
Return(nil, errors.New("gathering metric by number error")).Once().
|
||||||
On("Disconnect").Return(nil).Once()
|
On("Disconnect").Return(nil).Once()
|
||||||
|
|
||||||
err := l.Init()
|
err := l.Init()
|
||||||
|
|
|
||||||
|
|
@ -58,7 +58,7 @@ func (g *LinuxCPU) Init() error {
|
||||||
|
|
||||||
if len(g.Metrics) == 0 {
|
if len(g.Metrics) == 0 {
|
||||||
// The user has not enabled any of the metrics
|
// The user has not enabled any of the metrics
|
||||||
return fmt.Errorf("no metrics selected")
|
return errors.New("no metrics selected")
|
||||||
}
|
}
|
||||||
|
|
||||||
cpus, err := g.discoverCpus()
|
cpus, err := g.discoverCpus()
|
||||||
|
|
@ -66,7 +66,7 @@ func (g *LinuxCPU) Init() error {
|
||||||
return err
|
return err
|
||||||
} else if len(cpus) == 0 {
|
} else if len(cpus) == 0 {
|
||||||
// Although the user has specified metrics to collect, `discoverCpus` failed to find the required metrics
|
// Although the user has specified metrics to collect, `discoverCpus` failed to find the required metrics
|
||||||
return fmt.Errorf("no CPUs detected to track")
|
return errors.New("no CPUs detected to track")
|
||||||
}
|
}
|
||||||
g.cpus = cpus
|
g.cpus = cpus
|
||||||
|
|
||||||
|
|
@ -208,7 +208,7 @@ func readUintFromFile(propPath string) (uint64, error) {
|
||||||
if err != nil && !errors.Is(err, io.EOF) {
|
if err != nil && !errors.Is(err, io.EOF) {
|
||||||
return 0, fmt.Errorf("error on reading file: %w", err)
|
return 0, fmt.Errorf("error on reading file: %w", err)
|
||||||
} else if n == 0 {
|
} else if n == 0 {
|
||||||
return 0, fmt.Errorf("error on reading file: file is empty")
|
return 0, errors.New("error on reading file: file is empty")
|
||||||
}
|
}
|
||||||
|
|
||||||
return strconv.ParseUint(string(buffer[:n-1]), 10, 64)
|
return strconv.ParseUint(string(buffer[:n-1]), 10, 64)
@@ -5,6 +5,7 @@ import (
 "bufio"
 "context"
 _ "embed"
+"errors"
 "fmt"
 "net"
 "net/url"

@@ -145,24 +146,24 @@ func (m *Mcrouter) ParseAddress(address string) (parsedAddress string, protocol
 u, parseError := url.Parse(parsedAddress)

 if parseError != nil {
-return "", "", fmt.Errorf("invalid server address")
+return "", "", errors.New("invalid server address")
 }

 if u.Scheme != "tcp" && u.Scheme != "unix" {
-return "", "", fmt.Errorf("invalid server protocol")
+return "", "", errors.New("invalid server protocol")
 }

 protocol = u.Scheme

 if protocol == "unix" {
 if u.Path == "" {
-return "", "", fmt.Errorf("invalid unix socket path")
+return "", "", errors.New("invalid unix socket path")
 }

 parsedAddress = u.Path
 } else {
 if u.Host == "" {
-return "", "", fmt.Errorf("invalid host")
+return "", "", errors.New("invalid host")
 }

 host = u.Hostname()

@@ -6,6 +6,7 @@ import (
 "bytes"
 "crypto/tls"
 _ "embed"
+"errors"
 "fmt"
 "net"
 "strconv"

@@ -145,7 +146,7 @@ func (m *Memcached) gatherServer(
 }

 if conn == nil {
-return fmt.Errorf("Failed to create net connection")
+return errors.New("failed to create net connection")
 }

 // Extend connection
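Besides swapping the constructor, the message above is also lower-cased ("Failed..." becomes "failed..."). The Go convention is that error strings are not capitalized and carry no trailing punctuation, since they usually end up embedded in a larger wrapped message. A small sketch of why that matters; the dial function is hypothetical and only stands in for the plugin's connection setup.

package main

import (
	"errors"
	"fmt"
)

// dial stands in for the connection setup in gatherServer.
func dial() error {
	// A lower-case, unpunctuated message composes cleanly when wrapped.
	return errors.New("failed to create net connection")
}

func main() {
	if err := dial(); err != nil {
		// Wrapping shows why a capitalized message would read badly mid-sentence.
		fmt.Println(fmt.Errorf("gathering memcached stats: %w", err))
	}
}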
@@ -128,11 +128,11 @@ func (m *Modbus) SampleConfig() string {
 func (m *Modbus) Init() error {
 //check device name
 if m.Name == "" {
-return fmt.Errorf("device name is empty")
+return errors.New("device name is empty")
 }

 if m.Retries < 0 {
-return fmt.Errorf("retries cannot be negative")
+return errors.New("retries cannot be negative")
 }

 // Determine the configuration style

@@ -6,6 +6,7 @@ import (
 "crypto/tls"
 "crypto/x509"
 _ "embed"
+"errors"
 "fmt"
 "net/url"
 "strings"

@@ -68,13 +69,13 @@ func (m *MongoDB) Init() error {
 InsecureSkipVerify: m.ClientConfig.InsecureSkipVerify,
 }
 if len(m.Ssl.CaCerts) == 0 {
-return fmt.Errorf("you must explicitly set insecure_skip_verify to skip certificate validation")
+return errors.New("you must explicitly set insecure_skip_verify to skip certificate validation")
 }

 roots := x509.NewCertPool()
 for _, caCert := range m.Ssl.CaCerts {
 if ok := roots.AppendCertsFromPEM([]byte(caCert)); !ok {
-return fmt.Errorf("failed to parse root certificate")
+return errors.New("failed to parse root certificate")
 }
 }
 m.tlsConfig.RootCAs = roots

@@ -2,6 +2,7 @@ package mongodb

 import (
 "context"
+"errors"
 "fmt"
 "strconv"
 "strings"

@@ -106,13 +107,13 @@ func (s *Server) gatherTopStatData() (*TopStats, error) {

 totals, ok := dest["totals"].(map[string]interface{})
 if !ok {
-return nil, fmt.Errorf("collection totals not found or not a map")
+return nil, errors.New("collection totals not found or not a map")
 }
 delete(totals, "note")

 recorded, err := bson.Marshal(totals)
 if err != nil {
-return nil, fmt.Errorf("unable to marshal totals")
+return nil, errors.New("unable to marshal totals")
 }

 topInfo := make(map[string]TopStatCollection)

@@ -147,15 +147,15 @@ func (m *MQTTConsumer) Init() error {
 m.TopicParsing[i].SplitTopic = strings.Split(p.Topic, "/")

 if len(splitMeasurement) != len(m.TopicParsing[i].SplitTopic) && len(splitMeasurement) != 1 {
-return fmt.Errorf("config error topic parsing: measurement length does not equal topic length")
+return errors.New("config error topic parsing: measurement length does not equal topic length")
 }

 if len(m.TopicParsing[i].SplitFields) != len(m.TopicParsing[i].SplitTopic) && p.Fields != "" {
-return fmt.Errorf("config error topic parsing: fields length does not equal topic length")
+return errors.New("config error topic parsing: fields length does not equal topic length")
 }

 if len(m.TopicParsing[i].SplitTags) != len(m.TopicParsing[i].SplitTopic) && p.Tags != "" {
-return fmt.Errorf("config error topic parsing: tags length does not equal topic length")
+return errors.New("config error topic parsing: tags length does not equal topic length")
 }
 }

@@ -383,7 +383,7 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) {
 password.Destroy()
 }
 if len(m.Servers) == 0 {
-return opts, fmt.Errorf("could not get host information")
+return opts, errors.New("could not get host information")
 }
 for _, server := range m.Servers {
 // Preserve support for host:port style servers; deprecated in Telegraf 1.4.4
@@ -1,7 +1,7 @@
 package mqtt_consumer

 import (
-"fmt"
+"errors"
 "testing"
 "time"

@@ -325,7 +325,7 @@ func TestTopicTag(t *testing.T) {
 tag := ""
 return &tag
 },
-expectedError: fmt.Errorf("config error topic parsing: fields length does not equal topic length"),
+expectedError: errors.New("config error topic parsing: fields length does not equal topic length"),
 topicParsing: []TopicParsingConfig{
 {
 Topic: "telegraf/+/test/hello",

@@ -5,7 +5,6 @@ import (
 "context"
 _ "embed"
 "errors"
-"fmt"
 "sync"

 "github.com/nsqio/go-nsq"

@@ -112,7 +111,7 @@ func (n *NSQConsumer) Start(ac telegraf.Accumulator) error {

 // Check if we have anything to connect to
 if len(n.Nsqlookupd) == 0 && len(n.Nsqd) == 0 {
-return fmt.Errorf("either 'nsqd' or 'nsqlookupd' needs to be specified")
+return errors.New("either 'nsqd' or 'nsqlookupd' needs to be specified")
 }

 if len(n.Nsqlookupd) > 0 {

@@ -2,6 +2,7 @@ package opcua_listener

 import (
 "context"
+"errors"
 "fmt"
 "reflect"
 "time"

@@ -44,9 +45,9 @@ func checkDataChangeFilterParameters(params *input.DataChangeFilter) error {
 params.DeadbandType != input.Percent:
 return fmt.Errorf("deadband_type '%s' not supported", params.DeadbandType)
 case params.DeadbandValue == nil:
-return fmt.Errorf("deadband_value was not set")
+return errors.New("deadband_value was not set")
 case *params.DeadbandValue < 0:
-return fmt.Errorf("negative deadband_value not supported")
+return errors.New("negative deadband_value not supported")
 default:
 return nil
 }

@@ -1,6 +1,9 @@
 package opensearch_query

-import "fmt"
+import (
+"errors"
+"fmt"
+)

 type BucketAggregationRequest map[string]*aggregationFunction

@@ -25,7 +28,7 @@ func (b BucketAggregationRequest) AddNestedAggregation(name string, a Aggregatio

 func (b BucketAggregationRequest) BucketSize(name string, size int) error {
 if size <= 0 {
-return fmt.Errorf("invalid size; must be integer value > 0")
+return errors.New("invalid size; must be integer value > 0")
 }

 if _, ok := b[name]; !ok {

@@ -6,6 +6,7 @@ import (
 "crypto/tls"
 _ "embed"
 "encoding/json"
+"errors"
 "fmt"
 "net/http"
 "strings"

@@ -65,7 +66,7 @@ func (*OpensearchQuery) SampleConfig() string {
 // Init the plugin.
 func (o *OpensearchQuery) Init() error {
 if o.URLs == nil {
-return fmt.Errorf("no urls defined")
+return errors.New("no urls defined")
 }

 err := o.newClient()

@@ -75,10 +76,10 @@ func (o *OpensearchQuery) Init() error {

 for i, agg := range o.Aggregations {
 if agg.MeasurementName == "" {
-return fmt.Errorf("field 'measurement_name' is not set")
+return errors.New("field 'measurement_name' is not set")
 }
 if agg.DateField == "" {
-return fmt.Errorf("field 'date_field' is not set")
+return errors.New("field 'date_field' is not set")
 }
 err = o.initAggregation(agg, i)
 if err != nil {
@@ -13,6 +13,7 @@ package openstack
 import (
 "context"
 _ "embed"
+"errors"
 "fmt"
 "regexp"
 "sort"

@@ -113,10 +114,10 @@ func (o *OpenStack) Init() error {
 }
 sort.Strings(o.EnabledServices)
 if o.Username == "" || o.Password == "" {
-return fmt.Errorf("username or password can not be empty string")
+return errors.New("username or password can not be empty string")
 }
 if o.TagValue == "" {
-return fmt.Errorf("tag_value option can not be empty string")
+return errors.New("tag_value option can not be empty string")
 }

 // Check the enabled services

@@ -77,9 +77,7 @@ func TestErrorGetP4Info(t *testing.T) {
 }{
 {
 getForwardingPipelineConfigResponse: nil,
-getForwardingPipelineConfigResponseError: fmt.Errorf(
-"error when retrieving forwarding pipeline config",
-),
+getForwardingPipelineConfigResponseError: errors.New("error when retrieving forwarding pipeline config"),
 }, {
 getForwardingPipelineConfigResponse: &p4v1.GetForwardingPipelineConfigResponse{
 Config: nil,

@@ -583,7 +581,7 @@ func TestFailReadCounterEntryFromEntry(t *testing.T) {
 func TestFailReadAllEntries(t *testing.T) {
 p4RtClient := &fakeP4RuntimeClient{
 readFn: func(ctx context.Context, in *p4v1.ReadRequest, opts ...grpc.CallOption) (p4v1.P4Runtime_ReadClient, error) {
-return nil, errors.New("Connection error")
+return nil, errors.New("connection error")
 },
 getForwardingPipelineConfigFn: func(
 ctx context.Context,

@@ -617,8 +615,7 @@ func TestFailReadAllEntries(t *testing.T) {
 require.Equal(
 t,
 acc.Errors[0],
-fmt.Errorf("reading counter entries with ID=1111 failed with error: %w",
-errors.New("Connection error")),
+fmt.Errorf("reading counter entries with ID=1111 failed with error: %w", errors.New("connection error")),
 )
 testutil.RequireMetricsEqual(
 t,

@@ -160,10 +160,10 @@ func (p *Ping) nativePing(destination string) (*pingStats, error) {
 if err != nil {
 if strings.Contains(err.Error(), "operation not permitted") {
 if runtime.GOOS == "linux" {
-return nil, fmt.Errorf("permission changes required, enable CAP_NET_RAW capabilities (refer to the ping plugin's README.md for more info)")
+return nil, errors.New("permission changes required, enable CAP_NET_RAW capabilities (refer to the ping plugin's README.md for more info)")
 }

-return nil, fmt.Errorf("permission changes required, refer to the ping plugin's README.md for more info")
+return nil, errors.New("permission changes required, refer to the ping plugin's README.md for more info")
 }
 return nil, err
 }

@@ -4,7 +4,6 @@ package ping

 import (
 "errors"
-"fmt"
 "sort"
 "testing"
 "time"

@@ -518,7 +517,7 @@ func TestDNSLookupError(t *testing.T) {
 Method: "native",
 IPv6: false,
 nativePingFunc: func(destination string) (*pingStats, error) {
-return nil, fmt.Errorf("unknown")
+return nil, errors.New("unknown")
 },
 }
@@ -1,6 +1,7 @@
 package powerdns_recursor

 import (
+"errors"
 "fmt"
 "net"
 "strconv"

@@ -57,7 +58,7 @@ func writeNativeUIntToConn(conn net.Conn, value uint) error {
 case 8:
 internal.HostEndianness.PutUint64(intData, uint64(value))
 default:
-return fmt.Errorf("unsupported system configuration")
+return errors.New("unsupported system configuration")
 }

 _, err := conn.Write(intData)

@@ -83,6 +84,6 @@ func readNativeUIntFromConn(conn net.Conn) (uint, error) {
 case 8:
 return uint(internal.HostEndianness.Uint64(intData)), nil
 default:
-return 0, fmt.Errorf("unsupported system configuration")
+return 0, errors.New("unsupported system configuration")
 }
 }

@@ -1,6 +1,7 @@
 package powerdns_recursor

 import (
+"errors"
 "fmt"
 "net"
 "os"

@@ -64,7 +65,7 @@ func (p *PowerdnsRecursor) gatherFromV1Server(address string, acc telegraf.Accum
 return err
 }
 if n == 0 {
-return fmt.Errorf("no data received")
+return errors.New("no data received")
 }

 metrics := string(buf)

@@ -1,6 +1,7 @@
 package powerdns_recursor

 import (
+"errors"
 "fmt"
 "net"
 "os"

@@ -68,7 +69,7 @@ func (p *PowerdnsRecursor) gatherFromV2Server(address string, acc telegraf.Accum
 return err
 }
 if n == 0 {
-return fmt.Errorf("no status code received")
+return errors.New("no status code received")
 }

 // Read the response data.

@@ -78,7 +79,7 @@ func (p *PowerdnsRecursor) gatherFromV2Server(address string, acc telegraf.Accum
 return err
 }
 if n == 0 {
-return fmt.Errorf("no data received")
+return errors.New("no data received")
 }

 metrics := string(buf)

@@ -1,6 +1,7 @@
 package powerdns_recursor

 import (
+"errors"
 "fmt"
 "net"
 "time"

@@ -49,7 +50,7 @@ func (p *PowerdnsRecursor) gatherFromV3Server(address string, acc telegraf.Accum
 return err
 }
 if n == 0 {
-return fmt.Errorf("no status code received")
+return errors.New("no status code received")
 }

 responseLength, err := readNativeUIntFromConn(conn)

@@ -57,7 +58,7 @@ func (p *PowerdnsRecursor) gatherFromV3Server(address string, acc telegraf.Accum
 return err
 }
 if responseLength == 0 {
-return fmt.Errorf("received data length was '0'")
+return errors.New("received data length was '0'")
 }

 // Don't allow more than 64kb of data to prevent DOS / issues

@@ -3,6 +3,7 @@
 package processes

 import (
+"errors"
 "fmt"
 "runtime"
 "testing"

@@ -192,7 +193,7 @@ func (t *tester) testProcFile2(_ string) ([]byte, error) {
 }

 func testExecPSError(_ bool) ([]byte, error) {
-return []byte("\nSTAT\nD\nI\nL\nR\nR+\nS\nS+\nSNs\nSs\nU\nZ\n"), fmt.Errorf("error")
+return []byte("\nSTAT\nD\nI\nL\nR\nR+\nS\nS+\nSNs\nSs\nU\nZ\n"), errors.New("error")
 }

 const testProcStat = `10 (rcuob/0) %s 2 0 0 0 -1 2129984 0 0 0 0 0 0 0 0 20 0 %s 0 11 0 0 18446744073709551615 0 0 0 0 0 0 0 ` +
@@ -1,14 +1,15 @@
 package procstat

 import (
-"fmt"
+"errors"
 "runtime"
 "strconv"
 "time"

+"github.com/shirou/gopsutil/v3/process"
+
 "github.com/influxdata/telegraf"
 "github.com/influxdata/telegraf/metric"
-"github.com/shirou/gopsutil/v3/process"
 )

 type Process interface {

@@ -59,7 +60,7 @@ func (p *Proc) percent(_ time.Duration) (float64, error) {
 cpuPerc, err := p.Process.Percent(time.Duration(0))
 if !p.hasCPUTimes && err == nil {
 p.hasCPUTimes = true
-return 0, fmt.Errorf("must call Percent twice to compute percent cpu")
+return 0, errors.New("must call Percent twice to compute percent cpu")
 }
 return cpuPerc, err
 }

@@ -1,6 +1,7 @@
 package procstat

 import (
+"errors"
 "fmt"
 "os"
 "os/exec"

@@ -269,7 +270,7 @@ func TestGather_CreateProcessErrorOk(t *testing.T) {
 Log: testutil.Logger{},
 finder: newTestFinder([]PID{pid}),
 createProcess: func(PID) (Process, error) {
-return nil, fmt.Errorf("createProcess error")
+return nil, errors.New("createProcess error")
 },
 }
 require.NoError(t, p.Init())

@@ -404,7 +404,7 @@ func gatherOverview(r *RabbitMQ, acc telegraf.Accumulator) {
 }

 if overview.QueueTotals == nil || overview.ObjectTotals == nil || overview.MessageStats == nil {
-acc.AddError(fmt.Errorf("Wrong answer from rabbitmq. Probably auth issue"))
+acc.AddError(errors.New("wrong answer from rabbitmq, probably auth issue"))
 return
 }

@@ -1,6 +1,7 @@
 package rabbitmq

 import (
+"errors"
 "fmt"
 "net/http"
 "net/http/httptest"

@@ -598,7 +599,7 @@ func TestRabbitMQGeneratesMetricsSet2(t *testing.T) {
 ),
 }
 expectedErrors := []error{
-fmt.Errorf("error response trying to get \"/api/federation-links\": \"Object Not Found\" (reason: \"Not Found\")"),
+errors.New("error response trying to get \"/api/federation-links\": \"Object Not Found\" (reason: \"Not Found\")"),
 }

 // Run the test

@@ -625,11 +626,11 @@ func TestRabbitMQMetricFilerts(t *testing.T) {
 defer ts.Close()

 metricErrors := map[string]error{
-"exchange": fmt.Errorf("getting \"/api/exchanges\" failed: 404 Not Found"),
-"federation": fmt.Errorf("getting \"/api/federation-links\" failed: 404 Not Found"),
-"node": fmt.Errorf("getting \"/api/nodes\" failed: 404 Not Found"),
-"overview": fmt.Errorf("getting \"/api/overview\" failed: 404 Not Found"),
-"queue": fmt.Errorf("getting \"/api/queues\" failed: 404 Not Found"),
+"exchange": errors.New("getting \"/api/exchanges\" failed: 404 Not Found"),
+"federation": errors.New("getting \"/api/federation-links\" failed: 404 Not Found"),
+"node": errors.New("getting \"/api/nodes\" failed: 404 Not Found"),
+"overview": errors.New("getting \"/api/overview\" failed: 404 Not Found"),
+"queue": errors.New("getting \"/api/queues\" failed: 404 Not Found"),
 }

 // Include test
@@ -4,6 +4,7 @@ package redfish
 import (
 _ "embed"
 "encoding/json"
+"errors"
 "fmt"
 "io"
 "net"

@@ -158,19 +159,19 @@ func (*Redfish) SampleConfig() string {

 func (r *Redfish) Init() error {
 if r.Address == "" {
-return fmt.Errorf("did not provide IP")
+return errors.New("did not provide IP")
 }

 if r.Username == "" && r.Password == "" {
-return fmt.Errorf("did not provide username and password")
+return errors.New("did not provide username and password")
 }

 if r.ComputerSystemID == "" {
-return fmt.Errorf("did not provide the computer system ID of the resource")
+return errors.New("did not provide the computer system ID of the resource")
 }

 if len(r.IncludeMetrics) == 0 {
-return fmt.Errorf("no metrics specified to collect")
+return errors.New("no metrics specified to collect")
 }
 for _, metric := range r.IncludeMetrics {
 switch metric {

@@ -4,6 +4,7 @@ package redis_sentinel
 import (
 "bufio"
 _ "embed"
+"errors"
 "fmt"
 "io"
 "net/url"

@@ -238,14 +239,14 @@ func (client *RedisSentinelClient) gatherMasterStats(acc telegraf.Accumulator) (
 for _, master := range masters {
 master, ok := master.([]interface{})
 if !ok {
-return masterNames, fmt.Errorf("unable to process master response")
+return masterNames, errors.New("unable to process master response")
 }

 m := toMap(master)

 masterName, ok := m["name"]
 if !ok {
-return masterNames, fmt.Errorf("unable to resolve master name")
+return masterNames, errors.New("unable to resolve master name")
 }
 masterNames = append(masterNames, masterName)

@@ -279,7 +280,7 @@ func (client *RedisSentinelClient) gatherReplicaStats(acc telegraf.Accumulator,
 for _, replica := range replicas {
 replica, ok := replica.([]interface{})
 if !ok {
-return fmt.Errorf("unable to process replica response")
+return errors.New("unable to process replica response")
 }

 rm := toMap(replica)

@@ -311,7 +312,7 @@ func (client *RedisSentinelClient) gatherSentinelStats(acc telegraf.Accumulator,
 for _, sentinel := range sentinels {
 sentinel, ok := sentinel.([]interface{})
 if !ok {
-return fmt.Errorf("unable to process sentinel response")
+return errors.New("unable to process sentinel response")
 }

 sm := toMap(sentinel)

@@ -1031,7 +1031,7 @@ func parseCommaSeparatedInt(fields, _ map[string]interface{}, str string) error
 // '16 829 004' --> 16829004
 numRegex, err := regexp.Compile(`[^0-9\-]+`)
 if err != nil {
-return fmt.Errorf("failed to compile numeric regex")
+return errors.New("failed to compile numeric regex")
 }
 value = numRegex.ReplaceAllString(value, "")

@@ -1,7 +1,7 @@
 package snmp

 import (
-"fmt"
+"errors"
 "path/filepath"
 "testing"
 "time"
@@ -612,7 +612,7 @@ func TestSnmpTranslateCache_hitGosmi(t *testing.T) {
 oidNum: "b",
 oidText: "c",
 conversion: "d",
-err: fmt.Errorf("e"),
+err: errors.New("e"),
 },
 }
 mibName, oidNum, oidText, conversion, err := getGosmiTr(t).SnmpTranslate("foo")

@@ -620,7 +620,7 @@ func TestSnmpTranslateCache_hitGosmi(t *testing.T) {
 require.Equal(t, "b", oidNum)
 require.Equal(t, "c", oidText)
 require.Equal(t, "d", conversion)
-require.Equal(t, fmt.Errorf("e"), err)
+require.Equal(t, errors.New("e"), err)
 gosmiSnmpTranslateCaches = nil
 }
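The require.Equal assertions above keep passing after the switch because testify compares values deeply: two independently constructed errors with the same message are deeply equal, whereas an identity-based check such as errors.Is would not match them. A quick sketch of the difference, independent of any Telegraf code:

package main

import (
	"errors"
	"fmt"
	"reflect"
)

func main() {
	a := errors.New("e")
	b := errors.New("e")

	// Deep equality compares the underlying message, so these match;
	// this is what require.Equal relies on in the tests above.
	fmt.Println(reflect.DeepEqual(a, b)) // true

	// errors.Is matches only the same value (or one reachable via Unwrap),
	// so independently built errors are not "the same" error.
	fmt.Println(errors.Is(a, b)) // false
}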
@@ -645,7 +645,7 @@ func TestSnmpTableCache_hitGosmi(t *testing.T) {
 oidNum: "b",
 oidText: "c",
 fields: []Field{{Name: "d"}},
-err: fmt.Errorf("e"),
+err: errors.New("e"),
 },
 }
 mibName, oidNum, oidText, fields, err := getGosmiTr(t).SnmpTable("foo")

@@ -653,7 +653,7 @@ func TestSnmpTableCache_hitGosmi(t *testing.T) {
 require.Equal(t, "b", oidNum)
 require.Equal(t, "c", oidText)
 require.Equal(t, []Field{{Name: "d"}}, fields)
-require.Equal(t, fmt.Errorf("e"), err)
+require.Equal(t, errors.New("e"), err)
 }

 func TestTableJoin_walkGosmi(t *testing.T) {

@@ -132,7 +132,7 @@ func (n *netsnmpTranslator) snmpTableCall(oid string) (
 scanner.Scan()
 cols := scanner.Text()
 if len(cols) == 0 {
-return "", "", "", nil, fmt.Errorf("could not find any columns in table")
+return "", "", "", nil, errors.New("could not find any columns in table")
 }
 for _, col := range strings.Split(cols, " ") {
 if len(col) == 0 {

@@ -89,7 +89,7 @@ func (s *Snmp) Init() error {
 case "netsnmp":
 s.translator = NewNetsnmpTranslator()
 default:
-return fmt.Errorf("invalid translator value")
+return errors.New("invalid translator value")
 }

 s.connectionCache = make([]snmpConnection, len(s.Agents))

@@ -146,7 +146,7 @@ func (t *Table) Init(tr Translator) error {
 //makes sure oid or name is set in config file
 //otherwise snmp will produce metrics with an empty name
 if t.Oid == "" && t.Name == "" {
-return fmt.Errorf("SNMP table in config file is not named. One or both of the oid and name settings must be set")
+return errors.New("SNMP table in config file is not named. One or both of the oid and name settings must be set")
 }

 if t.initialized {

@@ -165,7 +165,7 @@ func (t *Table) Init(tr Translator) error {
 }
 if t.Fields[i].SecondaryIndexTable {
 if secondaryIndexTablePresent {
-return fmt.Errorf("only one field can be SecondaryIndexTable")
+return errors.New("only one field can be SecondaryIndexTable")
 }
 secondaryIndexTablePresent = true
 }

@@ -270,11 +270,11 @@ func (f *Field) init(tr Translator) error {
 }

 if f.SecondaryIndexTable && f.SecondaryIndexUse {
-return fmt.Errorf("SecondaryIndexTable and UseSecondaryIndex are exclusive")
+return errors.New("SecondaryIndexTable and UseSecondaryIndex are exclusive")
 }

 if !f.SecondaryIndexTable && !f.SecondaryIndexUse && f.SecondaryOuterJoin {
-return fmt.Errorf("SecondaryOuterJoin set to true, but field is not being used in join")
+return errors.New("SecondaryOuterJoin set to true, but field is not being used in join")
 }

 f.initialized = true

@@ -425,13 +425,13 @@ func (t Table) Build(gs snmpConnection, walk bool, tr Translator) (*RTable, erro
 // index, and being added on the same row.
 if pkt, err := gs.Get([]string{oid}); err != nil {
 if errors.Is(err, gosnmp.ErrUnknownSecurityLevel) {
-return nil, fmt.Errorf("unknown security level (sec_level)")
+return nil, errors.New("unknown security level (sec_level)")
 } else if errors.Is(err, gosnmp.ErrUnknownUsername) {
-return nil, fmt.Errorf("unknown username (sec_name)")
+return nil, errors.New("unknown username (sec_name)")
 } else if errors.Is(err, gosnmp.ErrWrongDigest) {
-return nil, fmt.Errorf("wrong digest (auth_protocol, auth_password)")
+return nil, errors.New("wrong digest (auth_protocol, auth_password)")
 } else if errors.Is(err, gosnmp.ErrDecryption) {
-return nil, fmt.Errorf("decryption error (priv_protocol, priv_password)")
+return nil, errors.New("decryption error (priv_protocol, priv_password)")
 }
 return nil, fmt.Errorf("performing get on field %s: %w", f.Name, err)
 } else if pkt != nil && len(pkt.Variables) > 0 && pkt.Variables[0].Type != gosnmp.NoSuchObject && pkt.Variables[0].Type != gosnmp.NoSuchInstance {
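The hunk above also shows the complementary half of the rule: sentinel errors from the SNMP library are detected with errors.Is and mapped to constant, configuration-oriented messages via errors.New, while the generic fallback still wraps the original cause with %w. A reduced sketch under the assumption of a hypothetical sentinel and get function (neither is the library's real API):

package main

import (
	"errors"
	"fmt"
)

// errDecryption stands in for a sentinel error exported by an SNMP library.
var errDecryption = errors.New("decryption failure")

// get is a hypothetical operation that fails with the sentinel above.
func get() error {
	return fmt.Errorf("request failed: %w", errDecryption)
}

func query(field string) error {
	if err := get(); err != nil {
		if errors.Is(err, errDecryption) {
			// Known sentinel: constant, configuration-oriented message.
			return errors.New("decryption error (priv_protocol, priv_password)")
		}
		// Anything else: keep the original cause attached with %w.
		return fmt.Errorf("performing get on field %s: %w", field, err)
	}
	return nil
}

func main() {
	fmt.Println(query("ifIndex"))
}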
Some files were not shown because too many files have changed in this diff.