chore: eliminate unnecessary use of quoted strings in printf (#12722)

This commit is contained in:
Thomas Casteleyn 2023-02-23 14:49:36 +01:00 committed by GitHub
parent 245705cf24
commit 6a2f6f301f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
113 changed files with 313 additions and 320 deletions

View File

@ -710,7 +710,7 @@ func (a *Agent) connectOutput(ctx context.Context, output *models.RunningOutput)
err := output.Output.Connect() err := output.Output.Connect()
if err != nil { if err != nil {
log.Printf("E! [agent] Failed to connect to [%s], retrying in 15s, "+ log.Printf("E! [agent] Failed to connect to [%s], retrying in 15s, "+
"error was '%s'", output.LogName(), err) "error was %q", output.LogName(), err)
err := internal.SleepContext(ctx, 15*time.Second) err := internal.SleepContext(ctx, 15*time.Second)
if err != nil { if err != nil {

View File

@ -364,7 +364,7 @@ func printConfig(name string, p telegraf.PluginDescriber, op string, commented b
if di.RemovalIn != "" { if di.RemovalIn != "" {
removalNote = " and will be removed in " + di.RemovalIn removalNote = " and will be removed in " + di.RemovalIn
} }
outputBuffer.Write([]byte(fmt.Sprintf("\n%s ## DEPRECATED: The '%s' plugin is deprecated in version %s%s, %s.", outputBuffer.Write([]byte(fmt.Sprintf("\n%s ## DEPRECATED: The %q plugin is deprecated in version %s%s, %s.",
comment, name, di.Since, removalNote, di.Notice))) comment, name, di.Since, removalNote, di.Notice)))
} }

View File

@ -399,7 +399,7 @@ func getDefaultConfigPath() ([]string, error) {
if _, err := os.Stat(etcfolder); err == nil { if _, err := os.Stat(etcfolder); err == nil {
files, err := WalkDirectory(etcfolder) files, err := WalkDirectory(etcfolder)
if err != nil { if err != nil {
log.Printf("W! unable walk '%s': %s", etcfolder, err) log.Printf("W! unable walk %q: %s", etcfolder, err)
} }
for _, file := range files { for _, file := range files {
log.Printf("I! Using config file: %s", file) log.Printf("I! Using config file: %s", file)

View File

@ -37,9 +37,9 @@ var tests = []SnakeTest{
func TestSnakeCase(t *testing.T) { func TestSnakeCase(t *testing.T) {
for _, test := range tests { for _, test := range tests {
if SnakeCase(test.input) != test.output { t.Run(test.input, func(t *testing.T) {
t.Errorf(`SnakeCase("%s"), wanted "%s", got \%s"`, test.input, test.output, SnakeCase(test.input)) require.Equal(t, test.output, SnakeCase(test.input))
} })
} }
} }

View File

@ -131,7 +131,7 @@ func (w *FileWriter) rotateIfNeeded() error {
(w.maxSizeInBytes > 0 && w.bytesWritten >= w.maxSizeInBytes) { (w.maxSizeInBytes > 0 && w.bytesWritten >= w.maxSizeInBytes) {
if err := w.rotate(); err != nil { if err := w.rotate(); err != nil {
//Ignore rotation errors and keep the log open //Ignore rotation errors and keep the log open
fmt.Printf("unable to rotate the file '%s', %s", w.filename, err.Error()) fmt.Printf("unable to rotate the file %q, %s", w.filename, err.Error())
} }
return w.openCurrent() return w.openCurrent()
} }

View File

@ -526,5 +526,5 @@ func assertContainsTaggedField(t *testing.T, acc *testutil.Accumulator, metricNa
return return
} }
require.Fail(t, fmt.Sprintf("unknown measurement '%s' with tags: %v, fields: %v", metricName, tags, fields)) require.Fail(t, fmt.Sprintf("unknown measurement %q with tags: %v, fields: %v", metricName, tags, fields))
} }

View File

@ -134,7 +134,7 @@ func (c *Client) read(requests []ReadRequest) ([]ReadResponse, error) {
req, err := http.NewRequest("POST", requestURL, bytes.NewBuffer(requestBody)) req, err := http.NewRequest("POST", requestURL, bytes.NewBuffer(requestBody))
if err != nil { if err != nil {
//err is not contained in returned error - it may contain sensitive data (password) which should not be logged //err is not contained in returned error - it may contain sensitive data (password) which should not be logged
return nil, fmt.Errorf("unable to create new request for: '%s'", c.URL) return nil, fmt.Errorf("unable to create new request for: %q", c.URL)
} }
req.Header.Add("Content-type", "application/json") req.Header.Add("Content-type", "application/json")
@ -149,7 +149,7 @@ func (c *Client) read(requests []ReadRequest) ([]ReadResponse, error) {
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", return nil, fmt.Errorf("response from url %q has status code %d (%s), expected %d (%s)",
c.URL, resp.StatusCode, http.StatusText(resp.StatusCode), http.StatusOK, http.StatusText(http.StatusOK)) c.URL, resp.StatusCode, http.StatusText(resp.StatusCode), http.StatusOK, http.StatusText(http.StatusOK))
} }

View File

@ -50,13 +50,13 @@ func (o *OpcUAClientConfig) validateEndpoint() error {
switch o.SecurityPolicy { switch o.SecurityPolicy {
case "None", "Basic128Rsa15", "Basic256", "Basic256Sha256", "auto": case "None", "Basic128Rsa15", "Basic256", "Basic256Sha256", "auto":
default: default:
return fmt.Errorf("invalid security type '%s' in '%s'", o.SecurityPolicy, o.Endpoint) return fmt.Errorf("invalid security type %q in %q", o.SecurityPolicy, o.Endpoint)
} }
switch o.SecurityMode { switch o.SecurityMode {
case "None", "Sign", "SignAndEncrypt", "auto": case "None", "Sign", "SignAndEncrypt", "auto":
default: default:
return fmt.Errorf("invalid security type '%s' in '%s'", o.SecurityMode, o.Endpoint) return fmt.Errorf("invalid security type %q in %q", o.SecurityMode, o.Endpoint)
} }
return nil return nil

View File

@ -236,7 +236,7 @@ func tagsSliceToMap(tags [][]string) (map[string]string, error) {
func validateNodeToAdd(existing map[metricParts]struct{}, nmm *NodeMetricMapping) error { func validateNodeToAdd(existing map[metricParts]struct{}, nmm *NodeMetricMapping) error {
if nmm.Tag.FieldName == "" { if nmm.Tag.FieldName == "" {
return fmt.Errorf("empty name in '%s'", nmm.Tag.FieldName) return fmt.Errorf("empty name in %q", nmm.Tag.FieldName)
} }
if len(nmm.Tag.Namespace) == 0 { if len(nmm.Tag.Namespace) == 0 {
@ -249,19 +249,19 @@ func validateNodeToAdd(existing map[metricParts]struct{}, nmm *NodeMetricMapping
mp := newMP(nmm) mp := newMP(nmm)
if _, exists := existing[mp]; exists { if _, exists := existing[mp]; exists {
return fmt.Errorf("name '%s' is duplicated (metric name '%s', tags '%s')", return fmt.Errorf("name %q is duplicated (metric name %q, tags %q)",
mp.fieldName, mp.metricName, mp.tags) mp.fieldName, mp.metricName, mp.tags)
} }
switch nmm.Tag.IdentifierType { switch nmm.Tag.IdentifierType {
case "i": case "i":
if _, err := strconv.Atoi(nmm.Tag.Identifier); err != nil { if _, err := strconv.Atoi(nmm.Tag.Identifier); err != nil {
return fmt.Errorf("identifier type '%s' does not match the type of identifier '%s'", nmm.Tag.IdentifierType, nmm.Tag.Identifier) return fmt.Errorf("identifier type %q does not match the type of identifier %q", nmm.Tag.IdentifierType, nmm.Tag.Identifier)
} }
case "s", "g", "b": case "s", "g", "b":
// Valid identifier type - do nothing. // Valid identifier type - do nothing.
default: default:
return fmt.Errorf("invalid identifier type '%s' in '%s'", nmm.Tag.IdentifierType, nmm.Tag.FieldName) return fmt.Errorf("invalid identifier type %q in %q", nmm.Tag.IdentifierType, nmm.Tag.FieldName)
} }
existing[mp] = struct{}{} existing[mp] = struct{}{}
@ -382,7 +382,7 @@ func (o *OpcUAInputClient) MetricForNode(nodeIdx int) telegraf.Metric {
fields["Quality"] = strings.TrimSpace(fmt.Sprint(o.LastReceivedData[nodeIdx].Quality)) fields["Quality"] = strings.TrimSpace(fmt.Sprint(o.LastReceivedData[nodeIdx].Quality))
if !o.StatusCodeOK(o.LastReceivedData[nodeIdx].Quality) { if !o.StatusCodeOK(o.LastReceivedData[nodeIdx].Quality) {
mp := newMP(nmm) mp := newMP(nmm)
o.Log.Debugf("status not OK for node '%s'(metric name '%s', tags '%s')", o.Log.Debugf("status not OK for node %q(metric name %q, tags %q)",
mp.fieldName, mp.metricName, mp.tags) mp.fieldName, mp.metricName, mp.tags)
} }

View File

@ -1,7 +1,11 @@
package input package input
import ( import (
"errors"
"fmt" "fmt"
"testing"
"time"
"github.com/gopcua/opcua/ua" "github.com/gopcua/opcua/ua"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/config"
@ -9,8 +13,6 @@ import (
"github.com/influxdata/telegraf/plugins/common/opcua" "github.com/influxdata/telegraf/plugins/common/opcua"
"github.com/influxdata/telegraf/testutil" "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"testing"
"time"
) )
func TestTagsSliceToMap(t *testing.T) { func TestTagsSliceToMap(t *testing.T) {
@ -74,7 +76,7 @@ func TestValidateOPCTags(t *testing.T) {
}, },
}, },
}, },
fmt.Errorf("name 'fn' is duplicated (metric name 'mn', tags 't1=v1, t2=v2')"), errors.New(`name "fn" is duplicated (metric name "mn", tags "t1=v1, t2=v2")`),
}, },
{ {
"empty tag value not allowed", "empty tag value not allowed",
@ -352,7 +354,7 @@ func TestValidateNodeToAdd(t *testing.T) {
}, map[string]string{}) }, map[string]string{})
return nmm return nmm
}(), }(),
err: fmt.Errorf("empty name in ''"), err: errors.New(`empty name in ""`),
}, },
{ {
name: "empty namespace not allowed", name: "empty namespace not allowed",
@ -382,7 +384,7 @@ func TestValidateNodeToAdd(t *testing.T) {
}, map[string]string{}) }, map[string]string{})
return nmm return nmm
}(), }(),
err: fmt.Errorf("invalid identifier type '' in 'f'"), err: errors.New(`invalid identifier type "" in "f"`),
}, },
{ {
name: "invalid identifier type not allowed", name: "invalid identifier type not allowed",
@ -397,7 +399,7 @@ func TestValidateNodeToAdd(t *testing.T) {
}, map[string]string{}) }, map[string]string{})
return nmm return nmm
}(), }(),
err: fmt.Errorf("invalid identifier type 'j' in 'f'"), err: errors.New(`invalid identifier type "j" in "f"`),
}, },
{ {
name: "duplicate metric not allowed", name: "duplicate metric not allowed",
@ -414,7 +416,7 @@ func TestValidateNodeToAdd(t *testing.T) {
}, map[string]string{}) }, map[string]string{})
return nmm return nmm
}(), }(),
err: fmt.Errorf("name 'f' is duplicated (metric name 'testmetric', tags 't1=v1, t2=v2')"), err: errors.New(`name "f" is duplicated (metric name "testmetric", tags "t1=v1, t2=v2")`),
}, },
{ {
name: "identifier type mismatch", name: "identifier type mismatch",
@ -429,7 +431,7 @@ func TestValidateNodeToAdd(t *testing.T) {
}, map[string]string{}) }, map[string]string{})
return nmm return nmm
}(), }(),
err: fmt.Errorf("identifier type 'i' does not match the type of identifier 'hf'"), err: errors.New(`identifier type "i" does not match the type of identifier "hf"`),
}, },
} }

View File

@ -56,7 +56,7 @@ func (s *Shim) RunInput(pollInterval time.Duration) error {
go func() { go func() {
err := s.writeProcessedMetrics() err := s.writeProcessedMetrics()
if err != nil { if err != nil {
s.log.Warnf("%s", err) s.log.Warn(err.Error())
} }
wg.Done() wg.Done()
}() }()

View File

@ -47,7 +47,7 @@ func (s *Shim) RunProcessor() error {
go func() { go func() {
err := s.writeProcessedMetrics() err := s.writeProcessedMetrics()
if err != nil { if err != nil {
s.log.Warnf("%s", err) s.log.Warn(err.Error())
} }
wg.Done() wg.Done()
}() }()

View File

@ -77,7 +77,7 @@ type builtinMethod func(b *starlark.Builtin, args starlark.Tuple, kwargs []starl
func builtinAttr(recv starlark.Value, name string, methods map[string]builtinMethod) (starlark.Value, error) { func builtinAttr(recv starlark.Value, name string, methods map[string]builtinMethod) (starlark.Value, error) {
method := methods[name] method := methods[name]
if method == nil { if method == nil {
return starlark.None, fmt.Errorf("no such method '%s'", name) return starlark.None, fmt.Errorf("no such method %q", name)
} }
// Allocate a closure over 'method'. // Allocate a closure over 'method'.

View File

@ -104,7 +104,7 @@ func (m *Metric) SetField(name string, value starlark.Value) error {
return errors.New("cannot set fields") return errors.New("cannot set fields")
default: default:
return starlark.NoSuchAttrError( return starlark.NoSuchAttrError(
fmt.Sprintf("cannot assign to field '%s'", name)) fmt.Sprintf("cannot assign to field %q", name))
} }
} }

View File

@ -77,7 +77,7 @@ func (c *ClientConfig) TLSConfig() (*tls.Config, error) {
case "freely": case "freely":
renegotiationMethod = tls.RenegotiateFreelyAsClient renegotiationMethod = tls.RenegotiateFreelyAsClient
default: default:
return nil, fmt.Errorf("unrecognized renegotation method '%s', choose from: 'never', 'once', 'freely'", c.RenegotiationMethod) return nil, fmt.Errorf("unrecognized renegotation method %q, choose from: 'never', 'once', 'freely'", c.RenegotiationMethod)
} }
tlsConfig := &tls.Config{ tlsConfig := &tls.Config{

View File

@ -88,7 +88,7 @@ func (a *AzureStorageQueue) Gather(acc telegraf.Accumulator) error {
ctx := context.TODO() ctx := context.TODO()
for marker := (azqueue.Marker{}); marker.NotDone(); { for marker := (azqueue.Marker{}); marker.NotDone(); {
a.Log.Debugf("Listing queues of storage account '%s'", a.StorageAccountName) a.Log.Debugf("Listing queues of storage account %q", a.StorageAccountName)
queuesSegment, err := serviceURL.ListQueuesSegment(ctx, marker, queuesSegment, err := serviceURL.ListQueuesSegment(ctx, marker,
azqueue.ListQueuesSegmentOptions{ azqueue.ListQueuesSegmentOptions{
Detail: azqueue.ListQueuesSegmentDetails{Metadata: false}, Detail: azqueue.ListQueuesSegmentDetails{Metadata: false},
@ -99,7 +99,7 @@ func (a *AzureStorageQueue) Gather(acc telegraf.Accumulator) error {
marker = queuesSegment.NextMarker marker = queuesSegment.NextMarker
for _, queueItem := range queuesSegment.QueueItems { for _, queueItem := range queuesSegment.QueueItems {
a.Log.Debugf("Processing queue '%s' of storage account '%s'", queueItem.Name, a.StorageAccountName) a.Log.Debugf("Processing queue %q of storage account %q", queueItem.Name, a.StorageAccountName)
queueURL := serviceURL.NewQueueURL(queueItem.Name) queueURL := serviceURL.NewQueueURL(queueItem.Name)
properties, err := queueURL.GetProperties(ctx) properties, err := queueURL.GetProperties(ctx)
if err != nil { if err != nil {

View File

@ -137,7 +137,7 @@ func (bond *Bond) gatherBondPart(bondName string, rawFile string, acc telegraf.A
if err := scanner.Err(); err != nil { if err := scanner.Err(); err != nil {
return err return err
} }
return fmt.Errorf("Couldn't find status info for '%s' ", bondName) return fmt.Errorf("Couldn't find status info for %q", bondName)
} }
func (bond *Bond) readSysFiles(bondDir string) (sysFiles, error) { func (bond *Bond) readSysFiles(bondDir string) (sysFiles, error) {

View File

@ -130,7 +130,7 @@ func (j javaMetric) addTagsFields(out map[string]interface{}) {
} }
j.acc.AddFields(tokens["class"]+tokens["type"], fields, tags) j.acc.AddFields(tokens["class"]+tokens["type"], fields, tags)
} else { } else {
j.acc.AddError(fmt.Errorf("missing key 'value' in '%s' output response: %v", j.metric, out)) j.acc.AddError(fmt.Errorf("missing key 'value' in %q output response: %v", j.metric, out))
} }
} }
@ -154,7 +154,7 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
tokens["scope"] == "*") { tokens["scope"] == "*") {
valuesMap, ok := out["value"] valuesMap, ok := out["value"]
if !ok { if !ok {
c.acc.AddError(fmt.Errorf("missing key 'value' in '%s' output response: %v", c.metric, out)) c.acc.AddError(fmt.Errorf("missing key 'value' in %q output response: %v", c.metric, out))
return return
} }
for k, v := range valuesMap.(map[string]interface{}) { for k, v := range valuesMap.(map[string]interface{}) {
@ -163,7 +163,7 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
} else { } else {
values, ok := out["value"] values, ok := out["value"]
if !ok { if !ok {
c.acc.AddError(fmt.Errorf("missing key 'value' in '%s' output response: %v", c.metric, out)) c.acc.AddError(fmt.Errorf("missing key 'value' in %q output response: %v", c.metric, out))
return return
} }
addCassandraMetric(r.(map[string]interface{})["mbean"].(string), c, values.(map[string]interface{})) addCassandraMetric(r.(map[string]interface{})["mbean"].(string), c, values.(map[string]interface{}))
@ -185,7 +185,7 @@ func (c *Cassandra) getAttr(requestURL *url.URL) (map[string]interface{}, error)
// Process response // Process response
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
err = fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", err = fmt.Errorf("response from url %q has status code %d (%s), expected %d (%s)",
requestURL, requestURL,
resp.StatusCode, resp.StatusCode,
http.StatusText(resp.StatusCode), http.StatusText(resp.StatusCode),

View File

@ -177,7 +177,7 @@ func assertFoundSocket(t *testing.T, dir, sockType string, i int, sockets []*soc
require.NoError(t, err) require.NoError(t, err)
if s.socket == expected { if s.socket == expected {
found = true found = true
require.Equal(t, s.sockType, sockType, "Unexpected socket type for '%s'", s) require.Equal(t, s.sockType, sockType, "Unexpected socket type for %q", s)
require.Equal(t, s.sockID, strconv.Itoa(i)) require.Equal(t, s.sockID, strconv.Itoa(i))
} }
} }

View File

@ -97,7 +97,7 @@ func (c *Conntrack) Gather(acc telegraf.Accumulator) error {
fields[metricKey], err = strconv.ParseFloat(v, 64) fields[metricKey], err = strconv.ParseFloat(v, 64)
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("failed to parse metric, expected number but "+ acc.AddError(fmt.Errorf("failed to parse metric, expected number but "+
" found '%s': %v", v, err)) " found %q: %w", v, err))
} }
} }
} }

View File

@ -132,14 +132,14 @@ func assertContainsTaggedFloat(
return return
} }
} else { } else {
require.Fail(t, fmt.Sprintf("Measurement \"%s\" does not have type float64", measurement)) require.Fail(t, fmt.Sprintf("Measurement %q does not have type float64", measurement))
} }
} }
} }
} }
} }
msg := fmt.Sprintf( msg := fmt.Sprintf(
"Could not find measurement \"%s\" with requested tags within %f of %f, Actual: %f", "Could not find measurement %q with requested tags within %f of %f, Actual: %f",
measurement, delta, expectedValue, actualValue) measurement, delta, expectedValue, actualValue)
require.Fail(t, msg) require.Fail(t, msg)
} }

View File

@ -117,7 +117,7 @@ func (d *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
return err return err
} }
if line[0] != '+' { if line[0] != '+' {
return fmt.Errorf("%s", strings.TrimSpace(line)[1:]) return errors.New(strings.TrimSpace(line)[1:])
} }
} }
} }

View File

@ -67,12 +67,12 @@ func recurseResponse(acc telegraf.Accumulator, aggNameFunction map[string]string
for _, aggName := range aggNames { for _, aggName := range aggNames {
aggFunction, found := aggNameFunction[aggName] aggFunction, found := aggNameFunction[aggName]
if !found { if !found {
return m, fmt.Errorf("child aggregation function '%s' not found %v", aggName, aggNameFunction) return m, fmt.Errorf("child aggregation function %q not found %v", aggName, aggNameFunction)
} }
resp := getResponseAggregation(aggFunction, aggName, bucketResponse) resp := getResponseAggregation(aggFunction, aggName, bucketResponse)
if resp == nil { if resp == nil {
return m, fmt.Errorf("child aggregation '%s' not found", aggName) return m, fmt.Errorf("child aggregation %q not found", aggName)
} }
switch resp := resp.(type) { switch resp := resp.(type) {

View File

@ -210,7 +210,7 @@ func getFunctionAggregation(function string, aggfield string) (elastic5.Aggregat
case "max": case "max":
agg = elastic5.NewMaxAggregation().Field(aggfield) agg = elastic5.NewMaxAggregation().Field(aggfield)
default: default:
return nil, fmt.Errorf("aggregation function '%s' not supported", function) return nil, fmt.Errorf("aggregation function %q not supported", function)
} }
return agg, nil return agg, nil

View File

@ -84,7 +84,7 @@ func (e *ElasticsearchQuery) Init() error {
} }
err = e.initAggregation(ctx, agg, i) err = e.initAggregation(ctx, agg, i)
if err != nil { if err != nil {
e.Log.Errorf("%s", err) e.Log.Error(err.Error())
return nil return nil
} }
} }
@ -100,7 +100,7 @@ func (e *ElasticsearchQuery) initAggregation(ctx context.Context, agg esAggregat
for _, metricField := range agg.MetricFields { for _, metricField := range agg.MetricFields {
if _, ok := agg.mapMetricFields[metricField]; !ok { if _, ok := agg.mapMetricFields[metricField]; !ok {
return fmt.Errorf("metric field '%s' not found on index '%s'", metricField, agg.Index) return fmt.Errorf("metric field %q not found on index %q", metricField, agg.Index)
} }
} }

View File

@ -256,7 +256,7 @@ func (c *CommandEthtool) Interfaces(includeNamespaces bool) ([]NamespacedInterfa
handle, err := netns.GetFromPath(filepath.Join(namespaceDirectory, name)) handle, err := netns.GetFromPath(filepath.Join(namespaceDirectory, name))
if err != nil { if err != nil {
c.Log.Warnf(`Could not get handle for namespace "%s": %s`, name, err) c.Log.Warnf("Could not get handle for namespace %q: %s", name, err.Error())
continue continue
} }
handles[name] = handle handles[name] = handle
@ -282,7 +282,7 @@ func (c *CommandEthtool) Interfaces(includeNamespaces bool) ([]NamespacedInterfa
Log: c.Log, Log: c.Log,
} }
if err := c.namespaceGoroutines[namespace].Start(); err != nil { if err := c.namespaceGoroutines[namespace].Start(); err != nil {
c.Log.Errorf(`Failed to start goroutine for namespace "%s": %s`, namespace, err) c.Log.Errorf("Failed to start goroutine for namespace %q: %s", namespace, err.Error())
delete(c.namespaceGoroutines, namespace) delete(c.namespaceGoroutines, namespace)
continue continue
} }
@ -290,7 +290,7 @@ func (c *CommandEthtool) Interfaces(includeNamespaces bool) ([]NamespacedInterfa
interfaces, err := c.namespaceGoroutines[namespace].Interfaces() interfaces, err := c.namespaceGoroutines[namespace].Interfaces()
if err != nil { if err != nil {
c.Log.Warnf(`Could not get interfaces from namespace "%s": %s`, namespace, err) c.Log.Warnf("Could not get interfaces from namespace %q: %s", namespace, err.Error())
continue continue
} }
allInterfaces = append(allInterfaces, interfaces...) allInterfaces = append(allInterfaces, interfaces...)

View File

@ -36,7 +36,6 @@ func (n *NamespaceGoroutine) Interfaces() ([]NamespacedInterface, error) {
interfaces, err := n.Do(func(n *NamespaceGoroutine) (interface{}, error) { interfaces, err := n.Do(func(n *NamespaceGoroutine) (interface{}, error) {
interfaces, err := net.Interfaces() interfaces, err := net.Interfaces()
if err != nil { if err != nil {
n.Log.Errorf(`Could not get interfaces in namespace "%s": %s`, n.name, err)
return nil, err return nil, err
} }
namespacedInterfaces := make([]NamespacedInterface, 0, len(interfaces)) namespacedInterfaces := make([]NamespacedInterface, 0, len(interfaces))
@ -94,7 +93,7 @@ func (n *NamespaceGoroutine) Start() error {
} }
if !initialNamespace.Equal(n.handle) { if !initialNamespace.Equal(n.handle) {
if err := netns.Set(n.handle); err != nil { if err := netns.Set(n.handle); err != nil {
n.Log.Errorf(`Could not switch to namespace "%s": %s`, n.name, err) n.Log.Errorf("Could not switch to namespace %q: %s", n.name, err.Error())
started <- err started <- err
return return
} }
@ -103,7 +102,7 @@ func (n *NamespaceGoroutine) Start() error {
// Every namespace needs its own connection to ethtool // Every namespace needs its own connection to ethtool
e, err := ethtoolLib.NewEthtool() e, err := ethtoolLib.NewEthtool()
if err != nil { if err != nil {
n.Log.Errorf(`Could not create ethtool client for namespace "%s": %s`, n.name, err) n.Log.Errorf("Could not create ethtool client for namespace %q: %s", n.name, err.Error())
started <- err started <- err
return return
} }

View File

@ -85,7 +85,7 @@ func (f *Fibaro) getJSON(path string, dataStruct interface{}) error {
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
err = fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", err = fmt.Errorf("response from url %q has status code %d (%s), expected %d (%s)",
requestURL, requestURL,
resp.StatusCode, resp.StatusCode,
http.StatusText(resp.StatusCode), http.StatusText(resp.StatusCode),

View File

@ -79,7 +79,7 @@ func (*Fluentd) SampleConfig() string {
func (h *Fluentd) Gather(acc telegraf.Accumulator) error { func (h *Fluentd) Gather(acc telegraf.Accumulator) error {
_, err := url.Parse(h.Endpoint) _, err := url.Parse(h.Endpoint)
if err != nil { if err != nil {
return fmt.Errorf("invalid URL \"%s\"", h.Endpoint) return fmt.Errorf("invalid URL %q", h.Endpoint)
} }
if h.client == nil { if h.client == nil {
@ -98,7 +98,7 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error {
resp, err := h.client.Get(h.Endpoint) resp, err := h.client.Get(h.Endpoint)
if err != nil { if err != nil {
return fmt.Errorf("unable to perform HTTP client GET on \"%s\": %v", h.Endpoint, err) return fmt.Errorf("unable to perform HTTP client GET on %q: %w", h.Endpoint, err)
} }
defer resp.Body.Close() defer resp.Body.Close()
@ -106,7 +106,7 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error {
body, err := io.ReadAll(resp.Body) body, err := io.ReadAll(resp.Body)
if err != nil { if err != nil {
return fmt.Errorf("unable to read the HTTP body \"%s\": %v", string(body), err) return fmt.Errorf("unable to read the HTTP body %q: %w", string(body), err)
} }
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {

View File

@ -381,7 +381,7 @@ func fetchJSON(t *testing.T, boundary string, rc io.ReadCloser) (string, error)
splits := strings.Split(string(bodyBytes), boundary) splits := strings.Split(string(bodyBytes), boundary)
offsetPart := splits[2] offsetPart := splits[2]
offsets := strings.Split(offsetPart, "\n") offsets := strings.Split(offsetPart, "\n")
fmt.Printf("%s", offsets[3]) fmt.Print(offsets[3])
return offsets[3], nil return offsets[3], nil
} }

View File

@ -138,7 +138,7 @@ func (h *GrayLog) gatherServer(
} }
requestURL, err := url.Parse(serverURL) requestURL, err := url.Parse(serverURL)
if err != nil { if err != nil {
return fmt.Errorf("unable to parse address '%s': %s", serverURL, err) return fmt.Errorf("unable to parse address %q: %w", serverURL, err)
} }
host, port, _ := net.SplitHostPort(requestURL.Host) host, port, _ := net.SplitHostPort(requestURL.Host)
@ -207,7 +207,7 @@ func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) {
// Prepare URL // Prepare URL
requestURL, err := url.Parse(serverURL) requestURL, err := url.Parse(serverURL)
if err != nil { if err != nil {
return "", -1, fmt.Errorf("invalid server URL \"%s\"", serverURL) return "", -1, fmt.Errorf("invalid server URL %q", serverURL)
} }
// Add X-Requested-By header // Add X-Requested-By header
headers["X-Requested-By"] = "Telegraf" headers["X-Requested-By"] = "Telegraf"
@ -245,7 +245,7 @@ func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) {
// Process response // Process response
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
err = fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", err = fmt.Errorf("response from url %q has status code %d (%s), expected %d (%s)",
requestURL.String(), requestURL.String(),
resp.StatusCode, resp.StatusCode,
http.StatusText(resp.StatusCode), http.StatusText(resp.StatusCode),

View File

@ -137,12 +137,12 @@ func (h *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
u, err := url.Parse(addr) u, err := url.Parse(addr)
if err != nil { if err != nil {
return fmt.Errorf("unable parse server address '%s': %s", addr, err) return fmt.Errorf("unable parse server address %q: %w", addr, err)
} }
req, err := http.NewRequest("GET", addr, nil) req, err := http.NewRequest("GET", addr, nil)
if err != nil { if err != nil {
return fmt.Errorf("unable to create new request '%s': %s", addr, err) return fmt.Errorf("unable to create new request %q: %w", addr, err)
} }
if u.User != nil { if u.User != nil {
p, _ := u.User.Password() p, _ := u.User.Password()
@ -157,16 +157,16 @@ func (h *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
res, err := h.client.Do(req) res, err := h.client.Do(req)
if err != nil { if err != nil {
return fmt.Errorf("unable to connect to haproxy server '%s': %s", addr, err) return fmt.Errorf("unable to connect to haproxy server %q: %w", addr, err)
} }
defer res.Body.Close() defer res.Body.Close()
if res.StatusCode != 200 { if res.StatusCode != 200 {
return fmt.Errorf("unable to get valid stat result from '%s', http response code : %d", addr, res.StatusCode) return fmt.Errorf("unable to get valid stat result from %q, http response code : %d", addr, res.StatusCode)
} }
if err := h.importCsvResult(res.Body, acc, u.Host); err != nil { if err := h.importCsvResult(res.Body, acc, u.Host); err != nil {
return fmt.Errorf("unable to parse stat result from '%s': %s", addr, err) return fmt.Errorf("unable to parse stat result from %q: %w", addr, err)
} }
return nil return nil
@ -246,7 +246,7 @@ func (h *haproxy) importCsvResult(r io.Reader, acc telegraf.Accumulator, host st
case "type": case "type":
vi, err := strconv.ParseInt(v, 10, 64) vi, err := strconv.ParseInt(v, 10, 64)
if err != nil { if err != nil {
return fmt.Errorf("unable to parse type value '%s'", v) return fmt.Errorf("unable to parse type value %q", v)
} }
if vi >= int64(len(typeNames)) { if vi >= int64(len(typeNames)) {
return fmt.Errorf("received unknown type value: %d", vi) return fmt.Errorf("received unknown type value: %d", vi)

View File

@ -176,7 +176,7 @@ func (h *HTTPJSON) sendRequest(serverURL string) (string, float64, error) {
// Prepare URL // Prepare URL
requestURL, err := url.Parse(serverURL) requestURL, err := url.Parse(serverURL)
if err != nil { if err != nil {
return "", -1, fmt.Errorf("Invalid server URL \"%s\"", serverURL) return "", -1, fmt.Errorf("Invalid server URL %q", serverURL)
} }
data := url.Values{} data := url.Values{}
@ -228,7 +228,7 @@ func (h *HTTPJSON) sendRequest(serverURL string) (string, float64, error) {
// Process response // Process response
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)", err = fmt.Errorf("Response from url %q has status code %d (%s), expected %d (%s)",
requestURL.String(), requestURL.String(),
resp.StatusCode, resp.StatusCode,
http.StatusText(resp.StatusCode), http.StatusText(resp.StatusCode),

View File

@ -195,7 +195,7 @@ func (h *Hugepages) gatherFromHugepagePath(
metricValue, err := strconv.Atoi(string(bytes.TrimSuffix(metricBytes, newlineByte))) metricValue, err := strconv.Atoi(string(bytes.TrimSuffix(metricBytes, newlineByte)))
if err != nil { if err != nil {
return fmt.Errorf("failed to convert content of '%s': %v", metricFullPath, err) return fmt.Errorf("failed to convert content of %q: %w", metricFullPath, err)
} }
metrics[metricName] = metricValue metrics[metricName] = metricValue
@ -238,7 +238,7 @@ func (h *Hugepages) gatherStatsFromMeminfo(acc telegraf.Accumulator) error {
fieldValue, err := strconv.Atoi(string(fields[1])) fieldValue, err := strconv.Atoi(string(fields[1]))
if err != nil { if err != nil {
return fmt.Errorf("failed to convert content of '%s': %v", fieldName, err) return fmt.Errorf("failed to convert content of %q: %w", fieldName, err)
} }
metrics[metricName] = fieldValue metrics[metricName] = fieldValue
@ -270,7 +270,7 @@ func (h *Hugepages) parseHugepagesConfig() error {
case meminfoHugepages: case meminfoHugepages:
h.gatherMeminfo = true h.gatherMeminfo = true
default: default:
return fmt.Errorf("provided hugepages type `%s` is not valid", hugepagesType) return fmt.Errorf("provided hugepages type %q is not valid", hugepagesType)
} }
} }

View File

@ -156,12 +156,12 @@ func (d *IntelDLB) readRasMetrics(devicePath, metricPath string) (map[string]int
for _, metric := range metrics { for _, metric := range metrics {
metricPart := strings.Split(metric, " ") metricPart := strings.Split(metric, " ")
if len(metricPart) < 2 { if len(metricPart) < 2 {
return nil, fmt.Errorf("error occurred: no value to parse - %+q", metricPart) return nil, fmt.Errorf("no value to parse: %+q", metricPart)
} }
metricVal, err := strconv.ParseUint(metricPart[1], 10, 10) metricVal, err := strconv.ParseUint(metricPart[1], 10, 10)
if err != nil { if err != nil {
return nil, fmt.Errorf("error occurred: failed to parse value '%s': '%s'", metricPart[1], err) return nil, fmt.Errorf("failed to parse value %q: %w", metricPart[1], err)
} }
rasMetric[metricPart[0]] = metricVal rasMetric[metricPart[0]] = metricVal
} }

View File

@ -74,13 +74,13 @@ func (ea *iaEntitiesActivator) activateEntities(coreEntities []*CoreEventEntity,
for _, coreEventsEntity := range coreEntities { for _, coreEventsEntity := range coreEntities {
err := ea.activateCoreEvents(coreEventsEntity) err := ea.activateCoreEvents(coreEventsEntity)
if err != nil { if err != nil {
return fmt.Errorf("failed to activate core events `%s`: %v", coreEventsEntity.EventsTag, err) return fmt.Errorf("failed to activate core events %q: %w", coreEventsEntity.EventsTag, err)
} }
} }
for _, uncoreEventsEntity := range uncoreEntities { for _, uncoreEventsEntity := range uncoreEntities {
err := ea.activateUncoreEvents(uncoreEventsEntity) err := ea.activateUncoreEvents(uncoreEventsEntity)
if err != nil { if err != nil {
return fmt.Errorf("failed to activate uncore events `%s`: %v", uncoreEventsEntity.EventsTag, err) return fmt.Errorf("failed to activate uncore events %q: %w", uncoreEventsEntity.EventsTag, err)
} }
} }
return nil return nil
@ -105,11 +105,11 @@ func (ea *iaEntitiesActivator) activateCoreEvents(entity *CoreEventEntity) error
} }
placements, err := ea.placementMaker.makeCorePlacements(entity.parsedCores, event.custom.Event) placements, err := ea.placementMaker.makeCorePlacements(entity.parsedCores, event.custom.Event)
if err != nil { if err != nil {
return fmt.Errorf("failed to create core placements for event `%s`: %v", event.name, err) return fmt.Errorf("failed to create core placements for event %q: %w", event.name, err)
} }
activeEvents, err := ea.activateEventForPlacements(event, placements) activeEvents, err := ea.activateEventForPlacements(event, placements)
if err != nil { if err != nil {
return fmt.Errorf("failed to activate core event `%s`: %v", event.name, err) return fmt.Errorf("failed to activate core event %q: %w", event.name, err)
} }
entity.activeEvents = append(entity.activeEvents, activeEvents...) entity.activeEvents = append(entity.activeEvents, activeEvents...)
} }
@ -130,18 +130,18 @@ func (ea *iaEntitiesActivator) activateUncoreEvents(entity *UncoreEventEntity) e
} }
perfEvent := event.custom.Event perfEvent := event.custom.Event
if perfEvent == nil { if perfEvent == nil {
return fmt.Errorf("perf event of `%s` event is nil", event.name) return fmt.Errorf("perf event of %q event is nil", event.name)
} }
options := event.custom.Options options := event.custom.Options
for _, socket := range entity.parsedSockets { for _, socket := range entity.parsedSockets {
placements, err := ea.placementMaker.makeUncorePlacements(socket, perfEvent) placements, err := ea.placementMaker.makeUncorePlacements(socket, perfEvent)
if err != nil { if err != nil {
return fmt.Errorf("failed to create uncore placements for event `%s`: %v", event.name, err) return fmt.Errorf("failed to create uncore placements for event %q: %w", event.name, err)
} }
activeMultiEvent, err := ea.perfActivator.activateMulti(perfEvent, placements, options) activeMultiEvent, err := ea.perfActivator.activateMulti(perfEvent, placements, options)
if err != nil { if err != nil {
return fmt.Errorf("failed to activate multi event `%s`: %v", event.name, err) return fmt.Errorf("failed to activate multi event %q: %w", event.name, err)
} }
events := activeMultiEvent.Events() events := activeMultiEvent.Events()
entity.activeMultiEvents = append(entity.activeMultiEvents, multiEvent{events, perfEvent, socket}) entity.activeMultiEvents = append(entity.activeMultiEvents, multiEvent{events, perfEvent, socket})
@ -197,7 +197,7 @@ func (ea *iaEntitiesActivator) activateEventForPlacements(event *eventWithQuals,
activeEvent, err := ea.perfActivator.activateEvent(perfEvent, placement, options) activeEvent, err := ea.perfActivator.activateEvent(perfEvent, placement, options)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to activate event `%s`: %v", event.name, err) return nil, fmt.Errorf("failed to activate event %q: %w", event.name, err)
} }
activeEvents = append(activeEvents, activeEvent) activeEvents = append(activeEvents, activeEvent)
} }

View File

@ -37,7 +37,7 @@ func TestActivateEntities(t *testing.T) {
mEntities := []*CoreEventEntity{{EventsTag: tag}} mEntities := []*CoreEventEntity{{EventsTag: tag}}
err := mEntitiesActivator.activateEntities(mEntities, nil) err := mEntitiesActivator.activateEntities(mEntities, nil)
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), fmt.Sprintf("failed to activate core events `%s`", tag)) require.Contains(t, err.Error(), fmt.Sprintf("failed to activate core events %q", tag))
}) })
// more uncore test cases in TestActivateUncoreEvents // more uncore test cases in TestActivateUncoreEvents
@ -46,7 +46,7 @@ func TestActivateEntities(t *testing.T) {
mEntities := []*UncoreEventEntity{{EventsTag: tag}} mEntities := []*UncoreEventEntity{{EventsTag: tag}}
err := mEntitiesActivator.activateEntities(nil, mEntities) err := mEntitiesActivator.activateEntities(nil, mEntities)
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), fmt.Sprintf("failed to activate uncore events `%s`", tag)) require.Contains(t, err.Error(), fmt.Sprintf("failed to activate uncore events %q", tag))
}) })
t.Run("nothing to do", func(t *testing.T) { t.Run("nothing to do", func(t *testing.T) {
@ -81,7 +81,7 @@ func TestActivateUncoreEvents(t *testing.T) {
mEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{{name: name, custom: ia.CustomizableEvent{Event: nil}}}} mEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{{name: name, custom: ia.CustomizableEvent{Event: nil}}}}
err := mEntitiesActivator.activateUncoreEvents(mEntity) err := mEntitiesActivator.activateUncoreEvents(mEntity)
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), fmt.Sprintf("perf event of `%s` event is nil", name)) require.Contains(t, err.Error(), fmt.Sprintf("perf event of %q event is nil", name))
}) })
t.Run("placement maker and perf activator is nil", func(t *testing.T) { t.Run("placement maker and perf activator is nil", func(t *testing.T) {
@ -101,7 +101,7 @@ func TestActivateUncoreEvents(t *testing.T) {
err := mEntitiesActivator.activateUncoreEvents(mEntity) err := mEntitiesActivator.activateUncoreEvents(mEntity)
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), fmt.Sprintf("ailed to create uncore placements for event `%s`", eventName)) require.Contains(t, err.Error(), fmt.Sprintf("ailed to create uncore placements for event %q", eventName))
mMaker.AssertExpectations(t) mMaker.AssertExpectations(t)
}) })
@ -117,7 +117,7 @@ func TestActivateUncoreEvents(t *testing.T) {
err := mEntitiesActivator.activateUncoreEvents(mEntity) err := mEntitiesActivator.activateUncoreEvents(mEntity)
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), fmt.Sprintf("failed to activate multi event `%s`", eventName)) require.Contains(t, err.Error(), fmt.Sprintf("failed to activate multi event %q", eventName))
mMaker.AssertExpectations(t) mMaker.AssertExpectations(t)
mActivator.AssertExpectations(t) mActivator.AssertExpectations(t)
}) })
@ -188,7 +188,7 @@ func TestActivateCoreEvents(t *testing.T) {
err := mEntitiesActivator.activateCoreEvents(mEntity) err := mEntitiesActivator.activateCoreEvents(mEntity)
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), fmt.Sprintf("failed to create core placements for event `%s`", parsedEvents[0].name)) require.Contains(t, err.Error(), fmt.Sprintf("failed to create core placements for event %q", parsedEvents[0].name))
mMaker.AssertExpectations(t) mMaker.AssertExpectations(t)
}) })
@ -206,7 +206,7 @@ func TestActivateCoreEvents(t *testing.T) {
err := mEntitiesActivator.activateCoreEvents(mEntity) err := mEntitiesActivator.activateCoreEvents(mEntity)
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), fmt.Sprintf("failed to activate core event `%s`", parsedEvents[0].name)) require.Contains(t, err.Error(), fmt.Sprintf("failed to activate core event %q", parsedEvents[0].name))
mMaker.AssertExpectations(t) mMaker.AssertExpectations(t)
mActivator.AssertExpectations(t) mActivator.AssertExpectations(t)
}) })
@ -407,7 +407,7 @@ func TestActivateEventForPlacement(t *testing.T) {
mPerfActivator.On("activateEvent", mPerfEvent, placement1, mOptions).Once().Return(nil, errors.New("err")) mPerfActivator.On("activateEvent", mPerfEvent, placement1, mOptions).Once().Return(nil, errors.New("err"))
activeEvents, err := mActivator.activateEventForPlacements(mEvent, mPlacements) activeEvents, err := mActivator.activateEventForPlacements(mEvent, mPlacements)
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), fmt.Sprintf("failed to activate event `%s`", mEvent.name)) require.Contains(t, err.Error(), fmt.Sprintf("failed to activate event %q", mEvent.name))
require.Nil(t, activeEvents) require.Nil(t, activeEvents)
mPerfActivator.AssertExpectations(t) mPerfActivator.AssertExpectations(t)
}) })

View File

@ -83,7 +83,7 @@ func (cp *configParser) parseEvents(events []string) []*eventWithQuals {
events, duplications := removeDuplicateStrings(events) events, duplications := removeDuplicateStrings(events)
for _, duplication := range duplications { for _, duplication := range duplications {
if cp.log != nil { if cp.log != nil {
cp.log.Warnf("duplicated event `%s` will be removed", duplication) cp.log.Warnf("duplicated event %q will be removed", duplication)
} }
} }
return parseEventsWithQualifiers(events) return parseEventsWithQualifiers(events)
@ -198,7 +198,7 @@ func parseIDs(allIDsStrings []string) ([]int, error) {
// Single value // Single value
num, err := strconv.Atoi(id) num, err := strconv.Atoi(id)
if err != nil { if err != nil {
return nil, fmt.Errorf("wrong format for id number `%s`: %v", id, err) return nil, fmt.Errorf("wrong format for id number %q: %w", id, err)
} }
if len(result)+1 > maxIDsSize { if len(result)+1 > maxIDsSize {
return nil, fmt.Errorf("requested number of IDs exceeds max size `%d`", maxIDsSize) return nil, fmt.Errorf("requested number of IDs exceeds max size `%d`", maxIDsSize)

View File

@ -219,14 +219,14 @@ func (i *IntelPMU) Gather(acc telegraf.Accumulator) error {
for id, m := range coreMetrics { for id, m := range coreMetrics {
scaled := ia.EventScaledValue(m.values) scaled := ia.EventScaledValue(m.values)
if !scaled.IsUint64() { if !scaled.IsUint64() {
return fmt.Errorf("cannot process `%s` scaled value `%s`: exceeds uint64", m.name, scaled.String()) return fmt.Errorf("cannot process %q scaled value %q: exceeds uint64", m.name, scaled.String())
} }
coreMetrics[id].scaled = scaled.Uint64() coreMetrics[id].scaled = scaled.Uint64()
} }
for id, m := range uncoreMetrics { for id, m := range uncoreMetrics {
scaled := ia.EventScaledValue(m.values) scaled := ia.EventScaledValue(m.values)
if !scaled.IsUint64() { if !scaled.IsUint64() {
return fmt.Errorf("cannot process `%s` scaled value `%s`: exceeds uint64", m.name, scaled.String()) return fmt.Errorf("cannot process %q scaled value %q: exceeds uint64", m.name, scaled.String())
} }
uncoreMetrics[id].scaled = scaled.Uint64() uncoreMetrics[id].scaled = scaled.Uint64()
} }
@ -248,7 +248,7 @@ func (i *IntelPMU) Stop() {
} }
err := event.Deactivate() err := event.Deactivate()
if err != nil { if err != nil {
				i.Log.Warnf("failed to deactivate core event `%s`: %v", event, err) i.Log.Warnf("failed to deactivate core event %q: %v", event, err)
} }
} }
} }
@ -263,7 +263,7 @@ func (i *IntelPMU) Stop() {
} }
err := event.Deactivate() err := event.Deactivate()
if err != nil { if err != nil {
				i.Log.Warnf("failed to deactivate uncore event `%s`: %v", event, err) i.Log.Warnf("failed to deactivate uncore event %q: %v", event, err)
} }
} }
} }
@ -325,10 +325,10 @@ func multiplyAndAdd(factorA uint64, factorB uint64, sum uint64) (uint64, error)
bigB := new(big.Int).SetUint64(factorB) bigB := new(big.Int).SetUint64(factorB)
activeEvents := new(big.Int).Mul(bigA, bigB) activeEvents := new(big.Int).Mul(bigA, bigB)
if !activeEvents.IsUint64() { if !activeEvents.IsUint64() {
return 0, fmt.Errorf("value `%s` cannot be represented as uint64", activeEvents.String()) return 0, fmt.Errorf("value %q cannot be represented as uint64", activeEvents.String())
} }
if sum > math.MaxUint64-activeEvents.Uint64() { if sum > math.MaxUint64-activeEvents.Uint64() {
return 0, fmt.Errorf("value `%s` exceeds uint64", new(big.Int).Add(activeEvents, new(big.Int).SetUint64(sum))) return 0, fmt.Errorf("value %q exceeds uint64", new(big.Int).Add(activeEvents, new(big.Int).SetUint64(sum)))
} }
sum += activeEvents.Uint64() sum += activeEvents.Uint64()
return sum, nil return sum, nil
@ -340,11 +340,11 @@ func readMaxFD(reader fileInfoProvider) (uint64, error) {
} }
buf, err := reader.readFile(fileMaxPath) buf, err := reader.readFile(fileMaxPath)
if err != nil { if err != nil {
return 0, fmt.Errorf("cannot open `%s` file: %v", fileMaxPath, err) return 0, fmt.Errorf("cannot open file %q: %w", fileMaxPath, err)
} }
max, err := strconv.ParseUint(strings.Trim(string(buf), "\n "), 10, 64) max, err := strconv.ParseUint(strings.Trim(string(buf), "\n "), 10, 64)
if err != nil { if err != nil {
return 0, fmt.Errorf("cannot parse file content of `%s`: %v", fileMaxPath, err) return 0, fmt.Errorf("cannot parse file content of %q: %w", fileMaxPath, err)
} }
return max, nil return max, nil
} }
@ -362,16 +362,16 @@ func checkFiles(paths []string, fileInfo fileInfoProvider) error {
lInfo, err := fileInfo.lstat(path) lInfo, err := fileInfo.lstat(path)
if err != nil { if err != nil {
if os.IsNotExist(err) { if os.IsNotExist(err) {
return fmt.Errorf("file `%s` doesn't exist", path) return fmt.Errorf("file %q doesn't exist", path)
} }
return fmt.Errorf("cannot obtain file info of `%s`: %v", path, err) return fmt.Errorf("cannot obtain file info of %q: %w", path, err)
} }
mode := lInfo.Mode() mode := lInfo.Mode()
if mode&os.ModeSymlink != 0 { if mode&os.ModeSymlink != 0 {
return fmt.Errorf("file %s is a symlink", path) return fmt.Errorf("file %q is a symlink", path)
} }
if !mode.IsRegular() { if !mode.IsRegular() {
			return fmt.Errorf("file `%s` doesn't point to a regular file", path) return fmt.Errorf("file %q doesn't point to a regular file", path)
} }
} }
return nil return nil

View File

@ -251,7 +251,7 @@ func TestGather(t *testing.T) {
tag: "BIG_FISH", tag: "BIG_FISH",
}, },
}, },
errMSg: "cannot process `I_AM_TOO_BIG` scaled value `36893488147419103230`: exceeds uint64", errMSg: `cannot process "I_AM_TOO_BIG" scaled value "36893488147419103230": exceeds uint64`,
}, },
{ {
name: "uncore scaled value greater then max uint64", name: "uncore scaled value greater then max uint64",
@ -262,7 +262,7 @@ func TestGather(t *testing.T) {
tag: "BIG_FISH", tag: "BIG_FISH",
}, },
}, },
errMSg: "cannot process `I_AM_TOO_BIG_UNCORE` scaled value `36893488147419103230`: exceeds uint64", errMSg: `cannot process "I_AM_TOO_BIG_UNCORE" scaled value "36893488147419103230": exceeds uint64`,
}, },
} }
@ -430,8 +430,8 @@ func TestReadMaxFD(t *testing.T) {
require.Zero(t, result) require.Zero(t, result)
}) })
openErrorMsg := fmt.Sprintf("cannot open `%s` file", fileMaxPath) openErrorMsg := fmt.Sprintf("cannot open file %q", fileMaxPath)
parseErrorMsg := fmt.Sprintf("cannot parse file content of `%s`", fileMaxPath) parseErrorMsg := fmt.Sprintf("cannot parse file content of %q", fileMaxPath)
tests := []struct { tests := []struct {
name string name string
@ -489,7 +489,7 @@ func TestAddFiles(t *testing.T) {
err := checkFiles(paths, mFileInfo) err := checkFiles(paths, mFileInfo)
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), fmt.Sprintf("cannot obtain file info of `%s`", file)) require.Contains(t, err.Error(), fmt.Sprintf("cannot obtain file info of %q", file))
mFileInfo.AssertExpectations(t) mFileInfo.AssertExpectations(t)
}) })
@ -500,7 +500,7 @@ func TestAddFiles(t *testing.T) {
err := checkFiles(paths, mFileInfo) err := checkFiles(paths, mFileInfo)
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), fmt.Sprintf("file `%s` doesn't exist", file)) require.Contains(t, err.Error(), fmt.Sprintf("file %q doesn't exist", file))
mFileInfo.AssertExpectations(t) mFileInfo.AssertExpectations(t)
}) })
@ -512,7 +512,7 @@ func TestAddFiles(t *testing.T) {
err := checkFiles(paths, mFileInfo) err := checkFiles(paths, mFileInfo)
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), fmt.Sprintf("file %s is a symlink", file)) require.Contains(t, err.Error(), fmt.Sprintf("file %q is a symlink", file))
mFileInfo.AssertExpectations(t) mFileInfo.AssertExpectations(t)
}) })
@ -524,7 +524,7 @@ func TestAddFiles(t *testing.T) {
err := checkFiles(paths, mFileInfo) err := checkFiles(paths, mFileInfo)
require.Error(t, err) require.Error(t, err)
		require.Contains(t, err.Error(), fmt.Sprintf("file `%s` doesn't point to a regular file", file)) require.Contains(t, err.Error(), fmt.Sprintf("file %q doesn't point to a regular file", file))
mFileInfo.AssertExpectations(t) mFileInfo.AssertExpectations(t)
}) })

View File

@ -107,7 +107,7 @@ func (ie *iaEntitiesValuesReader) readCoreEvents(entity *CoreEventEntity) ([]cor
errGroup.Go(func() error { errGroup.Go(func() error {
values, err := ie.eventReader.readValue(actualEvent) values, err := ie.eventReader.readValue(actualEvent)
if err != nil { if err != nil {
return fmt.Errorf("failed to read core event `%s` values: %v", actualEvent, err) return fmt.Errorf("failed to read core event %q values: %w", actualEvent, err)
} }
cpu, _ := actualEvent.PMUPlacement() cpu, _ := actualEvent.PMUPlacement()
newMetric := coreMetric{ newMetric := coreMetric{
@ -176,7 +176,7 @@ func (ie *iaEntitiesValuesReader) readMultiEventSeparately(multiEvent multiEvent
group.Go(func() error { group.Go(func() error {
values, err := ie.eventReader.readValue(actualEvent) values, err := ie.eventReader.readValue(actualEvent)
if err != nil { if err != nil {
return fmt.Errorf("failed to read uncore event `%s` values: %v", actualEvent, err) return fmt.Errorf("failed to read uncore event %q values: %w", actualEvent, err)
} }
newMetric := uncoreMetric{ newMetric := uncoreMetric{
values: values, values: values,
@ -217,7 +217,7 @@ func (ie *iaEntitiesValuesReader) readMultiEventAgg(multiEvent multiEvent) (unco
group.Go(func() error { group.Go(func() error {
value, err := ie.eventReader.readValue(actualEvent) value, err := ie.eventReader.readValue(actualEvent)
if err != nil { if err != nil {
return fmt.Errorf("failed to read uncore event `%s` values: %v", actualEvent, err) return fmt.Errorf("failed to read uncore event %q values: %w", actualEvent, err)
} }
values[id] = value values[id] = value
return nil return nil
@ -230,7 +230,7 @@ func (ie *iaEntitiesValuesReader) readMultiEventAgg(multiEvent multiEvent) (unco
bRaw, bEnabled, bRunning := ia.AggregateValues(values) bRaw, bEnabled, bRunning := ia.AggregateValues(values)
if !bRaw.IsUint64() || !bEnabled.IsUint64() || !bRunning.IsUint64() { if !bRaw.IsUint64() || !bEnabled.IsUint64() || !bRunning.IsUint64() {
return uncoreMetric{}, fmt.Errorf("cannot aggregate `%s` values, uint64 exceeding", perfEvent) return uncoreMetric{}, fmt.Errorf("cannot aggregate %q values, uint64 exceeding", perfEvent)
} }
aggValues := ia.CounterValue{ aggValues := ia.CounterValue{
Raw: bRaw.Uint64(), Raw: bRaw.Uint64(),

View File

@ -72,7 +72,7 @@ func TestReadCoreEvents(t *testing.T) {
metrics, err := mEntitiesReader.readCoreEvents(entity) metrics, err := mEntitiesReader.readCoreEvents(entity)
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), fmt.Sprintf("failed to read core event `%s` values: %v", event, errMock)) require.Contains(t, err.Error(), fmt.Sprintf("failed to read core event %q values: %v", event, errMock))
require.Nil(t, metrics) require.Nil(t, metrics)
mReader.AssertExpectations(t) mReader.AssertExpectations(t)
}) })
@ -149,7 +149,7 @@ func TestReadMultiEventSeparately(t *testing.T) {
metrics, err := mEntitiesReader.readMultiEventSeparately(multi) metrics, err := mEntitiesReader.readMultiEventSeparately(multi)
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), fmt.Sprintf("failed to read uncore event `%s` values: %v", event, errMock)) require.Contains(t, err.Error(), fmt.Sprintf("failed to read uncore event %q values: %v", event, errMock))
require.Nil(t, metrics) require.Nil(t, metrics)
mReader.AssertExpectations(t) mReader.AssertExpectations(t)
}) })
@ -248,7 +248,7 @@ func TestReadMultiEventAgg(t *testing.T) {
{&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 1, Enabled: 0, Running: 0}}, {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 1, Enabled: 0, Running: 0}},
}, },
result: ia.CounterValue{}, result: ia.CounterValue{},
errMsg: fmt.Sprintf("cannot aggregate `%s` values, uint64 exceeding", perfEvent), errMsg: fmt.Sprintf("cannot aggregate %q values, uint64 exceeding", perfEvent),
}, },
{ {
name: "reading fail", name: "reading fail",

View File

@ -40,10 +40,10 @@ func (e *iaEntitiesResolver) resolveEntities(coreEntities []*CoreEventEntity, un
} }
customEvent, err := e.resolveEvent(event.name, event.qualifiers) customEvent, err := e.resolveEvent(event.name, event.qualifiers)
if err != nil { if err != nil {
return fmt.Errorf("failed to resolve core event `%s`: %v", event.name, err) return fmt.Errorf("failed to resolve core event %q: %w", event.name, err)
} }
if customEvent.Event.Uncore { if customEvent.Event.Uncore {
return fmt.Errorf("uncore event `%s` found in core entity", event.name) return fmt.Errorf("uncore event %q found in core entity", event.name)
} }
event.custom = customEvent event.custom = customEvent
} }
@ -66,10 +66,10 @@ func (e *iaEntitiesResolver) resolveEntities(coreEntities []*CoreEventEntity, un
} }
customEvent, err := e.resolveEvent(event.name, event.qualifiers) customEvent, err := e.resolveEvent(event.name, event.qualifiers)
if err != nil { if err != nil {
return fmt.Errorf("failed to resolve uncore event `%s`: %v", event.name, err) return fmt.Errorf("failed to resolve uncore event %q: %w", event.name, err)
} }
if !customEvent.Event.Uncore { if !customEvent.Event.Uncore {
return fmt.Errorf("core event `%s` found in uncore entity", event.name) return fmt.Errorf("core event %q found in uncore entity", event.name)
} }
event.custom = customEvent event.custom = customEvent
} }
@ -109,7 +109,7 @@ func (e *iaEntitiesResolver) resolveAllEvents() (coreEvents []*eventWithQuals, u
// build options for event // build options for event
newEvent.custom.Options, err = ia.NewOptions().Build() newEvent.custom.Options, err = ia.NewOptions().Build()
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("failed to build options for event `%s`: %v", perfEvent.Name, err) return nil, nil, fmt.Errorf("failed to build options for event %q: %w", perfEvent.Name, err)
} }
if perfEvent.Uncore { if perfEvent.Uncore {
uncoreEvents = append(uncoreEvents, newEvent) uncoreEvents = append(uncoreEvents, newEvent)
@ -134,12 +134,12 @@ func (e *iaEntitiesResolver) resolveEvent(name string, qualifiers []string) (ia.
return custom, fmt.Errorf("failed to transform perf events: %v", err) return custom, fmt.Errorf("failed to transform perf events: %v", err)
} }
if len(perfEvents) < 1 { if len(perfEvents) < 1 {
return custom, fmt.Errorf("failed to resolve unknown event `%s`", name) return custom, fmt.Errorf("failed to resolve unknown event %q", name)
} }
// build options for event // build options for event
options, err := ia.NewOptions().SetAttrModifiers(qualifiers).Build() options, err := ia.NewOptions().SetAttrModifiers(qualifiers).Build()
if err != nil { if err != nil {
return custom, fmt.Errorf("failed to build options for event `%s`: %v", name, err) return custom, fmt.Errorf("failed to build options for event %q: %w", name, err)
} }
custom = ia.CustomizableEvent{ custom = ia.CustomizableEvent{
Event: perfEvents[0], Event: perfEvents[0],

View File

@ -60,7 +60,7 @@ func TestResolveEntities(t *testing.T) {
err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil) err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil)
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), fmt.Sprintf("failed to resolve core event `%s`", name)) require.Contains(t, err.Error(), fmt.Sprintf("failed to resolve core event %q", name))
mTransformer.AssertExpectations(t) mTransformer.AssertExpectations(t)
}) })
@ -73,7 +73,7 @@ func TestResolveEntities(t *testing.T) {
err := mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity}) err := mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity})
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), fmt.Sprintf("failed to resolve uncore event `%s`", name)) require.Contains(t, err.Error(), fmt.Sprintf("failed to resolve uncore event %q", name))
mTransformer.AssertExpectations(t) mTransformer.AssertExpectations(t)
}) })
@ -159,7 +159,7 @@ func TestResolveEntities(t *testing.T) {
err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil) err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil)
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), fmt.Sprintf("uncore event `%s` found in core entity", eventName)) require.Contains(t, err.Error(), fmt.Sprintf("uncore event %q found in core entity", eventName))
mTransformer.AssertExpectations(t) mTransformer.AssertExpectations(t)
}) })
@ -179,7 +179,7 @@ func TestResolveEntities(t *testing.T) {
err := mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity}) err := mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity})
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), fmt.Sprintf("core event `%s` found in uncore entity", eventName)) require.Contains(t, err.Error(), fmt.Sprintf("core event %q found in uncore entity", eventName))
mTransformer.AssertExpectations(t) mTransformer.AssertExpectations(t)
}) })
@ -349,7 +349,7 @@ func TestResolveEvent(t *testing.T) {
_, err := mResolver.resolveEvent(event, qualifiers) _, err := mResolver.resolveEvent(event, qualifiers)
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), fmt.Sprintf("failed to build options for event `%s`", event)) require.Contains(t, err.Error(), fmt.Sprintf("failed to build options for event %q", event))
mTransformer.AssertExpectations(t) mTransformer.AssertExpectations(t)
}) })

View File

@ -160,13 +160,13 @@ func checkFile(path string) error {
lInfo, err := os.Lstat(path) lInfo, err := os.Lstat(path)
if err != nil { if err != nil {
if os.IsNotExist(err) { if os.IsNotExist(err) {
return fmt.Errorf("file `%s` doesn't exist", path) return fmt.Errorf("file %q doesn't exist", path)
} }
return fmt.Errorf("cannot obtain file info of `%s`: %v", path, err) return fmt.Errorf("cannot obtain file info of %q: %w", path, err)
} }
mode := lInfo.Mode() mode := lInfo.Mode()
if mode&os.ModeSymlink != 0 { if mode&os.ModeSymlink != 0 {
return fmt.Errorf("file `%s` is a symlink", path) return fmt.Errorf("file %q is a symlink", path)
} }
return nil return nil
} }

View File

@ -273,7 +273,7 @@ func (m *Ipmi) extractFieldsFromRegex(re *regexp.Regexp, input string) map[strin
results := make(map[string]string) results := make(map[string]string)
subexpNames := re.SubexpNames() subexpNames := re.SubexpNames()
if len(subexpNames) > len(submatches) { if len(subexpNames) > len(submatches) {
m.Log.Debugf("No matches found in '%s'", input) m.Log.Debugf("No matches found in %q", input)
return results return results
} }
for i, name := range subexpNames { for i, name := range subexpNames {

View File

@ -73,7 +73,7 @@ func (j *Jolokia) doRequest(req *http.Request) ([]map[string]interface{}, error)
// Process response // Process response
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
err = fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", err = fmt.Errorf("response from url %q has status code %d (%s), expected %d (%s)",
req.RequestURI, req.RequestURI,
resp.StatusCode, resp.StatusCode,
http.StatusText(resp.StatusCode), http.StatusText(resp.StatusCode),
@ -231,7 +231,7 @@ func (j *Jolokia) Gather(acc telegraf.Accumulator) error {
} }
for i, resp := range out { for i, resp := range out {
if status, ok := resp["status"]; ok && status != float64(200) { if status, ok := resp["status"]; ok && status != float64(200) {
acc.AddError(fmt.Errorf("not expected status value in response body (%s:%s mbean=\"%s\" attribute=\"%s\"): %3.f", acc.AddError(fmt.Errorf("not expected status value in response body (%s:%s mbean=%q attribute=%q): %3.f",
server.Host, server.Port, metrics[i].Mbean, metrics[i].Attribute, status)) server.Host, server.Port, metrics[i].Mbean, metrics[i].Attribute, status))
continue continue
} else if !ok { } else if !ok {

View File

@ -75,7 +75,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error {
case "newest": case "newest":
config.Offsets.Initial = sarama.OffsetNewest config.Offsets.Initial = sarama.OffsetNewest
default: default:
k.Log.Infof("WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n", k.Log.Infof("WARNING: Kafka consumer invalid offset %q, using 'oldest'\n",
k.Offset) k.Offset)
config.Offsets.Initial = sarama.OffsetOldest config.Offsets.Initial = sarama.OffsetOldest
} }

View File

@ -218,7 +218,7 @@ func (k *Kibana) gatherKibanaStatus(baseURL string, acc telegraf.Accumulator) er
func (k *Kibana) gatherJSONData(url string, v interface{}) (host string, err error) { func (k *Kibana) gatherJSONData(url string, v interface{}) (host string, err error) {
request, err := http.NewRequest("GET", url, nil) request, err := http.NewRequest("GET", url, nil)
if err != nil { if err != nil {
return "", fmt.Errorf("unable to create new request '%s': %v", url, err) return "", fmt.Errorf("unable to create new request %q: %w", url, err)
} }
if (k.Username != "") || (k.Password != "") { if (k.Username != "") || (k.Password != "") {

View File

@ -6,16 +6,17 @@ import (
_ "embed" _ "embed"
"encoding/json" "encoding/json"
"fmt" "fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"net/http" "net/http"
"os" "os"
"strings" "strings"
"sync" "sync"
"time" "time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/filter"
@ -129,7 +130,7 @@ func getNodeURLs(log telegraf.Logger) ([]string, error) {
for _, n := range nodes.Items { for _, n := range nodes.Items {
address := getNodeAddress(n) address := getNodeAddress(n)
if address == "" { if address == "" {
			log.Warn("Unable to node addresses for Node '%s'", n.Name) log.Warnf("Unable to get node addresses for Node %q", n.Name)
continue continue
} }
nodeUrls = append(nodeUrls, "https://"+address+":10250") nodeUrls = append(nodeUrls, "https://"+address+":10250")

View File

@ -163,13 +163,13 @@ func (c *ConfigurationOriginal) validateFieldDefinitions(fieldDefs []fieldDefini
for _, item := range fieldDefs { for _, item := range fieldDefs {
//check empty name //check empty name
if item.Name == "" { if item.Name == "" {
return fmt.Errorf("empty name in '%s'", registerType) return fmt.Errorf("empty name in %q", registerType)
} }
//search name duplicate //search name duplicate
canonicalName := item.Measurement + "." + item.Name canonicalName := item.Measurement + "." + item.Name
if nameEncountered[canonicalName] { if nameEncountered[canonicalName] {
return fmt.Errorf("name '%s' is duplicated in measurement '%s' '%s' - '%s'", item.Name, item.Measurement, registerType, item.Name) return fmt.Errorf("name %q is duplicated in measurement %q %q - %q", item.Name, item.Measurement, registerType, item.Name)
} }
nameEncountered[canonicalName] = true nameEncountered[canonicalName] = true
@ -178,7 +178,7 @@ func (c *ConfigurationOriginal) validateFieldDefinitions(fieldDefs []fieldDefini
switch item.ByteOrder { switch item.ByteOrder {
case "AB", "BA", "ABCD", "CDAB", "BADC", "DCBA", "ABCDEFGH", "HGFEDCBA", "BADCFEHG", "GHEFCDAB": case "AB", "BA", "ABCD", "CDAB", "BADC", "DCBA", "ABCDEFGH", "HGFEDCBA", "BADCFEHG", "GHEFCDAB":
default: default:
return fmt.Errorf("invalid byte order '%s' in '%s' - '%s'", item.ByteOrder, registerType, item.Name) return fmt.Errorf("invalid byte order %q in %q - %q", item.ByteOrder, registerType, item.Name)
} }
// search data type // search data type
@ -187,31 +187,31 @@ func (c *ConfigurationOriginal) validateFieldDefinitions(fieldDefs []fieldDefini
"UINT16", "INT16", "UINT32", "INT32", "UINT64", "INT64", "UINT16", "INT16", "UINT32", "INT32", "UINT64", "INT64",
"FLOAT16-IEEE", "FLOAT32-IEEE", "FLOAT64-IEEE", "FLOAT32", "FIXED", "UFIXED": "FLOAT16-IEEE", "FLOAT32-IEEE", "FLOAT64-IEEE", "FLOAT32", "FIXED", "UFIXED":
default: default:
return fmt.Errorf("invalid data type '%s' in '%s' - '%s'", item.DataType, registerType, item.Name) return fmt.Errorf("invalid data type %q in %q - %q", item.DataType, registerType, item.Name)
} }
// check scale // check scale
if item.Scale == 0.0 { if item.Scale == 0.0 {
return fmt.Errorf("invalid scale '%f' in '%s' - '%s'", item.Scale, registerType, item.Name) return fmt.Errorf("invalid scale '%f' in %q - %q", item.Scale, registerType, item.Name)
} }
} }
// check address // check address
if len(item.Address) != 1 && len(item.Address) != 2 && len(item.Address) != 4 { if len(item.Address) != 1 && len(item.Address) != 2 && len(item.Address) != 4 {
return fmt.Errorf("invalid address '%v' length '%v' in '%s' - '%s'", item.Address, len(item.Address), registerType, item.Name) return fmt.Errorf("invalid address '%v' length '%v' in %q - %q", item.Address, len(item.Address), registerType, item.Name)
} }
if registerType == cInputRegisters || registerType == cHoldingRegisters { if registerType == cInputRegisters || registerType == cHoldingRegisters {
if 2*len(item.Address) != len(item.ByteOrder) { if 2*len(item.Address) != len(item.ByteOrder) {
return fmt.Errorf("invalid byte order '%s' and address '%v' in '%s' - '%s'", item.ByteOrder, item.Address, registerType, item.Name) return fmt.Errorf("invalid byte order %q and address '%v' in %q - %q", item.ByteOrder, item.Address, registerType, item.Name)
} }
// search duplicated // search duplicated
if len(item.Address) > len(removeDuplicates(item.Address)) { if len(item.Address) > len(removeDuplicates(item.Address)) {
return fmt.Errorf("duplicate address '%v' in '%s' - '%s'", item.Address, registerType, item.Name) return fmt.Errorf("duplicate address '%v' in %q - %q", item.Address, registerType, item.Name)
} }
} else if len(item.Address) != 1 { } else if len(item.Address) != 1 {
return fmt.Errorf("invalid address'%v' length'%v' in '%s' - '%s'", item.Address, len(item.Address), registerType, item.Name) return fmt.Errorf("invalid address'%v' length'%v' in %q - %q", item.Address, len(item.Address), registerType, item.Name)
} }
} }
return nil return nil

View File

@ -403,17 +403,17 @@ func typeConvert(types map[string]string, topicValue string, key string) (interf
case "uint": case "uint":
newType, err = strconv.ParseUint(topicValue, 10, 64) newType, err = strconv.ParseUint(topicValue, 10, 64)
if err != nil { if err != nil {
return nil, fmt.Errorf("unable to convert field '%s' to type uint: %v", topicValue, err) return nil, fmt.Errorf("unable to convert field %q to type uint: %w", topicValue, err)
} }
case "int": case "int":
newType, err = strconv.ParseInt(topicValue, 10, 64) newType, err = strconv.ParseInt(topicValue, 10, 64)
if err != nil { if err != nil {
return nil, fmt.Errorf("unable to convert field '%s' to type int: %v", topicValue, err) return nil, fmt.Errorf("unable to convert field %q to type int: %w", topicValue, err)
} }
case "float": case "float":
newType, err = strconv.ParseFloat(topicValue, 64) newType, err = strconv.ParseFloat(topicValue, 64)
if err != nil { if err != nil {
return nil, fmt.Errorf("unable to convert field '%s' to type float: %v", topicValue, err) return nil, fmt.Errorf("unable to convert field %q to type float: %w", topicValue, err)
} }
default: default:
return nil, fmt.Errorf("converting to the type %s is not supported: use int, uint, or float", desiredType) return nil, fmt.Errorf("converting to the type %s is not supported: use int, uint, or float", desiredType)

View File

@ -306,12 +306,7 @@ func (n *NFSClient) Gather(acc telegraf.Accumulator) error {
return err return err
} }
if err := scanner.Err(); err != nil { return scanner.Err()
n.Log.Errorf("%s", err)
return err
}
return nil
} }
func (n *NFSClient) Init() error { func (n *NFSClient) Init() error {

View File

@ -51,7 +51,7 @@ func (n *Nginx) Gather(acc telegraf.Accumulator) error {
for _, u := range n.Urls { for _, u := range n.Urls {
addr, err := url.Parse(u) addr, err := url.Parse(u)
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) acc.AddError(fmt.Errorf("Unable to parse address %q: %w", u, err))
continue continue
} }

View File

@ -52,7 +52,7 @@ func (n *NginxPlus) Gather(acc telegraf.Accumulator) error {
for _, u := range n.Urls { for _, u := range n.Urls {
addr, err := url.Parse(u) addr, err := url.Parse(u)
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) acc.AddError(fmt.Errorf("Unable to parse address %q: %w", u, err))
continue continue
} }

View File

@ -74,7 +74,7 @@ func (n *NginxPlusAPI) Gather(acc telegraf.Accumulator) error {
for _, u := range n.Urls { for _, u := range n.Urls {
addr, err := url.Parse(u) addr, err := url.Parse(u)
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) acc.AddError(fmt.Errorf("Unable to parse address %q: %w", u, err))
continue continue
} }

View File

@ -51,7 +51,7 @@ func (n *NginxSTS) Gather(acc telegraf.Accumulator) error {
for _, u := range n.Urls { for _, u := range n.Urls {
addr, err := url.Parse(u) addr, err := url.Parse(u)
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) acc.AddError(fmt.Errorf("Unable to parse address %q: %w", u, err))
continue continue
} }

View File

@ -51,7 +51,7 @@ func (n *NginxVTS) Gather(acc telegraf.Accumulator) error {
for _, u := range n.Urls { for _, u := range n.Urls {
addr, err := url.Parse(u) addr, err := url.Parse(u)
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) acc.AddError(fmt.Errorf("Unable to parse address %q: %w", u, err))
continue continue
} }

View File

@ -165,7 +165,7 @@ func buildURL(e string) (*url.URL, error) {
u := fmt.Sprintf(requestPattern, e) u := fmt.Sprintf(requestPattern, e)
addr, err := url.Parse(u) addr, err := url.Parse(u)
if err != nil { if err != nil {
return nil, fmt.Errorf("unable to parse address '%s': %s", u, err) return nil, fmt.Errorf("unable to parse address %q: %w", u, err)
} }
return addr, nil return addr, nil
} }

View File

@ -3,13 +3,14 @@ package opcua_listener
import ( import (
"context" "context"
"fmt" "fmt"
"reflect"
"time"
"github.com/gopcua/opcua" "github.com/gopcua/opcua"
"github.com/gopcua/opcua/ua" "github.com/gopcua/opcua/ua"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/plugins/common/opcua/input" "github.com/influxdata/telegraf/plugins/common/opcua/input"
"reflect"
"time"
) )
type SubscribeClientConfig struct { type SubscribeClientConfig struct {
@ -140,7 +141,7 @@ func (o *SubscribeClient) processReceivedNotifications() {
i := int(monitoredItemNotif.ClientHandle) i := int(monitoredItemNotif.ClientHandle)
oldValue := o.LastReceivedData[i].Value oldValue := o.LastReceivedData[i].Value
o.UpdateNodeValue(i, monitoredItemNotif.Value) o.UpdateNodeValue(i, monitoredItemNotif.Value)
o.Log.Debugf("Data change notification: node '%s' value changed from %f to %f", o.Log.Debugf("Data change notification: node %q value changed from %f to %f",
o.NodeIDs[i].String(), oldValue, o.LastReceivedData[i].Value) o.NodeIDs[i].String(), oldValue, o.LastReceivedData[i].Value)
o.metrics <- o.MetricForNode(i) o.metrics <- o.MetricForNode(i)
} }

View File

@ -90,7 +90,7 @@ func (o *OpensearchQuery) Init() error {
func (o *OpensearchQuery) initAggregation(agg osAggregation, i int) (err error) { func (o *OpensearchQuery) initAggregation(agg osAggregation, i int) (err error) {
for _, metricField := range agg.MetricFields { for _, metricField := range agg.MetricFields {
if _, ok := agg.mapMetricFields[metricField]; !ok { if _, ok := agg.mapMetricFields[metricField]; !ok {
return fmt.Errorf("metric field '%s' not found on index '%s'", metricField, agg.Index) return fmt.Errorf("metric field %q not found on index %q", metricField, agg.Index)
} }
} }

View File

@ -49,7 +49,7 @@ var metricsSchemata = map[string]common.MetricsSchema{
func newMetricsService(logger common.Logger, writer *writeToAccumulator, schema string) (*metricsService, error) { func newMetricsService(logger common.Logger, writer *writeToAccumulator, schema string) (*metricsService, error) {
ms, found := metricsSchemata[schema] ms, found := metricsSchemata[schema]
if !found { if !found {
return nil, fmt.Errorf("schema '%s' not recognized", schema) return nil, fmt.Errorf("schema %q not recognized", schema)
} }
converter, err := otel2influx.NewOtelMetricsToLineProtocol(logger, writer, ms) converter, err := otel2influx.NewOtelMetricsToLineProtocol(logger, writer, ms)

View File

@ -57,7 +57,7 @@ func (pf *PF) Gather(acc telegraf.Accumulator) error {
var errParseHeader = fmt.Errorf("Cannot find header in %s output", pfctlCommand) var errParseHeader = fmt.Errorf("Cannot find header in %s output", pfctlCommand)
func errMissingData(tag string) error { func errMissingData(tag string) error {
return fmt.Errorf("struct data for tag \"%s\" not found in %s output", tag, pfctlCommand) return fmt.Errorf("struct data for tag %q not found in %s output", tag, pfctlCommand)
} }
type pfctlOutputStanza struct { type pfctlOutputStanza struct {

View File

@ -113,7 +113,7 @@ func (p *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error {
if strings.HasPrefix(addr, "fcgi://") || strings.HasPrefix(addr, "cgi://") { if strings.HasPrefix(addr, "fcgi://") || strings.HasPrefix(addr, "cgi://") {
u, err := url.Parse(addr) u, err := url.Parse(addr)
if err != nil { if err != nil {
return fmt.Errorf("unable parse server address '%s': %s", addr, err) return fmt.Errorf("unable parse server address %q: %w", addr, err)
} }
socketAddr := strings.Split(u.Host, ":") socketAddr := strings.Split(u.Host, ":")
fcgiIP := socketAddr[0] fcgiIP := socketAddr[0]
@ -165,22 +165,22 @@ func (p *phpfpm) gatherFcgi(fcgi *conn, statusPath string, acc telegraf.Accumula
func (p *phpfpm) gatherHTTP(addr string, acc telegraf.Accumulator) error { func (p *phpfpm) gatherHTTP(addr string, acc telegraf.Accumulator) error {
u, err := url.Parse(addr) u, err := url.Parse(addr)
if err != nil { if err != nil {
return fmt.Errorf("unable parse server address '%s': %v", addr, err) return fmt.Errorf("unable parse server address %q: %w", addr, err)
} }
req, err := http.NewRequest("GET", u.String(), nil) req, err := http.NewRequest("GET", u.String(), nil)
if err != nil { if err != nil {
return fmt.Errorf("unable to create new request '%s': %v", addr, err) return fmt.Errorf("unable to create new request %q: %w", addr, err)
} }
res, err := p.client.Do(req) res, err := p.client.Do(req)
if err != nil { if err != nil {
return fmt.Errorf("unable to connect to phpfpm status page '%s': %v", addr, err) return fmt.Errorf("unable to connect to phpfpm status page %q: %w", addr, err)
} }
defer res.Body.Close() defer res.Body.Close()
if res.StatusCode != 200 { if res.StatusCode != 200 {
return fmt.Errorf("unable to get valid stat result from '%s': %v", addr, err) return fmt.Errorf("unable to get valid stat result from %q: %w", addr, err)
} }
importMetric(res.Body, acc, addr) importMetric(res.Body, acc, addr)

View File

@ -45,8 +45,7 @@ func (pg *NativeFinder) PidFile(path string) ([]PID, error) {
var pids []PID var pids []PID
pidString, err := os.ReadFile(path) pidString, err := os.ReadFile(path)
if err != nil { if err != nil {
return pids, fmt.Errorf("Failed to read pidfile '%s'. Error: '%s'", return pids, fmt.Errorf("Failed to read pidfile %q: %w", path, err)
path, err)
} }
pid, err := strconv.ParseInt(strings.TrimSpace(string(pidString)), 10, 32) pid, err := strconv.ParseInt(strings.TrimSpace(string(pidString)), 10, 32)
if err != nil { if err != nil {

View File

@ -27,7 +27,7 @@ func (pg *Pgrep) PidFile(path string) ([]PID, error) {
var pids []PID var pids []PID
pidString, err := os.ReadFile(path) pidString, err := os.ReadFile(path)
if err != nil { if err != nil {
return pids, fmt.Errorf("Failed to read pidfile '%s'. Error: '%s'", return pids, fmt.Errorf("Failed to read pidfile %q: %w",
path, err) path, err)
} }
pid, err := strconv.ParseInt(strings.TrimSpace(string(pidString)), 10, 32) pid, err := strconv.ParseInt(strings.TrimSpace(string(pidString)), 10, 32)

View File

@ -432,7 +432,7 @@ func (p *Procstat) simpleSystemdUnitPIDs() ([]PID, error) {
} }
pid, err := strconv.ParseInt(string(kv[1]), 10, 32) pid, err := strconv.ParseInt(string(kv[1]), 10, 32)
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid pid '%s'", kv[1]) return nil, fmt.Errorf("invalid pid %q", kv[1])
} }
pids = append(pids, PID(pid)) pids = append(pids, PID(pid))
} }
@ -448,7 +448,7 @@ func (p *Procstat) cgroupPIDs() []PidsTags {
items, err := filepath.Glob(procsPath) items, err := filepath.Glob(procsPath)
if err != nil { if err != nil {
return []PidsTags{{nil, nil, fmt.Errorf("glob failed '%s'", err)}} return []PidsTags{{nil, nil, fmt.Errorf("glob failed: %w", err)}}
} }
pidTags := make([]PidsTags, 0, len(items)) pidTags := make([]PidsTags, 0, len(items))
@ -483,7 +483,7 @@ func (p *Procstat) singleCgroupPIDs(path string) ([]PID, error) {
} }
pid, err := strconv.ParseInt(string(pidBS), 10, 32) pid, err := strconv.ParseInt(string(pidBS), 10, 32)
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid pid '%s'", pidBS) return nil, fmt.Errorf("invalid pid %q", pidBS)
} }
pids = append(pids, PID(pid)) pids = append(pids, PID(pid))
} }

View File

@ -284,7 +284,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error
addr := "http://localhost" + path addr := "http://localhost" + path
req, err = http.NewRequest("GET", addr, nil) req, err = http.NewRequest("GET", addr, nil)
if err != nil { if err != nil {
return fmt.Errorf("unable to create new request '%s': %s", addr, err) return fmt.Errorf("unable to create new request %q: %w", addr, err)
} }
// ignore error because it's been handled before getting here // ignore error because it's been handled before getting here
@ -306,7 +306,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error
} }
req, err = http.NewRequest("GET", u.URL.String(), nil) req, err = http.NewRequest("GET", u.URL.String(), nil)
if err != nil { if err != nil {
return fmt.Errorf("unable to create new request '%s': %s", u.URL.String(), err) return fmt.Errorf("unable to create new request %q: %w", u.URL.String(), err)
} }
} }

View File

@ -93,19 +93,19 @@ func (pa *PuppetAgent) Gather(acc telegraf.Accumulator) error {
} }
if _, err := os.Stat(pa.Location); err != nil { if _, err := os.Stat(pa.Location); err != nil {
return fmt.Errorf("%s", err) return err
} }
fh, err := os.ReadFile(pa.Location) fh, err := os.ReadFile(pa.Location)
if err != nil { if err != nil {
return fmt.Errorf("%s", err) return err
} }
var puppetState State var puppetState State
err = yaml.Unmarshal(fh, &puppetState) err = yaml.Unmarshal(fh, &puppetState)
if err != nil { if err != nil {
return fmt.Errorf("%s", err) return err
} }
tags := map[string]string{"location": pa.Location} tags := map[string]string{"location": pa.Location}

View File

@ -35,7 +35,7 @@ func (r *Raindrops) Gather(acc telegraf.Accumulator) error {
for _, u := range r.Urls { for _, u := range r.Urls {
addr, err := url.Parse(u) addr, err := url.Parse(u)
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("unable to parse address '%s': %s", u, err)) acc.AddError(fmt.Errorf("unable to parse address %q: %w", u, err))
continue continue
} }

View File

@ -126,7 +126,7 @@ func (r *RavenDB) requestJSON(u string, target interface{}) error {
r.Log.Debugf("%s: %s", u, resp.Status) r.Log.Debugf("%s: %s", u, resp.Status)
if resp.StatusCode >= 400 { if resp.StatusCode >= 400 {
return fmt.Errorf("invalid response code to request '%s': %d - %s", r.URL, resp.StatusCode, resp.Status) return fmt.Errorf("invalid response code to request %q: %d - %s", r.URL, resp.StatusCode, resp.Status)
} }
return json.NewDecoder(resp.Body).Decode(target) return json.NewDecoder(resp.Body).Decode(target)

View File

@ -38,7 +38,7 @@ func (r *RethinkDB) Gather(acc telegraf.Accumulator) error {
for _, serv := range r.Servers { for _, serv := range r.Servers {
u, err := url.Parse(serv) u, err := url.Parse(serv)
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("unable to parse to address '%s': %s", serv, err)) acc.AddError(fmt.Errorf("unable to parse to address %q: %w", serv, err))
continue continue
} else if u.Scheme == "" { } else if u.Scheme == "" {
// fallback to simple string based address (i.e. "10.0.0.1:10000") // fallback to simple string based address (i.e. "10.0.0.1:10000")

View File

@ -326,7 +326,7 @@ func (rsl *RiemannSocketListener) Start(acc telegraf.Accumulator) error {
rsl.listen(ctx) rsl.listen(ctx)
}() }()
default: default:
return fmt.Errorf("unknown protocol '%s' in '%s'", protocol, rsl.ServiceAddress) return fmt.Errorf("unknown protocol %q in %q", protocol, rsl.ServiceAddress)
} }
return nil return nil

View File

@ -402,7 +402,7 @@ func (m *Smart) Init() error {
} }
if !contains(knownReadMethods, m.ReadMethod) { if !contains(knownReadMethods, m.ReadMethod) {
return fmt.Errorf("provided read method `%s` is not valid", m.ReadMethod) return fmt.Errorf("provided read method %q is not valid", m.ReadMethod)
} }
err := validatePath(m.PathSmartctl) err := validatePath(m.PathSmartctl)
@ -864,7 +864,7 @@ func (m *Smart) gatherDisk(acc telegraf.Accumulator, device string, wg *sync.Wai
} }
if err := parse(fields, deviceFields, matches[2]); err != nil { if err := parse(fields, deviceFields, matches[2]); err != nil {
acc.AddError(fmt.Errorf("error parsing %s: '%s': %s", attr.Name, matches[2], err.Error())) acc.AddError(fmt.Errorf("error parsing %s: %q: %w", attr.Name, matches[2], err))
continue continue
} }
// if the field is classified as an attribute, only add it // if the field is classified as an attribute, only add it
@ -923,7 +923,7 @@ func parseRawValue(rawVal string) (int64, error) {
unit := regexp.MustCompile("^(.*)([hms])$") unit := regexp.MustCompile("^(.*)([hms])$")
parts := strings.Split(rawVal, "+") parts := strings.Split(rawVal, "+")
if len(parts) == 0 { if len(parts) == 0 {
return 0, fmt.Errorf("couldn't parse RAW_VALUE '%s'", rawVal) return 0, fmt.Errorf("couldn't parse RAW_VALUE %q", rawVal)
} }
duration := int64(0) duration := int64(0)

View File

@ -104,10 +104,10 @@ func TestFieldInitGosmi(t *testing.T) {
for _, txl := range translations { for _, txl := range translations {
f := Field{Oid: txl.inputOid, Name: txl.inputName, Conversion: txl.inputConversion} f := Field{Oid: txl.inputOid, Name: txl.inputName, Conversion: txl.inputConversion}
err := f.init(tr) err := f.init(tr)
require.NoError(t, err, "inputOid='%s' inputName='%s'", txl.inputOid, txl.inputName) require.NoError(t, err, "inputOid=%q inputName=%q", txl.inputOid, txl.inputName)
assert.Equal(t, txl.expectedOid, f.Oid, "inputOid='%s' inputName='%s' inputConversion='%s'", txl.inputOid, txl.inputName, txl.inputConversion) assert.Equal(t, txl.expectedOid, f.Oid, "inputOid=%q inputName=%q inputConversion=%q", txl.inputOid, txl.inputName, txl.inputConversion)
assert.Equal(t, txl.expectedName, f.Name, "inputOid='%s' inputName='%s' inputConversion='%s'", txl.inputOid, txl.inputName, txl.inputConversion) assert.Equal(t, txl.expectedName, f.Name, "inputOid=%q inputName=%q inputConversion=%q", txl.inputOid, txl.inputName, txl.inputConversion)
} }
} }

View File

@ -768,7 +768,7 @@ func fieldConvert(tr Translator, conv string, ent gosnmp.SnmpPDU) (v interface{}
return tr.SnmpFormatEnum(ent.Name, ent.Value, true) return tr.SnmpFormatEnum(ent.Name, ent.Value, true)
} }
return nil, fmt.Errorf("invalid conversion type '%s'", conv) return nil, fmt.Errorf("invalid conversion type %q", conv)
} }
func init() { func init() {

View File

@ -121,11 +121,11 @@ func TestFieldInit(t *testing.T) {
for _, txl := range translations { for _, txl := range translations {
f := Field{Oid: txl.inputOid, Name: txl.inputName, Conversion: txl.inputConversion} f := Field{Oid: txl.inputOid, Name: txl.inputName, Conversion: txl.inputConversion}
err := f.init(tr) err := f.init(tr)
if !assert.NoError(t, err, "inputOid='%s' inputName='%s'", txl.inputOid, txl.inputName) { if !assert.NoError(t, err, "inputOid=%q inputName=%q", txl.inputOid, txl.inputName) {
continue continue
} }
assert.Equal(t, txl.expectedOid, f.Oid, "inputOid='%s' inputName='%s' inputConversion='%s'", txl.inputOid, txl.inputName, txl.inputConversion) assert.Equal(t, txl.expectedOid, f.Oid, "inputOid=%q inputName=%q inputConversion=%q", txl.inputOid, txl.inputName, txl.inputConversion)
assert.Equal(t, txl.expectedName, f.Name, "inputOid='%s' inputName='%s' inputConversion='%s'", txl.inputOid, txl.inputName, txl.inputConversion) assert.Equal(t, txl.expectedName, f.Name, "inputOid=%q inputName=%q inputConversion=%q", txl.inputOid, txl.inputName, txl.inputConversion)
} }
} }

View File

@ -128,7 +128,7 @@ func (s *SnmpTrap) Start(acc telegraf.Accumulator) error {
case "authpriv": case "authpriv":
s.listener.Params.MsgFlags = gosnmp.AuthPriv s.listener.Params.MsgFlags = gosnmp.AuthPriv
default: default:
return fmt.Errorf("unknown security level '%s'", s.SecLevel) return fmt.Errorf("unknown security level %q", s.SecLevel)
} }
var authenticationProtocol gosnmp.SnmpV3AuthProtocol var authenticationProtocol gosnmp.SnmpV3AuthProtocol
@ -148,7 +148,7 @@ func (s *SnmpTrap) Start(acc telegraf.Accumulator) error {
case "": case "":
authenticationProtocol = gosnmp.NoAuth authenticationProtocol = gosnmp.NoAuth
default: default:
return fmt.Errorf("unknown authentication protocol '%s'", s.AuthProtocol) return fmt.Errorf("unknown authentication protocol %q", s.AuthProtocol)
} }
var privacyProtocol gosnmp.SnmpV3PrivProtocol var privacyProtocol gosnmp.SnmpV3PrivProtocol
@ -168,7 +168,7 @@ func (s *SnmpTrap) Start(acc telegraf.Accumulator) error {
case "": case "":
privacyProtocol = gosnmp.NoPriv privacyProtocol = gosnmp.NoPriv
default: default:
return fmt.Errorf("unknown privacy protocol '%s'", s.PrivProtocol) return fmt.Errorf("unknown privacy protocol %q", s.PrivProtocol)
} }
secname, err := s.SecName.Get() secname, err := s.SecName.Get()
@ -211,7 +211,7 @@ func (s *SnmpTrap) Start(acc telegraf.Accumulator) error {
// gosnmp.TrapListener currently supports udp only. For forward // gosnmp.TrapListener currently supports udp only. For forward
// compatibility, require udp in the service address // compatibility, require udp in the service address
if protocol != "udp" { if protocol != "udp" {
return fmt.Errorf("unknown protocol '%s' in '%s'", protocol, s.ServiceAddress) return fmt.Errorf("unknown protocol %q in %q", protocol, s.ServiceAddress)
} }
// If (*TrapListener).Listen immediately returns an error we need // If (*TrapListener).Listen immediately returns an error we need

View File

@ -50,14 +50,14 @@ func (s *Statsd) parseEventMessage(now time.Time, message string, defaultHostnam
titleLen, err := strconv.ParseInt(rawLen[0], 10, 64) titleLen, err := strconv.ParseInt(rawLen[0], 10, 64)
if err != nil { if err != nil {
return fmt.Errorf("invalid message format, could not parse title.length: '%s'", rawLen[0]) return fmt.Errorf("invalid message format, could not parse title.length: %q", rawLen[0])
} }
if len(rawLen[1]) < 1 { if len(rawLen[1]) < 1 {
return fmt.Errorf("invalid message format, could not parse text.length: '%s'", rawLen[0]) return fmt.Errorf("invalid message format, could not parse text.length: %q", rawLen[0])
} }
textLen, err := strconv.ParseInt(rawLen[1][:len(rawLen[1])-1], 10, 64) textLen, err := strconv.ParseInt(rawLen[1][:len(rawLen[1])-1], 10, 64)
if err != nil { if err != nil {
return fmt.Errorf("invalid message format, could not parse text.length: '%s'", rawLen[0]) return fmt.Errorf("invalid message format, could not parse text.length: %q", rawLen[0])
} }
if titleLen+textLen+1 > int64(len(message)) { if titleLen+textLen+1 > int64(len(message)) {
return fmt.Errorf("invalid message format, title.length and text.length exceed total message length") return fmt.Errorf("invalid message format, title.length and text.length exceed total message length")
@ -121,7 +121,7 @@ func (s *Statsd) parseEventMessage(now time.Time, message string, defaultHostnam
fields["source_type_name"] = rawMetadataFields[i][2:] fields["source_type_name"] = rawMetadataFields[i][2:]
default: default:
if rawMetadataFields[i][0] != '#' { if rawMetadataFields[i][0] != '#' {
return fmt.Errorf("unknown metadata type: '%s'", rawMetadataFields[i]) return fmt.Errorf("unknown metadata type: %q", rawMetadataFields[i])
} }
parseDataDogTags(tags, rawMetadataFields[i][1:]) parseDataDogTags(tags, rawMetadataFields[i][1:])
} }

View File

@ -79,7 +79,7 @@ func (k *Synproxy) getSynproxyStat() (map[string]interface{}, error) {
x, err := strconv.ParseUint(val, 16, 32) x, err := strconv.ParseUint(val, 16, 32)
// If field is not a valid hexstring // If field is not a valid hexstring
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid value '%s' found", val) return nil, fmt.Errorf("invalid value %q found", val)
} }
if hname[i] != "" { if hname[i] != "" {
fields[hname[i]] = fields[hname[i]].(uint32) + uint32(x) fields[hname[i]] = fields[hname[i]].(uint32) + uint32(x)

View File

@ -93,7 +93,7 @@ func (s *Syslog) Start(acc telegraf.Accumulator) error {
case "udp", "udp4", "udp6", "ip", "ip4", "ip6", "unixgram": case "udp", "udp4", "udp6", "ip", "ip4", "ip6", "unixgram":
s.isStream = false s.isStream = false
default: default:
return fmt.Errorf("unknown protocol '%s' in '%s'", scheme, s.Address) return fmt.Errorf("unknown protocol %q in %q", scheme, s.Address)
} }
if scheme == "unix" || scheme == "unixpacket" || scheme == "unixgram" { if scheme == "unix" || scheme == "unixpacket" || scheme == "unixgram" {
@ -150,12 +150,12 @@ func (s *Syslog) Stop() {
func getAddressParts(a string) (scheme string, host string, err error) { func getAddressParts(a string) (scheme string, host string, err error) {
parts := strings.SplitN(a, "://", 2) parts := strings.SplitN(a, "://", 2)
if len(parts) != 2 { if len(parts) != 2 {
return "", "", fmt.Errorf("missing protocol within address '%s'", a) return "", "", fmt.Errorf("missing protocol within address %q", a)
} }
u, err := url.Parse(filepath.ToSlash(a)) //convert backslashes to slashes (to make Windows path a valid URL) u, err := url.Parse(filepath.ToSlash(a)) //convert backslashes to slashes (to make Windows path a valid URL)
if err != nil { if err != nil {
return "", "", fmt.Errorf("could not parse address '%s': %v", a, err) return "", "", fmt.Errorf("could not parse address %q: %w", a, err)
} }
switch u.Scheme { switch u.Scheme {
case "unix", "unixpacket", "unixgram": case "unix", "unixpacket", "unixgram":

View File

@ -35,14 +35,14 @@ func TestAddress(t *testing.T) {
Address: "localhost:6514", Address: "localhost:6514",
} }
err = rec.Start(&testutil.Accumulator{}) err = rec.Start(&testutil.Accumulator{})
require.EqualError(t, err, "missing protocol within address 'localhost:6514'") require.EqualError(t, err, `missing protocol within address "localhost:6514"`)
require.Error(t, err) require.Error(t, err)
rec = &Syslog{ rec = &Syslog{
Address: "unsupported://example.com:6514", Address: "unsupported://example.com:6514",
} }
err = rec.Start(&testutil.Accumulator{}) err = rec.Start(&testutil.Accumulator{})
require.EqualError(t, err, "unknown protocol 'unsupported' in 'example.com:6514'") require.EqualError(t, err, `unknown protocol "unsupported" in "example.com:6514"`)
require.Error(t, err) require.Error(t, err)
tmpdir := t.TempDir() tmpdir := t.TempDir()

View File

@ -202,7 +202,7 @@ func (s *Sysstat) parse(acc telegraf.Accumulator, option string, tmpfile string,
return err return err
} }
if err := cmd.Start(); err != nil { if err := cmd.Start(); err != nil {
return fmt.Errorf("running command '%s' failed: %s", strings.Join(cmd.Args, " "), err) return fmt.Errorf("running command %q failed: %w", strings.Join(cmd.Args, " "), err)
} }
r := bufio.NewReader(stdout) r := bufio.NewReader(stdout)

View File

@ -51,7 +51,7 @@ func (n *Tengine) Gather(acc telegraf.Accumulator) error {
for _, u := range n.Urls { for _, u := range n.Urls {
addr, err := url.Parse(u) addr, err := url.Parse(u)
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) acc.AddError(fmt.Errorf("Unable to parse address %q: %w", u, err))
continue continue
} }

View File

@ -52,7 +52,7 @@ func (u *Uwsgi) Gather(acc telegraf.Accumulator) error {
defer wg.Done() defer wg.Done()
n, err := url.Parse(s) n, err := url.Parse(s)
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("could not parse uWSGI Stats Server url '%s': %s", s, err.Error())) acc.AddError(fmt.Errorf("could not parse uWSGI Stats Server url %q: %w", s, err))
return return
} }
@ -97,13 +97,13 @@ func (u *Uwsgi) gatherServer(acc telegraf.Accumulator, address *url.URL) error {
r = resp.Body r = resp.Body
s.source = address.Host s.source = address.Host
default: default:
return fmt.Errorf("'%s' is not a supported scheme", address.Scheme) return fmt.Errorf("%q is not a supported scheme", address.Scheme)
} }
defer r.Close() defer r.Close()
if err := json.NewDecoder(r).Decode(&s); err != nil { if err := json.NewDecoder(r).Decode(&s); err != nil {
return fmt.Errorf("failed to decode json payload from '%s': %s", address.String(), err.Error()) return fmt.Errorf("failed to decode json payload from %q: %w", address.String(), err)
} }
u.gatherStatServer(acc, &s) u.gatherStatServer(acc, &s)

View File

@ -567,7 +567,7 @@ func PdhFormatError(msgId uint32) string {
buf := make([]uint16, 300) buf := make([]uint16, 300)
_, err := windows.FormatMessage(flags, uintptr(libpdhDll.Handle), msgId, 0, buf, nil) _, err := windows.FormatMessage(flags, uintptr(libpdhDll.Handle), msgId, 0, buf, nil)
if err == nil { if err == nil {
return fmt.Sprintf("%s", UTF16PtrToString(&buf[0])) return UTF16PtrToString(&buf[0])
} }
return fmt.Sprintf("(pdhErr=%d) %s", msgId, err.Error()) return fmt.Sprintf("(pdhErr=%d) %s", msgId, err.Error())
} }

View File

@ -357,7 +357,7 @@ func (m *WinPerfCounters) ParseConfig() error {
} }
for _, counter := range PerfObject.Counters { for _, counter := range PerfObject.Counters {
if len(PerfObject.Instances) == 0 { if len(PerfObject.Instances) == 0 {
m.Log.Warnf("Missing 'Instances' param for object '%s'\n", PerfObject.ObjectName) m.Log.Warnf("Missing 'Instances' param for object %q", PerfObject.ObjectName)
} }
for _, instance := range PerfObject.Instances { for _, instance := range PerfObject.Instances {
objectname := PerfObject.ObjectName objectname := PerfObject.ObjectName
@ -367,7 +367,7 @@ func (m *WinPerfCounters) ParseConfig() error {
err := m.AddItem(counterPath, computer, objectname, instance, counter, PerfObject.Measurement, PerfObject.IncludeTotal, PerfObject.UseRawValues) err := m.AddItem(counterPath, computer, objectname, instance, counter, PerfObject.Measurement, PerfObject.IncludeTotal, PerfObject.UseRawValues)
if err != nil { if err != nil {
if PerfObject.FailOnMissing || PerfObject.WarnOnMissing { if PerfObject.FailOnMissing || PerfObject.WarnOnMissing {
m.Log.Errorf("invalid counterPath: '%s'. Error: %s\n", counterPath, err.Error()) m.Log.Errorf("invalid counterPath %q: %s", counterPath, err.Error())
} }
if PerfObject.FailOnMissing { if PerfObject.FailOnMissing {
return err return err
@ -440,7 +440,7 @@ func (m *WinPerfCounters) Gather(acc telegraf.Accumulator) error {
err := m.gatherComputerCounters(hostInfo, acc) err := m.gatherComputerCounters(hostInfo, acc)
m.Log.Debugf("gathering from %s finished in %.3fs", hostInfo.computer, time.Since(start)) m.Log.Debugf("gathering from %s finished in %.3fs", hostInfo.computer, time.Since(start))
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("error during collecting data on host '%s': %s", hostInfo.computer, err.Error())) acc.AddError(fmt.Errorf("error during collecting data on host %q: %w", hostInfo.computer, err))
} }
wg.Done() wg.Done()
}(hostCounterInfo) }(hostCounterInfo)

View File

@ -26,7 +26,7 @@ type ServiceErr struct {
} }
func (e *ServiceErr) Error() string { func (e *ServiceErr) Error() string {
return fmt.Sprintf("%s: '%s': %v", e.Message, e.Service, e.Err) return fmt.Sprintf("%s: %q: %v", e.Message, e.Service, e.Err)
} }
func IsPermission(err error) bool { func IsPermission(err error) bool {

View File

@ -99,7 +99,7 @@ func (c *X509Cert) Gather(acc telegraf.Accumulator) error {
for _, location := range collectedUrls { for _, location := range collectedUrls {
certs, ocspresp, err := c.getCert(location, time.Duration(c.Timeout)) certs, ocspresp, err := c.getCert(location, time.Duration(c.Timeout))
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("cannot get SSL cert '%s': %s", location, err.Error())) acc.AddError(fmt.Errorf("cannot get SSL cert %q: %w", location, err))
} }
// Add all returned certs to the pool of intermediates except for // Add all returned certs to the pool of intermediates except for
@ -446,7 +446,7 @@ func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certifica
return certs, &ocspresp, nil return certs, &ocspresp, nil
default: default:
return nil, nil, fmt.Errorf("unsupported scheme '%s' in location %s", u.Scheme, u.String()) return nil, nil, fmt.Errorf("unsupported scheme %q in location %s", u.Scheme, u.String())
} }
} }

View File

@ -468,7 +468,7 @@ func assertMapContains(t *testing.T, expected, actual map[string]string) {
for k, v := range expected { for k, v := range expected {
av, ok := actual[k] av, ok := actual[k]
require.True(t, ok, "Actual map does not contain a value for key '%s'", k) require.True(t, ok, "Actual map does not contain a value for key %q", k)
require.Equal(t, v, av, "The expected value for key '%s' is '%s' but the actual value is '%s", k, v, av) require.Equal(t, v, av, "The expected value for key %q is %q but the actual value is %q", k, v, av)
} }
} }

View File

@ -127,11 +127,11 @@ func (c *CloudWatchLogs) Init() error {
c.logDatKey = lsSplitArray[0] c.logDatKey = lsSplitArray[0]
c.logDataSource = lsSplitArray[1] c.logDataSource = lsSplitArray[1]
c.Log.Debugf("Log data: key '%s', source '%s'...", c.logDatKey, c.logDataSource) c.Log.Debugf("Log data: key %q, source %q...", c.logDatKey, c.logDataSource)
if c.lsSource == "" { if c.lsSource == "" {
c.lsSource = c.LogStream c.lsSource = c.LogStream
c.Log.Debugf("Log stream '%s'...", c.lsSource) c.Log.Debugf("Log stream %q...", c.lsSource)
} }
return nil return nil

View File

@ -426,7 +426,7 @@ func (a *Elasticsearch) GetIndexName(indexName string, eventTime time.Time, tagK
if value, ok := metricTags[key]; ok { if value, ok := metricTags[key]; ok {
tagValues = append(tagValues, value) tagValues = append(tagValues, value)
} else { } else {
a.Log.Debugf("Tag '%s' not found, using '%s' on index name instead\n", key, a.DefaultTagValue) a.Log.Debugf("Tag %q not found, using %q on index name instead\n", key, a.DefaultTagValue)
tagValues = append(tagValues, a.DefaultTagValue) tagValues = append(tagValues, a.DefaultTagValue)
} }
} }

View File

@ -133,7 +133,7 @@ func (k *KinesisOutput) getPartitionKey(metric telegraf.Metric) string {
// Default partition name if default is not set // Default partition name if default is not set
return "telegraf" return "telegraf"
default: default:
k.Log.Errorf("You have configured a Partition method of '%s' which is not supported", k.Partition.Method) k.Log.Errorf("You have configured a Partition method of %q which is not supported", k.Partition.Method)
} }
} }
if k.RandomPartitionKey { if k.RandomPartitionKey {

View File

@ -401,11 +401,11 @@ func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, db dbh, tableS
if p.TagsAsForeignKeys { if p.TagsAsForeignKeys {
if err = p.writeTagTable(ctx, db, tableSource); err != nil { if err = p.writeTagTable(ctx, db, tableSource); err != nil {
if p.ForeignTagConstraint { if p.ForeignTagConstraint {
return fmt.Errorf("writing to tag table '%s': %w", tableSource.Name()+p.TagTableSuffix, err) return fmt.Errorf("writing to tag table %q: %w", tableSource.Name()+p.TagTableSuffix, err)
} }
// log and continue. As the admin can correct the issue, and tags don't change over time, they can be // log and continue. As the admin can correct the issue, and tags don't change over time, they can be
// added from future metrics after issue is corrected. // added from future metrics after issue is corrected.
p.Logger.Errorf("writing to tag table %q: %s", tableSource.Name()+p.TagTableSuffix, err) p.Logger.Errorf("writing to tag table %q: %s", tableSource.Name()+p.TagTableSuffix, err.Error())
} }
} }

View File

@ -96,11 +96,11 @@ func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *Tabl
colDefs := make([]string, 0, len(missingCols)) colDefs := make([]string, 0, len(missingCols))
for _, col := range missingCols { for _, col := range missingCols {
if err := rowSource.DropColumn(col); err != nil { if err := rowSource.DropColumn(col); err != nil {
return fmt.Errorf("metric/table mismatch: Unable to omit field/column from \"%s\": %w", tagTable.name, err) return fmt.Errorf("metric/table mismatch: Unable to omit field/column from %q: %w", tagTable.name, err)
} }
colDefs = append(colDefs, col.Name+" "+col.Type) colDefs = append(colDefs, col.Name+" "+col.Type)
} }
tm.Logger.Errorf("table '%s' is missing tag columns (dropping metrics): %s", tm.Logger.Errorf("table %q is missing tag columns (dropping metrics): %s",
tagTable.name, tagTable.name,
strings.Join(colDefs, ", ")) strings.Join(colDefs, ", "))
} }
@ -127,11 +127,11 @@ func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *Tabl
colDefs := make([]string, 0, len(missingCols)) colDefs := make([]string, 0, len(missingCols))
for _, col := range missingCols { for _, col := range missingCols {
if err := rowSource.DropColumn(col); err != nil { if err := rowSource.DropColumn(col); err != nil {
return fmt.Errorf("metric/table mismatch: Unable to omit field/column from \"%s\": %w", metricTable.name, err) return fmt.Errorf("metric/table mismatch: Unable to omit field/column from %q: %w", metricTable.name, err)
} }
colDefs = append(colDefs, col.Name+" "+col.Type) colDefs = append(colDefs, col.Name+" "+col.Type)
} }
tm.Logger.Errorf("table '%s' is missing columns (omitting fields): %s", tm.Logger.Errorf("table %q is missing columns (omitting fields): %s",
metricTable.name, metricTable.name,
strings.Join(colDefs, ", ")) strings.Join(colDefs, ", "))
} }
@ -187,9 +187,9 @@ func (tm *TableManager) EnsureStructure(
} }
if col.Role == utils.TagColType { if col.Role == utils.TagColType {
return nil, fmt.Errorf("column name too long: \"%s\"", col.Name) return nil, fmt.Errorf("column name too long: %q", col.Name)
} }
tm.Postgresql.Logger.Errorf("column name too long: \"%s\"", col.Name) tm.Postgresql.Logger.Errorf("column name too long: %q", col.Name)
invalidColumns = append(invalidColumns, col) invalidColumns = append(invalidColumns, col)
} }
@ -370,7 +370,7 @@ func (tm *TableManager) update(ctx context.Context,
return err return err
} }
if _, err := tx.Exec(ctx, string(sql)); err != nil { if _, err := tx.Exec(ctx, string(sql)); err != nil {
return fmt.Errorf("executing `%s`: %w", sql, err) return fmt.Errorf("executing %q: %w", sql, err)
} }
} }

View File

@ -196,9 +196,9 @@ func (tsrc *TableSource) DropColumn(col utils.Column) error {
case utils.FieldColType: case utils.FieldColType:
return tsrc.dropFieldColumn(col) return tsrc.dropFieldColumn(col)
case utils.TimeColType, utils.TagsIDColType: case utils.TimeColType, utils.TagsIDColType:
return fmt.Errorf("critical column \"%s\"", col.Name) return fmt.Errorf("critical column %q", col.Name)
default: default:
return fmt.Errorf("internal error: unknown column \"%s\"", col.Name) return fmt.Errorf("internal error: unknown column %q", col.Name)
} }
} }

View File

@ -130,41 +130,41 @@ func (t *Timestream) Connect() error {
} }
if t.MappingMode != MappingModeSingleTable && t.MappingMode != MappingModeMultiTable { if t.MappingMode != MappingModeSingleTable && t.MappingMode != MappingModeMultiTable {
return fmt.Errorf("correct MappingMode key values are: '%s', '%s'", return fmt.Errorf("correct MappingMode key values are: %q, %q",
MappingModeSingleTable, MappingModeMultiTable) MappingModeSingleTable, MappingModeMultiTable)
} }
if t.MappingMode == MappingModeSingleTable { if t.MappingMode == MappingModeSingleTable {
if t.SingleTableName == "" { if t.SingleTableName == "" {
return fmt.Errorf("in '%s' mapping mode, SingleTableName key is required", MappingModeSingleTable) return fmt.Errorf("in %q mapping mode, SingleTableName key is required", MappingModeSingleTable)
} }
if t.SingleTableDimensionNameForTelegrafMeasurementName == "" && !t.UseMultiMeasureRecords { if t.SingleTableDimensionNameForTelegrafMeasurementName == "" && !t.UseMultiMeasureRecords {
return fmt.Errorf("in '%s' mapping mode, SingleTableDimensionNameForTelegrafMeasurementName key is required", return fmt.Errorf("in %q mapping mode, SingleTableDimensionNameForTelegrafMeasurementName key is required",
MappingModeSingleTable) MappingModeSingleTable)
} }
// When using MappingModeSingleTable with UseMultiMeasureRecords enabled, // When using MappingModeSingleTable with UseMultiMeasureRecords enabled,
// measurementName ( from line protocol ) is mapped to multiMeasure name in timestream. // measurementName ( from line protocol ) is mapped to multiMeasure name in timestream.
if t.UseMultiMeasureRecords && t.MeasureNameForMultiMeasureRecords != "" { if t.UseMultiMeasureRecords && t.MeasureNameForMultiMeasureRecords != "" {
return fmt.Errorf("in '%s' mapping mode, with multi-measure enabled, key MeasureNameForMultiMeasureRecords is invalid", MappingModeMultiTable) return fmt.Errorf("in %q mapping mode, with multi-measure enabled, key MeasureNameForMultiMeasureRecords is invalid", MappingModeMultiTable)
} }
} }
if t.MappingMode == MappingModeMultiTable { if t.MappingMode == MappingModeMultiTable {
if t.SingleTableName != "" { if t.SingleTableName != "" {
return fmt.Errorf("in '%s' mapping mode, do not specify SingleTableName key", MappingModeMultiTable) return fmt.Errorf("in %q mapping mode, do not specify SingleTableName key", MappingModeMultiTable)
} }
if t.SingleTableDimensionNameForTelegrafMeasurementName != "" { if t.SingleTableDimensionNameForTelegrafMeasurementName != "" {
return fmt.Errorf("in '%s' mapping mode, do not specify SingleTableDimensionNameForTelegrafMeasurementName key", MappingModeMultiTable) return fmt.Errorf("in %q mapping mode, do not specify SingleTableDimensionNameForTelegrafMeasurementName key", MappingModeMultiTable)
} }
// When using MappingModeMultiTable ( data is ingested to multiple tables ) with // When using MappingModeMultiTable ( data is ingested to multiple tables ) with
// UseMultiMeasureRecords enabled, measurementName is used as tableName in timestream and // UseMultiMeasureRecords enabled, measurementName is used as tableName in timestream and
// we require MeasureNameForMultiMeasureRecords to be configured. // we require MeasureNameForMultiMeasureRecords to be configured.
if t.UseMultiMeasureRecords && t.MeasureNameForMultiMeasureRecords == "" { if t.UseMultiMeasureRecords && t.MeasureNameForMultiMeasureRecords == "" {
return fmt.Errorf("in '%s' mapping mode, with multi-measure enabled, key MeasureNameForMultiMeasureRecords is required", MappingModeMultiTable) return fmt.Errorf("in %q mapping mode, with multi-measure enabled, key MeasureNameForMultiMeasureRecords is required", MappingModeMultiTable)
} }
} }
@ -182,7 +182,7 @@ func (t *Timestream) Connect() error {
t.MaxWriteGoRoutinesCount = MaxWriteRoutinesDefault t.MaxWriteGoRoutinesCount = MaxWriteRoutinesDefault
} }
t.Log.Infof("Constructing Timestream client for '%s' mode", t.MappingMode) t.Log.Infof("Constructing Timestream client for %q mode", t.MappingMode)
svc, err := WriteFactory(&t.CredentialConfig) svc, err := WriteFactory(&t.CredentialConfig)
if err != nil { if err != nil {
@ -190,17 +190,17 @@ func (t *Timestream) Connect() error {
} }
if t.DescribeDatabaseOnStart { if t.DescribeDatabaseOnStart {
t.Log.Infof("Describing database '%s' in region '%s'", t.DatabaseName, t.Region) t.Log.Infof("Describing database %q in region %q", t.DatabaseName, t.Region)
describeDatabaseInput := &timestreamwrite.DescribeDatabaseInput{ describeDatabaseInput := &timestreamwrite.DescribeDatabaseInput{
DatabaseName: aws.String(t.DatabaseName), DatabaseName: aws.String(t.DatabaseName),
} }
describeDatabaseOutput, err := svc.DescribeDatabase(context.Background(), describeDatabaseInput) describeDatabaseOutput, err := svc.DescribeDatabase(context.Background(), describeDatabaseInput)
if err != nil { if err != nil {
t.Log.Errorf("Couldn't describe database '%s'. Check error, fix permissions, connectivity, create database.", t.DatabaseName) t.Log.Errorf("Couldn't describe database %q. Check error, fix permissions, connectivity, create database.", t.DatabaseName)
return err return err
} }
t.Log.Infof("Describe database '%s' returned: '%s'.", t.DatabaseName, describeDatabaseOutput) t.Log.Infof("Describe database %q returned %q.", t.DatabaseName, describeDatabaseOutput)
} }
t.svc = svc t.svc = svc
@ -279,7 +279,7 @@ func (t *Timestream) writeToTimestream(writeRecordsInput *timestreamwrite.WriteR
var notFound *types.ResourceNotFoundException var notFound *types.ResourceNotFoundException
if errors.As(err, &notFound) { if errors.As(err, &notFound) {
if resourceNotFoundRetry { if resourceNotFoundRetry {
t.Log.Warnf("Failed to write to Timestream database '%s' table '%s'. Error: '%s'", t.Log.Warnf("Failed to write to Timestream database %q table %q: %s",
t.DatabaseName, *writeRecordsInput.TableName, notFound) t.DatabaseName, *writeRecordsInput.TableName, notFound)
return t.createTableAndRetry(writeRecordsInput) return t.createTableAndRetry(writeRecordsInput)
} }
@ -293,27 +293,27 @@ func (t *Timestream) writeToTimestream(writeRecordsInput *timestreamwrite.WriteR
if errors.As(err, &rejected) { if errors.As(err, &rejected) {
t.logWriteToTimestreamError(err, writeRecordsInput.TableName) t.logWriteToTimestreamError(err, writeRecordsInput.TableName)
for _, rr := range rejected.RejectedRecords { for _, rr := range rejected.RejectedRecords {
t.Log.Errorf("reject reason: '%s', record index: '%d'", aws.ToString(rr.Reason), rr.RecordIndex) t.Log.Errorf("reject reason: %q, record index: '%d'", aws.ToString(rr.Reason), rr.RecordIndex)
} }
return nil return nil
} }
var throttling *types.ThrottlingException var throttling *types.ThrottlingException
if errors.As(err, &throttling) { if errors.As(err, &throttling) {
return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. Error: %w", return fmt.Errorf("unable to write to Timestream database %q table %q: %w",
t.DatabaseName, *writeRecordsInput.TableName, throttling) t.DatabaseName, *writeRecordsInput.TableName, throttling)
} }
var internal *types.InternalServerException var internal *types.InternalServerException
if errors.As(err, &internal) { if errors.As(err, &internal) {
return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. Error: %w", return fmt.Errorf("unable to write to Timestream database %q table %q: %w",
t.DatabaseName, *writeRecordsInput.TableName, internal) t.DatabaseName, *writeRecordsInput.TableName, internal)
} }
var operation *smithy.OperationError var operation *smithy.OperationError
if !errors.As(err, &operation) { if !errors.As(err, &operation) {
// Retry other, non-aws errors. // Retry other, non-aws errors.
return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. Error: %w", return fmt.Errorf("unable to write to Timestream database %q table %q: %w",
t.DatabaseName, *writeRecordsInput.TableName, err) t.DatabaseName, *writeRecordsInput.TableName, err)
} }
t.logWriteToTimestreamError(err, writeRecordsInput.TableName) t.logWriteToTimestreamError(err, writeRecordsInput.TableName)
@ -322,25 +322,25 @@ func (t *Timestream) writeToTimestream(writeRecordsInput *timestreamwrite.WriteR
} }
func (t *Timestream) logWriteToTimestreamError(err error, tableName *string) { func (t *Timestream) logWriteToTimestreamError(err error, tableName *string) {
t.Log.Errorf("Failed to write to Timestream database '%s' table '%s'. Skipping metric! Error: '%s'", t.Log.Errorf("Failed to write to Timestream database %q table %q: %s. Skipping metric!",
t.DatabaseName, *tableName, err) t.DatabaseName, *tableName, err.Error())
} }
func (t *Timestream) createTableAndRetry(writeRecordsInput *timestreamwrite.WriteRecordsInput) error { func (t *Timestream) createTableAndRetry(writeRecordsInput *timestreamwrite.WriteRecordsInput) error {
if t.CreateTableIfNotExists { if t.CreateTableIfNotExists {
t.Log.Infof( t.Log.Infof(
"Trying to create table '%s' in database '%s', as 'CreateTableIfNotExists' config key is 'true'.", "Trying to create table %q in database %q, as 'CreateTableIfNotExists' config key is 'true'.",
*writeRecordsInput.TableName, *writeRecordsInput.TableName,
t.DatabaseName, t.DatabaseName,
) )
err := t.createTable(writeRecordsInput.TableName) err := t.createTable(writeRecordsInput.TableName)
if err == nil { if err == nil {
t.Log.Infof("Table '%s' in database '%s' created. Retrying writing.", *writeRecordsInput.TableName, t.DatabaseName) t.Log.Infof("Table %q in database %q created. Retrying writing.", *writeRecordsInput.TableName, t.DatabaseName)
return t.writeToTimestream(writeRecordsInput, false) return t.writeToTimestream(writeRecordsInput, false)
} }
t.Log.Errorf("Failed to create table '%s' in database '%s': %s. Skipping metric!", *writeRecordsInput.TableName, t.DatabaseName, err) t.Log.Errorf("Failed to create table %q in database %q: %s. Skipping metric!", *writeRecordsInput.TableName, t.DatabaseName, err.Error())
} else { } else {
t.Log.Errorf("Not trying to create table '%s' in database '%s', as 'CreateTableIfNotExists' config key is 'false'. Skipping metric!", t.Log.Errorf("Not trying to create table %q in database %q, as 'CreateTableIfNotExists' config key is 'false'. Skipping metric!",
*writeRecordsInput.TableName, t.DatabaseName) *writeRecordsInput.TableName, t.DatabaseName)
} }
return nil return nil
@ -469,7 +469,7 @@ func (t *Timestream) buildSingleWriteRecords(point telegraf.Metric) []types.Reco
for fieldName, fieldValue := range point.Fields() { for fieldName, fieldValue := range point.Fields() {
stringFieldValue, stringFieldValueType, ok := convertValue(fieldValue) stringFieldValue, stringFieldValueType, ok := convertValue(fieldValue)
if !ok { if !ok {
t.Log.Warnf("Skipping field '%s'. The type '%s' is not supported in Timestream as MeasureValue. "+ t.Log.Warnf("Skipping field %q. The type %q is not supported in Timestream as MeasureValue. "+
"Supported values are: [int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool]", "Supported values are: [int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool]",
fieldName, reflect.TypeOf(fieldValue)) fieldName, reflect.TypeOf(fieldValue))
continue continue
@ -503,7 +503,7 @@ func (t *Timestream) buildMultiMeasureWriteRecords(point telegraf.Metric) []type
for fieldName, fieldValue := range point.Fields() { for fieldName, fieldValue := range point.Fields() {
stringFieldValue, stringFieldValueType, ok := convertValue(fieldValue) stringFieldValue, stringFieldValueType, ok := convertValue(fieldValue)
if !ok { if !ok {
t.Log.Warnf("Skipping field '%s'. The type '%s' is not supported in Timestream as MeasureValue. "+ t.Log.Warnf("Skipping field %q. The type %q is not supported in Timestream as MeasureValue. "+
"Supported values are: [int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool]", "Supported values are: [int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool]",
fieldName, reflect.TypeOf(fieldValue)) fieldName, reflect.TypeOf(fieldValue))
continue continue

View File

@ -59,7 +59,7 @@ var errInvalidURL = errors.New("invalid websocket URL")
// Init the output plugin. // Init the output plugin.
func (w *WebSocket) Init() error { func (w *WebSocket) Init() error {
if parsedURL, err := url.Parse(w.URL); err != nil || (parsedURL.Scheme != "ws" && parsedURL.Scheme != "wss") { if parsedURL, err := url.Parse(w.URL); err != nil || (parsedURL.Scheme != "ws" && parsedURL.Scheme != "wss") {
return fmt.Errorf("%w: \"%s\"", errInvalidURL, w.URL) return fmt.Errorf("%w: %q", errInvalidURL, w.URL)
} }
return nil return nil
} }

View File

@ -103,7 +103,7 @@ func (p *Parser) readTags(buf []byte) map[string]string {
var tags map[string]string var tags map[string]string
err := json.Unmarshal(tagsBytes, &tags) err := json.Unmarshal(tagsBytes, &tags)
if err != nil { if err != nil {
p.Log.Warnf("Failed to parse tags from JSON path '%s': %s\n", p.TagsPath, err) p.Log.Warnf("Failed to parse tags from JSON path %q: %s", p.TagsPath, err.Error())
} else if len(tags) > 0 { } else if len(tags) > 0 {
return tags return tags
} }

View File

@ -37,7 +37,7 @@ func (c *Config) validateTemplates() error {
} }
if len(parts) > 3 { if len(parts) > 3 {
return fmt.Errorf("invalid template format: '%s'", t) return fmt.Errorf("invalid template format: %q", t)
} }
template := t template := t
@ -66,7 +66,7 @@ func (c *Config) validateTemplates() error {
// Prevent duplicate filters in the config // Prevent duplicate filters in the config
if _, ok := filters[filter]; ok { if _, ok := filters[filter]; ok {
return fmt.Errorf("duplicate filter '%s' found at position: %d", filter, i) return fmt.Errorf("duplicate filter %q found at position: %d", filter, i)
} }
filters[filter] = struct{}{} filters[filter] = struct{}{}
@ -98,7 +98,7 @@ func (c *Config) validateTemplate(template string) error {
} }
if !hasMeasurement { if !hasMeasurement {
return fmt.Errorf("no measurement in template `%s`", template) return fmt.Errorf("no measurement in template %q", template)
} }
return nil return nil
@ -120,11 +120,11 @@ func (c *Config) validateFilter(filter string) error {
func (c *Config) validateTag(keyValue string) error { func (c *Config) validateTag(keyValue string) error {
parts := strings.Split(keyValue, "=") parts := strings.Split(keyValue, "=")
if len(parts) != 2 { if len(parts) != 2 {
return fmt.Errorf("invalid template tags: '%s'", keyValue) return fmt.Errorf("invalid template tags: %q", keyValue)
} }
if parts[0] == "" || parts[1] == "" { if parts[0] == "" || parts[1] == "" {
return fmt.Errorf("invalid template tags: %s'", keyValue) return fmt.Errorf("invalid template tags: %q", keyValue)
} }
return nil return nil

Some files were not shown because too many files have changed in this diff Show More