diff --git a/agent/agent.go b/agent/agent.go index 442b3a420..3add3bb3d 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -710,7 +710,7 @@ func (a *Agent) connectOutput(ctx context.Context, output *models.RunningOutput) err := output.Output.Connect() if err != nil { log.Printf("E! [agent] Failed to connect to [%s], retrying in 15s, "+ - "error was '%s'", output.LogName(), err) + "error was %q", output.LogName(), err) err := internal.SleepContext(ctx, 15*time.Second) if err != nil { diff --git a/cmd/telegraf/printer.go b/cmd/telegraf/printer.go index 1495e42b9..d7ea856ef 100644 --- a/cmd/telegraf/printer.go +++ b/cmd/telegraf/printer.go @@ -364,7 +364,7 @@ func printConfig(name string, p telegraf.PluginDescriber, op string, commented b if di.RemovalIn != "" { removalNote = " and will be removed in " + di.RemovalIn } - outputBuffer.Write([]byte(fmt.Sprintf("\n%s ## DEPRECATED: The '%s' plugin is deprecated in version %s%s, %s.", + outputBuffer.Write([]byte(fmt.Sprintf("\n%s ## DEPRECATED: The %q plugin is deprecated in version %s%s, %s.", comment, name, di.Since, removalNote, di.Notice))) } diff --git a/config/config.go b/config/config.go index 277a0ea29..8a3bb0881 100644 --- a/config/config.go +++ b/config/config.go @@ -399,7 +399,7 @@ func getDefaultConfigPath() ([]string, error) { if _, err := os.Stat(etcfolder); err == nil { files, err := WalkDirectory(etcfolder) if err != nil { - log.Printf("W! unable walk '%s': %s", etcfolder, err) + log.Printf("W! unable walk %q: %s", etcfolder, err) } for _, file := range files { log.Printf("I! 
Using config file: %s", file) diff --git a/internal/internal_test.go b/internal/internal_test.go index d8ff21d12..3af4ea469 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -37,9 +37,9 @@ var tests = []SnakeTest{ func TestSnakeCase(t *testing.T) { for _, test := range tests { - if SnakeCase(test.input) != test.output { - t.Errorf(`SnakeCase("%s"), wanted "%s", got \%s"`, test.input, test.output, SnakeCase(test.input)) - } + t.Run(test.input, func(t *testing.T) { + require.Equal(t, test.output, SnakeCase(test.input)) + }) } } diff --git a/internal/rotate/file_writer.go b/internal/rotate/file_writer.go index 7cfde0269..ddbe3bbe9 100644 --- a/internal/rotate/file_writer.go +++ b/internal/rotate/file_writer.go @@ -131,7 +131,7 @@ func (w *FileWriter) rotateIfNeeded() error { (w.maxSizeInBytes > 0 && w.bytesWritten >= w.maxSizeInBytes) { if err := w.rotate(); err != nil { //Ignore rotation errors and keep the log open - fmt.Printf("unable to rotate the file '%s', %s", w.filename, err.Error()) + fmt.Printf("unable to rotate the file %q, %s", w.filename, err.Error()) } return w.openCurrent() } diff --git a/plugins/aggregators/histogram/histogram_test.go b/plugins/aggregators/histogram/histogram_test.go index 9363e142c..9ba9fc9e4 100644 --- a/plugins/aggregators/histogram/histogram_test.go +++ b/plugins/aggregators/histogram/histogram_test.go @@ -526,5 +526,5 @@ func assertContainsTaggedField(t *testing.T, acc *testutil.Accumulator, metricNa return } - require.Fail(t, fmt.Sprintf("unknown measurement '%s' with tags: %v, fields: %v", metricName, tags, fields)) + require.Fail(t, fmt.Sprintf("unknown measurement %q with tags: %v, fields: %v", metricName, tags, fields)) } diff --git a/plugins/common/jolokia2/client.go b/plugins/common/jolokia2/client.go index 7b377c30a..c9ba20388 100644 --- a/plugins/common/jolokia2/client.go +++ b/plugins/common/jolokia2/client.go @@ -134,7 +134,7 @@ func (c *Client) read(requests []ReadRequest) ([]ReadResponse, 
error) { req, err := http.NewRequest("POST", requestURL, bytes.NewBuffer(requestBody)) if err != nil { //err is not contained in returned error - it may contain sensitive data (password) which should not be logged - return nil, fmt.Errorf("unable to create new request for: '%s'", c.URL) + return nil, fmt.Errorf("unable to create new request for: %q", c.URL) } req.Header.Add("Content-type", "application/json") @@ -149,7 +149,7 @@ func (c *Client) read(requests []ReadRequest) ([]ReadResponse, error) { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", + return nil, fmt.Errorf("response from url %q has status code %d (%s), expected %d (%s)", c.URL, resp.StatusCode, http.StatusText(resp.StatusCode), http.StatusOK, http.StatusText(http.StatusOK)) } diff --git a/plugins/common/opcua/client.go b/plugins/common/opcua/client.go index aecb0d9bb..22f20c845 100644 --- a/plugins/common/opcua/client.go +++ b/plugins/common/opcua/client.go @@ -50,13 +50,13 @@ func (o *OpcUAClientConfig) validateEndpoint() error { switch o.SecurityPolicy { case "None", "Basic128Rsa15", "Basic256", "Basic256Sha256", "auto": default: - return fmt.Errorf("invalid security type '%s' in '%s'", o.SecurityPolicy, o.Endpoint) + return fmt.Errorf("invalid security type %q in %q", o.SecurityPolicy, o.Endpoint) } switch o.SecurityMode { case "None", "Sign", "SignAndEncrypt", "auto": default: - return fmt.Errorf("invalid security type '%s' in '%s'", o.SecurityMode, o.Endpoint) + return fmt.Errorf("invalid security type %q in %q", o.SecurityMode, o.Endpoint) } return nil diff --git a/plugins/common/opcua/input/input_client.go b/plugins/common/opcua/input/input_client.go index 4fd60a966..3e86aabe7 100644 --- a/plugins/common/opcua/input/input_client.go +++ b/plugins/common/opcua/input/input_client.go @@ -236,7 +236,7 @@ func tagsSliceToMap(tags [][]string) (map[string]string, error) { func 
validateNodeToAdd(existing map[metricParts]struct{}, nmm *NodeMetricMapping) error { if nmm.Tag.FieldName == "" { - return fmt.Errorf("empty name in '%s'", nmm.Tag.FieldName) + return fmt.Errorf("empty name in %q", nmm.Tag.FieldName) } if len(nmm.Tag.Namespace) == 0 { @@ -249,19 +249,19 @@ func validateNodeToAdd(existing map[metricParts]struct{}, nmm *NodeMetricMapping mp := newMP(nmm) if _, exists := existing[mp]; exists { - return fmt.Errorf("name '%s' is duplicated (metric name '%s', tags '%s')", + return fmt.Errorf("name %q is duplicated (metric name %q, tags %q)", mp.fieldName, mp.metricName, mp.tags) } switch nmm.Tag.IdentifierType { case "i": if _, err := strconv.Atoi(nmm.Tag.Identifier); err != nil { - return fmt.Errorf("identifier type '%s' does not match the type of identifier '%s'", nmm.Tag.IdentifierType, nmm.Tag.Identifier) + return fmt.Errorf("identifier type %q does not match the type of identifier %q", nmm.Tag.IdentifierType, nmm.Tag.Identifier) } case "s", "g", "b": // Valid identifier type - do nothing. 
default: - return fmt.Errorf("invalid identifier type '%s' in '%s'", nmm.Tag.IdentifierType, nmm.Tag.FieldName) + return fmt.Errorf("invalid identifier type %q in %q", nmm.Tag.IdentifierType, nmm.Tag.FieldName) } existing[mp] = struct{}{} @@ -382,7 +382,7 @@ func (o *OpcUAInputClient) MetricForNode(nodeIdx int) telegraf.Metric { fields["Quality"] = strings.TrimSpace(fmt.Sprint(o.LastReceivedData[nodeIdx].Quality)) if !o.StatusCodeOK(o.LastReceivedData[nodeIdx].Quality) { mp := newMP(nmm) - o.Log.Debugf("status not OK for node '%s'(metric name '%s', tags '%s')", + o.Log.Debugf("status not OK for node %q(metric name %q, tags %q)", mp.fieldName, mp.metricName, mp.tags) } diff --git a/plugins/common/opcua/input/input_client_test.go b/plugins/common/opcua/input/input_client_test.go index 6d7e93c89..dac2ab5e7 100644 --- a/plugins/common/opcua/input/input_client_test.go +++ b/plugins/common/opcua/input/input_client_test.go @@ -1,7 +1,11 @@ package input import ( + "errors" "fmt" + "testing" + "time" + "github.com/gopcua/opcua/ua" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" @@ -9,8 +13,6 @@ import ( "github.com/influxdata/telegraf/plugins/common/opcua" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" - "testing" - "time" ) func TestTagsSliceToMap(t *testing.T) { @@ -74,7 +76,7 @@ func TestValidateOPCTags(t *testing.T) { }, }, }, - fmt.Errorf("name 'fn' is duplicated (metric name 'mn', tags 't1=v1, t2=v2')"), + errors.New(`name "fn" is duplicated (metric name "mn", tags "t1=v1, t2=v2")`), }, { "empty tag value not allowed", @@ -352,7 +354,7 @@ func TestValidateNodeToAdd(t *testing.T) { }, map[string]string{}) return nmm }(), - err: fmt.Errorf("empty name in ''"), + err: errors.New(`empty name in ""`), }, { name: "empty namespace not allowed", @@ -382,7 +384,7 @@ func TestValidateNodeToAdd(t *testing.T) { }, map[string]string{}) return nmm }(), - err: fmt.Errorf("invalid identifier type '' in 'f'"), + err: 
errors.New(`invalid identifier type "" in "f"`), }, { name: "invalid identifier type not allowed", @@ -397,7 +399,7 @@ func TestValidateNodeToAdd(t *testing.T) { }, map[string]string{}) return nmm }(), - err: fmt.Errorf("invalid identifier type 'j' in 'f'"), + err: errors.New(`invalid identifier type "j" in "f"`), }, { name: "duplicate metric not allowed", @@ -414,7 +416,7 @@ func TestValidateNodeToAdd(t *testing.T) { }, map[string]string{}) return nmm }(), - err: fmt.Errorf("name 'f' is duplicated (metric name 'testmetric', tags 't1=v1, t2=v2')"), + err: errors.New(`name "f" is duplicated (metric name "testmetric", tags "t1=v1, t2=v2")`), }, { name: "identifier type mismatch", @@ -429,7 +431,7 @@ func TestValidateNodeToAdd(t *testing.T) { }, map[string]string{}) return nmm }(), - err: fmt.Errorf("identifier type 'i' does not match the type of identifier 'hf'"), + err: errors.New(`identifier type "i" does not match the type of identifier "hf"`), }, } diff --git a/plugins/common/shim/input.go b/plugins/common/shim/input.go index b325c1b46..4132d26a2 100644 --- a/plugins/common/shim/input.go +++ b/plugins/common/shim/input.go @@ -56,7 +56,7 @@ func (s *Shim) RunInput(pollInterval time.Duration) error { go func() { err := s.writeProcessedMetrics() if err != nil { - s.log.Warnf("%s", err) + s.log.Warn(err.Error()) } wg.Done() }() diff --git a/plugins/common/shim/processor.go b/plugins/common/shim/processor.go index 554642308..f9f16370d 100644 --- a/plugins/common/shim/processor.go +++ b/plugins/common/shim/processor.go @@ -47,7 +47,7 @@ func (s *Shim) RunProcessor() error { go func() { err := s.writeProcessedMetrics() if err != nil { - s.log.Warnf("%s", err) + s.log.Warn(err.Error()) } wg.Done() }() diff --git a/plugins/common/starlark/builtins.go b/plugins/common/starlark/builtins.go index 40d54720c..3845e1855 100644 --- a/plugins/common/starlark/builtins.go +++ b/plugins/common/starlark/builtins.go @@ -77,7 +77,7 @@ type builtinMethod func(b *starlark.Builtin, args 
starlark.Tuple, kwargs []starl func builtinAttr(recv starlark.Value, name string, methods map[string]builtinMethod) (starlark.Value, error) { method := methods[name] if method == nil { - return starlark.None, fmt.Errorf("no such method '%s'", name) + return starlark.None, fmt.Errorf("no such method %q", name) } // Allocate a closure over 'method'. diff --git a/plugins/common/starlark/metric.go b/plugins/common/starlark/metric.go index 989c34576..d96fd78ce 100644 --- a/plugins/common/starlark/metric.go +++ b/plugins/common/starlark/metric.go @@ -104,7 +104,7 @@ func (m *Metric) SetField(name string, value starlark.Value) error { return errors.New("cannot set fields") default: return starlark.NoSuchAttrError( - fmt.Sprintf("cannot assign to field '%s'", name)) + fmt.Sprintf("cannot assign to field %q", name)) } } diff --git a/plugins/common/tls/config.go b/plugins/common/tls/config.go index aea80b282..30de4587f 100644 --- a/plugins/common/tls/config.go +++ b/plugins/common/tls/config.go @@ -77,7 +77,7 @@ func (c *ClientConfig) TLSConfig() (*tls.Config, error) { case "freely": renegotiationMethod = tls.RenegotiateFreelyAsClient default: - return nil, fmt.Errorf("unrecognized renegotation method '%s', choose from: 'never', 'once', 'freely'", c.RenegotiationMethod) + return nil, fmt.Errorf("unrecognized renegotation method %q, choose from: 'never', 'once', 'freely'", c.RenegotiationMethod) } tlsConfig := &tls.Config{ diff --git a/plugins/inputs/azure_storage_queue/azure_storage_queue.go b/plugins/inputs/azure_storage_queue/azure_storage_queue.go index 50aa65c9e..d65a8fe7b 100644 --- a/plugins/inputs/azure_storage_queue/azure_storage_queue.go +++ b/plugins/inputs/azure_storage_queue/azure_storage_queue.go @@ -88,7 +88,7 @@ func (a *AzureStorageQueue) Gather(acc telegraf.Accumulator) error { ctx := context.TODO() for marker := (azqueue.Marker{}); marker.NotDone(); { - a.Log.Debugf("Listing queues of storage account '%s'", a.StorageAccountName) + a.Log.Debugf("Listing 
queues of storage account %q", a.StorageAccountName) queuesSegment, err := serviceURL.ListQueuesSegment(ctx, marker, azqueue.ListQueuesSegmentOptions{ Detail: azqueue.ListQueuesSegmentDetails{Metadata: false}, @@ -99,7 +99,7 @@ func (a *AzureStorageQueue) Gather(acc telegraf.Accumulator) error { marker = queuesSegment.NextMarker for _, queueItem := range queuesSegment.QueueItems { - a.Log.Debugf("Processing queue '%s' of storage account '%s'", queueItem.Name, a.StorageAccountName) + a.Log.Debugf("Processing queue %q of storage account %q", queueItem.Name, a.StorageAccountName) queueURL := serviceURL.NewQueueURL(queueItem.Name) properties, err := queueURL.GetProperties(ctx) if err != nil { diff --git a/plugins/inputs/bond/bond.go b/plugins/inputs/bond/bond.go index 3ccad44b0..c31265b3d 100644 --- a/plugins/inputs/bond/bond.go +++ b/plugins/inputs/bond/bond.go @@ -137,7 +137,7 @@ func (bond *Bond) gatherBondPart(bondName string, rawFile string, acc telegraf.A if err := scanner.Err(); err != nil { return err } - return fmt.Errorf("Couldn't find status info for '%s' ", bondName) + return fmt.Errorf("Couldn't find status info for %q", bondName) } func (bond *Bond) readSysFiles(bondDir string) (sysFiles, error) { diff --git a/plugins/inputs/cassandra/cassandra.go b/plugins/inputs/cassandra/cassandra.go index 1e1f4df96..5b79ad335 100644 --- a/plugins/inputs/cassandra/cassandra.go +++ b/plugins/inputs/cassandra/cassandra.go @@ -130,7 +130,7 @@ func (j javaMetric) addTagsFields(out map[string]interface{}) { } j.acc.AddFields(tokens["class"]+tokens["type"], fields, tags) } else { - j.acc.AddError(fmt.Errorf("missing key 'value' in '%s' output response: %v", j.metric, out)) + j.acc.AddError(fmt.Errorf("missing key 'value' in %q output response: %v", j.metric, out)) } } @@ -154,7 +154,7 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) { tokens["scope"] == "*") { valuesMap, ok := out["value"] if !ok { - c.acc.AddError(fmt.Errorf("missing key 'value' in '%s' 
output response: %v", c.metric, out)) + c.acc.AddError(fmt.Errorf("missing key 'value' in %q output response: %v", c.metric, out)) return } for k, v := range valuesMap.(map[string]interface{}) { @@ -163,7 +163,7 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) { } else { values, ok := out["value"] if !ok { - c.acc.AddError(fmt.Errorf("missing key 'value' in '%s' output response: %v", c.metric, out)) + c.acc.AddError(fmt.Errorf("missing key 'value' in %q output response: %v", c.metric, out)) return } addCassandraMetric(r.(map[string]interface{})["mbean"].(string), c, values.(map[string]interface{})) @@ -185,7 +185,7 @@ func (c *Cassandra) getAttr(requestURL *url.URL) (map[string]interface{}, error) // Process response if resp.StatusCode != http.StatusOK { - err = fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", + err = fmt.Errorf("response from url %q has status code %d (%s), expected %d (%s)", requestURL, resp.StatusCode, http.StatusText(resp.StatusCode), diff --git a/plugins/inputs/ceph/ceph_test.go b/plugins/inputs/ceph/ceph_test.go index 7009fab82..0ae40a6d4 100644 --- a/plugins/inputs/ceph/ceph_test.go +++ b/plugins/inputs/ceph/ceph_test.go @@ -177,7 +177,7 @@ func assertFoundSocket(t *testing.T, dir, sockType string, i int, sockets []*soc require.NoError(t, err) if s.socket == expected { found = true - require.Equal(t, s.sockType, sockType, "Unexpected socket type for '%s'", s) + require.Equal(t, s.sockType, sockType, "Unexpected socket type for %q", s) require.Equal(t, s.sockID, strconv.Itoa(i)) } } diff --git a/plugins/inputs/conntrack/conntrack.go b/plugins/inputs/conntrack/conntrack.go index 6846dec5b..cd622cd74 100644 --- a/plugins/inputs/conntrack/conntrack.go +++ b/plugins/inputs/conntrack/conntrack.go @@ -97,7 +97,7 @@ func (c *Conntrack) Gather(acc telegraf.Accumulator) error { fields[metricKey], err = strconv.ParseFloat(v, 64) if err != nil { acc.AddError(fmt.Errorf("failed to parse metric, 
expected number but "+ - " found '%s': %v", v, err)) + " found %q: %w", v, err)) } } } diff --git a/plugins/inputs/cpu/cpu_test.go b/plugins/inputs/cpu/cpu_test.go index 235c08f25..62fcd35c1 100644 --- a/plugins/inputs/cpu/cpu_test.go +++ b/plugins/inputs/cpu/cpu_test.go @@ -132,14 +132,14 @@ func assertContainsTaggedFloat( return } } else { - require.Fail(t, fmt.Sprintf("Measurement \"%s\" does not have type float64", measurement)) + require.Fail(t, fmt.Sprintf("Measurement %q does not have type float64", measurement)) } } } } } msg := fmt.Sprintf( - "Could not find measurement \"%s\" with requested tags within %f of %f, Actual: %f", + "Could not find measurement %q with requested tags within %f of %f, Actual: %f", measurement, delta, expectedValue, actualValue) require.Fail(t, msg) } diff --git a/plugins/inputs/disque/disque.go b/plugins/inputs/disque/disque.go index f9dc218d0..85fa39b8b 100644 --- a/plugins/inputs/disque/disque.go +++ b/plugins/inputs/disque/disque.go @@ -117,7 +117,7 @@ func (d *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error { return err } if line[0] != '+' { - return fmt.Errorf("%s", strings.TrimSpace(line)[1:]) + return errors.New(strings.TrimSpace(line)[1:]) } } } diff --git a/plugins/inputs/elasticsearch_query/aggregation_parser.go b/plugins/inputs/elasticsearch_query/aggregation_parser.go index c4dff05ee..352a3a257 100644 --- a/plugins/inputs/elasticsearch_query/aggregation_parser.go +++ b/plugins/inputs/elasticsearch_query/aggregation_parser.go @@ -67,12 +67,12 @@ func recurseResponse(acc telegraf.Accumulator, aggNameFunction map[string]string for _, aggName := range aggNames { aggFunction, found := aggNameFunction[aggName] if !found { - return m, fmt.Errorf("child aggregation function '%s' not found %v", aggName, aggNameFunction) + return m, fmt.Errorf("child aggregation function %q not found %v", aggName, aggNameFunction) } resp := getResponseAggregation(aggFunction, aggName, bucketResponse) if resp == nil { - 
return m, fmt.Errorf("child aggregation '%s' not found", aggName) + return m, fmt.Errorf("child aggregation %q not found", aggName) } switch resp := resp.(type) { diff --git a/plugins/inputs/elasticsearch_query/aggregation_query.go b/plugins/inputs/elasticsearch_query/aggregation_query.go index d53a48b04..1a69750ce 100644 --- a/plugins/inputs/elasticsearch_query/aggregation_query.go +++ b/plugins/inputs/elasticsearch_query/aggregation_query.go @@ -210,7 +210,7 @@ func getFunctionAggregation(function string, aggfield string) (elastic5.Aggregat case "max": agg = elastic5.NewMaxAggregation().Field(aggfield) default: - return nil, fmt.Errorf("aggregation function '%s' not supported", function) + return nil, fmt.Errorf("aggregation function %q not supported", function) } return agg, nil diff --git a/plugins/inputs/elasticsearch_query/elasticsearch_query.go b/plugins/inputs/elasticsearch_query/elasticsearch_query.go index bf74a18b0..10f2039e9 100644 --- a/plugins/inputs/elasticsearch_query/elasticsearch_query.go +++ b/plugins/inputs/elasticsearch_query/elasticsearch_query.go @@ -84,7 +84,7 @@ func (e *ElasticsearchQuery) Init() error { } err = e.initAggregation(ctx, agg, i) if err != nil { - e.Log.Errorf("%s", err) + e.Log.Error(err.Error()) return nil } } @@ -100,7 +100,7 @@ func (e *ElasticsearchQuery) initAggregation(ctx context.Context, agg esAggregat for _, metricField := range agg.MetricFields { if _, ok := agg.mapMetricFields[metricField]; !ok { - return fmt.Errorf("metric field '%s' not found on index '%s'", metricField, agg.Index) + return fmt.Errorf("metric field %q not found on index %q", metricField, agg.Index) } } diff --git a/plugins/inputs/ethtool/ethtool_linux.go b/plugins/inputs/ethtool/ethtool_linux.go index bbdf84f06..06906ff1f 100644 --- a/plugins/inputs/ethtool/ethtool_linux.go +++ b/plugins/inputs/ethtool/ethtool_linux.go @@ -256,7 +256,7 @@ func (c *CommandEthtool) Interfaces(includeNamespaces bool) ([]NamespacedInterfa handle, err := 
netns.GetFromPath(filepath.Join(namespaceDirectory, name)) if err != nil { - c.Log.Warnf(`Could not get handle for namespace "%s": %s`, name, err) + c.Log.Warnf("Could not get handle for namespace %q: %s", name, err.Error()) continue } handles[name] = handle @@ -282,7 +282,7 @@ func (c *CommandEthtool) Interfaces(includeNamespaces bool) ([]NamespacedInterfa Log: c.Log, } if err := c.namespaceGoroutines[namespace].Start(); err != nil { - c.Log.Errorf(`Failed to start goroutine for namespace "%s": %s`, namespace, err) + c.Log.Errorf("Failed to start goroutine for namespace %q: %s", namespace, err.Error()) delete(c.namespaceGoroutines, namespace) continue } @@ -290,7 +290,7 @@ func (c *CommandEthtool) Interfaces(includeNamespaces bool) ([]NamespacedInterfa interfaces, err := c.namespaceGoroutines[namespace].Interfaces() if err != nil { - c.Log.Warnf(`Could not get interfaces from namespace "%s": %s`, namespace, err) + c.Log.Warnf("Could not get interfaces from namespace %q: %s", namespace, err.Error()) continue } allInterfaces = append(allInterfaces, interfaces...) 
diff --git a/plugins/inputs/ethtool/namespace_linux.go b/plugins/inputs/ethtool/namespace_linux.go index 34c065504..f60b61cb4 100644 --- a/plugins/inputs/ethtool/namespace_linux.go +++ b/plugins/inputs/ethtool/namespace_linux.go @@ -36,7 +36,6 @@ func (n *NamespaceGoroutine) Interfaces() ([]NamespacedInterface, error) { interfaces, err := n.Do(func(n *NamespaceGoroutine) (interface{}, error) { interfaces, err := net.Interfaces() if err != nil { - n.Log.Errorf(`Could not get interfaces in namespace "%s": %s`, n.name, err) return nil, err } namespacedInterfaces := make([]NamespacedInterface, 0, len(interfaces)) @@ -94,7 +93,7 @@ func (n *NamespaceGoroutine) Start() error { } if !initialNamespace.Equal(n.handle) { if err := netns.Set(n.handle); err != nil { - n.Log.Errorf(`Could not switch to namespace "%s": %s`, n.name, err) + n.Log.Errorf("Could not switch to namespace %q: %s", n.name, err.Error()) started <- err return } @@ -103,7 +102,7 @@ func (n *NamespaceGoroutine) Start() error { // Every namespace needs its own connection to ethtool e, err := ethtoolLib.NewEthtool() if err != nil { - n.Log.Errorf(`Could not create ethtool client for namespace "%s": %s`, n.name, err) + n.Log.Errorf("Could not create ethtool client for namespace %q: %s", n.name, err.Error()) started <- err return } diff --git a/plugins/inputs/fibaro/fibaro.go b/plugins/inputs/fibaro/fibaro.go index 283dca7da..ea85cccc4 100644 --- a/plugins/inputs/fibaro/fibaro.go +++ b/plugins/inputs/fibaro/fibaro.go @@ -85,7 +85,7 @@ func (f *Fibaro) getJSON(path string, dataStruct interface{}) error { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - err = fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", + err = fmt.Errorf("response from url %q has status code %d (%s), expected %d (%s)", requestURL, resp.StatusCode, http.StatusText(resp.StatusCode), diff --git a/plugins/inputs/fluentd/fluentd.go b/plugins/inputs/fluentd/fluentd.go index ccf62867b..143cebab8 100644 
--- a/plugins/inputs/fluentd/fluentd.go +++ b/plugins/inputs/fluentd/fluentd.go @@ -79,7 +79,7 @@ func (*Fluentd) SampleConfig() string { func (h *Fluentd) Gather(acc telegraf.Accumulator) error { _, err := url.Parse(h.Endpoint) if err != nil { - return fmt.Errorf("invalid URL \"%s\"", h.Endpoint) + return fmt.Errorf("invalid URL %q", h.Endpoint) } if h.client == nil { @@ -98,7 +98,7 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error { resp, err := h.client.Get(h.Endpoint) if err != nil { - return fmt.Errorf("unable to perform HTTP client GET on \"%s\": %v", h.Endpoint, err) + return fmt.Errorf("unable to perform HTTP client GET on %q: %w", h.Endpoint, err) } defer resp.Body.Close() @@ -106,7 +106,7 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error { body, err := io.ReadAll(resp.Body) if err != nil { - return fmt.Errorf("unable to read the HTTP body \"%s\": %v", string(body), err) + return fmt.Errorf("unable to read the HTTP body %q: %w", string(body), err) } if resp.StatusCode != http.StatusOK { diff --git a/plugins/inputs/google_cloud_storage/google_cloud_storage_test.go b/plugins/inputs/google_cloud_storage/google_cloud_storage_test.go index ced2274fa..cf939cba0 100644 --- a/plugins/inputs/google_cloud_storage/google_cloud_storage_test.go +++ b/plugins/inputs/google_cloud_storage/google_cloud_storage_test.go @@ -381,7 +381,7 @@ func fetchJSON(t *testing.T, boundary string, rc io.ReadCloser) (string, error) splits := strings.Split(string(bodyBytes), boundary) offsetPart := splits[2] offsets := strings.Split(offsetPart, "\n") - fmt.Printf("%s", offsets[3]) + fmt.Print(offsets[3]) return offsets[3], nil } diff --git a/plugins/inputs/graylog/graylog.go b/plugins/inputs/graylog/graylog.go index 34d7bebda..97248b933 100644 --- a/plugins/inputs/graylog/graylog.go +++ b/plugins/inputs/graylog/graylog.go @@ -138,7 +138,7 @@ func (h *GrayLog) gatherServer( } requestURL, err := url.Parse(serverURL) if err != nil { - return fmt.Errorf("unable to parse 
address '%s': %s", serverURL, err) + return fmt.Errorf("unable to parse address %q: %w", serverURL, err) } host, port, _ := net.SplitHostPort(requestURL.Host) @@ -207,7 +207,7 @@ func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) { // Prepare URL requestURL, err := url.Parse(serverURL) if err != nil { - return "", -1, fmt.Errorf("invalid server URL \"%s\"", serverURL) + return "", -1, fmt.Errorf("invalid server URL %q", serverURL) } // Add X-Requested-By header headers["X-Requested-By"] = "Telegraf" @@ -245,7 +245,7 @@ func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) { // Process response if resp.StatusCode != http.StatusOK { - err = fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", + err = fmt.Errorf("response from url %q has status code %d (%s), expected %d (%s)", requestURL.String(), resp.StatusCode, http.StatusText(resp.StatusCode), diff --git a/plugins/inputs/haproxy/haproxy.go b/plugins/inputs/haproxy/haproxy.go index 3002d3814..74f1ede31 100644 --- a/plugins/inputs/haproxy/haproxy.go +++ b/plugins/inputs/haproxy/haproxy.go @@ -137,12 +137,12 @@ func (h *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error { u, err := url.Parse(addr) if err != nil { - return fmt.Errorf("unable parse server address '%s': %s", addr, err) + return fmt.Errorf("unable parse server address %q: %w", addr, err) } req, err := http.NewRequest("GET", addr, nil) if err != nil { - return fmt.Errorf("unable to create new request '%s': %s", addr, err) + return fmt.Errorf("unable to create new request %q: %w", addr, err) } if u.User != nil { p, _ := u.User.Password() @@ -157,16 +157,16 @@ func (h *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error { res, err := h.client.Do(req) if err != nil { - return fmt.Errorf("unable to connect to haproxy server '%s': %s", addr, err) + return fmt.Errorf("unable to connect to haproxy server %q: %w", addr, err) } defer res.Body.Close() if 
res.StatusCode != 200 { - return fmt.Errorf("unable to get valid stat result from '%s', http response code : %d", addr, res.StatusCode) + return fmt.Errorf("unable to get valid stat result from %q, http response code : %d", addr, res.StatusCode) } if err := h.importCsvResult(res.Body, acc, u.Host); err != nil { - return fmt.Errorf("unable to parse stat result from '%s': %s", addr, err) + return fmt.Errorf("unable to parse stat result from %q: %w", addr, err) } return nil @@ -246,7 +246,7 @@ func (h *haproxy) importCsvResult(r io.Reader, acc telegraf.Accumulator, host st case "type": vi, err := strconv.ParseInt(v, 10, 64) if err != nil { - return fmt.Errorf("unable to parse type value '%s'", v) + return fmt.Errorf("unable to parse type value %q", v) } if vi >= int64(len(typeNames)) { return fmt.Errorf("received unknown type value: %d", vi) diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index afe21e8fb..04ad5b17b 100644 --- a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -176,7 +176,7 @@ func (h *HTTPJSON) sendRequest(serverURL string) (string, float64, error) { // Prepare URL requestURL, err := url.Parse(serverURL) if err != nil { - return "", -1, fmt.Errorf("Invalid server URL \"%s\"", serverURL) + return "", -1, fmt.Errorf("Invalid server URL %q", serverURL) } data := url.Values{} @@ -228,7 +228,7 @@ func (h *HTTPJSON) sendRequest(serverURL string) (string, float64, error) { // Process response if resp.StatusCode != http.StatusOK { - err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)", + err = fmt.Errorf("Response from url %q has status code %d (%s), expected %d (%s)", requestURL.String(), resp.StatusCode, http.StatusText(resp.StatusCode), diff --git a/plugins/inputs/hugepages/hugepages.go b/plugins/inputs/hugepages/hugepages.go index 60c65db5e..c75215341 100644 --- a/plugins/inputs/hugepages/hugepages.go +++ b/plugins/inputs/hugepages/hugepages.go @@ 
-195,7 +195,7 @@ func (h *Hugepages) gatherFromHugepagePath( metricValue, err := strconv.Atoi(string(bytes.TrimSuffix(metricBytes, newlineByte))) if err != nil { - return fmt.Errorf("failed to convert content of '%s': %v", metricFullPath, err) + return fmt.Errorf("failed to convert content of %q: %w", metricFullPath, err) } metrics[metricName] = metricValue @@ -238,7 +238,7 @@ func (h *Hugepages) gatherStatsFromMeminfo(acc telegraf.Accumulator) error { fieldValue, err := strconv.Atoi(string(fields[1])) if err != nil { - return fmt.Errorf("failed to convert content of '%s': %v", fieldName, err) + return fmt.Errorf("failed to convert content of %q: %w", fieldName, err) } metrics[metricName] = fieldValue @@ -270,7 +270,7 @@ func (h *Hugepages) parseHugepagesConfig() error { case meminfoHugepages: h.gatherMeminfo = true default: - return fmt.Errorf("provided hugepages type `%s` is not valid", hugepagesType) + return fmt.Errorf("provided hugepages type %q is not valid", hugepagesType) } } diff --git a/plugins/inputs/intel_dlb/intel_dlb.go b/plugins/inputs/intel_dlb/intel_dlb.go index d9aa9d5a0..2e44829be 100644 --- a/plugins/inputs/intel_dlb/intel_dlb.go +++ b/plugins/inputs/intel_dlb/intel_dlb.go @@ -156,12 +156,12 @@ func (d *IntelDLB) readRasMetrics(devicePath, metricPath string) (map[string]int for _, metric := range metrics { metricPart := strings.Split(metric, " ") if len(metricPart) < 2 { - return nil, fmt.Errorf("error occurred: no value to parse - %+q", metricPart) + return nil, fmt.Errorf("no value to parse: %+q", metricPart) } metricVal, err := strconv.ParseUint(metricPart[1], 10, 10) if err != nil { - return nil, fmt.Errorf("error occurred: failed to parse value '%s': '%s'", metricPart[1], err) + return nil, fmt.Errorf("failed to parse value %q: %w", metricPart[1], err) } rasMetric[metricPart[0]] = metricVal } diff --git a/plugins/inputs/intel_pmu/activators.go b/plugins/inputs/intel_pmu/activators.go index dcec7d92c..279709deb 100644 --- 
a/plugins/inputs/intel_pmu/activators.go +++ b/plugins/inputs/intel_pmu/activators.go @@ -74,13 +74,13 @@ func (ea *iaEntitiesActivator) activateEntities(coreEntities []*CoreEventEntity, for _, coreEventsEntity := range coreEntities { err := ea.activateCoreEvents(coreEventsEntity) if err != nil { - return fmt.Errorf("failed to activate core events `%s`: %v", coreEventsEntity.EventsTag, err) + return fmt.Errorf("failed to activate core events %q: %w", coreEventsEntity.EventsTag, err) } } for _, uncoreEventsEntity := range uncoreEntities { err := ea.activateUncoreEvents(uncoreEventsEntity) if err != nil { - return fmt.Errorf("failed to activate uncore events `%s`: %v", uncoreEventsEntity.EventsTag, err) + return fmt.Errorf("failed to activate uncore events %q: %w", uncoreEventsEntity.EventsTag, err) } } return nil @@ -105,11 +105,11 @@ func (ea *iaEntitiesActivator) activateCoreEvents(entity *CoreEventEntity) error } placements, err := ea.placementMaker.makeCorePlacements(entity.parsedCores, event.custom.Event) if err != nil { - return fmt.Errorf("failed to create core placements for event `%s`: %v", event.name, err) + return fmt.Errorf("failed to create core placements for event %q: %w", event.name, err) } activeEvents, err := ea.activateEventForPlacements(event, placements) if err != nil { - return fmt.Errorf("failed to activate core event `%s`: %v", event.name, err) + return fmt.Errorf("failed to activate core event %q: %w", event.name, err) } entity.activeEvents = append(entity.activeEvents, activeEvents...) 
} @@ -130,18 +130,18 @@ func (ea *iaEntitiesActivator) activateUncoreEvents(entity *UncoreEventEntity) e } perfEvent := event.custom.Event if perfEvent == nil { - return fmt.Errorf("perf event of `%s` event is nil", event.name) + return fmt.Errorf("perf event of %q event is nil", event.name) } options := event.custom.Options for _, socket := range entity.parsedSockets { placements, err := ea.placementMaker.makeUncorePlacements(socket, perfEvent) if err != nil { - return fmt.Errorf("failed to create uncore placements for event `%s`: %v", event.name, err) + return fmt.Errorf("failed to create uncore placements for event %q: %w", event.name, err) } activeMultiEvent, err := ea.perfActivator.activateMulti(perfEvent, placements, options) if err != nil { - return fmt.Errorf("failed to activate multi event `%s`: %v", event.name, err) + return fmt.Errorf("failed to activate multi event %q: %w", event.name, err) } events := activeMultiEvent.Events() entity.activeMultiEvents = append(entity.activeMultiEvents, multiEvent{events, perfEvent, socket}) @@ -197,7 +197,7 @@ func (ea *iaEntitiesActivator) activateEventForPlacements(event *eventWithQuals, activeEvent, err := ea.perfActivator.activateEvent(perfEvent, placement, options) if err != nil { - return nil, fmt.Errorf("failed to activate event `%s`: %v", event.name, err) + return nil, fmt.Errorf("failed to activate event %q: %w", event.name, err) } activeEvents = append(activeEvents, activeEvent) } diff --git a/plugins/inputs/intel_pmu/activators_test.go b/plugins/inputs/intel_pmu/activators_test.go index 750f89d83..d9da88f05 100644 --- a/plugins/inputs/intel_pmu/activators_test.go +++ b/plugins/inputs/intel_pmu/activators_test.go @@ -37,7 +37,7 @@ func TestActivateEntities(t *testing.T) { mEntities := []*CoreEventEntity{{EventsTag: tag}} err := mEntitiesActivator.activateEntities(mEntities, nil) require.Error(t, err) - require.Contains(t, err.Error(), fmt.Sprintf("failed to activate core events `%s`", tag)) + 
require.Contains(t, err.Error(), fmt.Sprintf("failed to activate core events %q", tag)) }) // more uncore test cases in TestActivateUncoreEvents @@ -46,7 +46,7 @@ func TestActivateEntities(t *testing.T) { mEntities := []*UncoreEventEntity{{EventsTag: tag}} err := mEntitiesActivator.activateEntities(nil, mEntities) require.Error(t, err) - require.Contains(t, err.Error(), fmt.Sprintf("failed to activate uncore events `%s`", tag)) + require.Contains(t, err.Error(), fmt.Sprintf("failed to activate uncore events %q", tag)) }) t.Run("nothing to do", func(t *testing.T) { @@ -81,7 +81,7 @@ func TestActivateUncoreEvents(t *testing.T) { mEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{{name: name, custom: ia.CustomizableEvent{Event: nil}}}} err := mEntitiesActivator.activateUncoreEvents(mEntity) require.Error(t, err) - require.Contains(t, err.Error(), fmt.Sprintf("perf event of `%s` event is nil", name)) + require.Contains(t, err.Error(), fmt.Sprintf("perf event of %q event is nil", name)) }) t.Run("placement maker and perf activator is nil", func(t *testing.T) { @@ -101,7 +101,7 @@ func TestActivateUncoreEvents(t *testing.T) { err := mEntitiesActivator.activateUncoreEvents(mEntity) require.Error(t, err) - require.Contains(t, err.Error(), fmt.Sprintf("ailed to create uncore placements for event `%s`", eventName)) + require.Contains(t, err.Error(), fmt.Sprintf("ailed to create uncore placements for event %q", eventName)) mMaker.AssertExpectations(t) }) @@ -117,7 +117,7 @@ func TestActivateUncoreEvents(t *testing.T) { err := mEntitiesActivator.activateUncoreEvents(mEntity) require.Error(t, err) - require.Contains(t, err.Error(), fmt.Sprintf("failed to activate multi event `%s`", eventName)) + require.Contains(t, err.Error(), fmt.Sprintf("failed to activate multi event %q", eventName)) mMaker.AssertExpectations(t) mActivator.AssertExpectations(t) }) @@ -188,7 +188,7 @@ func TestActivateCoreEvents(t *testing.T) { err := mEntitiesActivator.activateCoreEvents(mEntity) 
require.Error(t, err) - require.Contains(t, err.Error(), fmt.Sprintf("failed to create core placements for event `%s`", parsedEvents[0].name)) + require.Contains(t, err.Error(), fmt.Sprintf("failed to create core placements for event %q", parsedEvents[0].name)) mMaker.AssertExpectations(t) }) @@ -206,7 +206,7 @@ func TestActivateCoreEvents(t *testing.T) { err := mEntitiesActivator.activateCoreEvents(mEntity) require.Error(t, err) - require.Contains(t, err.Error(), fmt.Sprintf("failed to activate core event `%s`", parsedEvents[0].name)) + require.Contains(t, err.Error(), fmt.Sprintf("failed to activate core event %q", parsedEvents[0].name)) mMaker.AssertExpectations(t) mActivator.AssertExpectations(t) }) @@ -407,7 +407,7 @@ func TestActivateEventForPlacement(t *testing.T) { mPerfActivator.On("activateEvent", mPerfEvent, placement1, mOptions).Once().Return(nil, errors.New("err")) activeEvents, err := mActivator.activateEventForPlacements(mEvent, mPlacements) require.Error(t, err) - require.Contains(t, err.Error(), fmt.Sprintf("failed to activate event `%s`", mEvent.name)) + require.Contains(t, err.Error(), fmt.Sprintf("failed to activate event %q", mEvent.name)) require.Nil(t, activeEvents) mPerfActivator.AssertExpectations(t) }) diff --git a/plugins/inputs/intel_pmu/config.go b/plugins/inputs/intel_pmu/config.go index 22834826f..bb1b53cad 100644 --- a/plugins/inputs/intel_pmu/config.go +++ b/plugins/inputs/intel_pmu/config.go @@ -83,7 +83,7 @@ func (cp *configParser) parseEvents(events []string) []*eventWithQuals { events, duplications := removeDuplicateStrings(events) for _, duplication := range duplications { if cp.log != nil { - cp.log.Warnf("duplicated event `%s` will be removed", duplication) + cp.log.Warnf("duplicated event %q will be removed", duplication) } } return parseEventsWithQualifiers(events) @@ -198,7 +198,7 @@ func parseIDs(allIDsStrings []string) ([]int, error) { // Single value num, err := strconv.Atoi(id) if err != nil { - return nil, 
fmt.Errorf("wrong format for id number `%s`: %v", id, err) + return nil, fmt.Errorf("wrong format for id number %q: %w", id, err) } if len(result)+1 > maxIDsSize { return nil, fmt.Errorf("requested number of IDs exceeds max size `%d`", maxIDsSize) diff --git a/plugins/inputs/intel_pmu/intel_pmu.go b/plugins/inputs/intel_pmu/intel_pmu.go index e86b4f7be..01e6a31fe 100644 --- a/plugins/inputs/intel_pmu/intel_pmu.go +++ b/plugins/inputs/intel_pmu/intel_pmu.go @@ -219,14 +219,14 @@ func (i *IntelPMU) Gather(acc telegraf.Accumulator) error { for id, m := range coreMetrics { scaled := ia.EventScaledValue(m.values) if !scaled.IsUint64() { - return fmt.Errorf("cannot process `%s` scaled value `%s`: exceeds uint64", m.name, scaled.String()) + return fmt.Errorf("cannot process %q scaled value %q: exceeds uint64", m.name, scaled.String()) } coreMetrics[id].scaled = scaled.Uint64() } for id, m := range uncoreMetrics { scaled := ia.EventScaledValue(m.values) if !scaled.IsUint64() { - return fmt.Errorf("cannot process `%s` scaled value `%s`: exceeds uint64", m.name, scaled.String()) + return fmt.Errorf("cannot process %q scaled value %q: exceeds uint64", m.name, scaled.String()) } uncoreMetrics[id].scaled = scaled.Uint64() } @@ -248,7 +248,7 @@ func (i *IntelPMU) Stop() { } err := event.Deactivate() if err != nil { - i.Log.Warnf("failed to deactivate core event `%s`: %v", event, err) + i.Log.Warnf("failed to deactivate core event %q: %w", event, err) } } } @@ -263,7 +263,7 @@ func (i *IntelPMU) Stop() { } err := event.Deactivate() if err != nil { - i.Log.Warnf("failed to deactivate uncore event `%s`: %v", event, err) + i.Log.Warnf("failed to deactivate uncore event %q: %w", event, err) } } } @@ -325,10 +325,10 @@ func multiplyAndAdd(factorA uint64, factorB uint64, sum uint64) (uint64, error) bigB := new(big.Int).SetUint64(factorB) activeEvents := new(big.Int).Mul(bigA, bigB) if !activeEvents.IsUint64() { - return 0, fmt.Errorf("value `%s` cannot be represented as uint64", 
activeEvents.String()) + return 0, fmt.Errorf("value %q cannot be represented as uint64", activeEvents.String()) } if sum > math.MaxUint64-activeEvents.Uint64() { - return 0, fmt.Errorf("value `%s` exceeds uint64", new(big.Int).Add(activeEvents, new(big.Int).SetUint64(sum))) + return 0, fmt.Errorf("value %q exceeds uint64", new(big.Int).Add(activeEvents, new(big.Int).SetUint64(sum))) } sum += activeEvents.Uint64() return sum, nil @@ -340,11 +340,11 @@ func readMaxFD(reader fileInfoProvider) (uint64, error) { } buf, err := reader.readFile(fileMaxPath) if err != nil { - return 0, fmt.Errorf("cannot open `%s` file: %v", fileMaxPath, err) + return 0, fmt.Errorf("cannot open file %q: %w", fileMaxPath, err) } max, err := strconv.ParseUint(strings.Trim(string(buf), "\n "), 10, 64) if err != nil { - return 0, fmt.Errorf("cannot parse file content of `%s`: %v", fileMaxPath, err) + return 0, fmt.Errorf("cannot parse file content of %q: %w", fileMaxPath, err) } return max, nil } @@ -362,16 +362,16 @@ func checkFiles(paths []string, fileInfo fileInfoProvider) error { lInfo, err := fileInfo.lstat(path) if err != nil { if os.IsNotExist(err) { - return fmt.Errorf("file `%s` doesn't exist", path) + return fmt.Errorf("file %q doesn't exist", path) } - return fmt.Errorf("cannot obtain file info of `%s`: %v", path, err) + return fmt.Errorf("cannot obtain file info of %q: %w", path, err) } mode := lInfo.Mode() if mode&os.ModeSymlink != 0 { - return fmt.Errorf("file %s is a symlink", path) + return fmt.Errorf("file %q is a symlink", path) } if !mode.IsRegular() { - return fmt.Errorf("file `%s` doesn't point to a reagular file", path) + return fmt.Errorf("file %q doesn't point to a reagular file", path) } } return nil diff --git a/plugins/inputs/intel_pmu/intel_pmu_test.go b/plugins/inputs/intel_pmu/intel_pmu_test.go index 3569a4acc..53f588c69 100644 --- a/plugins/inputs/intel_pmu/intel_pmu_test.go +++ b/plugins/inputs/intel_pmu/intel_pmu_test.go @@ -251,7 +251,7 @@ func TestGather(t 
*testing.T) { tag: "BIG_FISH", }, }, - errMSg: "cannot process `I_AM_TOO_BIG` scaled value `36893488147419103230`: exceeds uint64", + errMSg: `cannot process "I_AM_TOO_BIG" scaled value "36893488147419103230": exceeds uint64`, }, { name: "uncore scaled value greater then max uint64", @@ -262,7 +262,7 @@ func TestGather(t *testing.T) { tag: "BIG_FISH", }, }, - errMSg: "cannot process `I_AM_TOO_BIG_UNCORE` scaled value `36893488147419103230`: exceeds uint64", + errMSg: `cannot process "I_AM_TOO_BIG_UNCORE" scaled value "36893488147419103230": exceeds uint64`, }, } @@ -430,8 +430,8 @@ func TestReadMaxFD(t *testing.T) { require.Zero(t, result) }) - openErrorMsg := fmt.Sprintf("cannot open `%s` file", fileMaxPath) - parseErrorMsg := fmt.Sprintf("cannot parse file content of `%s`", fileMaxPath) + openErrorMsg := fmt.Sprintf("cannot open file %q", fileMaxPath) + parseErrorMsg := fmt.Sprintf("cannot parse file content of %q", fileMaxPath) tests := []struct { name string @@ -489,7 +489,7 @@ func TestAddFiles(t *testing.T) { err := checkFiles(paths, mFileInfo) require.Error(t, err) - require.Contains(t, err.Error(), fmt.Sprintf("cannot obtain file info of `%s`", file)) + require.Contains(t, err.Error(), fmt.Sprintf("cannot obtain file info of %q", file)) mFileInfo.AssertExpectations(t) }) @@ -500,7 +500,7 @@ func TestAddFiles(t *testing.T) { err := checkFiles(paths, mFileInfo) require.Error(t, err) - require.Contains(t, err.Error(), fmt.Sprintf("file `%s` doesn't exist", file)) + require.Contains(t, err.Error(), fmt.Sprintf("file %q doesn't exist", file)) mFileInfo.AssertExpectations(t) }) @@ -512,7 +512,7 @@ func TestAddFiles(t *testing.T) { err := checkFiles(paths, mFileInfo) require.Error(t, err) - require.Contains(t, err.Error(), fmt.Sprintf("file %s is a symlink", file)) + require.Contains(t, err.Error(), fmt.Sprintf("file %q is a symlink", file)) mFileInfo.AssertExpectations(t) }) @@ -524,7 +524,7 @@ func TestAddFiles(t *testing.T) { err := checkFiles(paths, mFileInfo) 
require.Error(t, err) - require.Contains(t, err.Error(), fmt.Sprintf("file `%s` doesn't point to a reagular file", file)) + require.Contains(t, err.Error(), fmt.Sprintf("file %q doesn't point to a reagular file", file)) mFileInfo.AssertExpectations(t) }) diff --git a/plugins/inputs/intel_pmu/reader.go b/plugins/inputs/intel_pmu/reader.go index b3ef24111..9b13618b5 100644 --- a/plugins/inputs/intel_pmu/reader.go +++ b/plugins/inputs/intel_pmu/reader.go @@ -107,7 +107,7 @@ func (ie *iaEntitiesValuesReader) readCoreEvents(entity *CoreEventEntity) ([]cor errGroup.Go(func() error { values, err := ie.eventReader.readValue(actualEvent) if err != nil { - return fmt.Errorf("failed to read core event `%s` values: %v", actualEvent, err) + return fmt.Errorf("failed to read core event %q values: %w", actualEvent, err) } cpu, _ := actualEvent.PMUPlacement() newMetric := coreMetric{ @@ -176,7 +176,7 @@ func (ie *iaEntitiesValuesReader) readMultiEventSeparately(multiEvent multiEvent group.Go(func() error { values, err := ie.eventReader.readValue(actualEvent) if err != nil { - return fmt.Errorf("failed to read uncore event `%s` values: %v", actualEvent, err) + return fmt.Errorf("failed to read uncore event %q values: %w", actualEvent, err) } newMetric := uncoreMetric{ values: values, @@ -217,7 +217,7 @@ func (ie *iaEntitiesValuesReader) readMultiEventAgg(multiEvent multiEvent) (unco group.Go(func() error { value, err := ie.eventReader.readValue(actualEvent) if err != nil { - return fmt.Errorf("failed to read uncore event `%s` values: %v", actualEvent, err) + return fmt.Errorf("failed to read uncore event %q values: %w", actualEvent, err) } values[id] = value return nil @@ -230,7 +230,7 @@ func (ie *iaEntitiesValuesReader) readMultiEventAgg(multiEvent multiEvent) (unco bRaw, bEnabled, bRunning := ia.AggregateValues(values) if !bRaw.IsUint64() || !bEnabled.IsUint64() || !bRunning.IsUint64() { - return uncoreMetric{}, fmt.Errorf("cannot aggregate `%s` values, uint64 exceeding", 
perfEvent) + return uncoreMetric{}, fmt.Errorf("cannot aggregate %q values, uint64 exceeding", perfEvent) } aggValues := ia.CounterValue{ Raw: bRaw.Uint64(), diff --git a/plugins/inputs/intel_pmu/reader_test.go b/plugins/inputs/intel_pmu/reader_test.go index 71ba6e434..3c0b0579e 100644 --- a/plugins/inputs/intel_pmu/reader_test.go +++ b/plugins/inputs/intel_pmu/reader_test.go @@ -72,7 +72,7 @@ func TestReadCoreEvents(t *testing.T) { metrics, err := mEntitiesReader.readCoreEvents(entity) require.Error(t, err) - require.Contains(t, err.Error(), fmt.Sprintf("failed to read core event `%s` values: %v", event, errMock)) + require.Contains(t, err.Error(), fmt.Sprintf("failed to read core event %q values: %v", event, errMock)) require.Nil(t, metrics) mReader.AssertExpectations(t) }) @@ -149,7 +149,7 @@ func TestReadMultiEventSeparately(t *testing.T) { metrics, err := mEntitiesReader.readMultiEventSeparately(multi) require.Error(t, err) - require.Contains(t, err.Error(), fmt.Sprintf("failed to read uncore event `%s` values: %v", event, errMock)) + require.Contains(t, err.Error(), fmt.Sprintf("failed to read uncore event %q values: %v", event, errMock)) require.Nil(t, metrics) mReader.AssertExpectations(t) }) @@ -248,7 +248,7 @@ func TestReadMultiEventAgg(t *testing.T) { {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 1, Enabled: 0, Running: 0}}, }, result: ia.CounterValue{}, - errMsg: fmt.Sprintf("cannot aggregate `%s` values, uint64 exceeding", perfEvent), + errMsg: fmt.Sprintf("cannot aggregate %q values, uint64 exceeding", perfEvent), }, { name: "reading fail", diff --git a/plugins/inputs/intel_pmu/resolver.go b/plugins/inputs/intel_pmu/resolver.go index 7517603eb..3e9dc10d1 100644 --- a/plugins/inputs/intel_pmu/resolver.go +++ b/plugins/inputs/intel_pmu/resolver.go @@ -40,10 +40,10 @@ func (e *iaEntitiesResolver) resolveEntities(coreEntities []*CoreEventEntity, un } customEvent, err := e.resolveEvent(event.name, event.qualifiers) if err != nil { - return 
fmt.Errorf("failed to resolve core event `%s`: %v", event.name, err) + return fmt.Errorf("failed to resolve core event %q: %w", event.name, err) } if customEvent.Event.Uncore { - return fmt.Errorf("uncore event `%s` found in core entity", event.name) + return fmt.Errorf("uncore event %q found in core entity", event.name) } event.custom = customEvent } @@ -66,10 +66,10 @@ func (e *iaEntitiesResolver) resolveEntities(coreEntities []*CoreEventEntity, un } customEvent, err := e.resolveEvent(event.name, event.qualifiers) if err != nil { - return fmt.Errorf("failed to resolve uncore event `%s`: %v", event.name, err) + return fmt.Errorf("failed to resolve uncore event %q: %w", event.name, err) } if !customEvent.Event.Uncore { - return fmt.Errorf("core event `%s` found in uncore entity", event.name) + return fmt.Errorf("core event %q found in uncore entity", event.name) } event.custom = customEvent } @@ -109,7 +109,7 @@ func (e *iaEntitiesResolver) resolveAllEvents() (coreEvents []*eventWithQuals, u // build options for event newEvent.custom.Options, err = ia.NewOptions().Build() if err != nil { - return nil, nil, fmt.Errorf("failed to build options for event `%s`: %v", perfEvent.Name, err) + return nil, nil, fmt.Errorf("failed to build options for event %q: %w", perfEvent.Name, err) } if perfEvent.Uncore { uncoreEvents = append(uncoreEvents, newEvent) @@ -134,12 +134,12 @@ func (e *iaEntitiesResolver) resolveEvent(name string, qualifiers []string) (ia. 
return custom, fmt.Errorf("failed to transform perf events: %v", err) } if len(perfEvents) < 1 { - return custom, fmt.Errorf("failed to resolve unknown event `%s`", name) + return custom, fmt.Errorf("failed to resolve unknown event %q", name) } // build options for event options, err := ia.NewOptions().SetAttrModifiers(qualifiers).Build() if err != nil { - return custom, fmt.Errorf("failed to build options for event `%s`: %v", name, err) + return custom, fmt.Errorf("failed to build options for event %q: %w", name, err) } custom = ia.CustomizableEvent{ Event: perfEvents[0], diff --git a/plugins/inputs/intel_pmu/resolver_test.go b/plugins/inputs/intel_pmu/resolver_test.go index f761d233f..40e5d5389 100644 --- a/plugins/inputs/intel_pmu/resolver_test.go +++ b/plugins/inputs/intel_pmu/resolver_test.go @@ -60,7 +60,7 @@ func TestResolveEntities(t *testing.T) { err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil) require.Error(t, err) - require.Contains(t, err.Error(), fmt.Sprintf("failed to resolve core event `%s`", name)) + require.Contains(t, err.Error(), fmt.Sprintf("failed to resolve core event %q", name)) mTransformer.AssertExpectations(t) }) @@ -73,7 +73,7 @@ func TestResolveEntities(t *testing.T) { err := mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity}) require.Error(t, err) - require.Contains(t, err.Error(), fmt.Sprintf("failed to resolve uncore event `%s`", name)) + require.Contains(t, err.Error(), fmt.Sprintf("failed to resolve uncore event %q", name)) mTransformer.AssertExpectations(t) }) @@ -159,7 +159,7 @@ func TestResolveEntities(t *testing.T) { err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil) require.Error(t, err) - require.Contains(t, err.Error(), fmt.Sprintf("uncore event `%s` found in core entity", eventName)) + require.Contains(t, err.Error(), fmt.Sprintf("uncore event %q found in core entity", eventName)) mTransformer.AssertExpectations(t) }) @@ -179,7 +179,7 @@ func TestResolveEntities(t 
*testing.T) { err := mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity}) require.Error(t, err) - require.Contains(t, err.Error(), fmt.Sprintf("core event `%s` found in uncore entity", eventName)) + require.Contains(t, err.Error(), fmt.Sprintf("core event %q found in uncore entity", eventName)) mTransformer.AssertExpectations(t) }) @@ -349,7 +349,7 @@ func TestResolveEvent(t *testing.T) { _, err := mResolver.resolveEvent(event, qualifiers) require.Error(t, err) - require.Contains(t, err.Error(), fmt.Sprintf("failed to build options for event `%s`", event)) + require.Contains(t, err.Error(), fmt.Sprintf("failed to build options for event %q", event)) mTransformer.AssertExpectations(t) }) diff --git a/plugins/inputs/intel_powerstat/file.go b/plugins/inputs/intel_powerstat/file.go index fb4689b28..75d02a5ca 100644 --- a/plugins/inputs/intel_powerstat/file.go +++ b/plugins/inputs/intel_powerstat/file.go @@ -160,13 +160,13 @@ func checkFile(path string) error { lInfo, err := os.Lstat(path) if err != nil { if os.IsNotExist(err) { - return fmt.Errorf("file `%s` doesn't exist", path) + return fmt.Errorf("file %q doesn't exist", path) } - return fmt.Errorf("cannot obtain file info of `%s`: %v", path, err) + return fmt.Errorf("cannot obtain file info of %q: %w", path, err) } mode := lInfo.Mode() if mode&os.ModeSymlink != 0 { - return fmt.Errorf("file `%s` is a symlink", path) + return fmt.Errorf("file %q is a symlink", path) } return nil } diff --git a/plugins/inputs/ipmi_sensor/ipmi_sensor.go b/plugins/inputs/ipmi_sensor/ipmi_sensor.go index 0a9bc9b1d..3bfbb02da 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_sensor.go +++ b/plugins/inputs/ipmi_sensor/ipmi_sensor.go @@ -273,7 +273,7 @@ func (m *Ipmi) extractFieldsFromRegex(re *regexp.Regexp, input string) map[strin results := make(map[string]string) subexpNames := re.SubexpNames() if len(subexpNames) > len(submatches) { - m.Log.Debugf("No matches found in '%s'", input) + m.Log.Debugf("No matches found in %q", 
input) return results } for i, name := range subexpNames { diff --git a/plugins/inputs/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go index c1e4385d4..2214fd245 100644 --- a/plugins/inputs/jolokia/jolokia.go +++ b/plugins/inputs/jolokia/jolokia.go @@ -73,7 +73,7 @@ func (j *Jolokia) doRequest(req *http.Request) ([]map[string]interface{}, error) // Process response if resp.StatusCode != http.StatusOK { - err = fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", + err = fmt.Errorf("response from url %q has status code %d (%s), expected %d (%s)", req.RequestURI, resp.StatusCode, http.StatusText(resp.StatusCode), @@ -231,7 +231,7 @@ func (j *Jolokia) Gather(acc telegraf.Accumulator) error { } for i, resp := range out { if status, ok := resp["status"]; ok && status != float64(200) { - acc.AddError(fmt.Errorf("not expected status value in response body (%s:%s mbean=\"%s\" attribute=\"%s\"): %3.f", + acc.AddError(fmt.Errorf("not expected status value in response body (%s:%s mbean=%q attribute=%q): %3.f", server.Host, server.Port, metrics[i].Mbean, metrics[i].Attribute, status)) continue } else if !ok { diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go index 153b1954a..5bcff828e 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go @@ -75,7 +75,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error { case "newest": config.Offsets.Initial = sarama.OffsetNewest default: - k.Log.Infof("WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n", + k.Log.Infof("WARNING: Kafka consumer invalid offset %q, using 'oldest'\n", k.Offset) config.Offsets.Initial = sarama.OffsetOldest } diff --git a/plugins/inputs/kibana/kibana.go b/plugins/inputs/kibana/kibana.go index aca78aab9..dcb532ee6 100644 --- a/plugins/inputs/kibana/kibana.go +++ 
b/plugins/inputs/kibana/kibana.go @@ -218,7 +218,7 @@ func (k *Kibana) gatherKibanaStatus(baseURL string, acc telegraf.Accumulator) er func (k *Kibana) gatherJSONData(url string, v interface{}) (host string, err error) { request, err := http.NewRequest("GET", url, nil) if err != nil { - return "", fmt.Errorf("unable to create new request '%s': %v", url, err) + return "", fmt.Errorf("unable to create new request %q: %w", url, err) } if (k.Username != "") || (k.Password != "") { diff --git a/plugins/inputs/kubernetes/kubernetes.go b/plugins/inputs/kubernetes/kubernetes.go index 6defce253..e3894d7ff 100644 --- a/plugins/inputs/kubernetes/kubernetes.go +++ b/plugins/inputs/kubernetes/kubernetes.go @@ -6,16 +6,17 @@ import ( _ "embed" "encoding/json" "fmt" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" "net/http" "os" "strings" "sync" "time" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" @@ -129,7 +130,7 @@ func getNodeURLs(log telegraf.Logger) ([]string, error) { for _, n := range nodes.Items { address := getNodeAddress(n) if address == "" { - log.Warn("Unable to node addresses for Node '%s'", n.Name) + log.Warnf("Unable to node addresses for Node %q", n.Name) continue } nodeUrls = append(nodeUrls, "https://"+address+":10250") diff --git a/plugins/inputs/modbus/configuration_register.go b/plugins/inputs/modbus/configuration_register.go index 4db104581..fd48b7247 100644 --- a/plugins/inputs/modbus/configuration_register.go +++ b/plugins/inputs/modbus/configuration_register.go @@ -163,13 +163,13 @@ func (c *ConfigurationOriginal) validateFieldDefinitions(fieldDefs []fieldDefini for _, item := range fieldDefs { //check empty name if item.Name == "" { - return fmt.Errorf("empty name in '%s'", 
registerType) + return fmt.Errorf("empty name in %q", registerType) } //search name duplicate canonicalName := item.Measurement + "." + item.Name if nameEncountered[canonicalName] { - return fmt.Errorf("name '%s' is duplicated in measurement '%s' '%s' - '%s'", item.Name, item.Measurement, registerType, item.Name) + return fmt.Errorf("name %q is duplicated in measurement %q %q - %q", item.Name, item.Measurement, registerType, item.Name) } nameEncountered[canonicalName] = true @@ -178,7 +178,7 @@ func (c *ConfigurationOriginal) validateFieldDefinitions(fieldDefs []fieldDefini switch item.ByteOrder { case "AB", "BA", "ABCD", "CDAB", "BADC", "DCBA", "ABCDEFGH", "HGFEDCBA", "BADCFEHG", "GHEFCDAB": default: - return fmt.Errorf("invalid byte order '%s' in '%s' - '%s'", item.ByteOrder, registerType, item.Name) + return fmt.Errorf("invalid byte order %q in %q - %q", item.ByteOrder, registerType, item.Name) } // search data type @@ -187,31 +187,31 @@ func (c *ConfigurationOriginal) validateFieldDefinitions(fieldDefs []fieldDefini "UINT16", "INT16", "UINT32", "INT32", "UINT64", "INT64", "FLOAT16-IEEE", "FLOAT32-IEEE", "FLOAT64-IEEE", "FLOAT32", "FIXED", "UFIXED": default: - return fmt.Errorf("invalid data type '%s' in '%s' - '%s'", item.DataType, registerType, item.Name) + return fmt.Errorf("invalid data type %q in %q - %q", item.DataType, registerType, item.Name) } // check scale if item.Scale == 0.0 { - return fmt.Errorf("invalid scale '%f' in '%s' - '%s'", item.Scale, registerType, item.Name) + return fmt.Errorf("invalid scale '%f' in %q - %q", item.Scale, registerType, item.Name) } } // check address if len(item.Address) != 1 && len(item.Address) != 2 && len(item.Address) != 4 { - return fmt.Errorf("invalid address '%v' length '%v' in '%s' - '%s'", item.Address, len(item.Address), registerType, item.Name) + return fmt.Errorf("invalid address '%v' length '%v' in %q - %q", item.Address, len(item.Address), registerType, item.Name) } if registerType == cInputRegisters || 
registerType == cHoldingRegisters { if 2*len(item.Address) != len(item.ByteOrder) { - return fmt.Errorf("invalid byte order '%s' and address '%v' in '%s' - '%s'", item.ByteOrder, item.Address, registerType, item.Name) + return fmt.Errorf("invalid byte order %q and address '%v' in %q - %q", item.ByteOrder, item.Address, registerType, item.Name) } // search duplicated if len(item.Address) > len(removeDuplicates(item.Address)) { - return fmt.Errorf("duplicate address '%v' in '%s' - '%s'", item.Address, registerType, item.Name) + return fmt.Errorf("duplicate address '%v' in %q - %q", item.Address, registerType, item.Name) } } else if len(item.Address) != 1 { - return fmt.Errorf("invalid address'%v' length'%v' in '%s' - '%s'", item.Address, len(item.Address), registerType, item.Name) + return fmt.Errorf("invalid address'%v' length'%v' in %q - %q", item.Address, len(item.Address), registerType, item.Name) } } return nil diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 06b8c7f05..50749ba9d 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -403,17 +403,17 @@ func typeConvert(types map[string]string, topicValue string, key string) (interf case "uint": newType, err = strconv.ParseUint(topicValue, 10, 64) if err != nil { - return nil, fmt.Errorf("unable to convert field '%s' to type uint: %v", topicValue, err) + return nil, fmt.Errorf("unable to convert field %q to type uint: %w", topicValue, err) } case "int": newType, err = strconv.ParseInt(topicValue, 10, 64) if err != nil { - return nil, fmt.Errorf("unable to convert field '%s' to type int: %v", topicValue, err) + return nil, fmt.Errorf("unable to convert field %q to type int: %w", topicValue, err) } case "float": newType, err = strconv.ParseFloat(topicValue, 64) if err != nil { - return nil, fmt.Errorf("unable to convert field '%s' to type float: %v", topicValue, err) + return nil, 
fmt.Errorf("unable to convert field %q to type float: %w", topicValue, err) } default: return nil, fmt.Errorf("converting to the type %s is not supported: use int, uint, or float", desiredType) diff --git a/plugins/inputs/nfsclient/nfsclient.go b/plugins/inputs/nfsclient/nfsclient.go index 2f0917449..58bc1d279 100644 --- a/plugins/inputs/nfsclient/nfsclient.go +++ b/plugins/inputs/nfsclient/nfsclient.go @@ -306,12 +306,7 @@ func (n *NFSClient) Gather(acc telegraf.Accumulator) error { return err } - if err := scanner.Err(); err != nil { - n.Log.Errorf("%s", err) - return err - } - - return nil + return scanner.Err() } func (n *NFSClient) Init() error { diff --git a/plugins/inputs/nginx/nginx.go b/plugins/inputs/nginx/nginx.go index 461c4d50f..aa5987819 100644 --- a/plugins/inputs/nginx/nginx.go +++ b/plugins/inputs/nginx/nginx.go @@ -51,7 +51,7 @@ func (n *Nginx) Gather(acc telegraf.Accumulator) error { for _, u := range n.Urls { addr, err := url.Parse(u) if err != nil { - acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + acc.AddError(fmt.Errorf("Unable to parse address %q: %w", u, err)) continue } diff --git a/plugins/inputs/nginx_plus/nginx_plus.go b/plugins/inputs/nginx_plus/nginx_plus.go index 87206d12b..3a46442c6 100644 --- a/plugins/inputs/nginx_plus/nginx_plus.go +++ b/plugins/inputs/nginx_plus/nginx_plus.go @@ -52,7 +52,7 @@ func (n *NginxPlus) Gather(acc telegraf.Accumulator) error { for _, u := range n.Urls { addr, err := url.Parse(u) if err != nil { - acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + acc.AddError(fmt.Errorf("Unable to parse address %q: %w", u, err)) continue } diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api.go b/plugins/inputs/nginx_plus_api/nginx_plus_api.go index 71cc7fa25..2586722fb 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api.go @@ -74,7 +74,7 @@ func (n *NginxPlusAPI) Gather(acc telegraf.Accumulator) error { for _, u 
:= range n.Urls { addr, err := url.Parse(u) if err != nil { - acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + acc.AddError(fmt.Errorf("Unable to parse address %q: %w", u, err)) continue } diff --git a/plugins/inputs/nginx_sts/nginx_sts.go b/plugins/inputs/nginx_sts/nginx_sts.go index 1ce9db2cc..cbe80c2df 100644 --- a/plugins/inputs/nginx_sts/nginx_sts.go +++ b/plugins/inputs/nginx_sts/nginx_sts.go @@ -51,7 +51,7 @@ func (n *NginxSTS) Gather(acc telegraf.Accumulator) error { for _, u := range n.Urls { addr, err := url.Parse(u) if err != nil { - acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + acc.AddError(fmt.Errorf("Unable to parse address %q: %w", u, err)) continue } diff --git a/plugins/inputs/nginx_vts/nginx_vts.go b/plugins/inputs/nginx_vts/nginx_vts.go index d12a4d647..f811a2777 100644 --- a/plugins/inputs/nginx_vts/nginx_vts.go +++ b/plugins/inputs/nginx_vts/nginx_vts.go @@ -51,7 +51,7 @@ func (n *NginxVTS) Gather(acc telegraf.Accumulator) error { for _, u := range n.Urls { addr, err := url.Parse(u) if err != nil { - acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + acc.AddError(fmt.Errorf("Unable to parse address %q: %w", u, err)) continue } diff --git a/plugins/inputs/nsq/nsq.go b/plugins/inputs/nsq/nsq.go index a0eeddc77..e577d5ea6 100644 --- a/plugins/inputs/nsq/nsq.go +++ b/plugins/inputs/nsq/nsq.go @@ -165,7 +165,7 @@ func buildURL(e string) (*url.URL, error) { u := fmt.Sprintf(requestPattern, e) addr, err := url.Parse(u) if err != nil { - return nil, fmt.Errorf("unable to parse address '%s': %s", u, err) + return nil, fmt.Errorf("unable to parse address %q: %w", u, err) } return addr, nil } diff --git a/plugins/inputs/opcua_listener/subscribe_client.go b/plugins/inputs/opcua_listener/subscribe_client.go index 00fd20ad9..193d4f03b 100644 --- a/plugins/inputs/opcua_listener/subscribe_client.go +++ b/plugins/inputs/opcua_listener/subscribe_client.go @@ -3,13 +3,14 @@ package opcua_listener 
import ( "context" "fmt" + "reflect" + "time" + "github.com/gopcua/opcua" "github.com/gopcua/opcua/ua" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/opcua/input" - "reflect" - "time" ) type SubscribeClientConfig struct { @@ -140,7 +141,7 @@ func (o *SubscribeClient) processReceivedNotifications() { i := int(monitoredItemNotif.ClientHandle) oldValue := o.LastReceivedData[i].Value o.UpdateNodeValue(i, monitoredItemNotif.Value) - o.Log.Debugf("Data change notification: node '%s' value changed from %f to %f", + o.Log.Debugf("Data change notification: node %q value changed from %f to %f", o.NodeIDs[i].String(), oldValue, o.LastReceivedData[i].Value) o.metrics <- o.MetricForNode(i) } diff --git a/plugins/inputs/opensearch_query/opensearch_query.go b/plugins/inputs/opensearch_query/opensearch_query.go index d35895de7..2ef2f7614 100644 --- a/plugins/inputs/opensearch_query/opensearch_query.go +++ b/plugins/inputs/opensearch_query/opensearch_query.go @@ -90,7 +90,7 @@ func (o *OpensearchQuery) Init() error { func (o *OpensearchQuery) initAggregation(agg osAggregation, i int) (err error) { for _, metricField := range agg.MetricFields { if _, ok := agg.mapMetricFields[metricField]; !ok { - return fmt.Errorf("metric field '%s' not found on index '%s'", metricField, agg.Index) + return fmt.Errorf("metric field %q not found on index %q", metricField, agg.Index) } } diff --git a/plugins/inputs/opentelemetry/grpc_services.go b/plugins/inputs/opentelemetry/grpc_services.go index 527376446..b46076ed1 100644 --- a/plugins/inputs/opentelemetry/grpc_services.go +++ b/plugins/inputs/opentelemetry/grpc_services.go @@ -49,7 +49,7 @@ var metricsSchemata = map[string]common.MetricsSchema{ func newMetricsService(logger common.Logger, writer *writeToAccumulator, schema string) (*metricsService, error) { ms, found := metricsSchemata[schema] if !found { - return nil, fmt.Errorf("schema '%s' not recognized", schema) + 
return nil, fmt.Errorf("schema %q not recognized", schema) } converter, err := otel2influx.NewOtelMetricsToLineProtocol(logger, writer, ms) diff --git a/plugins/inputs/pf/pf.go b/plugins/inputs/pf/pf.go index f70218e74..184b818e0 100644 --- a/plugins/inputs/pf/pf.go +++ b/plugins/inputs/pf/pf.go @@ -57,7 +57,7 @@ func (pf *PF) Gather(acc telegraf.Accumulator) error { var errParseHeader = fmt.Errorf("Cannot find header in %s output", pfctlCommand) func errMissingData(tag string) error { - return fmt.Errorf("struct data for tag \"%s\" not found in %s output", tag, pfctlCommand) + return fmt.Errorf("struct data for tag %q not found in %s output", tag, pfctlCommand) } type pfctlOutputStanza struct { diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index 060e5865b..bba92472b 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -113,7 +113,7 @@ func (p *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error { if strings.HasPrefix(addr, "fcgi://") || strings.HasPrefix(addr, "cgi://") { u, err := url.Parse(addr) if err != nil { - return fmt.Errorf("unable parse server address '%s': %s", addr, err) + return fmt.Errorf("unable parse server address %q: %w", addr, err) } socketAddr := strings.Split(u.Host, ":") fcgiIP := socketAddr[0] @@ -165,22 +165,22 @@ func (p *phpfpm) gatherFcgi(fcgi *conn, statusPath string, acc telegraf.Accumula func (p *phpfpm) gatherHTTP(addr string, acc telegraf.Accumulator) error { u, err := url.Parse(addr) if err != nil { - return fmt.Errorf("unable parse server address '%s': %v", addr, err) + return fmt.Errorf("unable parse server address %q: %w", addr, err) } req, err := http.NewRequest("GET", u.String(), nil) if err != nil { - return fmt.Errorf("unable to create new request '%s': %v", addr, err) + return fmt.Errorf("unable to create new request %q: %w", addr, err) } res, err := p.client.Do(req) if err != nil { - return fmt.Errorf("unable to connect to phpfpm status page 
'%s': %v", addr, err) + return fmt.Errorf("unable to connect to phpfpm status page %q: %w", addr, err) } defer res.Body.Close() if res.StatusCode != 200 { - return fmt.Errorf("unable to get valid stat result from '%s': %v", addr, err) + return fmt.Errorf("unable to get valid stat result from %q: %w", addr, err) } importMetric(res.Body, acc, addr) diff --git a/plugins/inputs/procstat/native_finder.go b/plugins/inputs/procstat/native_finder.go index 3e8f580eb..134336ee6 100644 --- a/plugins/inputs/procstat/native_finder.go +++ b/plugins/inputs/procstat/native_finder.go @@ -45,8 +45,7 @@ func (pg *NativeFinder) PidFile(path string) ([]PID, error) { var pids []PID pidString, err := os.ReadFile(path) if err != nil { - return pids, fmt.Errorf("Failed to read pidfile '%s'. Error: '%s'", - path, err) + return pids, fmt.Errorf("Failed to read pidfile %q: %w", path, err) } pid, err := strconv.ParseInt(strings.TrimSpace(string(pidString)), 10, 32) if err != nil { diff --git a/plugins/inputs/procstat/pgrep.go b/plugins/inputs/procstat/pgrep.go index 34c44e0b2..137512b1b 100644 --- a/plugins/inputs/procstat/pgrep.go +++ b/plugins/inputs/procstat/pgrep.go @@ -27,7 +27,7 @@ func (pg *Pgrep) PidFile(path string) ([]PID, error) { var pids []PID pidString, err := os.ReadFile(path) if err != nil { - return pids, fmt.Errorf("Failed to read pidfile '%s'. 
Error: '%s'", + return pids, fmt.Errorf("Failed to read pidfile %q: %w", path, err) } pid, err := strconv.ParseInt(strings.TrimSpace(string(pidString)), 10, 32) diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 7c124b455..6c346fe6c 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -432,7 +432,7 @@ func (p *Procstat) simpleSystemdUnitPIDs() ([]PID, error) { } pid, err := strconv.ParseInt(string(kv[1]), 10, 32) if err != nil { - return nil, fmt.Errorf("invalid pid '%s'", kv[1]) + return nil, fmt.Errorf("invalid pid %q", kv[1]) } pids = append(pids, PID(pid)) } @@ -448,7 +448,7 @@ func (p *Procstat) cgroupPIDs() []PidsTags { items, err := filepath.Glob(procsPath) if err != nil { - return []PidsTags{{nil, nil, fmt.Errorf("glob failed '%s'", err)}} + return []PidsTags{{nil, nil, fmt.Errorf("glob failed: %w", err)}} } pidTags := make([]PidsTags, 0, len(items)) @@ -483,7 +483,7 @@ func (p *Procstat) singleCgroupPIDs(path string) ([]PID, error) { } pid, err := strconv.ParseInt(string(pidBS), 10, 32) if err != nil { - return nil, fmt.Errorf("invalid pid '%s'", pidBS) + return nil, fmt.Errorf("invalid pid %q", pidBS) } pids = append(pids, PID(pid)) } diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 407b964c1..7ca7a15cf 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -284,7 +284,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error addr := "http://localhost" + path req, err = http.NewRequest("GET", addr, nil) if err != nil { - return fmt.Errorf("unable to create new request '%s': %s", addr, err) + return fmt.Errorf("unable to create new request %q: %w", addr, err) } // ignore error because it's been handled before getting here @@ -306,7 +306,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error } req, err = 
http.NewRequest("GET", u.URL.String(), nil) if err != nil { - return fmt.Errorf("unable to create new request '%s': %s", u.URL.String(), err) + return fmt.Errorf("unable to create new request %q: %w", u.URL.String(), err) } } diff --git a/plugins/inputs/puppetagent/puppetagent.go b/plugins/inputs/puppetagent/puppetagent.go index ce82c593e..f4332858d 100644 --- a/plugins/inputs/puppetagent/puppetagent.go +++ b/plugins/inputs/puppetagent/puppetagent.go @@ -93,19 +93,19 @@ func (pa *PuppetAgent) Gather(acc telegraf.Accumulator) error { } if _, err := os.Stat(pa.Location); err != nil { - return fmt.Errorf("%s", err) + return err } fh, err := os.ReadFile(pa.Location) if err != nil { - return fmt.Errorf("%s", err) + return err } var puppetState State err = yaml.Unmarshal(fh, &puppetState) if err != nil { - return fmt.Errorf("%s", err) + return err } tags := map[string]string{"location": pa.Location} diff --git a/plugins/inputs/raindrops/raindrops.go b/plugins/inputs/raindrops/raindrops.go index 7fd0a10e7..ae1eaaa3a 100644 --- a/plugins/inputs/raindrops/raindrops.go +++ b/plugins/inputs/raindrops/raindrops.go @@ -35,7 +35,7 @@ func (r *Raindrops) Gather(acc telegraf.Accumulator) error { for _, u := range r.Urls { addr, err := url.Parse(u) if err != nil { - acc.AddError(fmt.Errorf("unable to parse address '%s': %s", u, err)) + acc.AddError(fmt.Errorf("unable to parse address %q: %w", u, err)) continue } diff --git a/plugins/inputs/ravendb/ravendb.go b/plugins/inputs/ravendb/ravendb.go index 37873fd3b..08d0c44ff 100644 --- a/plugins/inputs/ravendb/ravendb.go +++ b/plugins/inputs/ravendb/ravendb.go @@ -126,7 +126,7 @@ func (r *RavenDB) requestJSON(u string, target interface{}) error { r.Log.Debugf("%s: %s", u, resp.Status) if resp.StatusCode >= 400 { - return fmt.Errorf("invalid response code to request '%s': %d - %s", r.URL, resp.StatusCode, resp.Status) + return fmt.Errorf("invalid response code to request %q: %d - %s", r.URL, resp.StatusCode, resp.Status) } return 
json.NewDecoder(resp.Body).Decode(target) diff --git a/plugins/inputs/rethinkdb/rethinkdb.go b/plugins/inputs/rethinkdb/rethinkdb.go index 5cc133f93..3f690a6fa 100644 --- a/plugins/inputs/rethinkdb/rethinkdb.go +++ b/plugins/inputs/rethinkdb/rethinkdb.go @@ -38,7 +38,7 @@ func (r *RethinkDB) Gather(acc telegraf.Accumulator) error { for _, serv := range r.Servers { u, err := url.Parse(serv) if err != nil { - acc.AddError(fmt.Errorf("unable to parse to address '%s': %s", serv, err)) + acc.AddError(fmt.Errorf("unable to parse to address %q: %w", serv, err)) continue } else if u.Scheme == "" { // fallback to simple string based address (i.e. "10.0.0.1:10000") diff --git a/plugins/inputs/riemann_listener/riemann_listener.go b/plugins/inputs/riemann_listener/riemann_listener.go index 97ff8bca8..8944bf127 100644 --- a/plugins/inputs/riemann_listener/riemann_listener.go +++ b/plugins/inputs/riemann_listener/riemann_listener.go @@ -326,7 +326,7 @@ func (rsl *RiemannSocketListener) Start(acc telegraf.Accumulator) error { rsl.listen(ctx) }() default: - return fmt.Errorf("unknown protocol '%s' in '%s'", protocol, rsl.ServiceAddress) + return fmt.Errorf("unknown protocol %q in %q", protocol, rsl.ServiceAddress) } return nil diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go index 629c54ef5..7d68af1d3 100644 --- a/plugins/inputs/smart/smart.go +++ b/plugins/inputs/smart/smart.go @@ -402,7 +402,7 @@ func (m *Smart) Init() error { } if !contains(knownReadMethods, m.ReadMethod) { - return fmt.Errorf("provided read method `%s` is not valid", m.ReadMethod) + return fmt.Errorf("provided read method %q is not valid", m.ReadMethod) } err := validatePath(m.PathSmartctl) @@ -864,7 +864,7 @@ func (m *Smart) gatherDisk(acc telegraf.Accumulator, device string, wg *sync.Wai } if err := parse(fields, deviceFields, matches[2]); err != nil { - acc.AddError(fmt.Errorf("error parsing %s: '%s': %s", attr.Name, matches[2], err.Error())) + acc.AddError(fmt.Errorf("error 
parsing %s: %q: %w", attr.Name, matches[2], err)) continue } // if the field is classified as an attribute, only add it @@ -923,7 +923,7 @@ func parseRawValue(rawVal string) (int64, error) { unit := regexp.MustCompile("^(.*)([hms])$") parts := strings.Split(rawVal, "+") if len(parts) == 0 { - return 0, fmt.Errorf("couldn't parse RAW_VALUE '%s'", rawVal) + return 0, fmt.Errorf("couldn't parse RAW_VALUE %q", rawVal) } duration := int64(0) diff --git a/plugins/inputs/snmp/gosmi_test.go b/plugins/inputs/snmp/gosmi_test.go index 26125599f..19f838da2 100644 --- a/plugins/inputs/snmp/gosmi_test.go +++ b/plugins/inputs/snmp/gosmi_test.go @@ -104,10 +104,10 @@ func TestFieldInitGosmi(t *testing.T) { for _, txl := range translations { f := Field{Oid: txl.inputOid, Name: txl.inputName, Conversion: txl.inputConversion} err := f.init(tr) - require.NoError(t, err, "inputOid='%s' inputName='%s'", txl.inputOid, txl.inputName) + require.NoError(t, err, "inputOid=%q inputName=%q", txl.inputOid, txl.inputName) - assert.Equal(t, txl.expectedOid, f.Oid, "inputOid='%s' inputName='%s' inputConversion='%s'", txl.inputOid, txl.inputName, txl.inputConversion) - assert.Equal(t, txl.expectedName, f.Name, "inputOid='%s' inputName='%s' inputConversion='%s'", txl.inputOid, txl.inputName, txl.inputConversion) + assert.Equal(t, txl.expectedOid, f.Oid, "inputOid=%q inputName=%q inputConversion=%q", txl.inputOid, txl.inputName, txl.inputConversion) + assert.Equal(t, txl.expectedName, f.Name, "inputOid=%q inputName=%q inputConversion=%q", txl.inputOid, txl.inputName, txl.inputConversion) } } diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index eeca2dfa0..787789ad8 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -768,7 +768,7 @@ func fieldConvert(tr Translator, conv string, ent gosnmp.SnmpPDU) (v interface{} return tr.SnmpFormatEnum(ent.Name, ent.Value, true) } - return nil, fmt.Errorf("invalid conversion type '%s'", conv) + return nil, 
fmt.Errorf("invalid conversion type %q", conv) } func init() { diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 360f0104e..d7bf9c37f 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -121,11 +121,11 @@ func TestFieldInit(t *testing.T) { for _, txl := range translations { f := Field{Oid: txl.inputOid, Name: txl.inputName, Conversion: txl.inputConversion} err := f.init(tr) - if !assert.NoError(t, err, "inputOid='%s' inputName='%s'", txl.inputOid, txl.inputName) { + if !assert.NoError(t, err, "inputOid=%q inputName=%q", txl.inputOid, txl.inputName) { continue } - assert.Equal(t, txl.expectedOid, f.Oid, "inputOid='%s' inputName='%s' inputConversion='%s'", txl.inputOid, txl.inputName, txl.inputConversion) - assert.Equal(t, txl.expectedName, f.Name, "inputOid='%s' inputName='%s' inputConversion='%s'", txl.inputOid, txl.inputName, txl.inputConversion) + assert.Equal(t, txl.expectedOid, f.Oid, "inputOid=%q inputName=%q inputConversion=%q", txl.inputOid, txl.inputName, txl.inputConversion) + assert.Equal(t, txl.expectedName, f.Name, "inputOid=%q inputName=%q inputConversion=%q", txl.inputOid, txl.inputName, txl.inputConversion) } } diff --git a/plugins/inputs/snmp_trap/snmp_trap.go b/plugins/inputs/snmp_trap/snmp_trap.go index b00db021e..36d84abc2 100644 --- a/plugins/inputs/snmp_trap/snmp_trap.go +++ b/plugins/inputs/snmp_trap/snmp_trap.go @@ -128,7 +128,7 @@ func (s *SnmpTrap) Start(acc telegraf.Accumulator) error { case "authpriv": s.listener.Params.MsgFlags = gosnmp.AuthPriv default: - return fmt.Errorf("unknown security level '%s'", s.SecLevel) + return fmt.Errorf("unknown security level %q", s.SecLevel) } var authenticationProtocol gosnmp.SnmpV3AuthProtocol @@ -148,7 +148,7 @@ func (s *SnmpTrap) Start(acc telegraf.Accumulator) error { case "": authenticationProtocol = gosnmp.NoAuth default: - return fmt.Errorf("unknown authentication protocol '%s'", s.AuthProtocol) + return fmt.Errorf("unknown 
authentication protocol %q", s.AuthProtocol) } var privacyProtocol gosnmp.SnmpV3PrivProtocol @@ -168,7 +168,7 @@ func (s *SnmpTrap) Start(acc telegraf.Accumulator) error { case "": privacyProtocol = gosnmp.NoPriv default: - return fmt.Errorf("unknown privacy protocol '%s'", s.PrivProtocol) + return fmt.Errorf("unknown privacy protocol %q", s.PrivProtocol) } secname, err := s.SecName.Get() @@ -211,7 +211,7 @@ func (s *SnmpTrap) Start(acc telegraf.Accumulator) error { // gosnmp.TrapListener currently supports udp only. For forward // compatibility, require udp in the service address if protocol != "udp" { - return fmt.Errorf("unknown protocol '%s' in '%s'", protocol, s.ServiceAddress) + return fmt.Errorf("unknown protocol %q in %q", protocol, s.ServiceAddress) } // If (*TrapListener).Listen immediately returns an error we need diff --git a/plugins/inputs/statsd/datadog.go b/plugins/inputs/statsd/datadog.go index df35198b1..6aec2b726 100644 --- a/plugins/inputs/statsd/datadog.go +++ b/plugins/inputs/statsd/datadog.go @@ -50,14 +50,14 @@ func (s *Statsd) parseEventMessage(now time.Time, message string, defaultHostnam titleLen, err := strconv.ParseInt(rawLen[0], 10, 64) if err != nil { - return fmt.Errorf("invalid message format, could not parse title.length: '%s'", rawLen[0]) + return fmt.Errorf("invalid message format, could not parse title.length: %q", rawLen[0]) } if len(rawLen[1]) < 1 { - return fmt.Errorf("invalid message format, could not parse text.length: '%s'", rawLen[0]) + return fmt.Errorf("invalid message format, could not parse text.length: %q", rawLen[0]) } textLen, err := strconv.ParseInt(rawLen[1][:len(rawLen[1])-1], 10, 64) if err != nil { - return fmt.Errorf("invalid message format, could not parse text.length: '%s'", rawLen[0]) + return fmt.Errorf("invalid message format, could not parse text.length: %q", rawLen[0]) } if titleLen+textLen+1 > int64(len(message)) { return fmt.Errorf("invalid message format, title.length and text.length exceed total 
message length") @@ -121,7 +121,7 @@ func (s *Statsd) parseEventMessage(now time.Time, message string, defaultHostnam fields["source_type_name"] = rawMetadataFields[i][2:] default: if rawMetadataFields[i][0] != '#' { - return fmt.Errorf("unknown metadata type: '%s'", rawMetadataFields[i]) + return fmt.Errorf("unknown metadata type: %q", rawMetadataFields[i]) } parseDataDogTags(tags, rawMetadataFields[i][1:]) } diff --git a/plugins/inputs/synproxy/synproxy_linux.go b/plugins/inputs/synproxy/synproxy_linux.go index 86e26f26d..0cfb71761 100644 --- a/plugins/inputs/synproxy/synproxy_linux.go +++ b/plugins/inputs/synproxy/synproxy_linux.go @@ -79,7 +79,7 @@ func (k *Synproxy) getSynproxyStat() (map[string]interface{}, error) { x, err := strconv.ParseUint(val, 16, 32) // If field is not a valid hexstring if err != nil { - return nil, fmt.Errorf("invalid value '%s' found", val) + return nil, fmt.Errorf("invalid value %q found", val) } if hname[i] != "" { fields[hname[i]] = fields[hname[i]].(uint32) + uint32(x) diff --git a/plugins/inputs/syslog/syslog.go b/plugins/inputs/syslog/syslog.go index 1ae7c5fb7..c3c690733 100644 --- a/plugins/inputs/syslog/syslog.go +++ b/plugins/inputs/syslog/syslog.go @@ -93,7 +93,7 @@ func (s *Syslog) Start(acc telegraf.Accumulator) error { case "udp", "udp4", "udp6", "ip", "ip4", "ip6", "unixgram": s.isStream = false default: - return fmt.Errorf("unknown protocol '%s' in '%s'", scheme, s.Address) + return fmt.Errorf("unknown protocol %q in %q", scheme, s.Address) } if scheme == "unix" || scheme == "unixpacket" || scheme == "unixgram" { @@ -150,12 +150,12 @@ func (s *Syslog) Stop() { func getAddressParts(a string) (scheme string, host string, err error) { parts := strings.SplitN(a, "://", 2) if len(parts) != 2 { - return "", "", fmt.Errorf("missing protocol within address '%s'", a) + return "", "", fmt.Errorf("missing protocol within address %q", a) } u, err := url.Parse(filepath.ToSlash(a)) //convert backslashes to slashes (to make Windows 
path a valid URL) if err != nil { - return "", "", fmt.Errorf("could not parse address '%s': %v", a, err) + return "", "", fmt.Errorf("could not parse address %q: %w", a, err) } switch u.Scheme { case "unix", "unixpacket", "unixgram": diff --git a/plugins/inputs/syslog/syslog_test.go b/plugins/inputs/syslog/syslog_test.go index 165da5971..efff4714c 100644 --- a/plugins/inputs/syslog/syslog_test.go +++ b/plugins/inputs/syslog/syslog_test.go @@ -35,14 +35,14 @@ func TestAddress(t *testing.T) { Address: "localhost:6514", } err = rec.Start(&testutil.Accumulator{}) - require.EqualError(t, err, "missing protocol within address 'localhost:6514'") + require.EqualError(t, err, `missing protocol within address "localhost:6514"`) require.Error(t, err) rec = &Syslog{ Address: "unsupported://example.com:6514", } err = rec.Start(&testutil.Accumulator{}) - require.EqualError(t, err, "unknown protocol 'unsupported' in 'example.com:6514'") + require.EqualError(t, err, `unknown protocol "unsupported" in "example.com:6514"`) require.Error(t, err) tmpdir := t.TempDir() diff --git a/plugins/inputs/sysstat/sysstat.go b/plugins/inputs/sysstat/sysstat.go index 24d57ef93..16388e8fd 100644 --- a/plugins/inputs/sysstat/sysstat.go +++ b/plugins/inputs/sysstat/sysstat.go @@ -202,7 +202,7 @@ func (s *Sysstat) parse(acc telegraf.Accumulator, option string, tmpfile string, return err } if err := cmd.Start(); err != nil { - return fmt.Errorf("running command '%s' failed: %s", strings.Join(cmd.Args, " "), err) + return fmt.Errorf("running command %q failed: %w", strings.Join(cmd.Args, " "), err) } r := bufio.NewReader(stdout) diff --git a/plugins/inputs/tengine/tengine.go b/plugins/inputs/tengine/tengine.go index 0313e7727..1a6e307f8 100644 --- a/plugins/inputs/tengine/tengine.go +++ b/plugins/inputs/tengine/tengine.go @@ -51,7 +51,7 @@ func (n *Tengine) Gather(acc telegraf.Accumulator) error { for _, u := range n.Urls { addr, err := url.Parse(u) if err != nil { - acc.AddError(fmt.Errorf("Unable to 
parse address '%s': %s", u, err)) + acc.AddError(fmt.Errorf("Unable to parse address %q: %w", u, err)) continue } diff --git a/plugins/inputs/uwsgi/uwsgi.go b/plugins/inputs/uwsgi/uwsgi.go index 3c472e1ee..bd241b8f7 100644 --- a/plugins/inputs/uwsgi/uwsgi.go +++ b/plugins/inputs/uwsgi/uwsgi.go @@ -52,7 +52,7 @@ func (u *Uwsgi) Gather(acc telegraf.Accumulator) error { defer wg.Done() n, err := url.Parse(s) if err != nil { - acc.AddError(fmt.Errorf("could not parse uWSGI Stats Server url '%s': %s", s, err.Error())) + acc.AddError(fmt.Errorf("could not parse uWSGI Stats Server url %q: %w", s, err)) return } @@ -97,13 +97,13 @@ func (u *Uwsgi) gatherServer(acc telegraf.Accumulator, address *url.URL) error { r = resp.Body s.source = address.Host default: - return fmt.Errorf("'%s' is not a supported scheme", address.Scheme) + return fmt.Errorf("%q is not a supported scheme", address.Scheme) } defer r.Close() if err := json.NewDecoder(r).Decode(&s); err != nil { - return fmt.Errorf("failed to decode json payload from '%s': %s", address.String(), err.Error()) + return fmt.Errorf("failed to decode json payload from %q: %w", address.String(), err) } u.gatherStatServer(acc, &s) diff --git a/plugins/inputs/win_perf_counters/pdh.go b/plugins/inputs/win_perf_counters/pdh.go index 34aaf2c39..a498e11d8 100644 --- a/plugins/inputs/win_perf_counters/pdh.go +++ b/plugins/inputs/win_perf_counters/pdh.go @@ -567,7 +567,7 @@ func PdhFormatError(msgId uint32) string { buf := make([]uint16, 300) _, err := windows.FormatMessage(flags, uintptr(libpdhDll.Handle), msgId, 0, buf, nil) if err == nil { - return fmt.Sprintf("%s", UTF16PtrToString(&buf[0])) + return UTF16PtrToString(&buf[0]) } return fmt.Sprintf("(pdhErr=%d) %s", msgId, err.Error()) } diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go index 5a1dc4187..7b701ff27 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters.go +++ 
b/plugins/inputs/win_perf_counters/win_perf_counters.go @@ -357,7 +357,7 @@ func (m *WinPerfCounters) ParseConfig() error { } for _, counter := range PerfObject.Counters { if len(PerfObject.Instances) == 0 { - m.Log.Warnf("Missing 'Instances' param for object '%s'\n", PerfObject.ObjectName) + m.Log.Warnf("Missing 'Instances' param for object %q", PerfObject.ObjectName) } for _, instance := range PerfObject.Instances { objectname := PerfObject.ObjectName @@ -367,7 +367,7 @@ func (m *WinPerfCounters) ParseConfig() error { err := m.AddItem(counterPath, computer, objectname, instance, counter, PerfObject.Measurement, PerfObject.IncludeTotal, PerfObject.UseRawValues) if err != nil { if PerfObject.FailOnMissing || PerfObject.WarnOnMissing { - m.Log.Errorf("invalid counterPath: '%s'. Error: %s\n", counterPath, err.Error()) + m.Log.Errorf("invalid counterPath %q: %s", counterPath, err.Error()) } if PerfObject.FailOnMissing { return err @@ -440,7 +440,7 @@ func (m *WinPerfCounters) Gather(acc telegraf.Accumulator) error { err := m.gatherComputerCounters(hostInfo, acc) m.Log.Debugf("gathering from %s finished in %.3fs", hostInfo.computer, time.Since(start)) if err != nil { - acc.AddError(fmt.Errorf("error during collecting data on host '%s': %s", hostInfo.computer, err.Error())) + acc.AddError(fmt.Errorf("error during collecting data on host %q: %w", hostInfo.computer, err)) } wg.Done() }(hostCounterInfo) diff --git a/plugins/inputs/win_services/win_services.go b/plugins/inputs/win_services/win_services.go index cddbb11d1..e2060d10c 100644 --- a/plugins/inputs/win_services/win_services.go +++ b/plugins/inputs/win_services/win_services.go @@ -26,7 +26,7 @@ type ServiceErr struct { } func (e *ServiceErr) Error() string { - return fmt.Sprintf("%s: '%s': %v", e.Message, e.Service, e.Err) + return fmt.Sprintf("%s: %q: %v", e.Message, e.Service, e.Err) } func IsPermission(err error) bool { diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go 
index 0bdb81d7d..96379c7f6 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -99,7 +99,7 @@ func (c *X509Cert) Gather(acc telegraf.Accumulator) error { for _, location := range collectedUrls { certs, ocspresp, err := c.getCert(location, time.Duration(c.Timeout)) if err != nil { - acc.AddError(fmt.Errorf("cannot get SSL cert '%s': %s", location, err.Error())) + acc.AddError(fmt.Errorf("cannot get SSL cert %q: %w", location, err)) } // Add all returned certs to the pool of intermediates except for @@ -446,7 +446,7 @@ func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certifica return certs, &ocspresp, nil default: - return nil, nil, fmt.Errorf("unsupported scheme '%s' in location %s", u.Scheme, u.String()) + return nil, nil, fmt.Errorf("unsupported scheme %q in location %s", u.Scheme, u.String()) } } diff --git a/plugins/outputs/application_insights/application_insights_test.go b/plugins/outputs/application_insights/application_insights_test.go index 24ed7042d..fb6084238 100644 --- a/plugins/outputs/application_insights/application_insights_test.go +++ b/plugins/outputs/application_insights/application_insights_test.go @@ -468,7 +468,7 @@ func assertMapContains(t *testing.T, expected, actual map[string]string) { for k, v := range expected { av, ok := actual[k] - require.True(t, ok, "Actual map does not contain a value for key '%s'", k) - require.Equal(t, v, av, "The expected value for key '%s' is '%s' but the actual value is '%s", k, v, av) + require.True(t, ok, "Actual map does not contain a value for key %q", k) + require.Equal(t, v, av, "The expected value for key %q is %q but the actual value is %q", k, v, av) } } diff --git a/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go b/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go index 7d13b82e3..8dc0e78ac 100644 --- a/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go +++ b/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go @@ -127,11 +127,11 @@ 
func (c *CloudWatchLogs) Init() error { c.logDatKey = lsSplitArray[0] c.logDataSource = lsSplitArray[1] - c.Log.Debugf("Log data: key '%s', source '%s'...", c.logDatKey, c.logDataSource) + c.Log.Debugf("Log data: key %q, source %q...", c.logDatKey, c.logDataSource) if c.lsSource == "" { c.lsSource = c.LogStream - c.Log.Debugf("Log stream '%s'...", c.lsSource) + c.Log.Debugf("Log stream %q...", c.lsSource) } return nil diff --git a/plugins/outputs/elasticsearch/elasticsearch.go b/plugins/outputs/elasticsearch/elasticsearch.go index ed4a1ab4f..68ab562c0 100644 --- a/plugins/outputs/elasticsearch/elasticsearch.go +++ b/plugins/outputs/elasticsearch/elasticsearch.go @@ -426,7 +426,7 @@ func (a *Elasticsearch) GetIndexName(indexName string, eventTime time.Time, tagK if value, ok := metricTags[key]; ok { tagValues = append(tagValues, value) } else { - a.Log.Debugf("Tag '%s' not found, using '%s' on index name instead\n", key, a.DefaultTagValue) + a.Log.Debugf("Tag %q not found, using %q on index name instead\n", key, a.DefaultTagValue) tagValues = append(tagValues, a.DefaultTagValue) } } diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index a7b826e37..f062e4fe4 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -133,7 +133,7 @@ func (k *KinesisOutput) getPartitionKey(metric telegraf.Metric) string { // Default partition name if default is not set return "telegraf" default: - k.Log.Errorf("You have configured a Partition method of '%s' which is not supported", k.Partition.Method) + k.Log.Errorf("You have configured a Partition method of %q which is not supported", k.Partition.Method) } } if k.RandomPartitionKey { diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index e30c58f2e..713d52d92 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -401,11 +401,11 @@ func (p *Postgresql) 
writeMetricsFromMeasure(ctx context.Context, db dbh, tableS if p.TagsAsForeignKeys { if err = p.writeTagTable(ctx, db, tableSource); err != nil { if p.ForeignTagConstraint { - return fmt.Errorf("writing to tag table '%s': %w", tableSource.Name()+p.TagTableSuffix, err) + return fmt.Errorf("writing to tag table %q: %w", tableSource.Name()+p.TagTableSuffix, err) } // log and continue. As the admin can correct the issue, and tags don't change over time, they can be // added from future metrics after issue is corrected. - p.Logger.Errorf("writing to tag table %q: %s", tableSource.Name()+p.TagTableSuffix, err) + p.Logger.Errorf("writing to tag table %q: %s", tableSource.Name()+p.TagTableSuffix, err.Error()) } } diff --git a/plugins/outputs/postgresql/table_manager.go b/plugins/outputs/postgresql/table_manager.go index 53812a858..3e3171ecc 100644 --- a/plugins/outputs/postgresql/table_manager.go +++ b/plugins/outputs/postgresql/table_manager.go @@ -96,11 +96,11 @@ func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *Tabl colDefs := make([]string, 0, len(missingCols)) for _, col := range missingCols { if err := rowSource.DropColumn(col); err != nil { - return fmt.Errorf("metric/table mismatch: Unable to omit field/column from \"%s\": %w", tagTable.name, err) + return fmt.Errorf("metric/table mismatch: Unable to omit field/column from %q: %w", tagTable.name, err) } colDefs = append(colDefs, col.Name+" "+col.Type) } - tm.Logger.Errorf("table '%s' is missing tag columns (dropping metrics): %s", + tm.Logger.Errorf("table %q is missing tag columns (dropping metrics): %s", tagTable.name, strings.Join(colDefs, ", ")) } @@ -127,11 +127,11 @@ func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *Tabl colDefs := make([]string, 0, len(missingCols)) for _, col := range missingCols { if err := rowSource.DropColumn(col); err != nil { - return fmt.Errorf("metric/table mismatch: Unable to omit field/column from \"%s\": %w", metricTable.name, 
err) + return fmt.Errorf("metric/table mismatch: Unable to omit field/column from %q: %w", metricTable.name, err) } colDefs = append(colDefs, col.Name+" "+col.Type) } - tm.Logger.Errorf("table '%s' is missing columns (omitting fields): %s", + tm.Logger.Errorf("table %q is missing columns (omitting fields): %s", metricTable.name, strings.Join(colDefs, ", ")) } @@ -187,9 +187,9 @@ func (tm *TableManager) EnsureStructure( } if col.Role == utils.TagColType { - return nil, fmt.Errorf("column name too long: \"%s\"", col.Name) + return nil, fmt.Errorf("column name too long: %q", col.Name) } - tm.Postgresql.Logger.Errorf("column name too long: \"%s\"", col.Name) + tm.Postgresql.Logger.Errorf("column name too long: %q", col.Name) invalidColumns = append(invalidColumns, col) } @@ -370,7 +370,7 @@ func (tm *TableManager) update(ctx context.Context, return err } if _, err := tx.Exec(ctx, string(sql)); err != nil { - return fmt.Errorf("executing `%s`: %w", sql, err) + return fmt.Errorf("executing %q: %w", sql, err) } } diff --git a/plugins/outputs/postgresql/table_source.go b/plugins/outputs/postgresql/table_source.go index f819eea2d..1b8c66626 100644 --- a/plugins/outputs/postgresql/table_source.go +++ b/plugins/outputs/postgresql/table_source.go @@ -196,9 +196,9 @@ func (tsrc *TableSource) DropColumn(col utils.Column) error { case utils.FieldColType: return tsrc.dropFieldColumn(col) case utils.TimeColType, utils.TagsIDColType: - return fmt.Errorf("critical column \"%s\"", col.Name) + return fmt.Errorf("critical column %q", col.Name) default: - return fmt.Errorf("internal error: unknown column \"%s\"", col.Name) + return fmt.Errorf("internal error: unknown column %q", col.Name) } } diff --git a/plugins/outputs/timestream/timestream.go b/plugins/outputs/timestream/timestream.go index 48a23ef5c..f5fe7ab99 100644 --- a/plugins/outputs/timestream/timestream.go +++ b/plugins/outputs/timestream/timestream.go @@ -130,41 +130,41 @@ func (t *Timestream) Connect() error { } if 
t.MappingMode != MappingModeSingleTable && t.MappingMode != MappingModeMultiTable { - return fmt.Errorf("correct MappingMode key values are: '%s', '%s'", + return fmt.Errorf("correct MappingMode key values are: %q, %q", MappingModeSingleTable, MappingModeMultiTable) } if t.MappingMode == MappingModeSingleTable { if t.SingleTableName == "" { - return fmt.Errorf("in '%s' mapping mode, SingleTableName key is required", MappingModeSingleTable) + return fmt.Errorf("in %q mapping mode, SingleTableName key is required", MappingModeSingleTable) } if t.SingleTableDimensionNameForTelegrafMeasurementName == "" && !t.UseMultiMeasureRecords { - return fmt.Errorf("in '%s' mapping mode, SingleTableDimensionNameForTelegrafMeasurementName key is required", + return fmt.Errorf("in %q mapping mode, SingleTableDimensionNameForTelegrafMeasurementName key is required", MappingModeSingleTable) } // When using MappingModeSingleTable with UseMultiMeasureRecords enabled, // measurementName ( from line protocol ) is mapped to multiMeasure name in timestream. 
if t.UseMultiMeasureRecords && t.MeasureNameForMultiMeasureRecords != "" { - return fmt.Errorf("in '%s' mapping mode, with multi-measure enabled, key MeasureNameForMultiMeasureRecords is invalid", MappingModeMultiTable) + return fmt.Errorf("in %q mapping mode, with multi-measure enabled, key MeasureNameForMultiMeasureRecords is invalid", MappingModeSingleTable) } } if t.MappingMode == MappingModeMultiTable { if t.SingleTableName != "" { - return fmt.Errorf("in '%s' mapping mode, do not specify SingleTableName key", MappingModeMultiTable) + return fmt.Errorf("in %q mapping mode, do not specify SingleTableName key", MappingModeMultiTable) } if t.SingleTableDimensionNameForTelegrafMeasurementName != "" { - return fmt.Errorf("in '%s' mapping mode, do not specify SingleTableDimensionNameForTelegrafMeasurementName key", MappingModeMultiTable) + return fmt.Errorf("in %q mapping mode, do not specify SingleTableDimensionNameForTelegrafMeasurementName key", MappingModeMultiTable) } // When using MappingModeMultiTable ( data is ingested to multiple tables ) with // UseMultiMeasureRecords enabled, measurementName is used as tableName in timestream and // we require MeasureNameForMultiMeasureRecords to be configured. 
if t.UseMultiMeasureRecords && t.MeasureNameForMultiMeasureRecords == "" { - return fmt.Errorf("in '%s' mapping mode, with multi-measure enabled, key MeasureNameForMultiMeasureRecords is required", MappingModeMultiTable) + return fmt.Errorf("in %q mapping mode, with multi-measure enabled, key MeasureNameForMultiMeasureRecords is required", MappingModeMultiTable) } } @@ -182,7 +182,7 @@ func (t *Timestream) Connect() error { t.MaxWriteGoRoutinesCount = MaxWriteRoutinesDefault } - t.Log.Infof("Constructing Timestream client for '%s' mode", t.MappingMode) + t.Log.Infof("Constructing Timestream client for %q mode", t.MappingMode) svc, err := WriteFactory(&t.CredentialConfig) if err != nil { @@ -190,17 +190,17 @@ func (t *Timestream) Connect() error { } if t.DescribeDatabaseOnStart { - t.Log.Infof("Describing database '%s' in region '%s'", t.DatabaseName, t.Region) + t.Log.Infof("Describing database %q in region %q", t.DatabaseName, t.Region) describeDatabaseInput := ×treamwrite.DescribeDatabaseInput{ DatabaseName: aws.String(t.DatabaseName), } describeDatabaseOutput, err := svc.DescribeDatabase(context.Background(), describeDatabaseInput) if err != nil { - t.Log.Errorf("Couldn't describe database '%s'. Check error, fix permissions, connectivity, create database.", t.DatabaseName) + t.Log.Errorf("Couldn't describe database %q. Check error, fix permissions, connectivity, create database.", t.DatabaseName) return err } - t.Log.Infof("Describe database '%s' returned: '%s'.", t.DatabaseName, describeDatabaseOutput) + t.Log.Infof("Describe database %q returned %q.", t.DatabaseName, describeDatabaseOutput) } t.svc = svc @@ -279,7 +279,7 @@ func (t *Timestream) writeToTimestream(writeRecordsInput *timestreamwrite.WriteR var notFound *types.ResourceNotFoundException if errors.As(err, ¬Found) { if resourceNotFoundRetry { - t.Log.Warnf("Failed to write to Timestream database '%s' table '%s'. 
 Error: '%s'", + t.Log.Warnf("Failed to write to Timestream database %q table %q: %s", t.DatabaseName, *writeRecordsInput.TableName, notFound) return t.createTableAndRetry(writeRecordsInput) } @@ -293,27 +293,27 @@ func (t *Timestream) writeToTimestream(writeRecordsInput *timestreamwrite.WriteR if errors.As(err, &rejected) { t.logWriteToTimestreamError(err, writeRecordsInput.TableName) for _, rr := range rejected.RejectedRecords { - t.Log.Errorf("reject reason: '%s', record index: '%d'", aws.ToString(rr.Reason), rr.RecordIndex) + t.Log.Errorf("reject reason: %q, record index: %d", aws.ToString(rr.Reason), rr.RecordIndex) } return nil } var throttling *types.ThrottlingException if errors.As(err, &throttling) { - return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. Error: %w", + return fmt.Errorf("unable to write to Timestream database %q table %q: %w", t.DatabaseName, *writeRecordsInput.TableName, throttling) } var internal *types.InternalServerException if errors.As(err, &internal) { - return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. Error: %w", + return fmt.Errorf("unable to write to Timestream database %q table %q: %w", t.DatabaseName, *writeRecordsInput.TableName, internal) } var operation *smithy.OperationError if !errors.As(err, &operation) { // Retry other, non-aws errors. - return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. Error: %w", + return fmt.Errorf("unable to write to Timestream database %q table %q: %w", t.DatabaseName, *writeRecordsInput.TableName, err) } t.logWriteToTimestreamError(err, writeRecordsInput.TableName) @@ -322,25 +322,25 @@ } func (t *Timestream) logWriteToTimestreamError(err error, tableName *string) { - t.Log.Errorf("Failed to write to Timestream database '%s' table '%s'. Skipping metric! 
Error: '%s'", - t.DatabaseName, *tableName, err) + t.Log.Errorf("Failed to write to Timestream database %q table %q: %s. Skipping metric!", + t.DatabaseName, *tableName, err.Error()) } func (t *Timestream) createTableAndRetry(writeRecordsInput *timestreamwrite.WriteRecordsInput) error { if t.CreateTableIfNotExists { t.Log.Infof( - "Trying to create table '%s' in database '%s', as 'CreateTableIfNotExists' config key is 'true'.", + "Trying to create table %q in database %q, as 'CreateTableIfNotExists' config key is 'true'.", *writeRecordsInput.TableName, t.DatabaseName, ) err := t.createTable(writeRecordsInput.TableName) if err == nil { - t.Log.Infof("Table '%s' in database '%s' created. Retrying writing.", *writeRecordsInput.TableName, t.DatabaseName) + t.Log.Infof("Table %q in database %q created. Retrying writing.", *writeRecordsInput.TableName, t.DatabaseName) return t.writeToTimestream(writeRecordsInput, false) } - t.Log.Errorf("Failed to create table '%s' in database '%s': %s. Skipping metric!", *writeRecordsInput.TableName, t.DatabaseName, err) + t.Log.Errorf("Failed to create table %q in database %q: %s. Skipping metric!", *writeRecordsInput.TableName, t.DatabaseName, err.Error()) } else { - t.Log.Errorf("Not trying to create table '%s' in database '%s', as 'CreateTableIfNotExists' config key is 'false'. Skipping metric!", + t.Log.Errorf("Not trying to create table %q in database %q, as 'CreateTableIfNotExists' config key is 'false'. Skipping metric!", *writeRecordsInput.TableName, t.DatabaseName) } return nil @@ -469,7 +469,7 @@ func (t *Timestream) buildSingleWriteRecords(point telegraf.Metric) []types.Reco for fieldName, fieldValue := range point.Fields() { stringFieldValue, stringFieldValueType, ok := convertValue(fieldValue) if !ok { - t.Log.Warnf("Skipping field '%s'. The type '%s' is not supported in Timestream as MeasureValue. "+ + t.Log.Warnf("Skipping field %q. The type %q is not supported in Timestream as MeasureValue. 
"+ "Supported values are: [int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool]", fieldName, reflect.TypeOf(fieldValue)) continue @@ -503,7 +503,7 @@ func (t *Timestream) buildMultiMeasureWriteRecords(point telegraf.Metric) []type for fieldName, fieldValue := range point.Fields() { stringFieldValue, stringFieldValueType, ok := convertValue(fieldValue) if !ok { - t.Log.Warnf("Skipping field '%s'. The type '%s' is not supported in Timestream as MeasureValue. "+ + t.Log.Warnf("Skipping field %q. The type %q is not supported in Timestream as MeasureValue. "+ "Supported values are: [int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool]", fieldName, reflect.TypeOf(fieldValue)) continue diff --git a/plugins/outputs/websocket/websocket.go b/plugins/outputs/websocket/websocket.go index e50a45c12..86a919b82 100644 --- a/plugins/outputs/websocket/websocket.go +++ b/plugins/outputs/websocket/websocket.go @@ -59,7 +59,7 @@ var errInvalidURL = errors.New("invalid websocket URL") // Init the output plugin. 
func (w *WebSocket) Init() error { if parsedURL, err := url.Parse(w.URL); err != nil || (parsedURL.Scheme != "ws" && parsedURL.Scheme != "wss") { - return fmt.Errorf("%w: \"%s\"", errInvalidURL, w.URL) + return fmt.Errorf("%w: %q", errInvalidURL, w.URL) } return nil } diff --git a/plugins/parsers/dropwizard/parser.go b/plugins/parsers/dropwizard/parser.go index 095d05f0d..6a3416461 100644 --- a/plugins/parsers/dropwizard/parser.go +++ b/plugins/parsers/dropwizard/parser.go @@ -103,7 +103,7 @@ func (p *Parser) readTags(buf []byte) map[string]string { var tags map[string]string err := json.Unmarshal(tagsBytes, &tags) if err != nil { - p.Log.Warnf("Failed to parse tags from JSON path '%s': %s\n", p.TagsPath, err) + p.Log.Warnf("Failed to parse tags from JSON path %q: %s", p.TagsPath, err.Error()) } else if len(tags) > 0 { return tags } diff --git a/plugins/parsers/graphite/config.go b/plugins/parsers/graphite/config.go index 43c705869..2bff0e08b 100644 --- a/plugins/parsers/graphite/config.go +++ b/plugins/parsers/graphite/config.go @@ -37,7 +37,7 @@ func (c *Config) validateTemplates() error { } if len(parts) > 3 { - return fmt.Errorf("invalid template format: '%s'", t) + return fmt.Errorf("invalid template format: %q", t) } template := t @@ -66,7 +66,7 @@ func (c *Config) validateTemplates() error { // Prevent duplicate filters in the config if _, ok := filters[filter]; ok { - return fmt.Errorf("duplicate filter '%s' found at position: %d", filter, i) + return fmt.Errorf("duplicate filter %q found at position: %d", filter, i) } filters[filter] = struct{}{} @@ -98,7 +98,7 @@ func (c *Config) validateTemplate(template string) error { } if !hasMeasurement { - return fmt.Errorf("no measurement in template `%s`", template) + return fmt.Errorf("no measurement in template %q", template) } return nil @@ -120,11 +120,11 @@ func (c *Config) validateFilter(filter string) error { func (c *Config) validateTag(keyValue string) error { parts := strings.Split(keyValue, "=") if 
len(parts) != 2 { - return fmt.Errorf("invalid template tags: '%s'", keyValue) + return fmt.Errorf("invalid template tags: %q", keyValue) } if parts[0] == "" || parts[1] == "" { - return fmt.Errorf("invalid template tags: %s'", keyValue) + return fmt.Errorf("invalid template tags: %q", keyValue) } return nil diff --git a/plugins/parsers/graphite/parser.go b/plugins/parsers/graphite/parser.go index 5028197df..38125a76b 100644 --- a/plugins/parsers/graphite/parser.go +++ b/plugins/parsers/graphite/parser.go @@ -108,7 +108,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { // Parse value. v, err := strconv.ParseFloat(fields[1], 64) if err != nil { - return nil, fmt.Errorf(`field "%s" value: %w`, fields[0], err) + return nil, fmt.Errorf(`field %q value: %w`, fields[0], err) } fieldValues := map[string]interface{}{} @@ -125,7 +125,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { // Parse timestamp. unixTime, err := strconv.ParseFloat(fields[2], 64) if err != nil { - return nil, fmt.Errorf(`field "%s" time: %w`, fields[0], err) + return nil, fmt.Errorf(`field %q time: %w`, fields[0], err) } // -1 is a special value that gets converted to current UTC time diff --git a/plugins/parsers/graphite/parser_test.go b/plugins/parsers/graphite/parser_test.go index bb42ec937..cde098f5e 100644 --- a/plugins/parsers/graphite/parser_test.go +++ b/plugins/parsers/graphite/parser_test.go @@ -766,9 +766,7 @@ func TestApplyTemplateSpecific(t *testing.T) { if !ok { t.Error("Expected for template to apply a 'service' tag, but not found") } - if service != "facebook" { - t.Errorf("Expected service='facebook' tag, got service='%s'", service) - } + require.Equal(t, "facebook", service) } func TestApplyTemplateTags(t *testing.T) { @@ -785,9 +783,7 @@ func TestApplyTemplateTags(t *testing.T) { if !ok { t.Error("Expected for template to apply a 'region' tag, but not found") } - if region != "us-west" { - t.Errorf("Expected region='us-west' tag, got 
region='%s'", region) - } + require.Equal(t, "us-west", region) } func TestApplyTemplateField(t *testing.T) { diff --git a/plugins/parsers/json_v2/parser.go b/plugins/parsers/json_v2/parser.go index 26c1a2a49..bc8a484eb 100644 --- a/plugins/parsers/json_v2/parser.go +++ b/plugins/parsers/json_v2/parser.go @@ -586,25 +586,25 @@ func (p *Parser) convertType(input gjson.Result, desiredType string, name string case "uint": r, err := strconv.ParseUint(inputType, 10, 64) if err != nil { - return nil, fmt.Errorf("unable to convert field '%s' to type uint: %w", name, err) + return nil, fmt.Errorf("unable to convert field %q to type uint: %w", name, err) } return r, nil case "int": r, err := strconv.ParseInt(inputType, 10, 64) if err != nil { - return nil, fmt.Errorf("unable to convert field '%s' to type int: %w", name, err) + return nil, fmt.Errorf("unable to convert field %q to type int: %w", name, err) } return r, nil case "float": r, err := strconv.ParseFloat(inputType, 64) if err != nil { - return nil, fmt.Errorf("unable to convert field '%s' to type float: %w", name, err) + return nil, fmt.Errorf("unable to convert field %q to type float: %w", name, err) } return r, nil case "bool": r, err := strconv.ParseBool(inputType) if err != nil { - return nil, fmt.Errorf("unable to convert field '%s' to type bool: %w", name, err) + return nil, fmt.Errorf("unable to convert field %q to type bool: %w", name, err) } return r, nil } @@ -639,11 +639,11 @@ func (p *Parser) convertType(input gjson.Result, desiredType string, name string } else if inputType == 1 { return true, nil } else { - return nil, fmt.Errorf("unable to convert field '%s' to type bool", name) + return nil, fmt.Errorf("unable to convert field %q to type bool", name) } } default: - return nil, fmt.Errorf("unknown format '%T' for field '%s'", inputType, name) + return nil, fmt.Errorf("unknown format '%T' for field %q", inputType, name) } return input.Value(), nil diff --git a/plugins/parsers/prometheus/parser_test.go 
b/plugins/parsers/prometheus/parser_test.go index 7455f7d1f..af9c626d5 100644 --- a/plugins/parsers/prometheus/parser_test.go +++ b/plugins/parsers/prometheus/parser_test.go @@ -496,12 +496,12 @@ func TestParserProtobufHeader(t *testing.T) { defer ts.Close() req, err := http.NewRequest("GET", ts.URL, nil) if err != nil { - t.Fatalf("unable to create new request '%s': %s", ts.URL, err) + t.Fatalf("unable to create new request %q: %s", ts.URL, err) } var resp *http.Response resp, err = uClient.Do(req) if err != nil { - t.Fatalf("error making HTTP request to %s: %s", ts.URL, err) + t.Fatalf("error making HTTP request to %q: %s", ts.URL, err) } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) @@ -511,7 +511,7 @@ func TestParserProtobufHeader(t *testing.T) { parser := Parser{Header: resp.Header} metrics, err := parser.Parse(body) if err != nil { - t.Fatalf("error reading metrics for %s: %s", ts.URL, err) + t.Fatalf("error reading metrics for %q: %s", ts.URL, err) } testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) } diff --git a/plugins/parsers/xpath/parser.go b/plugins/parsers/xpath/parser.go index f5705609c..39a5d76cc 100644 --- a/plugins/parsers/xpath/parser.go +++ b/plugins/parsers/xpath/parser.go @@ -245,7 +245,7 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected dataNode, config // Execute the query and cast the returned values into strings v, err := p.executeQuery(doc, selected, query) if err != nil { - return nil, fmt.Errorf("failed to query tag '%s': %w", name, err) + return nil, fmt.Errorf("failed to query tag %q: %w", name, err) } switch v := v.(type) { case string: @@ -257,7 +257,7 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected dataNode, config case nil: continue default: - return nil, fmt.Errorf("unknown format '%T' for tag '%s'", v, name) + return nil, fmt.Errorf("unknown format '%T' for tag %q", v, name) } } @@ -282,15 +282,15 @@ func (p *Parser) 
parseQuery(starttime time.Time, doc, selected dataNode, config for _, selectedtag := range selectedTagNodes { n, err := p.executeQuery(doc, selectedtag, tagnamequery) if err != nil { - return nil, fmt.Errorf("failed to query tag name with query '%s': %w", tagnamequery, err) + return nil, fmt.Errorf("failed to query tag name with query %q: %w", tagnamequery, err) } name, ok := n.(string) if !ok { - return nil, fmt.Errorf("failed to query tag name with query '%s': result is not a string (%v)", tagnamequery, n) + return nil, fmt.Errorf("failed to query tag name with query %q: result is not a string (%v)", tagnamequery, n) } v, err := p.executeQuery(doc, selectedtag, tagvaluequery) if err != nil { - return nil, fmt.Errorf("failed to query tag value for '%s': %w", name, err) + return nil, fmt.Errorf("failed to query tag value for %q: %w", name, err) } if config.TagNameExpand { @@ -314,7 +314,7 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected dataNode, config // Convert the tag to be a string s, err := internal.ToString(v) if err != nil { - return nil, fmt.Errorf("failed to query tag value for '%s': result is not a string (%v)", name, v) + return nil, fmt.Errorf("failed to query tag value for %q: result is not a string (%v)", name, v) } tags[name] = s } @@ -333,13 +333,13 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected dataNode, config // Execute the query and cast the returned values into integers v, err := p.executeQuery(doc, selected, query) if err != nil { - return nil, fmt.Errorf("failed to query field (int) '%s': %w", name, err) + return nil, fmt.Errorf("failed to query field (int) %q: %w", name, err) } switch v := v.(type) { case string: fields[name], err = strconv.ParseInt(v, 10, 54) if err != nil { - return nil, fmt.Errorf("failed to parse field (int) '%s': %w", name, err) + return nil, fmt.Errorf("failed to parse field (int) %q: %w", name, err) } case bool: fields[name] = int64(0) @@ -351,7 +351,7 @@ func (p *Parser) 
parseQuery(starttime time.Time, doc, selected dataNode, config case nil: continue default: - return nil, fmt.Errorf("unknown format '%T' for field (int) '%s'", v, name) + return nil, fmt.Errorf("unknown format '%T' for field (int) %q", v, name) } } @@ -359,7 +359,7 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected dataNode, config // Execute the query and store the result in fields v, err := p.executeQuery(doc, selected, query) if err != nil { - return nil, fmt.Errorf("failed to query field '%s': %w", name, err) + return nil, fmt.Errorf("failed to query field %q: %w", name, err) } fields[name] = v } @@ -385,15 +385,15 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected dataNode, config for _, selectedfield := range selectedFieldNodes { n, err := p.executeQuery(doc, selectedfield, fieldnamequery) if err != nil { - return nil, fmt.Errorf("failed to query field name with query '%s': %w", fieldnamequery, err) + return nil, fmt.Errorf("failed to query field name with query %q: %w", fieldnamequery, err) } name, ok := n.(string) if !ok { - return nil, fmt.Errorf("failed to query field name with query '%s': result is not a string (%v)", fieldnamequery, n) + return nil, fmt.Errorf("failed to query field name with query %q: result is not a string (%v)", fieldnamequery, n) } v, err := p.executeQuery(doc, selectedfield, fieldvaluequery) if err != nil { - return nil, fmt.Errorf("failed to query field value for '%s': %w", name, err) + return nil, fmt.Errorf("failed to query field value for %q: %w", name, err) } if config.FieldNameExpand { @@ -439,7 +439,7 @@ func (p *Parser) executeQuery(doc, selected dataNode, query string) (r interface // Compile the query expr, err := path.Compile(query) if err != nil { - return nil, fmt.Errorf("failed to compile query '%s': %w", query, err) + return nil, fmt.Errorf("failed to compile query %q: %w", query, err) } // Evaluate the compiled expression and handle returned node-iterators diff --git 
a/plugins/parsers/xpath/parser_test.go b/plugins/parsers/xpath/parser_test.go index a9ec8253b..2c1691757 100644 --- a/plugins/parsers/xpath/parser_test.go +++ b/plugins/parsers/xpath/parser_test.go @@ -166,7 +166,7 @@ func TestInvalidTypeQueriesFail(t *testing.T) { }, }, defaultTags: map[string]string{}, - expectedError: "failed to parse field (int) 'a': strconv.ParseInt: parsing \"this is a test\": invalid syntax", + expectedError: `failed to parse field (int) "a": strconv.ParseInt: parsing "this is a test": invalid syntax`, }, } diff --git a/plugins/processors/parser/parser.go b/plugins/processors/parser/parser.go index 3eb9b6337..95154c24a 100644 --- a/plugins/processors/parser/parser.go +++ b/plugins/processors/parser/parser.go @@ -64,7 +64,7 @@ func (p *Parser) Apply(metrics ...telegraf.Metric) []telegraf.Metric { // prior to returning. newMetrics = append(newMetrics, fromFieldMetric...) default: - p.Log.Errorf("field '%s' not a string, skipping", key) + p.Log.Errorf("field %q not a string, skipping", key) } } } diff --git a/plugins/processors/printer/printer.go b/plugins/processors/printer/printer.go index a302c70b2..6165f79a7 100644 --- a/plugins/processors/printer/printer.go +++ b/plugins/processors/printer/printer.go @@ -28,7 +28,7 @@ func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric { if err != nil { continue } - fmt.Printf("%s", octets) + fmt.Print(string(octets)) } return in } diff --git a/plugins/processors/topk/topk.go b/plugins/processors/topk/topk.go index 024b33c1e..5ba973cba 100644 --- a/plugins/processors/topk/topk.go +++ b/plugins/processors/topk/topk.go @@ -285,7 +285,7 @@ func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metr } val, ok := convert(fieldVal) if !ok { - t.Log.Infof("Cannot convert value '%s' from metric '%s' with tags '%s'", + t.Log.Infof("Cannot convert value %v from metric %q with tags %q", m.Fields()[field], m.Name(), m.Tags()) continue } @@ -351,7 +351,7 @@ func (t *TopK) 
getAggregationFunction(aggOperation string) (func([]telegraf.Metr } val, ok := convert(fieldVal) if !ok { - t.Log.Infof("Cannot convert value '%s' from metric '%s' with tags '%s'", + t.Log.Infof("Cannot convert value %v from metric %q with tags %q", m.Fields()[field], m.Name(), m.Tags()) continue } @@ -377,7 +377,7 @@ func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metr }, nil default: - return nil, fmt.Errorf("unknown aggregation function '%s', no metrics will be processed", t.Aggregation) + return nil, fmt.Errorf("unknown aggregation function %q, no metrics will be processed", t.Aggregation) } } diff --git a/plugins/serializers/graphite/graphite.go b/plugins/serializers/graphite/graphite.go index 4679e09fb..c8516cc46 100644 --- a/plugins/serializers/graphite/graphite.go +++ b/plugins/serializers/graphite/graphite.go @@ -224,7 +224,7 @@ func InitGraphiteTemplates(templates []string) ([]*GraphiteTemplate, string, err } if len(parts) > 2 { - return nil, "", fmt.Errorf("invalid template format: '%s'", t) + return nil, "", fmt.Errorf("invalid template format: %q", t) } tFilter, err := filter.Compile([]string{parts[0]}) diff --git a/testutil/container.go b/testutil/container.go index 8c0daa4f9..b3791dda9 100644 --- a/testutil/container.go +++ b/testutil/container.go @@ -112,7 +112,7 @@ func (c *Container) LookupMappedPorts() error { if err != nil { return fmt.Errorf("failed to find %q: %w", port, err) } - fmt.Printf("mapped container port '%s' to host port '%s'\n", port, p.Port()) + fmt.Printf("mapped container port %q to host port %q\n", port, p.Port()) c.Ports[port] = p.Port() } diff --git a/tools/package_lxd_test/main.go b/tools/package_lxd_test/main.go index 49be42d5d..1f53b54f7 100644 --- a/tools/package_lxd_test/main.go +++ b/tools/package_lxd_test/main.go @@ -47,10 +47,10 @@ func main() { }, Action: func(c *cli.Context) error { if image != "" && packageFile != "" { - fmt.Printf("test package '%s' on image '%s'\n", packageFile, image) 
+ fmt.Printf("test package %q on image %q\n", packageFile, image) return launchTests(packageFile, []string{image}) } else if packageFile != "" { - fmt.Printf("test package '%s' on all applicable images\n", packageFile) + fmt.Printf("test package %q on all applicable images\n", packageFile) extension := filepath.Ext(packageFile) switch extension { diff --git a/tools/readme_linter/assert.go b/tools/readme_linter/assert.go index 38cca0976..5e40f6f97 100644 --- a/tools/readme_linter/assert.go +++ b/tools/readme_linter/assert.go @@ -121,7 +121,7 @@ func (t *T) assertFirstChildRegexp(expectedPattern string, n ast.Node) { actual := string(c.Text(t.markdown)) if !validRegexp.MatchString(actual) { - t.printFailedAssertf(n, "'%s' does not match regexp '%s'", actual, expectedPattern) + t.printFailedAssertf(n, "%q does not match regexp %q", actual, expectedPattern) return } }