chore(linters): replace 'fmt.Sprintf' with string addition (#14699)
This commit is contained in:
parent 19fd5712ae
commit 98ec91478a
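The pattern applied throughout this change: wherever fmt.Sprintf only glues a literal prefix or suffix onto a string value, the call is rewritten as plain string addition, which skips format-string parsing at runtime and lets the "fmt" import be dropped where it was the only remaining use. A minimal sketch of the before/after (the host variable below is illustrative, not taken from the diff):

package main

import "fmt"

func main() {
    host := "127.0.0.1:8080" // illustrative value, not from the diff

    // Before: a format string parsed at runtime just to add a prefix.
    before := fmt.Sprintf("http://%s", host)

    // After: plain string addition, same result.
    after := "http://" + host

    fmt.Println(before == after) // true
}

Calls that format non-string values, such as fmt.Sprintf("%d:%d", metric.Major, metric.Minor), are left untouched in the hunks below.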
@@ -1,7 +1,6 @@
 package main

 import (
-    "fmt"
     "log"
     "net/http"
     "strings"

@@ -28,7 +27,7 @@ func (p *PprofServer) Start(address string) {
     pprofHostPort := address
     parts := strings.Split(pprofHostPort, ":")
     if len(parts) == 2 && parts[0] == "" {
-        pprofHostPort = fmt.Sprintf("localhost:%s", parts[1])
+        pprofHostPort = "localhost:" + parts[1]
     }
     pprofHostPort = "http://" + pprofHostPort + "/debug/pprof"
@@ -99,7 +99,7 @@ func testBuildTags(t *testing.T, buildComment string, pluginCategory string, plu
 // For ex ["!custom", "inputs", "inputs.docker"] returns "inputs.docker"
 func getPluginBuildTag(tags []string, pluginCategory string) string {
     for _, tag := range tags {
-        if strings.HasPrefix(tag, fmt.Sprintf("%s.", pluginCategory)) {
+        if strings.HasPrefix(tag, pluginCategory+".") {
             return tag
         }
     }
@@ -41,10 +41,10 @@ func generateCert(host string, rsaBits int, certFile, keyFile string, dur time.D
         rsaBits = 2048
     }
     if len(certFile) == 0 {
-        certFile = fmt.Sprintf("%s/cert.pem", dir)
+        certFile = dir + "/cert.pem"
     }
     if len(keyFile) == 0 {
-        keyFile = fmt.Sprintf("%s/key.pem", dir)
+        keyFile = dir + "/key.pem"
     }

     priv, err := rsa.GenerateKey(rand.Reader, rsaBits)
@@ -307,7 +307,7 @@ func (a *Aerospike) getSetInfo(namespaceSet string, n *as.Node, infoPolicy *as.I
 func (a *Aerospike) parseSetInfo(acc telegraf.Accumulator, stats map[string]string, hostPort string, namespaceSet string, nodeName string) {
     stat := strings.Split(
         strings.TrimSuffix(
-            stats[fmt.Sprintf("sets/%s", namespaceSet)], ";"), ":")
+            stats["sets/"+namespaceSet], ";"), ":")
     nTags := map[string]string{
         "aerospike_host": hostPort,
         "node_name":      nodeName,
@@ -484,7 +484,7 @@ func formatField(metricName string, statistic string) string {
 func formatMeasurement(project string) string {
     project = strings.ReplaceAll(project, "/", "_")
     project = snakeCase(project)
-    return fmt.Sprintf("aliyuncms_%s", project)
+    return "aliyuncms_" + project
 }

 func snakeCase(s string) string {
@@ -1,7 +1,6 @@
 package aurora

 import (
-    "fmt"
     "net/http"
     "net/http/httptest"
     "net/url"

@@ -21,7 +20,7 @@ func TestAurora(t *testing.T) {
     ts := httptest.NewServer(http.NotFoundHandler())
     defer ts.Close()

-    u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+    u, err := url.Parse("http://" + ts.Listener.Addr().String())
     require.NoError(t, err)

     tests := []struct {

@@ -220,7 +219,7 @@ func TestBasicAuth(t *testing.T) {
     ts := httptest.NewServer(http.NotFoundHandler())
     defer ts.Close()

-    u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+    u, err := url.Parse("http://" + ts.Listener.Addr().String())
     require.NoError(t, err)

     tests := []struct {
@@ -10,10 +10,11 @@ import (
     "path/filepath"
     "strings"

+    "golang.org/x/sys/unix"
+
     "github.com/influxdata/telegraf"
     "github.com/influxdata/telegraf/filter"
     "github.com/influxdata/telegraf/plugins/inputs/system"
-    "golang.org/x/sys/unix"
 )

 type DiskIO struct {

@@ -68,7 +69,7 @@ func (d *DiskIO) diskInfo(devName string) (map[string]string, error) {
     _, err := os.Stat(udevDataPath)
     if err != nil {
         // This path failed, try the fallback .udev style (non-systemd)
-        udevDataPath = fmt.Sprintf("/dev/.udev/db/block:%s", devName)
+        udevDataPath = "/dev/.udev/db/block:" + devName
         _, err := os.Stat(udevDataPath)
         if err != nil {
             // Giving up, cannot retrieve disk info
@@ -6,8 +6,9 @@ import (
     "net"
     "testing"

-    "github.com/influxdata/telegraf/testutil"
     "github.com/stretchr/testify/require"
+
+    "github.com/influxdata/telegraf/testutil"
 )

 func TestDisqueGeneratesMetricsIntegration(t *testing.T) {

@@ -47,7 +48,7 @@ func TestDisqueGeneratesMetricsIntegration(t *testing.T) {
         }
     }()

-    addr := fmt.Sprintf("disque://%s", l.Addr().String())
+    addr := "disque://" + l.Addr().String()

     r := &Disque{
         Servers: []string{addr},

@@ -117,7 +118,7 @@ func TestDisqueCanPullStatsFromMultipleServersIntegration(t *testing.T) {
         }
     }()

-    addr := fmt.Sprintf("disque://%s", l.Addr().String())
+    addr := "disque://" + l.Addr().String()

     r := &Disque{
         Servers: []string{addr},
@@ -792,7 +792,7 @@ func getDeviceStatMap(blkioStats types.BlkioStats) map[string]map[string]interfa
             deviceStatMap[device] = make(map[string]interface{})
         }

-        field := fmt.Sprintf("io_service_bytes_recursive_%s", strings.ToLower(metric.Op))
+        field := "io_service_bytes_recursive_" + strings.ToLower(metric.Op)
         deviceStatMap[device][field] = metric.Value
     }

@@ -803,31 +803,31 @@ func getDeviceStatMap(blkioStats types.BlkioStats) map[string]map[string]interfa
             deviceStatMap[device] = make(map[string]interface{})
         }

-        field := fmt.Sprintf("io_serviced_recursive_%s", strings.ToLower(metric.Op))
+        field := "io_serviced_recursive_" + strings.ToLower(metric.Op)
         deviceStatMap[device][field] = metric.Value
     }

     for _, metric := range blkioStats.IoQueuedRecursive {
         device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
-        field := fmt.Sprintf("io_queue_recursive_%s", strings.ToLower(metric.Op))
+        field := "io_queue_recursive_" + strings.ToLower(metric.Op)
         deviceStatMap[device][field] = metric.Value
     }

     for _, metric := range blkioStats.IoServiceTimeRecursive {
         device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
-        field := fmt.Sprintf("io_service_time_recursive_%s", strings.ToLower(metric.Op))
+        field := "io_service_time_recursive_" + strings.ToLower(metric.Op)
         deviceStatMap[device][field] = metric.Value
     }

     for _, metric := range blkioStats.IoWaitTimeRecursive {
         device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
-        field := fmt.Sprintf("io_wait_time_%s", strings.ToLower(metric.Op))
+        field := "io_wait_time_" + strings.ToLower(metric.Op)
         deviceStatMap[device][field] = metric.Value
     }

     for _, metric := range blkioStats.IoMergedRecursive {
         device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
-        field := fmt.Sprintf("io_merged_recursive_%s", strings.ToLower(metric.Op))
+        field := "io_merged_recursive_" + strings.ToLower(metric.Op)
         deviceStatMap[device][field] = metric.Value
     }
@@ -90,7 +90,7 @@ func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype stri
         return fmt.Errorf("setting deadline failed for dovecot server %q: %w", addr, err)
     }

-    msg := fmt.Sprintf("EXPORT\t%s", qtype)
+    msg := "EXPORT\t" + qtype
     if len(filter) > 0 {
         msg += fmt.Sprintf("\t%s=%s", qtype, filter)
     }
@@ -18,7 +18,7 @@ func Test_LinkStatusCommand(t *testing.T) {
         response := fmt.Sprintf(`{%q:{%q: "DOWN"}}`, ethdevLinkStatusCommand, linkStatusStringFieldName)
         simulateResponse(mockConn, response, nil)
         dpdkConn := dpdk.connectors[0]
-        dpdkConn.processCommand(mockAcc, testutil.Logger{}, fmt.Sprintf("%s,1", ethdevLinkStatusCommand), nil)
+        dpdkConn.processCommand(mockAcc, testutil.Logger{}, ethdevLinkStatusCommand+",1", nil)

         expected := []telegraf.Metric{
             testutil.MustMetric(

@@ -45,7 +45,7 @@ func Test_LinkStatusCommand(t *testing.T) {
         response := fmt.Sprintf(`{%q:{%q: "UP"}}`, ethdevLinkStatusCommand, linkStatusStringFieldName)
         simulateResponse(mockConn, response, nil)
         dpdkConn := dpdk.connectors[0]
-        dpdkConn.processCommand(mockAcc, testutil.Logger{}, fmt.Sprintf("%s,1", ethdevLinkStatusCommand), nil)
+        dpdkConn.processCommand(mockAcc, testutil.Logger{}, ethdevLinkStatusCommand+",1", nil)

         expected := []telegraf.Metric{
             testutil.MustMetric(

@@ -72,7 +72,7 @@ func Test_LinkStatusCommand(t *testing.T) {
         response := fmt.Sprintf(`{%q:{}}`, ethdevLinkStatusCommand)
         simulateResponse(mockConn, response, nil)
         dpdkConn := dpdk.connectors[0]
-        dpdkConn.processCommand(mockAcc, testutil.Logger{}, fmt.Sprintf("%s,1", ethdevLinkStatusCommand), nil)
+        dpdkConn.processCommand(mockAcc, testutil.Logger{}, ethdevLinkStatusCommand+",1", nil)

         actual := mockAcc.GetTelegrafMetrics()
         testutil.RequireMetricsEqual(t, nil, actual, testutil.IgnoreTime())

@@ -84,7 +84,7 @@ func Test_LinkStatusCommand(t *testing.T) {
         response := fmt.Sprintf(`{%q:{"tag1": 1}}`, ethdevLinkStatusCommand)
         simulateResponse(mockConn, response, nil)
         dpdkConn := dpdk.connectors[0]
-        dpdkConn.processCommand(mockAcc, testutil.Logger{}, fmt.Sprintf("%s,1", ethdevLinkStatusCommand), nil)
+        dpdkConn.processCommand(mockAcc, testutil.Logger{}, ethdevLinkStatusCommand+",1", nil)
         expected := []telegraf.Metric{
             testutil.MustMetric(
                 "dpdk",

@@ -109,7 +109,7 @@ func Test_LinkStatusCommand(t *testing.T) {
         response := fmt.Sprintf(`{%q:{%q: "BOB"}}`, ethdevLinkStatusCommand, linkStatusStringFieldName)
         simulateResponse(mockConn, response, nil)
         dpdkConn := dpdk.connectors[0]
-        dpdkConn.processCommand(mockAcc, testutil.Logger{}, fmt.Sprintf("%s,1", ethdevLinkStatusCommand), nil)
+        dpdkConn.processCommand(mockAcc, testutil.Logger{}, ethdevLinkStatusCommand+",1", nil)

         expected := []telegraf.Metric{
             testutil.MustMetric(
@@ -46,7 +46,7 @@ func isInMemorySocketPath(filePath, socketPath string) bool {
         return true
     }

-    socketPathPrefix := fmt.Sprintf("%s:", socketPath)
+    socketPathPrefix := socketPath + ":"
     if strings.HasPrefix(filePath, socketPathPrefix) {
         suffix := filePath[len(socketPathPrefix):]
         if number, err := strconv.Atoi(suffix); err == nil {
@@ -207,7 +207,7 @@ func blkstats(id string, stats *types.StatsJSON, acc telegraf.Accumulator, tags
             deviceStatMap[device] = make(map[string]interface{})
         }

-        field := fmt.Sprintf("io_service_bytes_recursive_%s", strings.ToLower(metric.Op))
+        field := "io_service_bytes_recursive_" + strings.ToLower(metric.Op)
         deviceStatMap[device][field] = metric.Value
     }

@@ -218,31 +218,31 @@ func blkstats(id string, stats *types.StatsJSON, acc telegraf.Accumulator, tags
             deviceStatMap[device] = make(map[string]interface{})
         }

-        field := fmt.Sprintf("io_serviced_recursive_%s", strings.ToLower(metric.Op))
+        field := "io_serviced_recursive_" + strings.ToLower(metric.Op)
         deviceStatMap[device][field] = metric.Value
     }

     for _, metric := range blkioStats.IoQueuedRecursive {
         device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
-        field := fmt.Sprintf("io_queue_recursive_%s", strings.ToLower(metric.Op))
+        field := "io_queue_recursive_" + strings.ToLower(metric.Op)
         deviceStatMap[device][field] = metric.Value
     }

     for _, metric := range blkioStats.IoServiceTimeRecursive {
         device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
-        field := fmt.Sprintf("io_service_time_recursive_%s", strings.ToLower(metric.Op))
+        field := "io_service_time_recursive_" + strings.ToLower(metric.Op)
         deviceStatMap[device][field] = metric.Value
     }

     for _, metric := range blkioStats.IoWaitTimeRecursive {
         device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
-        field := fmt.Sprintf("io_wait_time_%s", strings.ToLower(metric.Op))
+        field := "io_wait_time_" + strings.ToLower(metric.Op)
         deviceStatMap[device][field] = metric.Value
     }

     for _, metric := range blkioStats.IoMergedRecursive {
         device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
-        field := fmt.Sprintf("io_merged_recursive_%s", strings.ToLower(metric.Op))
+        field := "io_merged_recursive_" + strings.ToLower(metric.Op)
         deviceStatMap[device][field] = metric.Value
     }
@@ -216,7 +216,7 @@ func (g *GitHub) getPullRequestFields(ctx context.Context, owner, repo string) (
             return fields, err
         }

-        f := fmt.Sprintf("%s_pull_requests", class)
+        f := class + "_pull_requests"
         fields[f] = searchResult.GetTotal()
     }
@@ -221,7 +221,7 @@ func TestBodyAndContentEncoding(t *testing.T) {
     ts := httptest.NewServer(http.NotFoundHandler())
     defer ts.Close()

-    address := fmt.Sprintf("http://%s", ts.Listener.Addr().String())
+    address := "http://" + ts.Listener.Addr().String()

     tests := []struct {
         name string

@@ -320,7 +320,7 @@ func TestOAuthClientCredentialsGrant(t *testing.T) {

     var token = "2YotnFZFEjr1zCsicMWpAA"

-    u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+    u, err := url.Parse("http://" + ts.Listener.Addr().String())
     require.NoError(t, err)

     tests := []struct {
@@ -282,8 +282,8 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string]
         h.setBodyReadError("The body of the HTTP Response is too large", bodyBytes, fields, tags)
         return fields, tags, nil
     } else if err != nil {
-        h.setBodyReadError(fmt.Sprintf("Failed to read body of HTTP Response : %s", err.Error()), bodyBytes, fields, tags)
-        return fields, tags, nil
+        h.setBodyReadError("Failed to read body of HTTP Response : "+err.Error(), bodyBytes, fields, tags)
+        return fields, tags, nil //nolint:nilerr // error is handled properly
     }

     // Add the body of the response if expected
@@ -133,7 +133,7 @@ func (i *Icinga2) gatherObjects(acc telegraf.Accumulator, checks ResultObject, o
             "port":   serverURL.Port(),
         }

-        acc.AddFields(fmt.Sprintf("icinga2_%s", objectType), fields, tags)
+        acc.AddFields("icinga2_"+objectType, fields, tags)
     }
 }
@@ -343,7 +343,7 @@ func (h *InfluxDBListener) handleWriteInternalParser(res http.ResponseWriter, re
     case 1:
         partialErrorString = firstParseErrorStr
     case 2:
-        partialErrorString = fmt.Sprintf("%s (and 1 other parse error)", firstParseErrorStr)
+        partialErrorString = firstParseErrorStr + " (and 1 other parse error)"
     default:
         partialErrorString = fmt.Sprintf("%s (and %d other parse errors)", firstParseErrorStr, parseErrorCount-1)
     }

@@ -459,7 +459,7 @@ func (h *InfluxDBListener) handleWriteUpstreamParser(res http.ResponseWriter, re
     case 1:
         partialErrorString = firstParseErrorStr
     case 2:
-        partialErrorString = fmt.Sprintf("%s (and 1 other parse error)", firstParseErrorStr)
+        partialErrorString = firstParseErrorStr + " (and 1 other parse error)"
     default:
         partialErrorString = fmt.Sprintf("%s (and %d other parse errors)", firstParseErrorStr, parseErrorCount-1)
     }
@@ -4,7 +4,6 @@ import (
     "bytes"
     "crypto/tls"
     "crypto/x509"
-    "fmt"
     "io"
     "net/http"
     "net/url"

@@ -266,7 +265,7 @@ func TestWriteKeepDatabase(t *testing.T) {
     testMsgWithDB := "cpu_load_short,host=server01,database=wrongdb value=12.0 1422568543702900257\n"

     for _, tc := range parserTestCases {
-        t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) {
+        t.Run("parser "+tc.parser, func(t *testing.T) {
             listener := newTestListener()
             listener.ParserType = tc.parser
             listener.DatabaseTag = "database"

@@ -353,7 +352,7 @@ func TestWriteRetentionPolicyTag(t *testing.T) {
 // http listener should add a newline at the end of the buffer if it's not there
 func TestWriteNoNewline(t *testing.T) {
     for _, tc := range parserTestCases {
-        t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) {
+        t.Run("parser "+tc.parser, func(t *testing.T) {
             listener := newTestListener()
             listener.ParserType = tc.parser

@@ -379,7 +378,7 @@ func TestWriteNoNewline(t *testing.T) {

 func TestPartialWrite(t *testing.T) {
     for _, tc := range parserTestCases {
-        t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) {
+        t.Run("parser "+tc.parser, func(t *testing.T) {
             listener := newTestListener()
             listener.ParserType = tc.parser

@@ -413,7 +412,7 @@ func TestWriteMaxLineSizeIncrease(t *testing.T) {
     require.NoError(t, err)

     for _, tc := range parserTestCases {
-        t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) {
+        t.Run("parser "+tc.parser, func(t *testing.T) {
             listener := &InfluxDBListener{
                 Log:            testutil.Logger{},
                 ServiceAddress: "localhost:0",

@@ -441,7 +440,7 @@ func TestWriteVerySmallMaxBody(t *testing.T) {
     require.NoError(t, err)

     for _, tc := range parserTestCases {
-        t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) {
+        t.Run("parser "+tc.parser, func(t *testing.T) {
             listener := &InfluxDBListener{
                 Log:            testutil.Logger{},
                 ServiceAddress: "localhost:0",

@@ -474,7 +473,7 @@ func TestWriteLargeLine(t *testing.T) {
     hugeMetricString := string(hugeMetric)

     for _, tc := range parserTestCases {
-        t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) {
+        t.Run("parser "+tc.parser, func(t *testing.T) {
             listener := &InfluxDBListener{
                 Log:            testutil.Logger{},
                 ServiceAddress: "localhost:0",

@@ -554,7 +553,7 @@ func TestWriteLargeLine(t *testing.T) {
 // test that writing gzipped data works
 func TestWriteGzippedData(t *testing.T) {
     for _, tc := range parserTestCases {
-        t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) {
+        t.Run("parser "+tc.parser, func(t *testing.T) {
             listener := newTestListener()
             listener.ParserType = tc.parser

@@ -635,7 +634,7 @@ func TestWriteHighTraffic(t *testing.T) {

 func TestReceive404ForInvalidEndpoint(t *testing.T) {
     for _, tc := range parserTestCases {
-        t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) {
+        t.Run("parser "+tc.parser, func(t *testing.T) {
             listener := newTestListener()
             listener.ParserType = tc.parser

@@ -655,7 +654,7 @@ func TestReceive404ForInvalidEndpoint(t *testing.T) {

 func TestWriteInvalid(t *testing.T) {
     for _, tc := range parserTestCases {
-        t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) {
+        t.Run("parser "+tc.parser, func(t *testing.T) {
             listener := newTestListener()
             listener.ParserType = tc.parser

@@ -675,7 +674,7 @@ func TestWriteInvalid(t *testing.T) {

 func TestWriteEmpty(t *testing.T) {
     for _, tc := range parserTestCases {
-        t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) {
+        t.Run("parser "+tc.parser, func(t *testing.T) {
             listener := newTestListener()
             listener.ParserType = tc.parser

@@ -695,7 +694,7 @@ func TestWriteEmpty(t *testing.T) {

 func TestQuery(t *testing.T) {
     for _, tc := range parserTestCases {
-        t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) {
+        t.Run("parser "+tc.parser, func(t *testing.T) {
             listener := newTestListener()
             listener.ParserType = tc.parser

@@ -716,7 +715,7 @@ func TestQuery(t *testing.T) {

 func TestPing(t *testing.T) {
     for _, tc := range parserTestCases {
-        t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) {
+        t.Run("parser "+tc.parser, func(t *testing.T) {
             listener := newTestListener()
             listener.ParserType = tc.parser

@@ -738,7 +737,7 @@ func TestPing(t *testing.T) {

 func TestPingVerbose(t *testing.T) {
     for _, tc := range parserTestCases {
-        t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) {
+        t.Run("parser "+tc.parser, func(t *testing.T) {
             listener := newTestListener()
             listener.ParserType = tc.parser

@@ -760,7 +759,7 @@ func TestPingVerbose(t *testing.T) {

 func TestWriteWithPrecision(t *testing.T) {
     for _, tc := range parserTestCases {
-        t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) {
+        t.Run("parser "+tc.parser, func(t *testing.T) {
             listener := newTestListener()
             listener.ParserType = tc.parser

@@ -787,7 +786,7 @@ func TestWriteWithPrecision(t *testing.T) {

 func TestWriteWithPrecisionNoTimestamp(t *testing.T) {
     for _, tc := range parserTestCases {
-        t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) {
+        t.Run("parser "+tc.parser, func(t *testing.T) {
             listener := newTestListener()
             listener.ParserType = tc.parser
             listener.timeFunc = func() time.Time {
@@ -92,7 +92,7 @@ func (h *InfluxDBV2Listener) Gather(_ telegraf.Accumulator) error {
 func (h *InfluxDBV2Listener) routes() {
     credentials := ""
     if h.Token != "" {
-        credentials = fmt.Sprintf("Token %s", h.Token)
+        credentials = "Token " + h.Token
     }
     authHandler := internal.GenericAuthHandler(credentials,
         func(_ http.ResponseWriter) {
@@ -4,7 +4,6 @@ import (
     "bytes"
     "crypto/tls"
     "crypto/x509"
-    "fmt"
     "io"
     "net/http"
     "net/url"

@@ -154,7 +153,7 @@ func TestWriteTokenAuth(t *testing.T) {

     req, err := http.NewRequest("POST", createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), bytes.NewBuffer([]byte(testMsg)))
     require.NoError(t, err)
-    req.Header.Set("Authorization", fmt.Sprintf("Token %s", token))
+    req.Header.Set("Authorization", "Token "+token)
     resp, err := client.Do(req)
     require.NoError(t, err)
     require.NoError(t, resp.Body.Close())

@@ -216,7 +215,7 @@ func TestWriteKeepBucket(t *testing.T) {
 // http listener should add a newline at the end of the buffer if it's not there
 func TestWriteNoNewline(t *testing.T) {
     for _, tc := range parserTestCases {
-        t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) {
+        t.Run("parser "+tc.parser, func(t *testing.T) {
             listener := newTestListener()
             listener.ParserType = tc.parser

@@ -242,7 +241,7 @@ func TestWriteNoNewline(t *testing.T) {

 func TestAllOrNothing(t *testing.T) {
     for _, tc := range parserTestCases {
-        t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) {
+        t.Run("parser "+tc.parser, func(t *testing.T) {
             listener := newTestListener()
             listener.ParserType = tc.parser

@@ -265,7 +264,7 @@ func TestWriteMaxLineSizeIncrease(t *testing.T) {
     require.NoError(t, err)

     for _, tc := range parserTestCases {
-        t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) {
+        t.Run("parser "+tc.parser, func(t *testing.T) {
             listener := &InfluxDBV2Listener{
                 Log:            testutil.Logger{},
                 ServiceAddress: "localhost:0",

@@ -293,7 +292,7 @@ func TestWriteVerySmallMaxBody(t *testing.T) {
     require.NoError(t, err)

     for _, tc := range parserTestCases {
-        t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) {
+        t.Run("parser "+tc.parser, func(t *testing.T) {
             listener := &InfluxDBV2Listener{
                 Log:            testutil.Logger{},
                 ServiceAddress: "localhost:0",

@@ -469,7 +468,7 @@ func TestWriteHighTraffic(t *testing.T) {

 func TestReceive404ForInvalidEndpoint(t *testing.T) {
     for _, tc := range parserTestCases {
-        t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) {
+        t.Run("parser "+tc.parser, func(t *testing.T) {
             listener := newTestListener()
             listener.ParserType = tc.parser

@@ -489,7 +488,7 @@ func TestReceive404ForInvalidEndpoint(t *testing.T) {

 func TestWriteInvalid(t *testing.T) {
     for _, tc := range parserTestCases {
-        t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) {
+        t.Run("parser "+tc.parser, func(t *testing.T) {
             listener := newTestListener()
             listener.ParserType = tc.parser

@@ -509,7 +508,7 @@ func TestWriteInvalid(t *testing.T) {

 func TestWriteEmpty(t *testing.T) {
     for _, tc := range parserTestCases {
-        t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) {
+        t.Run("parser "+tc.parser, func(t *testing.T) {
             listener := newTestListener()
             listener.ParserType = tc.parser

@@ -550,7 +549,7 @@ func TestReady(t *testing.T) {

 func TestWriteWithPrecision(t *testing.T) {
     for _, tc := range parserTestCases {
-        t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) {
+        t.Run("parser "+tc.parser, func(t *testing.T) {
             listener := newTestListener()
             listener.ParserType = tc.parser
@@ -4,7 +4,6 @@ package intel_baseband

 import (
     "errors"
-    "fmt"
     "testing"
     "time"

@@ -244,7 +243,7 @@ func TestParseOperationName(t *testing.T) {
     logConnector := prepareLogConnMock()
     require.NotNil(t, logConnector)
     for _, tc := range testCases {
-        t.Run(fmt.Sprintf("expected %s", tc.expected), func(t *testing.T) {
+        t.Run("expected "+tc.expected, func(t *testing.T) {
             operationName := logConnector.parseOperationName(tc.input)
             require.Equal(t, tc.expected, operationName)
         })
@@ -2,7 +2,6 @@ package kube_inventory

 import (
     "context"
-    "fmt"
     "strings"

     corev1 "k8s.io/api/core/v1"

@@ -16,7 +15,7 @@ func collectPods(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesIn
     var err error

     if ki.KubeletURL != "" {
-        err = ki.queryPodsFromKubelet(fmt.Sprintf("%s/pods", ki.KubeletURL), listRef)
+        err = ki.queryPodsFromKubelet(ki.KubeletURL+"/pods", listRef)
     } else {
         listRef, err = ki.client.getPods(ctx, ki.NodeName)
     }
@@ -159,7 +159,7 @@ func getNodeAddress(addresses []v1.NodeAddress) string {

 func (k *Kubernetes) gatherSummary(baseURL string, acc telegraf.Accumulator) error {
     summaryMetrics := &SummaryMetrics{}
-    err := k.LoadJSON(fmt.Sprintf("%s/stats/summary", baseURL), summaryMetrics)
+    err := k.LoadJSON(baseURL+"/stats/summary", summaryMetrics)
     if err != nil {
         return err
     }

@@ -224,7 +224,7 @@ func buildNodeMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator)

 func (k *Kubernetes) gatherPodInfo(baseURL string) ([]Item, error) {
     var podAPI Pods
-    err := k.LoadJSON(fmt.Sprintf("%s/pods", baseURL), &podAPI)
+    err := k.LoadJSON(baseURL+"/pods", &podAPI)
     if err != nil {
         return nil, err
     }
@@ -284,7 +284,7 @@ func (logstash *Logstash) gatherPluginsStats(
             if strings.HasPrefix(k, "bulk_requests") {
                 continue
             }
-            newKey := fmt.Sprintf("bulk_requests_%s", k)
+            newKey := "bulk_requests_" + k
             flattener.Fields[newKey] = v
             delete(flattener.Fields, k)
         }

@@ -307,7 +307,7 @@ func (logstash *Logstash) gatherPluginsStats(
             if strings.HasPrefix(k, "documents") {
                 continue
             }
-            newKey := fmt.Sprintf("documents_%s", k)
+            newKey := "documents_" + k
             flattener.Fields[newKey] = v
             delete(flattener.Fields, k)
         }
@@ -58,7 +58,7 @@ func (p *ReportsParams) String() string {
 func NewChimpAPI(apiKey string, log telegraf.Logger) *ChimpAPI {
     u := &url.URL{}
     u.Scheme = "https"
-    u.Host = fmt.Sprintf("%s.api.mailchimp.com", mailchimpDatacenter.FindString(apiKey))
+    u.Host = mailchimpDatacenter.FindString(apiKey) + ".api.mailchimp.com"
     u.User = url.UserPassword("", apiKey)
     return &ChimpAPI{url: u, log: log}
 }
@@ -213,7 +213,7 @@ func (m *Monit) Init() error {
 }

 func (m *Monit) Gather(acc telegraf.Accumulator) error {
-    req, err := http.NewRequest("GET", fmt.Sprintf("%s/_status?format=xml", m.Address), nil)
+    req, err := http.NewRequest("GET", m.Address+"/_status?format=xml", nil)
     if err != nil {
         return err
     }
@@ -137,7 +137,7 @@ func (n *NeptuneApex) parseXML(acc telegraf.Accumulator, data []byte) error {
         }
         // Find Amp and Watt probes and add them as fields.
         // Remove the redundant probe.
-        if pos := findProbe(fmt.Sprintf("%sW", o.Name), r.Probe); pos > -1 {
+        if pos := findProbe(o.Name+"W", r.Probe); pos > -1 {
             value, err := strconv.ParseFloat(
                 strings.TrimSpace(r.Probe[pos].Value), 64)
             if err != nil {

@@ -149,7 +149,7 @@ func (n *NeptuneApex) parseXML(acc telegraf.Accumulator, data []byte) error {
             r.Probe[pos] = r.Probe[len(r.Probe)-1]
             r.Probe = r.Probe[:len(r.Probe)-1]
         }
-        if pos := findProbe(fmt.Sprintf("%sA", o.Name), r.Probe); pos > -1 {
+        if pos := findProbe(o.Name+"A", r.Probe); pos > -1 {
             value, err := strconv.ParseFloat(
                 strings.TrimSpace(r.Probe[pos].Value), 64)
             if err != nil {

@@ -245,7 +245,7 @@ func parseTime(val string, tz float64) (time.Time, error) {
 }

 func (n *NeptuneApex) sendRequest(server string) ([]byte, error) {
-    url := fmt.Sprintf("%s/cgi-bin/status.xml", server)
+    url := server + "/cgi-bin/status.xml"
     resp, err := n.httpClient.Get(url)
     if err != nil {
         return nil, fmt.Errorf("http GET failed: %w", err)
@@ -55,11 +55,11 @@ func TestNginxGeneratesMetrics(t *testing.T) {
     defer ts.Close()

     n := &Nginx{
-        Urls: []string{fmt.Sprintf("%s/stub_status", ts.URL)},
+        Urls: []string{ts.URL + "/stub_status"},
     }

     nt := &Nginx{
-        Urls: []string{fmt.Sprintf("%s/tengine_status", ts.URL)},
+        Urls: []string{ts.URL + "/tengine_status"},
     }

     var accNginx testutil.Accumulator
@@ -264,7 +264,7 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) {
     defer ts.Close()

     n := &NginxPlus{
-        Urls: []string{fmt.Sprintf("%s/status", ts.URL)},
+        Urls: []string{ts.URL + "/status"},
     }

     var acc testutil.Accumulator
@@ -8,8 +8,9 @@ import (
     "net/url"
     "testing"

-    "github.com/influxdata/telegraf/testutil"
     "github.com/stretchr/testify/require"
+
+    "github.com/influxdata/telegraf/testutil"
 )

 const processesPayload = `

@@ -1532,7 +1533,7 @@ func TestUnknownContentType(t *testing.T) {

 func prepareAddr(t *testing.T, ts *httptest.Server) (*url.URL, string, string) {
     t.Helper()
-    addr, err := url.Parse(fmt.Sprintf("%s/api", ts.URL))
+    addr, err := url.Parse(ts.URL + "/api")
     require.NoError(t, err)

     host, port, err := net.SplitHostPort(addr.Host)

@@ -1561,7 +1562,7 @@ func prepareEndpoint(t *testing.T, path string, payload string) (*httptest.Serve
     }))

     n := &NginxPlusAPI{
-        Urls:       []string{fmt.Sprintf("%s/api", ts.URL)},
+        Urls:       []string{ts.URL + "/api"},
         APIVersion: defaultAPIVersion,
     }
@@ -178,7 +178,7 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) {
     defer ts.Close()

     n := &NginxSTS{
-        Urls: []string{fmt.Sprintf("%s/status", ts.URL)},
+        Urls: []string{ts.URL + "/status"},
     }

     var acc testutil.Accumulator
@@ -57,7 +57,7 @@ func TestNginxUpstreamCheckData(test *testing.T) {
     defer testServer.Close()

     check := NewNginxUpstreamCheck()
-    check.URL = fmt.Sprintf("%s/status", testServer.URL)
+    check.URL = testServer.URL + "/status"

     var accumulator testutil.Accumulator

@@ -68,34 +68,34 @@ func TestNginxUpstreamCheckData(test *testing.T) {
         test,
         "nginx_upstream_check",
         map[string]interface{}{
-            "status":      string("up"),
+            "status":      "up",
             "status_code": uint8(1),
             "rise":        uint64(1000),
             "fall":        uint64(0),
         },
         map[string]string{
-            "upstream": string("upstream-1"),
-            "type":     string("http"),
-            "name":     string("127.0.0.1:8081"),
-            "port":     string("0"),
-            "url":      fmt.Sprintf("%s/status", testServer.URL),
+            "upstream": "upstream-1",
+            "type":     "http",
+            "name":     "127.0.0.1:8081",
+            "port":     "0",
+            "url":      testServer.URL + "/status",
         })

     accumulator.AssertContainsTaggedFields(
         test,
         "nginx_upstream_check",
         map[string]interface{}{
-            "status":      string("down"),
+            "status":      "down",
             "status_code": uint8(2),
             "rise":        uint64(0),
             "fall":        uint64(2000),
         },
         map[string]string{
-            "upstream": string("upstream-2"),
-            "type":     string("tcp"),
-            "name":     string("127.0.0.1:8082"),
-            "port":     string("8080"),
-            "url":      fmt.Sprintf("%s/status", testServer.URL),
+            "upstream": "upstream-2",
+            "type":     "tcp",
+            "name":     "127.0.0.1:8082",
+            "port":     "8080",
+            "url":      testServer.URL + "/status",
         })
 }

@@ -119,7 +119,7 @@ func TestNginxUpstreamCheckRequest(test *testing.T) {
     defer testServer.Close()

     check := NewNginxUpstreamCheck()
-    check.URL = fmt.Sprintf("%s/status", testServer.URL)
+    check.URL = testServer.URL + "/status"
     check.Headers["X-test"] = "test-value"
     check.HostHeader = "status.local"
     check.Username = "user"
@@ -215,7 +215,7 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) {
     defer ts.Close()

     n := &NginxVTS{
-        Urls: []string{fmt.Sprintf("%s/status", ts.URL)},
+        Urls: []string{ts.URL + "/status"},
     }

     var acc testutil.Accumulator
@@ -21,7 +21,7 @@ import (
 // The `data` field contains a list of commands to execute with
 // the \n character after every command.
 func (p *PowerdnsRecursor) gatherFromV1Server(address string, acc telegraf.Accumulator) error {
-    recvSocket := filepath.Join(p.SocketDir, fmt.Sprintf("pdns_recursor_telegraf%s", uuid.New().String()))
+    recvSocket := filepath.Join(p.SocketDir, "pdns_recursor_telegraf"+uuid.New().String())

     laddr, err := net.ResolveUnixAddr("unixgram", recvSocket)
     if err != nil {
@@ -19,7 +19,7 @@ import (
 // Datagram 1 => status: uint32
 // Datagram 2 => data: byte[] (max 16_384 bytes)
 func (p *PowerdnsRecursor) gatherFromV2Server(address string, acc telegraf.Accumulator) error {
-    recvSocket := filepath.Join(p.SocketDir, fmt.Sprintf("pdns_recursor_telegraf%s", uuid.New().String()))
+    recvSocket := filepath.Join(p.SocketDir, "pdns_recursor_telegraf"+uuid.New().String())

     laddr, err := net.ResolveUnixAddr("unixgram", recvSocket)
     if err != nil {
@@ -366,7 +366,7 @@ func (p *Procstat) supervisorPIDs() ([]string, map[string]map[string]string, err

 func (p *Procstat) systemdUnitPIDs() ([]PidsTags, error) {
     if p.IncludeSystemdChildren {
-        p.CGroup = fmt.Sprintf("systemd/system.slice/%s", p.SystemdUnit)
+        p.CGroup = "systemd/system.slice/" + p.SystemdUnit
         return p.cgroupPIDs()
     }
@@ -58,7 +58,7 @@ func TestRaindropsGeneratesMetrics(t *testing.T) {
     defer ts.Close()

     n := &Raindrops{
-        Urls: []string{fmt.Sprintf("%s/_raindrops", ts.URL)},
+        Urls: []string{ts.URL + "/_raindrops"},
         httpClient: &http.Client{Transport: &http.Transport{
             ResponseHeaderTimeout: 3 * time.Second,
         }},
@@ -3,7 +3,6 @@
 package ras

 import (
-    "fmt"
     "testing"

     "github.com/stretchr/testify/require"

@@ -23,14 +22,14 @@ func TestUpdateCounters(t *testing.T) {
         if metric == processorBase {
             // processor_base_errors is sum of other seven errors: internal_timer_errors, smm_handler_code_access_violation_errors,
             // internal_parity_errors, frc_errors, external_mce_errors, microcode_rom_parity_errors and unclassified_mce_errors
-            require.Equal(t, int64(7), value, fmt.Sprintf("%s should have value of 7", processorBase))
+            require.Equal(t, int64(7), value, processorBase+" should have value of 7")
         } else {
-            require.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", metric))
+            require.Equal(t, int64(1), value, metric+" should have value of 1")
         }
     }

     for metric, value := range ras.serverCounters {
-        require.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", metric))
+        require.Equal(t, int64(1), value, metric+" should have value of 1")
     }
 }

@@ -102,9 +101,9 @@ func TestMultipleSockets(t *testing.T) {
     for _, metricData := range ras.cpuSocketCounters {
         for metric, value := range metricData {
             if metric == levelTwoCache {
-                require.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", levelTwoCache))
+                require.Equal(t, int64(1), value, levelTwoCache+" should have value of 1")
             } else {
-                require.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric))
+                require.Equal(t, int64(0), value, metric+" should have value of 0")
             }
         }
     }

@@ -125,11 +124,11 @@ func TestEmptyDatabase(t *testing.T) {
     require.Len(t, ras.serverCounters, 2, "Should contain default counters for server")

     for metric, value := range ras.cpuSocketCounters[0] {
-        require.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric))
+        require.Equal(t, int64(0), value, metric+" should have value of 0")
     }

     for metric, value := range ras.serverCounters {
-        require.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric))
+        require.Equal(t, int64(0), value, metric+" should have value of 0")
     }
 }
@@ -693,7 +693,7 @@ func coerceType(value interface{}, typ reflect.Type) reflect.Value {
                 value = float64(0)
             }
         default:
-            panic(fmt.Sprintf("unhandled destination type %s", typ.Kind().String()))
+            panic("unhandled destination type " + typ.Kind().String())
         }
     case int, int8, int16, int32, int64:
         switch typ.Kind() {

@@ -704,7 +704,7 @@ func coerceType(value interface{}, typ reflect.Type) reflect.Value {
         case reflect.Float64:
            value = float64(reflect.ValueOf(sourceType).Int())
         default:
-            panic(fmt.Sprintf("unhandled destination type %s", typ.Kind().String()))
+            panic("unhandled destination type " + typ.Kind().String())
         }
     case uint, uint8, uint16, uint32, uint64:
         switch typ.Kind() {

@@ -715,7 +715,7 @@ func coerceType(value interface{}, typ reflect.Type) reflect.Value {
         case reflect.Float64:
            value = float64(reflect.ValueOf(sourceType).Uint())
         default:
-            panic(fmt.Sprintf("unhandled destination type %s", typ.Kind().String()))
+            panic("unhandled destination type " + typ.Kind().String())
         }
     case float32, float64:
         switch typ.Kind() {

@@ -726,7 +726,7 @@ func coerceType(value interface{}, typ reflect.Type) reflect.Value {
         case reflect.Float64:
            // types match
         default:
-            panic(fmt.Sprintf("unhandled destination type %s", typ.Kind().String()))
+            panic("unhandled destination type " + typ.Kind().String())
         }
     case string:
         switch typ.Kind() {

@@ -737,7 +737,7 @@ func coerceType(value interface{}, typ reflect.Type) reflect.Value {
         case reflect.Float64:
            value, _ = strconv.ParseFloat(value.(string), 64)
         default:
-            panic(fmt.Sprintf("unhandled destination type %s", typ.Kind().String()))
+            panic("unhandled destination type " + typ.Kind().String())
         }
     default:
         panic(fmt.Sprintf("unhandled source type %T", sourceType))
@@ -996,7 +996,7 @@ func parseWearLeveling(acc telegraf.Accumulator, fields map[string]interface{},
     values := []int64{min, max, avg}
     for i, submetricName := range []string{"Min", "Max", "Avg"} {
         fields["raw_value"] = values[i]
-        tags["name"] = fmt.Sprintf("Wear_Leveling_%s", submetricName)
+        tags["name"] = "Wear_Leveling_" + submetricName
         acc.AddFields("smart_attribute", fields, tags)
     }
@@ -283,7 +283,7 @@ func (s *Stackdriver) newListTimeSeriesFilter(metricType string) string {
         resourceLabelsFilter = append(resourceLabelsFilter, fmt.Sprintf(valueFmt, resourceLabel.Key, resourceLabel.Value))
     }
     if len(resourceLabelsFilter) == 1 {
-        filterString += fmt.Sprintf(" AND %s", resourceLabelsFilter[0])
+        filterString += " AND " + resourceLabelsFilter[0]
     } else {
         filterString += fmt.Sprintf(" AND (%s)", strings.Join(resourceLabelsFilter, " OR "))
     }

@@ -301,7 +301,7 @@ func (s *Stackdriver) newListTimeSeriesFilter(metricType string) string {
         metricLabelsFilter = append(metricLabelsFilter, fmt.Sprintf(valueFmt, metricLabel.Key, metricLabel.Value))
     }
     if len(metricLabelsFilter) == 1 {
-        filterString += fmt.Sprintf(" AND %s", metricLabelsFilter[0])
+        filterString += " AND " + metricLabelsFilter[0]
     } else {
         filterString += fmt.Sprintf(" AND (%s)", strings.Join(metricLabelsFilter, " OR "))
     }

@@ -319,7 +319,7 @@ func (s *Stackdriver) newListTimeSeriesFilter(metricType string) string {
         userLabelsFilter = append(userLabelsFilter, fmt.Sprintf(valueFmt, metricLabel.Key, metricLabel.Value))
     }
     if len(userLabelsFilter) == 1 {
-        filterString += fmt.Sprintf(" AND %s", userLabelsFilter[0])
+        filterString += " AND " + userLabelsFilter[0]
     } else {
         filterString += fmt.Sprintf(" AND (%s)", strings.Join(userLabelsFilter, " OR "))
     }

@@ -337,7 +337,7 @@ func (s *Stackdriver) newListTimeSeriesFilter(metricType string) string {
         systemLabelsFilter = append(systemLabelsFilter, fmt.Sprintf(valueFmt, metricLabel.Key, metricLabel.Value))
     }
     if len(systemLabelsFilter) == 1 {
-        filterString += fmt.Sprintf(" AND %s", systemLabelsFilter[0])
+        filterString += " AND " + systemLabelsFilter[0]
     } else {
         filterString += fmt.Sprintf(" AND (%s)", strings.Join(systemLabelsFilter, " OR "))
     }

@@ -357,7 +357,7 @@ func (s *Stackdriver) newTimeSeriesConf(
         StartTime: &timestamppb.Timestamp{Seconds: startTime.Unix()},
     }
     tsReq := &monitoringpb.ListTimeSeriesRequest{
-        Name:     fmt.Sprintf("projects/%s", s.Project),
+        Name:     "projects/" + s.Project,
         Filter:   filter,
         Interval: interval,
     }

@@ -493,7 +493,7 @@ func (s *Stackdriver) generatetimeSeriesConfs(

     ret := []*timeSeriesConf{}
     req := &monitoringpb.ListMetricDescriptorsRequest{
-        Name: fmt.Sprintf("projects/%s", s.Project),
+        Name: "projects/" + s.Project,
     }

     filters := s.newListMetricDescriptorsFilters()
@@ -126,13 +126,13 @@ func getShowParameters(s *SystemdUnits) *[]string {

     // add the fields we're interested in to the command line
     for property := range tagMap {
-        params = append(params, fmt.Sprintf("--property=%s", property))
+        params = append(params, "--property="+property)
     }
     for property := range valueMap {
         // If a property exists within the tagMap it was already added. Do not add it again to
         // keep the command line short.
         if _, exists := tagMap[property]; !exists {
-            params = append(params, fmt.Sprintf("--property=%s", property))
+            params = append(params, "--property="+property)
         }
     }
@@ -310,7 +310,7 @@ func sensorsTemperaturesOld(syspath string) ([]host.TemperatureStat, error) {
         c, _ := os.ReadFile(filepath.Join(filepath.Dir(file), filename[0]+"_label"))
         if c != nil {
             //format the label from "Core 0" to "core0_"
-            label = fmt.Sprintf("%s_", strings.Join(strings.Split(strings.TrimSpace(strings.ToLower(string(c))), " "), ""))
+            label = strings.Join(strings.Split(strings.TrimSpace(strings.ToLower(string(c))), " "), "") + "_"
         }

         // Get the name of the temperature you are reading
@@ -34,7 +34,7 @@ func TestTengineGeneratesMetrics(t *testing.T) {
     defer ts.Close()

     n := &Tengine{
-        Urls: []string{fmt.Sprintf("%s/us", ts.URL)},
+        Urls: []string{ts.URL + "/us"},
     }

     var accTengine testutil.Accumulator
@@ -381,7 +381,7 @@ func (e *Endpoint) queryResyncSummary(ctx context.Context, vsanClient *soap.Clie
     }
     vsanSystemEx := types.ManagedObjectReference{
         Type:  "VsanSystemEx",
-        Value: fmt.Sprintf("vsanSystemEx-%s", strings.Split(hostRefValue, "-")[1]),
+        Value: "vsanSystemEx-" + strings.Split(hostRefValue, "-")[1],
     }

     includeSummary := true
@@ -14,7 +14,7 @@ import (
 )

 // initialize test data
-var sysDrive = fmt.Sprintf(`%s\`, os.Getenv("SystemDrive")) // C:\
+var sysDrive = os.Getenv("SystemDrive") + `\` // C:\

 // include Name as a tag, FreeSpace as a field, and Purpose as a known-null class property
 var testQuery = Query{
@@ -29,7 +29,7 @@ type poolInfo struct {
 }

 func probeVersion(kstatPath string) (metricsVersion, []string, error) {
-    poolsDirs, err := filepath.Glob(fmt.Sprintf("%s/*/objset-*", kstatPath))
+    poolsDirs, err := filepath.Glob(kstatPath + "/*/objset-*")

     // From the docs: the only possible returned error is ErrBadPattern, when pattern is malformed.
     // Because of this we need to determine how to fallback differently.

@@ -42,7 +42,7 @@ func probeVersion(kstatPath string) (metricsVersion, []string, error) {
     }

     // Fallback to the old kstat in case of an older ZFS version.
-    poolsDirs, err = filepath.Glob(fmt.Sprintf("%s/*/io", kstatPath))
+    poolsDirs, err = filepath.Glob(kstatPath + "/*/io")
     if err != nil {
         return unknown, poolsDirs, err
     }
@@ -165,7 +165,7 @@ func (adx *AzureDataExplorer) pushMetrics(ctx context.Context, format ingest.Fil
     length := len(metricsArray)
     adx.Log.Debugf("Writing %d metrics to table %q", length, tableName)
     reader := bytes.NewReader(metricsArray)
-    mapping := ingest.IngestionMappingRef(fmt.Sprintf("%s_mapping", tableName), ingest.JSON)
+    mapping := ingest.IngestionMappingRef(tableName+"_mapping", ingest.JSON)
     if metricIngestor != nil {
         if _, err := metricIngestor.FromReader(ctx, reader, format, mapping); err != nil {
             adx.Log.Errorf("sending ingestion request to Azure Data Explorer for table %q failed: %v", tableName, err)
@@ -4,7 +4,6 @@ import (
     "context"
     "encoding/base64"
     "errors"
-    "fmt"
     "runtime"
     "sync"
     "testing"

@@ -225,7 +224,7 @@ func (r *stubResult) Get(ctx context.Context) (string, error) {
     case err := <-r.err:
         return "", err
     case <-r.done:
-        return fmt.Sprintf("id-%s", r.metricIds[0]), nil
+        return "id-" + r.metricIds[0], nil
     }
 }
@@ -62,7 +62,7 @@ func TestMethod(t *testing.T) {
     ts := httptest.NewServer(http.NotFoundHandler())
     defer ts.Close()

-    u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+    u, err := url.Parse("http://" + ts.Listener.Addr().String())
     require.NoError(t, err)

     tests := []struct {

@@ -132,7 +132,7 @@ func TestHTTPClientConfig(t *testing.T) {
     ts := httptest.NewServer(http.NotFoundHandler())
     defer ts.Close()

-    u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+    u, err := url.Parse("http://" + ts.Listener.Addr().String())
     require.NoError(t, err)

     tests := []struct {

@@ -201,7 +201,7 @@ func TestStatusCode(t *testing.T) {
     ts := httptest.NewServer(http.NotFoundHandler())
     defer ts.Close()

-    u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+    u, err := url.Parse("http://" + ts.Listener.Addr().String())
     require.NoError(t, err)

     tests := []struct {

@@ -287,7 +287,7 @@ func TestContentType(t *testing.T) {
     ts := httptest.NewServer(http.NotFoundHandler())
     defer ts.Close()

-    u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+    u, err := url.Parse("http://" + ts.Listener.Addr().String())
     require.NoError(t, err)

     tests := []struct {

@@ -335,7 +335,7 @@ func TestContentEncodingGzip(t *testing.T) {
     ts := httptest.NewServer(http.NotFoundHandler())
     defer ts.Close()

-    u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+    u, err := url.Parse("http://" + ts.Listener.Addr().String())
     require.NoError(t, err)

     tests := []struct {

@@ -396,7 +396,7 @@ func TestBasicAuth(t *testing.T) {
     ts := httptest.NewServer(http.NotFoundHandler())
     defer ts.Close()

-    u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+    u, err := url.Parse("http://" + ts.Listener.Addr().String())
     require.NoError(t, err)

     tests := []struct {

@@ -453,7 +453,7 @@ func TestOAuthClientCredentialsGrant(t *testing.T) {

     var token = "2YotnFZFEjr1zCsicMWpAA"

-    u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+    u, err := url.Parse("http://" + ts.Listener.Addr().String())
     require.NoError(t, err)

     tests := []struct {

@@ -556,7 +556,7 @@ func TestOAuthAuthorizationCodeGrant(t *testing.T) {
     ts := httptest.NewServer(http.NotFoundHandler())
     defer ts.Close()

-    u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+    u, err := url.Parse("http://" + ts.Listener.Addr().String())
     require.NoError(t, err)

     tmpDir := t.TempDir()

@@ -654,7 +654,7 @@ func TestDefaultUserAgent(t *testing.T) {
     ts := httptest.NewServer(http.NotFoundHandler())
     defer ts.Close()

-    u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+    u, err := url.Parse("http://" + ts.Listener.Addr().String())
     require.NoError(t, err)

     t.Run("default-user-agent", func(t *testing.T) {

@@ -683,7 +683,7 @@ func TestBatchedUnbatched(t *testing.T) {
     ts := httptest.NewServer(http.NotFoundHandler())
     defer ts.Close()

-    u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+    u, err := url.Parse("http://" + ts.Listener.Addr().String())
     require.NoError(t, err)

     client := &HTTP{

@@ -734,7 +734,7 @@ func TestAwsCredentials(t *testing.T) {
     ts := httptest.NewServer(http.NotFoundHandler())
     defer ts.Close()

-    u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+    u, err := url.Parse("http://" + ts.Listener.Addr().String())
     require.NoError(t, err)

     tests := []struct {
@@ -63,7 +63,7 @@ func TestHTTP_CreateDatabase(t *testing.T) {
     ts := httptest.NewServer(http.NotFoundHandler())
     defer ts.Close()

-    u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+    u, err := url.Parse("http://" + ts.Listener.Addr().String())
     require.NoError(t, err)

     successResponse := []byte(`{"results": [{"statement_id": 0}]}`)

@@ -273,7 +273,7 @@ func TestHTTP_Write(t *testing.T) {
     ts := httptest.NewServer(http.NotFoundHandler())
     defer ts.Close()

-    u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+    u, err := url.Parse("http://" + ts.Listener.Addr().String())
     require.NoError(t, err)

     tests := []struct {

@@ -765,7 +765,7 @@ func TestDBRPTags(t *testing.T) {
     ts := httptest.NewServer(http.NotFoundHandler())
     defer ts.Close()

-    u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+    u, err := url.Parse("http://" + ts.Listener.Addr().String())
     require.NoError(t, err)

     tests := []struct {

@@ -1011,7 +1011,7 @@ func TestDBRPTagsCreateDatabaseNotCalledOnRetryAfterForbidden(t *testing.T) {
     ts := httptest.NewServer(http.NotFoundHandler())
     defer ts.Close()

-    u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+    u, err := url.Parse("http://" + ts.Listener.Addr().String())
     require.NoError(t, err)

     handlers := &MockHandlerChain{

@@ -1084,7 +1084,7 @@ func TestDBRPTagsCreateDatabaseCalledOnDatabaseNotFound(t *testing.T) {
     ts := httptest.NewServer(http.NotFoundHandler())
     defer ts.Close()

-    u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+    u, err := url.Parse("http://" + ts.Listener.Addr().String())
     require.NoError(t, err)

     handlers := &MockHandlerChain{

@@ -1176,7 +1176,7 @@ func TestDBNotFoundShouldDropMetricWhenSkipDatabaseCreateIsTrue(t *testing.T) {
     ts := httptest.NewServer(http.NotFoundHandler())
     defer ts.Close()

-    u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+    u, err := url.Parse("http://" + ts.Listener.Addr().String())
     require.NoError(t, err)
     f := func(w http.ResponseWriter, r *http.Request) {
         switch r.URL.Path {
@@ -3,7 +3,6 @@ package loki
 import (
 	"compress/gzip"
 	"encoding/json"
-	"fmt"
 	"io"
 	"net/http"
 	"net/http/httptest"

@@ -64,7 +63,7 @@ func TestStatusCode(t *testing.T) {
 	ts := httptest.NewServer(http.NotFoundHandler())
 	defer ts.Close()
 
-	u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+	u, err := url.Parse("http://" + ts.Listener.Addr().String())
 	require.NoError(t, err)
 
 	tests := []struct {

@@ -133,7 +132,7 @@ func TestContentType(t *testing.T) {
 	ts := httptest.NewServer(http.NotFoundHandler())
 	defer ts.Close()
 
-	u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+	u, err := url.Parse("http://" + ts.Listener.Addr().String())
 	require.NoError(t, err)
 
 	tests := []struct {

@@ -178,7 +177,7 @@ func TestContentEncodingGzip(t *testing.T) {
 	ts := httptest.NewServer(http.NotFoundHandler())
 	defer ts.Close()
 
-	u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+	u, err := url.Parse("http://" + ts.Listener.Addr().String())
 	require.NoError(t, err)
 
 	tests := []struct {

@@ -244,7 +243,7 @@ func TestMetricNameLabel(t *testing.T) {
 	ts := httptest.NewServer(http.NotFoundHandler())
 	defer ts.Close()
 
-	u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+	u, err := url.Parse("http://" + ts.Listener.Addr().String())
 	require.NoError(t, err)
 
 	tests := []struct {

@@ -294,7 +293,7 @@ func TestBasicAuth(t *testing.T) {
 	ts := httptest.NewServer(http.NotFoundHandler())
 	defer ts.Close()
 
-	u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+	u, err := url.Parse("http://" + ts.Listener.Addr().String())
 	require.NoError(t, err)
 
 	tests := []struct {

@@ -341,7 +340,7 @@ func TestOAuthClientCredentialsGrant(t *testing.T) {
 
 	var token = "2YotnFZFEjr1zCsicMWpAA"
 
-	u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+	u, err := url.Parse("http://" + ts.Listener.Addr().String())
 	require.NoError(t, err)
 
 	tests := []struct {

@@ -408,7 +407,7 @@ func TestDefaultUserAgent(t *testing.T) {
 	ts := httptest.NewServer(http.NotFoundHandler())
 	defer ts.Close()
 
-	u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+	u, err := url.Parse("http://" + ts.Listener.Addr().String())
 	require.NoError(t, err)
 
 	t.Run("default-user-agent", func(t *testing.T) {

@@ -432,7 +431,7 @@ func TestMetricSorting(t *testing.T) {
 	ts := httptest.NewServer(http.NotFoundHandler())
 	defer ts.Close()
 
-	u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+	u, err := url.Parse("http://" + ts.Listener.Addr().String())
 	require.NoError(t, err)
 
 	t.Run("out of order metrics", func(t *testing.T) {
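With the last fmt.Sprintf call in this file gone, the "fmt" import has to be dropped as well (the hunk at the top of the file), otherwise the package no longer builds. A small sketch of the resulting call-site shape (a hypothetical helper for illustration, not code from the plugin):

package example

import (
	"net/http/httptest"
	"net/url"
	"testing"
)

// newServerURL parses the listener address of a test server into a *url.URL.
// Keeping "fmt" imported in a file like this would fail compilation with:
// imported and not used: "fmt".
func newServerURL(t *testing.T, ts *httptest.Server) *url.URL {
	t.Helper()
	u, err := url.Parse("http://" + ts.Listener.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	return u
}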
@@ -78,7 +78,7 @@ func TestConnectAndWrite(t *testing.T) {
 	ts := httptest.NewServer(http.NotFoundHandler())
 	defer ts.Close()
 
-	testURL := fmt.Sprintf("http://%s", ts.Listener.Addr().String())
+	testURL := "http://" + ts.Listener.Addr().String()
 	testAPIKey := "a0b1c2d3-e4f5-g6h7-i8j9-k0l1m2n3o4p5"
 	testCheck := "telegraf"
 	testEntity := "entity1"

@@ -86,7 +86,7 @@ func TestConnectAndWrite(t *testing.T) {
 	testHandler := "influxdb"
 	testTagName := "myTagName"
 	testTagValue := "myTagValue"
-	expectedAuthHeader := fmt.Sprintf("Key %s", testAPIKey)
+	expectedAuthHeader := "Key " + testAPIKey
 	expectedURL := fmt.Sprintf("/api/core/v2/namespaces/%s/events", testNamespace)
 	expectedPointName := "cpu"
 	expectedPointValue := float64(42)
@@ -394,7 +394,7 @@ func (s *Stackdriver) sendBatch(batch []telegraf.Metric) error {
 
 	// Prepare time series request.
 	timeSeriesRequest := &monitoringpb.CreateTimeSeriesRequest{
-		Name: fmt.Sprintf("projects/%s", s.Project),
+		Name: "projects/" + s.Project,
 		TimeSeries: timeSeries,
 	}
 
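The request name above follows the `projects/<project-id>` resource format; because both halves are strings, concatenation is a drop-in replacement. fmt.Sprintf remains the better tool when an operand is not a string and would otherwise need an explicit conversion. A short sketch of that distinction (the values are made up):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	project := "my-gcp-project"

	// All operands are strings: plain concatenation is enough.
	name := "projects/" + project

	// A non-string operand needs an explicit conversion for concatenation...
	retries := 3
	a := "retries=" + strconv.Itoa(retries)

	// ...or fmt.Sprintf, which converts via the %d verb.
	b := fmt.Sprintf("retries=%d", retries)

	fmt.Println(name, a, b)
}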
@@ -106,7 +106,7 @@ func TestWrite(t *testing.T) {
 	}
 
 	s := &Stackdriver{
-		Project: fmt.Sprintf("projects/%s", "[PROJECT]"),
+		Project: "projects/" + "[PROJECT]",
 		Namespace: "test",
 		Log: testutil.Logger{},
 		client: c,

@@ -134,7 +134,7 @@ func TestWriteResourceTypeAndLabels(t *testing.T) {
 	}
 
 	s := &Stackdriver{
-		Project: fmt.Sprintf("projects/%s", "[PROJECT]"),
+		Project: "projects/" + "[PROJECT]",
 		Namespace: "test",
 		ResourceType: "foo",
 		ResourceLabels: map[string]string{

@@ -167,7 +167,7 @@ func TestWriteTagsAsResourceLabels(t *testing.T) {
 	}
 
 	s := &Stackdriver{
-		Project: fmt.Sprintf("projects/%s", "[PROJECT]"),
+		Project: "projects/" + "[PROJECT]",
 		Namespace: "test",
 		ResourceType: "foo",
 		TagsAsResourceLabels: []string{"job_name"},

@@ -231,7 +231,7 @@ func TestWriteMetricTypesOfficial(t *testing.T) {
 	}
 
 	s := &Stackdriver{
-		Project: fmt.Sprintf("projects/%s", "[PROJECT]"),
+		Project: "projects/" + "[PROJECT]",
 		Namespace: "test",
 		ResourceLabels: map[string]string{
 			"mylabel": "myvalue",

@@ -307,7 +307,7 @@ func TestWriteMetricTypesPath(t *testing.T) {
 	}
 
 	s := &Stackdriver{
-		Project: fmt.Sprintf("projects/%s", "[PROJECT]"),
+		Project: "projects/" + "[PROJECT]",
 		Namespace: "test",
 		ResourceLabels: map[string]string{
 			"mylabel": "myvalue",

@@ -367,7 +367,7 @@ func TestWriteAscendingTime(t *testing.T) {
 	}
 
 	s := &Stackdriver{
-		Project: fmt.Sprintf("projects/%s", "[PROJECT]"),
+		Project: "projects/" + "[PROJECT]",
 		Namespace: "test",
 		Log: testutil.Logger{},
 		client: c,

@@ -440,7 +440,7 @@ func TestWriteBatchable(t *testing.T) {
 	}
 
 	s := &Stackdriver{
-		Project: fmt.Sprintf("projects/%s", "[PROJECT]"),
+		Project: "projects/" + "[PROJECT]",
 		Namespace: "test",
 		Log: testutil.Logger{},
 		client: c,

@@ -629,7 +629,7 @@ func TestWriteIgnoredErrors(t *testing.T) {
 	}
 
 	s := &Stackdriver{
-		Project: fmt.Sprintf("projects/%s", "[PROJECT]"),
+		Project: "projects/" + "[PROJECT]",
 		Namespace: "test",
 		Log: testutil.Logger{},
 		client: c,

@@ -706,7 +706,7 @@ func TestGetStackdriverIntervalEndpoints(t *testing.T) {
 	}
 
 	s := &Stackdriver{
-		Project: fmt.Sprintf("projects/%s", "[PROJECT]"),
+		Project: "projects/" + "[PROJECT]",
 		Namespace: "test",
 		Log: testutil.Logger{},
 		client: c,
@@ -67,7 +67,7 @@ func TestMethod(t *testing.T) {
 	ts := httptest.NewServer(http.NotFoundHandler())
 	defer ts.Close()
 
-	u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+	u, err := url.Parse("http://" + ts.Listener.Addr().String())
 	require.NoError(t, err)
 
 	tests := []struct {

@@ -118,7 +118,7 @@ func TestStatusCode(t *testing.T) {
 	ts := httptest.NewServer(http.NotFoundHandler())
 	defer ts.Close()
 
-	u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+	u, err := url.Parse("http://" + ts.Listener.Addr().String())
 	require.NoError(t, err)
 
 	pluginFn := func() *SumoLogic {

@@ -271,7 +271,7 @@ func TestContentType(t *testing.T) {
 	}))
 	defer ts.Close()
 
-	u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+	u, err := url.Parse("http://" + ts.Listener.Addr().String())
 	require.NoError(t, err)
 
 	plugin := tt.plugin()

@@ -293,7 +293,7 @@ func TestContentEncodingGzip(t *testing.T) {
 	ts := httptest.NewServer(http.NotFoundHandler())
 	defer ts.Close()
 
-	u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+	u, err := url.Parse("http://" + ts.Listener.Addr().String())
 	require.NoError(t, err)
 
 	tests := []struct {

@@ -347,7 +347,7 @@ func TestDefaultUserAgent(t *testing.T) {
 	ts := httptest.NewServer(http.NotFoundHandler())
 	defer ts.Close()
 
-	u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+	u, err := url.Parse("http://" + ts.Listener.Addr().String())
 	require.NoError(t, err)
 
 	t.Run("default-user-agent", func(t *testing.T) {

@@ -466,7 +466,7 @@ func TestMaxRequestBodySize(t *testing.T) {
 	ts := httptest.NewServer(http.NotFoundHandler())
 	defer ts.Close()
 
-	u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+	u, err := url.Parse("http://" + ts.Listener.Addr().String())
 	require.NoError(t, err)
 
 	testcases := []struct {

@@ -640,7 +640,7 @@ func TestTryingToSendEmptyMetricsDoesntFail(t *testing.T) {
 	ts := httptest.NewServer(http.NotFoundHandler())
 	defer ts.Close()
 
-	u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
+	u, err := url.Parse("http://" + ts.Listener.Addr().String())
 	require.NoError(t, err)
 
 	metrics := make([]telegraf.Metric, 0)
@@ -3,7 +3,6 @@ package zabbix
 import (
 	"encoding/binary"
 	"encoding/json"
-	"fmt"
 	"net"
 	"os"
 	"sort"

@@ -859,7 +858,7 @@ func TestBuildZabbixMetric(t *testing.T) {
 		1,
 	)
 	require.NoError(t, err)
-	require.Equal(t, fmt.Sprintf("%sname.value[b,bar]", keyPrefix), zm.Key)
+	require.Equal(t, keyPrefix+"name.value[b,bar]", zm.Key)
 
 	zm, err = z.buildZabbixMetric(testutil.MustMetric(
 		"name",

@@ -870,7 +869,7 @@ func TestBuildZabbixMetric(t *testing.T) {
 		1,
 	)
 	require.NoError(t, err)
-	require.Equal(t, fmt.Sprintf("%sname.value", keyPrefix), zm.Key)
+	require.Equal(t, keyPrefix+"name.value", zm.Key)
 }
 
 func TestGetHostname(t *testing.T) {
@@ -59,11 +59,11 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
 			switch {
 			case strings.HasSuffix(field.Key, "_bucket"):
 				// if bucket only, init sum, count, inf
-				metrickeysum, promtssum := getPromTS(fmt.Sprintf("%s_sum", metricName), labels, float64(0), metric.Time())
+				metrickeysum, promtssum := getPromTS(metricName+"_sum", labels, float64(0), metric.Time())
 				if _, ok = entries[metrickeysum]; !ok {
 					entries[metrickeysum] = promtssum
 				}
-				metrickeycount, promtscount := getPromTS(fmt.Sprintf("%s_count", metricName), labels, float64(0), metric.Time())
+				metrickeycount, promtscount := getPromTS(metricName+"_count", labels, float64(0), metric.Time())
 				if _, ok = entries[metrickeycount]; !ok {
 					entries[metrickeycount] = promtscount
 				}

@@ -71,7 +71,7 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
 					Name: "le",
 					Value: "+Inf",
 				}
-				metrickeyinf, promtsinf := getPromTS(fmt.Sprintf("%s_bucket", metricName), labels, float64(0), metric.Time(), extraLabel)
+				metrickeyinf, promtsinf := getPromTS(metricName+"_bucket", labels, float64(0), metric.Time(), extraLabel)
 				if _, ok = entries[metrickeyinf]; !ok {
 					entries[metrickeyinf] = promtsinf
 				}

@@ -93,14 +93,14 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
 					Name: "le",
 					Value: fmt.Sprint(bound),
 				}
-				metrickey, promts = getPromTS(fmt.Sprintf("%s_bucket", metricName), labels, float64(count), metric.Time(), extraLabel)
+				metrickey, promts = getPromTS(metricName+"_bucket", labels, float64(count), metric.Time(), extraLabel)
 			case strings.HasSuffix(field.Key, "_sum"):
 				sum, ok := prometheus.SampleSum(field.Value)
 				if !ok {
 					continue
 				}
 
-				metrickey, promts = getPromTS(fmt.Sprintf("%s_sum", metricName), labels, sum, metric.Time())
+				metrickey, promts = getPromTS(metricName+"_sum", labels, sum, metric.Time())
 			case strings.HasSuffix(field.Key, "_count"):
 				count, ok := prometheus.SampleCount(field.Value)
 				if !ok {

@@ -112,12 +112,12 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
 					Name: "le",
 					Value: "+Inf",
 				}
-				metrickeyinf, promtsinf := getPromTS(fmt.Sprintf("%s_bucket", metricName), labels, float64(count), metric.Time(), extraLabel)
+				metrickeyinf, promtsinf := getPromTS(metricName+"_bucket", labels, float64(count), metric.Time(), extraLabel)
 				if minf, ok := entries[metrickeyinf]; !ok || minf.Samples[0].Value == 0 {
 					entries[metrickeyinf] = promtsinf
 				}
 
-				metrickey, promts = getPromTS(fmt.Sprintf("%s_count", metricName), labels, float64(count), metric.Time())
+				metrickey, promts = getPromTS(metricName+"_count", labels, float64(count), metric.Time())
 			default:
 				continue
 			}

@@ -129,14 +129,14 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
 					continue
 				}
 
-				metrickey, promts = getPromTS(fmt.Sprintf("%s_sum", metricName), labels, sum, metric.Time())
+				metrickey, promts = getPromTS(metricName+"_sum", labels, sum, metric.Time())
 			case strings.HasSuffix(field.Key, "_count"):
 				count, ok := prometheus.SampleCount(field.Value)
 				if !ok {
 					continue
 				}
 
-				metrickey, promts = getPromTS(fmt.Sprintf("%s_count", metricName), labels, float64(count), metric.Time())
+				metrickey, promts = getPromTS(metricName+"_count", labels, float64(count), metric.Time())
 			default:
 				quantileTag, ok := metric.GetTag("quantile")
 				if !ok {
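The serializer hunks above all derive Prometheus histogram and summary series names by appending the _sum, _count, and _bucket suffixes to the base metric name, so every call site is a string-plus-string join. A compact sketch of that naming pattern (a hypothetical helper for illustration, not the serializer's actual API):

package main

import "fmt"

// histogramSeriesNames returns the three series names derived from a
// histogram metric: <name>_sum, <name>_count and <name>_bucket.
func histogramSeriesNames(metricName string) (sum, count, bucket string) {
	return metricName + "_sum", metricName + "_count", metricName + "_bucket"
}

func main() {
	s, c, b := histogramSeriesNames("http_request_duration_seconds")
	fmt.Println(s)
	fmt.Println(c)
	fmt.Println(b)
}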
@@ -475,7 +475,7 @@ func (a *Accumulator) AssertDoesNotContainMeasurement(t *testing.T, measurement
 	defer a.Unlock()
 	for _, p := range a.Metrics {
 		if p.Measurement == measurement {
-			msg := fmt.Sprintf("found unexpected measurement %s", measurement)
+			msg := "found unexpected measurement " + measurement
 			require.Fail(t, msg)
 		}
 	}
@@ -137,7 +137,7 @@ func (c *Container) CheckStatus(serviceName string) error {
 
 func (c *Container) UploadAndInstall(filename string) error {
 	basename := filepath.Base(filename)
-	destination := fmt.Sprintf("/root/%s", basename)
+	destination := "/root/" + basename
 
 	if err := c.client.Push(c.Name, filename, destination); err != nil {
 		return err
@@ -78,7 +78,7 @@ func launchTests(packageFile string, images []string) error {
 		fmt.Printf("starting test with %s\n", image)
 
 		uuidWithHyphen := uuid.New()
-		name := fmt.Sprintf("telegraf-test-%s", uuidWithHyphen.String()[0:8])
+		name := "telegraf-test-" + uuidWithHyphen.String()[0:8]
 
 		err := runTest(image, name, packageFile)
 		if err != nil {
@@ -147,32 +147,32 @@ func main() {
 		{
 			FileName: ".circleci/config.yml",
 			Regex: `(quay\.io\/influxdb\/telegraf-ci):(\d.\d*.\d)`,
-			Replace: fmt.Sprintf("$1:%s", version),
+			Replace: "$1:" + version,
 		},
 		{
 			FileName: ".github/workflows/govulncheck.yml",
 			Regex: `(go-version-input).*`,
-			Replace: fmt.Sprintf("$1: %s", version),
+			Replace: "$1: " + version,
 		},
 		{
 			FileName: "go.mod",
 			Regex: `(go)\s(\d.\d*)`,
-			Replace: fmt.Sprintf("$1 %s", noPatchVersion),
+			Replace: "$1 " + noPatchVersion,
 		},
 		{
 			FileName: "Makefile",
 			Regex: `(quay\.io\/influxdb\/telegraf-ci):(\d.\d*.\d)`,
-			Replace: fmt.Sprintf("$1:%s", version),
+			Replace: "$1:" + version,
 		},
 		{
 			FileName: "README.md",
 			Regex: `(Telegraf requires Go version) (\d.\d*)`,
-			Replace: fmt.Sprintf("$1 %s", noPatchVersion),
+			Replace: "$1 " + noPatchVersion,
 		},
 		{
 			FileName: "scripts/ci.docker",
 			Regex: `(FROM golang):(\d.\d*.\d)`,
-			Replace: fmt.Sprintf("$1:%s", version),
+			Replace: "$1:" + version,
 		},
 		{
 			FileName: "scripts/installgo_linux.sh",
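The Replace values in this table are regexp replacement templates: $1 is expanded from the first capture group by ReplaceAllString, so assembling the template with string addition is equivalent to the old Sprintf call. A minimal sketch of the mechanism (the version value and input line are made up):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	version := "1.30.0"
	line := "FROM golang:1.29.4"

	re := regexp.MustCompile(`(FROM golang):(\d.\d*.\d)`)

	// "$1:" + version and fmt.Sprintf("$1:%s", version) build the same template;
	// the $1 reference is resolved by ReplaceAllString, not when the string is built.
	fmt.Println(re.ReplaceAllString(line, "$1:"+version)) // FROM golang:1.30.0
}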