Linter fixes - gosimple (#9046)

Paweł Żak, 2021-03-25 18:57:01 +01:00, committed by GitHub
parent d5b4c3e148
commit 099ccda3f9
70 changed files with 152 additions and 255 deletions
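The changes are mechanical rewrites suggested by gosimple: strings.Contains/ContainsAny instead of Index comparisons, bytes.Equal instead of bytes.Compare(...) == 0, time.Since instead of time.Now().Sub(...), type switches that bind the asserted value, append(dst, src...) instead of element-by-element loops, merged declare-then-assign statements, and removal of redundant break and return statements, boolean comparisons, and make() arguments. Error strings are also lowercased, following the Go convention for wrappable errors.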

View File

@ -47,7 +47,7 @@ func Compile(filters []string) (Filter, error) {
// hasMeta reports whether path contains any magic glob characters.
func hasMeta(s string) bool {
- return strings.IndexAny(s, "*?[") >= 0
+ return strings.ContainsAny(s, "*?[")
}
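gosimple's S1003 flags index comparisons that only test containment; strings.ContainsAny expresses the same check directly. A minimal standalone sketch (not from this commit), assuming only the standard library:

package main

import (
	"fmt"
	"strings"
)

func main() {
	s := "logs/*.log"
	// Both expressions report whether s contains a glob metacharacter.
	fmt.Println(strings.IndexAny(s, "*?[") >= 0) // true
	fmt.Println(strings.ContainsAny(s, "*?["))   // true
}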
type filter struct {

View File

@ -84,10 +84,10 @@ func (g *GlobPath) GetRoots() []string {
// hasMeta reports whether path contains any magic glob characters.
func hasMeta(path string) bool {
- return strings.IndexAny(path, "*?[") >= 0
+ return strings.ContainsAny(path, "*?[")
}
// hasSuperMeta reports whether path contains any super magic glob characters (**).
func hasSuperMeta(path string) bool {
- return strings.Index(path, "**") >= 0
+ return strings.Contains(path, "**")
}

View File

@ -67,7 +67,7 @@ func TestMain(m *testing.M) {
// externalProcess is an external "misbehaving" process that won't exit
// cleanly.
func externalProcess() {
- wait := make(chan int, 0)
+ wait := make(chan int)
fmt.Fprintln(os.Stdout, "started")
<-wait
os.Exit(2)
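A capacity of 0 is the default for channels, so make(chan int, 0) and make(chan int) both create an unbuffered channel; gosimple's S1019 flags the redundant argument. A quick illustration:

package main

import "fmt"

func main() {
	a := make(chan int, 0) // unbuffered, capacity 0
	b := make(chan int)    // identical: unbuffered
	fmt.Println(cap(a), cap(b)) // 0 0
}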

View File

@ -59,10 +59,8 @@ func (t *Template) Apply(line string, joiner string) (string, map[string]string,
field = append(field, fields[i])
case "field*":
field = append(field, fields[i:]...)
- break
case "measurement*":
measurement = append(measurement, fields[i:]...)
- break
default:
tags[tag] = append(tags[tag], fields[i])
}
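Go switch cases never fall through unless fallthrough is written explicitly, so a trailing break is dead code (S1023, omit redundant control flow). For example:

package main

import "fmt"

func main() {
	switch 2 {
	case 1:
		fmt.Println("one") // execution would stop here; no break needed
	case 2:
		fmt.Println("two") // prints "two" only; no fall-through to default
	default:
		fmt.Println("other")
	}
}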

View File

@ -69,8 +69,7 @@ func (t *telegrafLog) Write(b []byte) (n int, err error) {
}
func (t *telegrafLog) Close() error {
- var stdErrWriter io.Writer
- stdErrWriter = os.Stderr
+ stdErrWriter := os.Stderr
// avoid closing stderr
if t.internalWriter != stdErrWriter {
closer, isCloser := t.internalWriter.(io.Closer)
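A declaration immediately followed by an assignment collapses into one short variable declaration (S1021). Note that := infers the concrete type, so stdErrWriter above becomes *os.File rather than io.Writer, which still works for the inequality check against the io.Writer field. Sketch:

package main

import (
	"fmt"
	"os"
)

func main() {
	// Before: var w io.Writer; w = os.Stderr
	w := os.Stderr        // after: type inferred as *os.File
	fmt.Printf("%T\n", w) // *os.File
}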

View File

@ -54,41 +54,41 @@ func (f *Filter) Compile() error {
var err error
f.nameDrop, err = filter.Compile(f.NameDrop)
if err != nil {
return fmt.Errorf("Error compiling 'namedrop', %s", err)
return fmt.Errorf("error compiling 'namedrop', %s", err)
}
f.namePass, err = filter.Compile(f.NamePass)
if err != nil {
return fmt.Errorf("Error compiling 'namepass', %s", err)
return fmt.Errorf("error compiling 'namepass', %s", err)
}
f.fieldDrop, err = filter.Compile(f.FieldDrop)
if err != nil {
return fmt.Errorf("Error compiling 'fielddrop', %s", err)
return fmt.Errorf("error compiling 'fielddrop', %s", err)
}
f.fieldPass, err = filter.Compile(f.FieldPass)
if err != nil {
return fmt.Errorf("Error compiling 'fieldpass', %s", err)
return fmt.Errorf("error compiling 'fieldpass', %s", err)
}
f.tagExclude, err = filter.Compile(f.TagExclude)
if err != nil {
return fmt.Errorf("Error compiling 'tagexclude', %s", err)
return fmt.Errorf("error compiling 'tagexclude', %s", err)
}
f.tagInclude, err = filter.Compile(f.TagInclude)
if err != nil {
return fmt.Errorf("Error compiling 'taginclude', %s", err)
return fmt.Errorf("error compiling 'taginclude', %s", err)
}
for i := range f.TagDrop {
f.TagDrop[i].filter, err = filter.Compile(f.TagDrop[i].Filter)
if err != nil {
return fmt.Errorf("Error compiling 'tagdrop', %s", err)
return fmt.Errorf("error compiling 'tagdrop', %s", err)
}
}
for i := range f.TagPass {
f.TagPass[i].filter, err = filter.Compile(f.TagPass[i].Filter)
if err != nil {
return fmt.Errorf("Error compiling 'tagpass', %s", err)
return fmt.Errorf("error compiling 'tagpass', %s", err)
}
}
return nil
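The recapitalization here and throughout the commit follows the Go convention that error strings start lowercase and omit trailing punctuation, because they are routinely embedded in longer messages. A sketch of why lowercase composes better:

package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("error compiling 'namedrop'")
	// A lowercase message reads naturally once wrapped by the caller:
	fmt.Println(fmt.Errorf("building filter: %w", err))
}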
@ -132,17 +132,11 @@ func (f *Filter) IsActive() bool {
// based on the drop/pass filter parameters
func (f *Filter) shouldNamePass(key string) bool {
pass := func(f *Filter) bool {
- if f.namePass.Match(key) {
- return true
- }
- return false
+ return f.namePass.Match(key)
}
drop := func(f *Filter) bool {
- if f.nameDrop.Match(key) {
- return false
- }
- return true
+ return !f.nameDrop.Match(key)
}
if f.namePass != nil && f.nameDrop != nil {
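An if statement that returns true or false depending on a condition reduces to returning the condition itself, negated where needed (S1008). Standalone equivalent:

package main

import "fmt"

func isPositiveVerbose(n int) bool {
	if n > 0 {
		return true
	}
	return false
}

// Same behavior, one expression.
func isPositive(n int) bool { return n > 0 }

func main() {
	fmt.Println(isPositiveVerbose(3), isPositive(3)) // true true
}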

View File

@ -100,6 +100,4 @@ func SetLoggerOnPlugin(i interface{}, log telegraf.Logger) {
log.Debugf("Plugin %q defines a 'Log' field on its struct of an unexpected type %q. Expected telegraf.Logger",
valI.Type().Name(), field.Type().String())
}
- return
}

View File

@ -539,9 +539,7 @@ func (m *mockOutput) Write(metrics []telegraf.Metric) error {
m.metrics = []telegraf.Metric{}
}
- for _, metric := range metrics {
- m.metrics = append(m.metrics, metric)
- }
+ m.metrics = append(m.metrics, metrics...)
return nil
}
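Appending the elements of one slice to another in a loop is exactly what the ... spread form does in a single call, and it lets the runtime grow the destination once rather than per element (S1011). Sketch:

package main

import "fmt"

func main() {
	dst := []int{1, 2}
	src := []int{3, 4, 5}
	dst = append(dst, src...) // replaces: for _, v := range src { dst = append(dst, v) }
	fmt.Println(dst)          // [1 2 3 4 5]
}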

View File

@ -84,6 +84,4 @@ func setLoggerOnPlugin(i interface{}, log telegraf.Logger) {
field.Set(reflect.ValueOf(log))
}
}
- return
}

View File

@ -250,8 +250,6 @@ func (a *Aerospike) parseNodeInfo(stats map[string]string, hostPort string, node
fields[key] = parseAerospikeValue(key, v)
}
acc.AddFields("aerospike_node", fields, tags, time.Now())
- return
}
func (a *Aerospike) getNamespaces(n *as.Node) ([]string, error) {
@ -295,8 +293,6 @@ func (a *Aerospike) parseNamespaceInfo(stats map[string]string, hostPort string,
nFields[key] = parseAerospikeValue(key, parts[1])
}
acc.AddFields("aerospike_namespace", nFields, nTags, time.Now())
- return
}
func (a *Aerospike) getSets(n *as.Node) ([]string, error) {
@ -365,8 +361,6 @@ func (a *Aerospike) parseSetInfo(stats map[string]string, hostPort string, names
nFields[key] = parseAerospikeValue(key, pieces[1])
}
acc.AddFields("aerospike_set", nFields, nTags, time.Now())
- return
}
func (a *Aerospike) getTTLHistogram(hostPort string, namespace string, set string, n *as.Node, acc telegraf.Accumulator) error {
@ -430,7 +424,7 @@ func (a *Aerospike) parseHistogram(stats map[string]string, hostPort string, nam
// Normalize in case there are fewer buckets than expected
numRecordsPerBucket := 1
if len(buckets) > a.NumberHistogramBuckets {
- numRecordsPerBucket = int(math.Ceil((float64(len(buckets)) / float64(a.NumberHistogramBuckets))))
+ numRecordsPerBucket = int(math.Ceil(float64(len(buckets)) / float64(a.NumberHistogramBuckets)))
}
bucketCount := 0
@ -462,8 +456,6 @@ func (a *Aerospike) parseHistogram(stats map[string]string, hostPort string, nam
}
acc.AddFields(fmt.Sprintf("aerospike_histogram_%v", strings.Replace(histogramType, "-", "_", -1)), nFields, nTags, time.Now())
- return
}
func splitNamespaceSet(namespaceSet string) (string, string) {

View File

@ -71,7 +71,7 @@ func (a *externalAuth) Mechanism() string {
return "EXTERNAL"
}
func (a *externalAuth) Response() string {
- return fmt.Sprintf("\000")
+ return "\000"
}
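fmt.Sprintf with a constant format string and no arguments simply returns the format string, so the call adds overhead without effect; "\000" is already the one-byte NUL string returned here. Sketch:

package main

import "fmt"

func main() {
	a := fmt.Sprintf("\000")    // formats nothing
	b := "\000"                 // identical one-byte string
	fmt.Println(a == b, len(b)) // true 1
}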
const (
@ -288,7 +288,7 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err
ch, err := a.conn.Channel()
if err != nil {
return nil, fmt.Errorf("Failed to open a channel: %s", err.Error())
return nil, fmt.Errorf("failed to open a channel: %s", err.Error())
}
if a.Exchange != "" {
@ -335,7 +335,7 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err
nil,
)
if err != nil {
return nil, fmt.Errorf("Failed to bind a queue: %s", err)
return nil, fmt.Errorf("failed to bind a queue: %s", err)
}
}
@ -345,7 +345,7 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err
false, // global
)
if err != nil {
return nil, fmt.Errorf("Failed to set QoS: %s", err)
return nil, fmt.Errorf("failed to set QoS: %s", err)
}
msgs, err := ch.Consume(
@ -358,7 +358,7 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err
nil, // arguments
)
if err != nil {
return nil, fmt.Errorf("Failed establishing connection to queue: %s", err)
return nil, fmt.Errorf("failed establishing connection to queue: %s", err)
}
return msgs, err
@ -395,7 +395,7 @@ func declareExchange(
)
}
if err != nil {
return fmt.Errorf("Error declaring exchange: %v", err)
return fmt.Errorf("error declaring exchange: %v", err)
}
return nil
}
@ -437,7 +437,7 @@ func declareQueue(
)
}
if err != nil {
return nil, fmt.Errorf("Error declaring queue: %v", err)
return nil, fmt.Errorf("error declaring queue: %v", err)
}
return &queue, nil
}

View File

@ -127,12 +127,12 @@ func (c *CiscoTelemetryMDT) nxosValueXform(field *telemetry.TelemetryField, valu
}
case *telemetry.TelemetryField_Uint32Value:
vali, ok := value.(uint32)
- if ok == true {
+ if ok {
return vali
}
case *telemetry.TelemetryField_Uint64Value:
vali, ok := value.(uint64)
- if ok == true {
+ if ok {
return vali
}
} //switch
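Comparing a boolean against true or false is redundant; the value already is the condition (S1002). For example:

package main

import "fmt"

func main() {
	var value interface{} = uint32(7)
	vali, ok := value.(uint32)
	if ok { // equivalent to: if ok == true
		fmt.Println(vali)
	}
}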

View File

@ -68,7 +68,7 @@ func (d *DiskIO) SampleConfig() string {
// hasMeta reports whether s contains any special glob characters.
func hasMeta(s string) bool {
- return strings.IndexAny(s, "*?[") >= 0
+ return strings.ContainsAny(s, "*?[")
}
func (d *DiskIO) init() error {

View File

@ -24,7 +24,7 @@ func setupNullDisk(t *testing.T, s *DiskIO, devName string) func() error {
require.NoError(t, err)
if s.infoCache == nil {
- s.infoCache = make(map[string]diskInfoCache, 0)
+ s.infoCache = make(map[string]diskInfoCache)
}
ic, ok := s.infoCache[devName]
if !ok {

View File

@ -8,8 +8,6 @@ import (
"github.com/influxdata/telegraf/testutil"
"fmt"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -56,8 +54,7 @@ func (t *transportMock) CancelRequest(_ *http.Request) {
func checkIsMaster(es *Elasticsearch, server string, expected bool, t *testing.T) {
if es.serverInfo[server].isMaster() != expected {
- msg := fmt.Sprintf("IsMaster set incorrectly")
- assert.Fail(t, msg)
+ assert.Fail(t, "IsMaster set incorrectly")
}
}
@ -231,8 +228,7 @@ func TestGatherClusterStatsMaster(t *testing.T) {
IsMasterResultTokens := strings.Split(string(IsMasterResult), " ")
if masterID != IsMasterResultTokens[0] {
- msg := fmt.Sprintf("catmaster is incorrect")
- assert.Fail(t, msg)
+ assert.Fail(t, "catmaster is incorrect")
}
// now get node status, which determines whether we're master
@ -275,8 +271,7 @@ func TestGatherClusterStatsNonMaster(t *testing.T) {
IsNotMasterResultTokens := strings.Split(string(IsNotMasterResult), " ")
if masterID != IsNotMasterResultTokens[0] {
- msg := fmt.Sprintf("catmaster is incorrect")
- assert.Fail(t, msg)
+ assert.Fail(t, "catmaster is incorrect")
}
// now get node status, which determines whether we're master

View File

@ -101,7 +101,7 @@ func (f *Fibaro) getJSON(path string, dataStruct interface{}) error {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)",
err = fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)",
requestURL,
resp.StatusCode,
http.StatusText(resp.StatusCode),
@ -159,7 +159,7 @@ func (f *Fibaro) Gather(acc telegraf.Accumulator) error {
for _, device := range devices {
// skip device in some cases
if device.RoomID == 0 ||
- device.Enabled == false ||
+ !device.Enabled ||
device.Properties.Dead == "true" ||
device.Type == "com.fibaro.zwaveDevice" {
continue

View File

@ -66,10 +66,7 @@ func parse(data []byte) (datapointArray []pluginData, err error) {
return
}
- for _, point := range endpointData.Payload {
- datapointArray = append(datapointArray, point)
- }
+ datapointArray = append(datapointArray, endpointData.Payload...)
return
}

View File

@ -203,13 +203,13 @@ func (h *GrayLog) flatten(item map[string]interface{}, fields map[string]interfa
id = id + "_"
}
for k, i := range item {
- switch i.(type) {
+ switch i := i.(type) {
case int:
- fields[id+k] = i.(float64)
+ fields[id+k] = float64(i)
case float64:
- fields[id+k] = i.(float64)
+ fields[id+k] = i
case map[string]interface{}:
- h.flatten(i.(map[string]interface{}), fields, id+k)
+ h.flatten(i, fields, id+k)
default:
}
}
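Binding the value in the type switch (switch i := i.(type)) gives each case a variable already narrowed to that case's type, removing the repeated assertions S1034 flags. Standalone sketch:

package main

import "fmt"

func describe(v interface{}) string {
	switch v := v.(type) { // v is retyped per case
	case int:
		return fmt.Sprintf("int doubled: %d", v*2) // no v.(int) needed
	case string:
		return "string: " + v
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(describe(21))   // int doubled: 42
	fmt.Println(describe("hi")) // string: hi
}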

View File

@ -95,9 +95,7 @@ func (h *haproxy) Gather(acc telegraf.Accumulator) error {
if len(matches) == 0 {
endpoints = append(endpoints, socketPath)
} else {
- for _, match := range matches {
- endpoints = append(endpoints, match)
- }
+ endpoints = append(endpoints, matches...)
}
}

View File

@ -188,7 +188,7 @@ func (h *HTTPResponse) createHTTPClient() (*http.Client, error) {
Timeout: h.ResponseTimeout.Duration,
}
- if h.FollowRedirects == false {
+ if !h.FollowRedirects {
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
@ -247,10 +247,10 @@ func setError(err error, fields map[string]interface{}, tags map[string]string)
opErr, isNetErr := (urlErr.Err).(*net.OpError)
if isNetErr {
switch e := (opErr.Err).(type) {
- case (*net.DNSError):
+ case *net.DNSError:
setResult("dns_error", fields, tags)
return e
- case (*net.ParseError):
+ case *net.ParseError:
// Parse error has to do with parsing of IP addresses, so we
// group it with address errors
setResult("address_error", fields, tags)
@ -412,7 +412,7 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
var err error
h.compiledStringMatch, err = regexp.Compile(h.ResponseStringMatch)
if err != nil {
return fmt.Errorf("Failed to compile regular expression %s : %s", h.ResponseStringMatch, err)
return fmt.Errorf("failed to compile regular expression %s : %s", h.ResponseStringMatch, err)
}
}
@ -450,7 +450,7 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
}
if addr.Scheme != "http" && addr.Scheme != "https" {
acc.AddError(errors.New("Only http and https are supported"))
acc.AddError(errors.New("only http and https are supported"))
continue
}

View File

@ -130,7 +130,6 @@ func setUpTestMux() http.Handler {
})
mux.HandleFunc("/twosecondnap", func(w http.ResponseWriter, req *http.Request) {
time.Sleep(time.Second * 2)
- return
})
mux.HandleFunc("/nocontent", func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusNoContent)

View File

@ -337,7 +337,7 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc {
var partialErrorString string
switch parseErrorCount {
case 1:
- partialErrorString = fmt.Sprintf("%s", firstParseErrorStr)
+ partialErrorString = firstParseErrorStr
case 2:
partialErrorString = fmt.Sprintf("%s (and 1 other parse error)", firstParseErrorStr)
default:

View File

@ -245,7 +245,6 @@ func (r *IntelRDT) createArgsAndStartPQOS(ctx context.Context) {
args = append(args, processArg)
go r.readData(ctx, args, r.processesPIDsMap)
}
- return
}
func (r *IntelRDT) readData(ctx context.Context, args []string, processesPIDsAssociation map[string]string) {

View File

@ -54,7 +54,6 @@ func (p *Publisher) publishCores(measurement string) {
p.errChan <- err
}
p.addToAccumulatorCores(coresString, values, timestamp)
- return
}
func (p *Publisher) publishProcess(measurement processMeasurement) {
@ -63,7 +62,6 @@ func (p *Publisher) publishProcess(measurement processMeasurement) {
p.errChan <- err
}
p.addToAccumulatorProcesses(process, coresString, values, timestamp)
- return
}
func parseCoresMeasurement(measurements string) (string, []float64, time.Time, error) {

View File

@ -301,7 +301,7 @@ func (j *Jenkins) getJobDetail(jr jobRequest, acc telegraf.Accumulator) error {
}
// filter out not included job.
- if j.jobFilterInclude != nil && j.jobFilterInclude.Match(jr.hierarchyName()) == false {
+ if j.jobFilterInclude != nil && !j.jobFilterInclude.Match(jr.hierarchyName()) {
return nil
}

View File

@ -46,7 +46,7 @@ func (g *Gatherer) Gather(client *Client, acc telegraf.Accumulator) error {
// gatherResponses adds points to an accumulator from the ReadResponse objects
// returned by a Jolokia agent.
func (g *Gatherer) gatherResponses(responses []ReadResponse, tags map[string]string, acc telegraf.Accumulator) {
- series := make(map[string][]point, 0)
+ series := make(map[string][]point)
for _, metric := range g.metrics {
points, ok := series[metric.Name]
@ -55,11 +55,7 @@ func (g *Gatherer) gatherResponses(responses []ReadResponse, tags map[string]str
}
responsePoints, responseErrors := g.generatePoints(metric, responses)
- for _, responsePoint := range responsePoints {
- points = append(points, responsePoint)
- }
+ points = append(points, responsePoints...)
for _, err := range responseErrors {
acc.AddError(err)
}
@ -88,7 +84,7 @@ func (g *Gatherer) generatePoints(metric Metric, responses []ReadResponse) ([]po
case 404:
continue
default:
errors = append(errors, fmt.Errorf("Unexpected status in response from target %s (%q): %d",
errors = append(errors, fmt.Errorf("unexpected status in response from target %s (%q): %d",
response.RequestTarget, response.RequestMbean, response.Status))
continue
}

View File

@ -750,8 +750,7 @@ func TestJolokia2_ProxyTargets(t *testing.T) {
func TestFillFields(t *testing.T) {
complex := map[string]interface{}{"Value": []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
- var scalar interface{}
- scalar = []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ scalar := []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
results := map[string]interface{}{}
newPointBuilder(Metric{Name: "test", Mbean: "complex"}, []string{"this", "that"}, "/").fillFields("", complex, results)

View File

@ -169,25 +169,18 @@ func (m *OpenConfigTelemetry) extractData(r *telemetry.OpenConfigData, grpcServe
} else {
kv[xmlpath] = v.GetStrValue()
}
- break
case *telemetry.KeyValue_DoubleValue:
kv[xmlpath] = v.GetDoubleValue()
- break
case *telemetry.KeyValue_IntValue:
kv[xmlpath] = v.GetIntValue()
- break
case *telemetry.KeyValue_UintValue:
kv[xmlpath] = v.GetUintValue()
- break
case *telemetry.KeyValue_SintValue:
kv[xmlpath] = v.GetSintValue()
- break
case *telemetry.KeyValue_BoolValue:
kv[xmlpath] = v.GetBoolValue()
- break
case *telemetry.KeyValue_BytesValue:
kv[xmlpath] = v.GetBytesValue()
- break
}
// Insert other tags from message

View File

@ -219,9 +219,7 @@ func (k *Kapacitor) gatherURL(
// Strip out high-cardinality or duplicative tags
excludeTags := []string{"host", "cluster_id", "server_id"}
for _, key := range excludeTags {
- if _, ok := obj.Tags[key]; ok {
- delete(obj.Tags, key)
- }
+ delete(obj.Tags, key)
}
// Convert time-related string field to int

View File

@ -185,7 +185,6 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error {
go func(master *url.URL) {
acc.AddError(m.gatherMainMetrics(master, MASTER, acc))
wg.Done()
- return
}(master)
}
@ -194,7 +193,6 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error {
go func(slave *url.URL) {
acc.AddError(m.gatherMainMetrics(slave, SLAVE, acc))
wg.Done()
- return
}(slave)
}
@ -244,9 +242,7 @@ func metricsDiff(role Role, w []string) []string {
// masterBlocks serves as kind of metrics registry grouping them in sets
func getMetrics(role Role, group string) []string {
- var m map[string][]string
- m = make(map[string][]string)
+ m := make(map[string][]string)
if role == MASTER {
m["resources"] = []string{
@ -504,13 +500,13 @@ func (m *Mesos) filterMetrics(role Role, metrics *map[string]interface{}) {
case "allocator":
for m := range *metrics {
if strings.HasPrefix(m, "allocator/") {
- delete((*metrics), m)
+ delete(*metrics, m)
}
}
case "framework_offers":
for m := range *metrics {
if strings.HasPrefix(m, "master/frameworks/") || strings.HasPrefix(m, "frameworks/") {
- delete((*metrics), m)
+ delete(*metrics, m)
}
}
@ -518,7 +514,7 @@ func (m *Mesos) filterMetrics(role Role, metrics *map[string]interface{}) {
default:
for _, v := range getMetrics(role, k) {
if _, ok = (*metrics)[v]; ok {
- delete((*metrics), v)
+ delete(*metrics, v)
}
}
}

View File

@ -208,9 +208,7 @@ func (m *Modbus) InitRegister(fields []fieldContainer, name string) error {
addrs := []uint16{}
for _, field := range fields {
- for _, a := range field.Address {
- addrs = append(addrs, a)
- }
+ addrs = append(addrs, field.Address...)
}
addrs = removeDuplicates(addrs)

View File

@ -1220,9 +1220,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
// Get the entry with the highest lock
highestLocked := lockdiffs[len(lockdiffs)-1]
- var timeDiffMillis int64
- timeDiffMillis = newStat.UptimeMillis - oldStat.UptimeMillis
+ timeDiffMillis := newStat.UptimeMillis - oldStat.UptimeMillis
lockToReport := highestLocked.Writes
// if the highest locked namespace is not '.'

View File

@ -252,7 +252,6 @@ func (m *MQTTConsumer) onConnectionLost(_ mqtt.Client, err error) {
m.acc.AddError(fmt.Errorf("connection lost: %v", err))
m.Log.Debugf("Disconnected %v", m.Servers)
m.state = Disconnected
- return
}
func (m *MQTTConsumer) recvMessage(_ mqtt.Client, msg mqtt.Message) {

View File

@ -1858,11 +1858,11 @@ func (m *Mysql) parseValue(value sql.RawBytes) (interface{}, bool) {
// parseValue can be used to convert values such as "ON","OFF","Yes","No" to 0,1
func parseValue(value sql.RawBytes) (interface{}, bool) {
if bytes.EqualFold(value, []byte("YES")) || bytes.Compare(value, []byte("ON")) == 0 {
if bytes.EqualFold(value, []byte("YES")) || bytes.Equal(value, []byte("ON")) {
return 1, true
}
if bytes.EqualFold(value, []byte("NO")) || bytes.Compare(value, []byte("OFF")) == 0 {
if bytes.EqualFold(value, []byte("NO")) || bytes.Equal(value, []byte("OFF")) {
return 0, true
}
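bytes.Compare is a three-way comparison meant for ordering; when only equality matters, bytes.Equal states the intent directly (S1004). Sketch:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	v := []byte("ON")
	fmt.Println(bytes.Compare(v, []byte("ON")) == 0) // true, via ordering
	fmt.Println(bytes.Equal(v, []byte("ON")))        // true, stated directly
}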

View File

@ -183,11 +183,11 @@ var Mappings = []*Mapping{
}
func ParseValue(value sql.RawBytes) (float64, bool) {
if bytes.Compare(value, []byte("Yes")) == 0 || bytes.Compare(value, []byte("ON")) == 0 {
if bytes.Equal(value, []byte("Yes")) || bytes.Equal(value, []byte("ON")) {
return 1, true
}
if bytes.Compare(value, []byte("No")) == 0 || bytes.Compare(value, []byte("OFF")) == 0 {
if bytes.Equal(value, []byte("No")) || bytes.Equal(value, []byte("OFF")) {
return 0, true
}
n, err := strconv.ParseFloat(string(value), 64)

View File

@ -47,11 +47,11 @@ func ParseGTIDMode(value sql.RawBytes) (interface{}, error) {
}
func ParseValue(value sql.RawBytes) (interface{}, error) {
if bytes.EqualFold(value, []byte("YES")) || bytes.Compare(value, []byte("ON")) == 0 {
if bytes.EqualFold(value, []byte("YES")) || bytes.Equal(value, []byte("ON")) {
return 1, nil
}
if bytes.EqualFold(value, []byte("NO")) || bytes.Compare(value, []byte("OFF")) == 0 {
if bytes.EqualFold(value, []byte("NO")) || bytes.Equal(value, []byte("OFF")) {
return 0, nil
}

View File

@ -437,7 +437,7 @@ func TestSendRequest(t *testing.T) {
if test.wantErr {
return
}
- if bytes.Compare(resp, []byte("data")) != 0 {
+ if !bytes.Equal(resp, []byte("data")) {
t.Errorf(
"Response data mismatch. got=%q, want=%q", resp, "data")
}

View File

@ -190,7 +190,6 @@ func gatherSearchResult(sr *ldap.SearchResult, o *Openldap, acc telegraf.Accumul
}
}
acc.AddFields("openldap", fields, tags)
- return
}
// Convert a DN to metric name, eg cn=Read,cn=Waiters,cn=Monitor becomes waiters_read

View File

@ -61,7 +61,7 @@ func qScan(path string, acc telegraf.Accumulator) (int64, int64, int64, error) {
}
var age int64
if !oldest.IsZero() {
- age = int64(time.Now().Sub(oldest) / time.Second)
+ age = int64(time.Since(oldest) / time.Second)
} else if length != 0 {
// system doesn't support ctime
age = -1
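time.Since(t) is shorthand for time.Now().Sub(t) (S1012). Sketch:

package main

import (
	"fmt"
	"time"
)

func main() {
	oldest := time.Now().Add(-2 * time.Second)
	age := int64(time.Since(oldest) / time.Second)
	fmt.Println(age) // 2
}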

View File

@ -289,15 +289,15 @@ type fakeRow struct {
func (f fakeRow) Scan(dest ...interface{}) error {
if len(f.fields) != len(dest) {
return errors.New("Nada matchy buddy")
return errors.New("nada matchy buddy")
}
for i, d := range dest {
- switch d.(type) {
- case (*interface{}):
- *d.(*interface{}) = f.fields[i]
+ switch d := d.(type) {
+ case *interface{}:
+ *d = f.fields[i]
default:
return fmt.Errorf("Bad type %T", d)
return fmt.Errorf("bad type %T", d)
}
}
return nil

View File

@ -169,8 +169,8 @@ func (p *Prometheus) Init() error {
// Check if set as env var and is valid IP address
envVarNodeIP := os.Getenv("NODE_IP")
if envVarNodeIP == "" || net.ParseIP(envVarNodeIP) == nil {
- errorMessage := "The node_ip config and the environment variable NODE_IP are not set or invalid. Cannot get pod list for monitor_kubernetes_pods using node scrape scope"
- return errors.New(errorMessage)
+ return errors.New("the node_ip config and the environment variable NODE_IP are not set or invalid; " +
+ "cannot get pod list for monitor_kubernetes_pods using node scrape scope")
}
p.NodeIP = envVarNodeIP
@ -180,15 +180,15 @@ func (p *Prometheus) Init() error {
var err error
p.podLabelSelector, err = labels.Parse(p.KubernetesLabelSelector)
if err != nil {
return fmt.Errorf("Error parsing the specified label selector(s): %s", err.Error())
return fmt.Errorf("error parsing the specified label selector(s): %s", err.Error())
}
p.podFieldSelector, err = fields.ParseSelector(p.KubernetesFieldSelector)
if err != nil {
return fmt.Errorf("Error parsing the specified field selector(s): %s", err.Error())
return fmt.Errorf("error parsing the specified field selector(s): %s", err.Error())
}
isValid, invalidSelector := fieldSelectorIsSupported(p.podFieldSelector)
if !isValid {
return fmt.Errorf("The field selector %s is not supported for pods", invalidSelector)
return fmt.Errorf("the field selector %s is not supported for pods", invalidSelector)
}
p.Log.Infof("Using pod scrape scope at node level to get pod list using cAdvisor.")
@ -227,7 +227,7 @@ type URLAndAddress struct {
}
func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) {
- allURLs := make(map[string]URLAndAddress, 0)
+ allURLs := make(map[string]URLAndAddress)
for _, u := range p.URLs {
URL, err := url.Parse(u)
if err != nil {

View File

@ -261,23 +261,23 @@ func TestInitConfigErrors(t *testing.T) {
p.NodeIP = "10.240.0.0.0"
os.Setenv("NODE_IP", "10.000.0.0.0")
err := p.Init()
expectedMessage := "The node_ip config and the environment variable NODE_IP are not set or invalid. Cannot get pod list for monitor_kubernetes_pods using node scrape scope"
assert.Equal(t, expectedMessage, err.Error())
expectedMessage := "the node_ip config and the environment variable NODE_IP are not set or invalid. Cannot get pod list for monitor_kubernetes_pods using node scrape scope"
require.Error(t, err, expectedMessage)
os.Setenv("NODE_IP", "10.000.0.0")
p.KubernetesLabelSelector = "label0==label0, label0 in (=)"
err = p.Init()
expectedMessage = "Error parsing the specified label selector(s): unable to parse requirement: found '=', expected: ',', ')' or identifier"
assert.Equal(t, expectedMessage, err.Error())
expectedMessage = "error parsing the specified label selector(s): unable to parse requirement: found '=', expected: ',', ')' or identifier"
require.Error(t, err, expectedMessage)
p.KubernetesLabelSelector = "label0==label"
p.KubernetesFieldSelector = "field,"
err = p.Init()
expectedMessage = "Error parsing the specified field selector(s): invalid selector: 'field,'; can't understand 'field'"
assert.Equal(t, expectedMessage, err.Error())
expectedMessage = "error parsing the specified field selector(s): invalid selector: 'field,'; can't understand 'field'"
require.Error(t, err, expectedMessage)
p.KubernetesFieldSelector = "spec.containerNames=containerNames"
err = p.Init()
expectedMessage = "The field selector spec.containerNames is not supported for pods"
assert.Equal(t, expectedMessage, err.Error())
expectedMessage = "the field selector spec.containerNames is not supported for pods"
require.Error(t, err, expectedMessage)
}

View File

@ -234,7 +234,7 @@ func fillnode(parentNode Node, oidName string, ids []string) {
// ids = ["1", "3", "6", ...]
id, ids := ids[0], ids[1:]
node, ok := parentNode.subnodes[id]
- if ok == false {
+ if !ok {
node = Node{
id: id,
name: "",

View File

@ -151,11 +151,8 @@ func sendTrap(t *testing.T, goSNMP gosnmp.GoSNMP, trap gosnmp.SnmpTrap) {
}
func TestReceiveTrap(t *testing.T) {
- var now uint32
- now = 123123123
- var fakeTime time.Time
- fakeTime = time.Unix(456456456, 456)
+ now := uint32(123123123)
+ fakeTime := time.Unix(456456456, 456)
type entry struct {
oid string

View File

@ -162,8 +162,5 @@ func TestRunningStats_PercentileLimit(t *testing.T) {
}
func fuzzyEqual(a, b, epsilon float64) bool {
- if math.Abs(a-b) > epsilon {
- return false
- }
- return true
+ return math.Abs(a-b) <= epsilon
}

View File

@ -284,7 +284,7 @@ func (s *Sysstat) parse(acc telegraf.Accumulator, option string, ts time.Time) e
tags: make(map[string]string),
}
}
- g, _ := m[device]
+ g := m[device]
if len(g.tags) == 0 {
for k, v := range tags {
g.tags[k] = v
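Reading a map with the second return value discarded is identical to a plain index expression, which yields the element type's zero value for a missing key (S1005). Sketch:

package main

import "fmt"

func main() {
	m := map[string]int{"a": 1}
	g := m["b"]    // same as: g, _ := m["b"]
	fmt.Println(g) // 0 (zero value for a missing key)
}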

View File

@ -28,8 +28,7 @@ func TestTengineTags(t *testing.T) {
func TestTengineGeneratesMetrics(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- var rsp string
- rsp = tengineSampleResponse
+ rsp := tengineSampleResponse
fmt.Fprintln(w, rsp)
}))
defer ts.Close()

View File

@ -891,7 +891,7 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim
numQs := 0
for _, object := range res.objects {
- timeBuckets := make(map[int64]*types.PerfQuerySpec, 0)
+ timeBuckets := make(map[int64]*types.PerfQuerySpec)
for metricIdx, metric := range res.metrics {
// Determine time of last successful collection
metricName := e.getMetricNameForID(metric.CounterId)

View File

@ -27,7 +27,7 @@ func (t *TSCache) Purge() {
defer t.mux.Unlock()
n := 0
for k, v := range t.table {
- if time.Now().Sub(v) > t.ttl {
+ if time.Since(v) > t.ttl {
delete(t.table, k)
n++
}

View File

@ -109,7 +109,7 @@ func (wb *Webhooks) Start(acc telegraf.Accumulator) error {
wb.srv = &http.Server{Handler: r}
ln, err := net.Listen("tcp", fmt.Sprintf("%s", wb.ServiceAddress))
ln, err := net.Listen("tcp", wb.ServiceAddress)
if err != nil {
return fmt.Errorf("error starting server: %v", err)
}

View File

@ -154,7 +154,7 @@ func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certifica
}
certs = append(certs, cert)
}
- if rest == nil || len(rest) == 0 {
+ if len(rest) == 0 {
break
}
content = rest
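len of a nil slice is 0, so a nil guard in front of a length check is redundant (S1009). Sketch:

package main

import "fmt"

func main() {
	var rest []byte                            // nil slice
	fmt.Println(len(rest) == 0)                // true
	fmt.Println(rest == nil || len(rest) == 0) // same result, extra check
}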

View File

@ -2,7 +2,6 @@ package amqp
import (
"bytes"
"fmt"
"strings"
"time"
@ -29,7 +28,7 @@ func (a *externalAuth) Mechanism() string {
}
func (a *externalAuth) Response() string {
- return fmt.Sprintf("\000")
+ return "\000"
}
type AMQP struct {

View File

@ -68,10 +68,10 @@ func (f *statisticField) buildDatum() []*cloudwatch.MetricDatum {
if f.hasAllFields() {
// If we have all required fields, we build datum with StatisticValues
- min, _ := f.values[statisticTypeMin]
- max, _ := f.values[statisticTypeMax]
- sum, _ := f.values[statisticTypeSum]
- count, _ := f.values[statisticTypeCount]
+ min := f.values[statisticTypeMin]
+ max := f.values[statisticTypeMax]
+ sum := f.values[statisticTypeSum]
+ count := f.values[statisticTypeCount]
datum := &cloudwatch.MetricDatum{
MetricName: aws.String(strings.Join([]string{f.metricName, f.fieldName}, "_")),

View File

@ -63,7 +63,6 @@ func (c *Collector) Describe(_ chan<- *prometheus.Desc) {
// Sending no descriptor at all marks the Collector as "unchecked",
// i.e. no checks will be performed at registration time, and the
// Collector may yield any Metric it sees fit in its Collect method.
- return
}
func (c *Collector) Collect(ch chan<- prometheus.Metric) {

View File

@ -101,7 +101,7 @@ func (r *Riemann) Write(metrics []telegraf.Metric) error {
if r.client == nil {
if err := r.Connect(); err != nil {
return fmt.Errorf("Failed to (re)connect to Riemann: %s", err.Error())
return fmt.Errorf("failed to (re)connect to Riemann: %s", err.Error())
}
}
@ -109,14 +109,12 @@ func (r *Riemann) Write(metrics []telegraf.Metric) error {
var events []*raidman.Event
for _, m := range metrics {
evs := r.buildRiemannEvents(m)
- for _, ev := range evs {
- events = append(events, ev)
- }
+ events = append(events, evs...)
}
if err := r.client.SendMulti(events); err != nil {
r.Close()
return fmt.Errorf("Failed to send riemann message: %s", err)
return fmt.Errorf("failed to send riemann message: %s", err)
}
return nil
}
@ -145,14 +143,14 @@ func (r *Riemann) buildRiemannEvents(m telegraf.Metric) []*raidman.Event {
Tags: r.tags(m.Tags()),
}
- switch value.(type) {
+ switch value := value.(type) {
case string:
// only send string metrics if explicitly enabled, skip otherwise
if !r.StringAsState {
r.Log.Debugf("Riemann event states disabled, skipping metric value [%s]", value)
continue
}
- event.State = value.(string)
+ event.State = value
case int, int64, uint64, float32, float64:
event.Metric = value
default:

View File

@ -77,9 +77,7 @@ func (r *Riemann) Write(metrics []telegraf.Metric) error {
var events []*raidman.Event
for _, p := range metrics {
evs := buildEvents(p, r.Separator)
- for _, ev := range evs {
- events = append(events, ev)
- }
+ events = append(events, evs...)
}
var senderr = r.client.SendMulti(events)
@ -109,9 +107,9 @@ func buildEvents(p telegraf.Metric, s string) []*raidman.Event {
Service: serviceName(s, p.Name(), p.Tags(), fieldName),
}
- switch value.(type) {
+ switch value := value.(type) {
case string:
- event.State = value.(string)
+ event.State = value
default:
event.Metric = value
}

View File

@ -130,10 +130,8 @@ func (s *SignalFx) ConvertToSignalFx(metrics []telegraf.Metric) ([]*datapoint.Da
for _, metric := range metrics {
s.Log.Debugf("Processing the following measurement: %v", metric)
var timestamp = metric.Time()
- var metricType datapoint.MetricType
- metricType = GetMetricType(metric.Type())
+ metricType := GetMetricType(metric.Type())
for field, val := range metric.Fields() {
// Copy the metric tags because they are meant to be treated as
// immutable

View File

@ -178,7 +178,7 @@ func buildTags(tags []*telegraf.Tag) []string {
indexSource = index
}
indexSource++
- tagsString[indexSource] = fmt.Sprintf("source=telegraf")
+ tagsString[indexSource] = "source=telegraf"
sort.Strings(tagsString)
return tagsString
}

View File

@ -1,7 +1,6 @@
package warp10
import (
"fmt"
"testing"
"github.com/influxdata/telegraf/testutil"
@ -60,7 +59,7 @@ func TestHandleWarp10Error(t *testing.T) {
</body>
</html>
`,
Expected: fmt.Sprintf("Invalid token"),
Expected: "Invalid token",
},
{
Message: `
@ -75,7 +74,7 @@ func TestHandleWarp10Error(t *testing.T) {
</body>
</html>
`,
Expected: fmt.Sprintf("Token Expired"),
Expected: "Token Expired",
},
{
Message: `
@ -90,7 +89,7 @@ func TestHandleWarp10Error(t *testing.T) {
</body>
</html>
`,
Expected: fmt.Sprintf("Token revoked"),
Expected: "Token revoked",
},
{
Message: `

View File

@ -76,7 +76,7 @@ func NewCollectdParser(
func (p *CollectdParser) Parse(buf []byte) ([]telegraf.Metric, error) {
valueLists, err := network.Parse(buf, p.popts)
if err != nil {
return nil, fmt.Errorf("Collectd parser error: %s", err)
return nil, fmt.Errorf("collectd parser error: %s", err)
}
metrics := []telegraf.Metric{}
@ -105,7 +105,7 @@ func (p *CollectdParser) ParseLine(line string) (telegraf.Metric, error) {
}
if len(metrics) != 1 {
return nil, errors.New("Line contains multiple metrics")
return nil, errors.New("line contains multiple metrics")
}
return metrics[0], nil
@ -128,8 +128,7 @@ func UnmarshalValueList(vl *api.ValueList, multiValue string) []telegraf.Metric
switch multiValue {
case "split":
for i := range vl.Values {
- var name string
- name = fmt.Sprintf("%s_%s", vl.Identifier.Plugin, vl.DSName(i))
+ name := fmt.Sprintf("%s_%s", vl.Identifier.Plugin, vl.DSName(i))
tags := make(map[string]string)
fields := make(map[string]interface{})

View File

@ -157,7 +157,7 @@ func (p *GraphiteParser) ParseLine(line string) (telegraf.Metric, error) {
if len(tagValue) != 2 || len(tagValue[0]) == 0 || len(tagValue[1]) == 0 {
continue
}
- if strings.IndexAny(tagValue[0], "!^") != -1 {
+ if strings.ContainsAny(tagValue[0], "!^") {
continue
}
if strings.Index(tagValue[1], "~") == 0 {

View File

@ -134,14 +134,14 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected *xmlquery.Node, c
if err != nil {
return nil, fmt.Errorf("failed to query timestamp: %v", err)
}
- switch v.(type) {
+ switch v := v.(type) {
case string:
// Parse the string with the given format or assume the string to contain
// a unix timestamp in seconds if no format is given.
if len(config.TimestampFmt) < 1 || strings.HasPrefix(config.TimestampFmt, "unix") {
var nanoseconds int64
- t, err := strconv.ParseFloat(v.(string), 64)
+ t, err := strconv.ParseFloat(v, 64)
if err != nil {
return nil, fmt.Errorf("failed to parse unix timestamp: %v", err)
}
@ -158,14 +158,14 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected *xmlquery.Node, c
}
timestamp = time.Unix(0, nanoseconds)
} else {
- timestamp, err = time.Parse(config.TimestampFmt, v.(string))
+ timestamp, err = time.Parse(config.TimestampFmt, v)
if err != nil {
return nil, fmt.Errorf("failed to query timestamp format: %v", err)
}
}
case float64:
// Assume the value to contain a timestamp in seconds and fractions thereof.
- timestamp = time.Unix(0, int64(v.(float64)*1e9))
+ timestamp = time.Unix(0, int64(v*1e9))
case nil:
// No timestamp found. Just ignore the time and use "starttime"
default:
@ -181,13 +181,13 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected *xmlquery.Node, c
if err != nil {
return nil, fmt.Errorf("failed to query tag '%s': %v", name, err)
}
- switch v.(type) {
+ switch v := v.(type) {
case string:
- tags[name] = v.(string)
+ tags[name] = v
case bool:
- tags[name] = strconv.FormatBool(v.(bool))
+ tags[name] = strconv.FormatBool(v)
case float64:
- tags[name] = strconv.FormatFloat(v.(float64), 'G', -1, 64)
+ tags[name] = strconv.FormatFloat(v, 'G', -1, 64)
case nil:
continue
default:
@ -206,19 +206,19 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected *xmlquery.Node, c
if err != nil {
return nil, fmt.Errorf("failed to query field (int) '%s': %v", name, err)
}
- switch v.(type) {
+ switch v := v.(type) {
case string:
- fields[name], err = strconv.ParseInt(v.(string), 10, 54)
+ fields[name], err = strconv.ParseInt(v, 10, 54)
if err != nil {
return nil, fmt.Errorf("failed to parse field (int) '%s': %v", name, err)
}
case bool:
fields[name] = int64(0)
- if v.(bool) {
+ if v {
fields[name] = int64(1)
}
case float64:
- fields[name] = int64(v.(float64))
+ fields[name] = int64(v)
case nil:
continue
default:

View File

@ -40,7 +40,7 @@ func (d *Dedup) cleanup() {
return
}
d.FlushTime = time.Now()
- keep := make(map[uint64]telegraf.Metric, 0)
+ keep := make(map[uint64]telegraf.Metric)
for id, metric := range d.Cache {
if time.Since(metric.Time()) < d.DedupInterval.Duration {
keep[id] = metric

View File

@ -55,14 +55,14 @@ func (mapper *EnumMapper) Init() error {
if mapping.Field != "" {
fieldFilter, err := filter.NewIncludeExcludeFilter([]string{mapping.Field}, nil)
if err != nil {
return fmt.Errorf("Failed to create new field filter: %w", err)
return fmt.Errorf("failed to create new field filter: %w", err)
}
mapper.FieldFilters[mapping.Field] = fieldFilter
}
if mapping.Tag != "" {
tagFilter, err := filter.NewIncludeExcludeFilter([]string{mapping.Tag}, nil)
if err != nil {
return fmt.Errorf("Failed to create new tag filter: %s", err)
return fmt.Errorf("failed to create new tag filter: %s", err)
}
mapper.TagFilters[mapping.Tag] = tagFilter
}
@ -153,7 +153,7 @@ func adjustValue(in interface{}) interface{} {
}
func (mapping *Mapping) mapValue(original string) (interface{}, bool) {
- if mapped, found := mapping.ValueMappings[original]; found == true {
+ if mapped, found := mapping.ValueMappings[original]; found {
return mapped, true
}
if mapping.Default != nil {

View File

@ -110,10 +110,7 @@ func sortMetrics(metrics []MetricAggregation, field string, reverse bool) {
less := func(i, j int) bool {
iv := metrics[i].values[field]
jv := metrics[j].values[field]
- if iv < jv {
- return true
- }
- return false
+ return iv < jv
}
if reverse {
@ -276,7 +273,7 @@ func (t *TopK) push() []telegraf.Metric {
}
// The return value that will hold the returned metrics
- var ret = make([]telegraf.Metric, 0, 0)
+ var ret = make([]telegraf.Metric, 0)
// Get the top K metrics for each field and add them to the return value
addedKeys := make(map[string]bool)
for _, field := range t.Fields {

View File

@ -138,8 +138,7 @@ func runAndCompare(topk *TopK, metrics []telegraf.Metric, answer []telegraf.Metr
// Smoke tests
func TestTopkAggregatorsSmokeTests(t *testing.T) {
// Build the processor
- var topk TopK
- topk = *New()
+ topk := *New()
topk.Period = oneSecondDuration
topk.Fields = []string{"a"}
topk.GroupBy = []string{"tag_name"}
@ -160,8 +159,7 @@ func TestTopkAggregatorsSmokeTests(t *testing.T) {
// AddAggregateFields + Mean aggregator
func TestTopkMeanAddAggregateFields(t *testing.T) {
// Build the processor
- var topk TopK
- topk = *New()
+ topk := *New()
topk.Period = oneSecondDuration
topk.Aggregation = "mean"
topk.AddAggregateFields = []string{"a"}
@ -189,8 +187,7 @@ func TestTopkMeanAddAggregateFields(t *testing.T) {
// AddAggregateFields + Sum aggregator
func TestTopkSumAddAggregateFields(t *testing.T) {
// Build the processor
- var topk TopK
- topk = *New()
+ topk := *New()
topk.Period = oneSecondDuration
topk.Aggregation = "sum"
topk.AddAggregateFields = []string{"a"}
@ -218,8 +215,7 @@ func TestTopkSumAddAggregateFields(t *testing.T) {
// AddAggregateFields + Max aggregator
func TestTopkMaxAddAggregateFields(t *testing.T) {
// Build the processor
- var topk TopK
- topk = *New()
+ topk := *New()
topk.Period = oneSecondDuration
topk.Aggregation = "max"
topk.AddAggregateFields = []string{"a"}
@ -247,8 +243,7 @@ func TestTopkMaxAddAggregateFields(t *testing.T) {
// AddAggregateFields + Min aggregator
func TestTopkMinAddAggregateFields(t *testing.T) {
// Build the processor
- var topk TopK
- topk = *New()
+ topk := *New()
topk.Period = oneSecondDuration
topk.Aggregation = "min"
topk.AddAggregateFields = []string{"a"}
@ -276,8 +271,7 @@ func TestTopkMinAddAggregateFields(t *testing.T) {
// GroupBy
func TestTopkGroupby1(t *testing.T) {
// Build the processor
- var topk TopK
- topk = *New()
+ topk := *New()
topk.Period = oneSecondDuration
topk.K = 3
topk.Aggregation = "sum"
@ -301,8 +295,7 @@ func TestTopkGroupby1(t *testing.T) {
}
func TestTopkGroupby2(t *testing.T) {
// Build the processor
- var topk TopK
- topk = *New()
+ topk := *New()
topk.Period = oneSecondDuration
topk.K = 3
topk.Aggregation = "mean"
@ -330,8 +323,7 @@ func TestTopkGroupby2(t *testing.T) {
}
func TestTopkGroupby3(t *testing.T) {
// Build the processor
- var topk TopK
- topk = *New()
+ topk := *New()
topk.Period = oneSecondDuration
topk.K = 1
topk.Aggregation = "min"
@ -356,8 +348,7 @@ func TestTopkGroupby3(t *testing.T) {
// GroupBy + Fields
func TestTopkGroupbyFields1(t *testing.T) {
// Build the processor
- var topk TopK
- topk = *New()
+ topk := *New()
topk.Period = oneSecondDuration
topk.K = 4 // This settings generate less than 3 groups
topk.Aggregation = "mean"
@ -383,8 +374,7 @@ func TestTopkGroupbyFields1(t *testing.T) {
func TestTopkGroupbyFields2(t *testing.T) {
// Build the processor
- var topk TopK
- topk = *New()
+ topk := *New()
topk.Period = oneSecondDuration
topk.K = 2
topk.Aggregation = "sum"
@ -411,8 +401,7 @@ func TestTopkGroupbyFields2(t *testing.T) {
// GroupBy metric name
func TestTopkGroupbyMetricName1(t *testing.T) {
// Build the processor
- var topk TopK
- topk = *New()
+ topk := *New()
topk.Period = oneSecondDuration
topk.K = 1
topk.Aggregation = "sum"
@ -437,8 +426,7 @@ func TestTopkGroupbyMetricName1(t *testing.T) {
func TestTopkGroupbyMetricName2(t *testing.T) {
// Build the processor
- var topk TopK
- topk = *New()
+ topk := *New()
topk.Period = oneSecondDuration
topk.K = 2
topk.Aggregation = "sum"
@ -465,8 +453,7 @@ func TestTopkGroupbyMetricName2(t *testing.T) {
// BottomK
func TestTopkBottomk(t *testing.T) {
// Build the processor
- var topk TopK
- topk = *New()
+ topk := *New()
topk.Period = oneSecondDuration
topk.K = 3
topk.Aggregation = "sum"
@ -491,8 +478,7 @@ func TestTopkBottomk(t *testing.T) {
// GroupByKeyTag
func TestTopkGroupByKeyTag(t *testing.T) {
// Build the processor
- var topk TopK
- topk = *New()
+ topk := *New()
topk.Period = oneSecondDuration
topk.K = 3
topk.Aggregation = "sum"

View File

@ -256,14 +256,14 @@ func BenchmarkReader(b *testing.B) {
),
)
- metrics := make([]telegraf.Metric, 1000, 1000)
+ metrics := make([]telegraf.Metric, 1000)
for i := range metrics {
metrics[i] = m
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
- readbuf := make([]byte, 4096, 4096)
+ readbuf := make([]byte, 4096)
serializer := NewSerializer()
reader := NewReader(metrics, serializer)
for {

View File

@ -178,7 +178,6 @@ func (r *Registry) set(key uint64, s Stat) {
}
r.stats[key][s.FieldName()] = s
- return
}
func key(measurement string, tags map[string]string) uint64 {

View File

@ -235,11 +235,9 @@ func (a *Accumulator) AddError(err error) {
}
func (a *Accumulator) SetPrecision(_ time.Duration) {
- return
}
func (a *Accumulator) DisablePrecision() {
- return
}
func (a *Accumulator) Debug() bool {
@ -394,7 +392,6 @@ func (a *Accumulator) AssertDoesNotContainsTaggedFields(
assert.Fail(t, msg)
}
}
- return
}
func (a *Accumulator) AssertContainsFields(
t *testing.T,

View File

@ -99,16 +99,12 @@ func newMetricDiff(metric telegraf.Metric) *metricDiff {
m := &metricDiff{}
m.Measurement = metric.Name()
- for _, tag := range metric.TagList() {
- m.Tags = append(m.Tags, tag)
- }
+ m.Tags = append(m.Tags, metric.TagList()...)
sort.Slice(m.Tags, func(i, j int) bool {
return m.Tags[i].Key < m.Tags[j].Key
})
- for _, field := range metric.FieldList() {
- m.Fields = append(m.Fields, field)
- }
+ m.Fields = append(m.Fields, metric.FieldList()...)
sort.Slice(m.Fields, func(i, j int) bool {
return m.Fields[i].Key < m.Fields[j].Key
})