chore: Update go to v1.23.0 (#15766)

Co-authored-by: Pawel Zak <pawel.zak.pawel@gmail.com>
Sven Rebhan 2024-08-30 21:23:51 +02:00 committed by GitHub
parent 9e3e22094a
commit b2967cb515
52 changed files with 373 additions and 303 deletions


@ -8,7 +8,7 @@ executors:
working_directory: '/go/src/github.com/influxdata/telegraf'
resource_class: large
docker:
- image: 'quay.io/influxdb/telegraf-ci:1.22.6'
- image: 'quay.io/influxdb/telegraf-ci:1.23.0'
environment:
GOFLAGS: -p=4
mac:
@ -106,10 +106,13 @@ jobs:
- run: 'make check-deps'
- run:
name: "Install golangci-lint"
command: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.59.1
command: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.60.3
- run:
name: "golangci-lint/Linux"
command: GOGC=80 GOMEMLIMIT=6656MiB /go/bin/golangci-lint run --verbose
# There are only 4 vCPUs available for this executor, so use only 4 instead of the default number
# (the OS may report the number of CPUs on the host instead of the number of CPUs available to the guest).
command: GOGC=80 GOMEMLIMIT=6144MiB /go/bin/golangci-lint run --verbose --timeout=30m --concurrency 4
no_output_timeout: 30m
lint-macos:
executor: telegraf-ci
steps:
@ -117,10 +120,12 @@ jobs:
- check-changed-files-or-halt
- run:
name: "Install golangci-lint"
command: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.59.1
command: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.60.3
- run:
name: "golangci-lint/macOS"
command: GOGC=80 GOMEMLIMIT=6656MiB GOOS=darwin /go/bin/golangci-lint run --verbose --timeout=30m
# There are only 4 vCPUs available for this executor, so use only 4 instead of the default number
# (the OS may report the number of CPUs on the host instead of the number of CPUs available to the guest).
command: GOGC=80 GOMEMLIMIT=6144MiB GOOS=darwin /go/bin/golangci-lint run --verbose --timeout=30m --concurrency 4
no_output_timeout: 30m
lint-windows:
executor: telegraf-ci
@ -129,10 +134,12 @@ jobs:
- check-changed-files-or-halt
- run:
name: "Install golangci-lint"
command: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.59.1
command: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.60.3
- run:
name: "golangci-lint/Windows"
command: GOGC=80 GOMEMLIMIT=6656MiB GOOS=windows /go/bin/golangci-lint run --verbose --timeout=30m
# There are only 4 vCPUs available for this executor, so use only 4 instead of the default number
# (the OS may report the number of CPUs on the host instead of the number of CPUs available to the guest).
command: GOGC=80 GOMEMLIMIT=6144MiB GOOS=windows /go/bin/golangci-lint run --verbose --timeout=30m --concurrency 4
no_output_timeout: 30m
test-go-linux:
executor: telegraf-ci


@ -11,7 +11,7 @@ jobs:
steps:
- uses: actions/setup-go@v5
with:
go-version: '1.22.6'
go-version: '1.23.0'
- uses: actions/checkout@v4
with:
fetch-depth: 0


@ -11,7 +11,6 @@ linters:
- errcheck
- errname
- errorlint
- exportloopref
- gocheckcompilerdirectives
- gocritic
- goprintffuncname


@ -180,7 +180,7 @@ vet:
.PHONY: lint-install
lint-install:
@echo "Installing golangci-lint"
go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.59.1
go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.60.3
@echo "Installing markdownlint"
npm install -g markdownlint-cli
@ -257,8 +257,8 @@ plugins/parsers/influx/machine.go: plugins/parsers/influx/machine.go.rl
.PHONY: ci
ci:
docker build -t quay.io/influxdb/telegraf-ci:1.22.6 - < scripts/ci.docker
docker push quay.io/influxdb/telegraf-ci:1.22.6
docker build -t quay.io/influxdb/telegraf-ci:1.23.0 - < scripts/ci.docker
docker push quay.io/influxdb/telegraf-ci:1.23.0
.PHONY: install
install: $(buildbin)


@ -419,7 +419,7 @@ func (t *Telegraf) runAgent(ctx context.Context, reloadConfig bool) error {
log.Printf("I! Found %d secrets...", c.NumberSecrets)
msg := fmt.Sprintf("Insufficient lockable memory %dkb when %dkb is required.", available, required)
msg += " Please increase the limit for Telegraf in your Operating System!"
log.Printf("W! " + color.RedString(msg))
log.Print("W! " + color.RedString(msg))
}
}
ag := agent.NewAgent(c)
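The switch from log.Printf to log.Print here (and the later t.Fatalf string-concatenation fixes) looks like a response to the stricter printf analysis in the updated vet/golangci-lint: passing a non-constant format string to a ...f function is now reported, since any '%' in the dynamic part would be misread as a verb. A rough sketch of the pattern (hypothetical example, not from this commit):

package main

import "log"

func main() {
	path := "/metrics?q=100%"

	// Flagged: non-constant format string; the '%' in path would be
	// interpreted as an (invalid) formatting verb.
	// log.Printf("unexpected path: " + path)

	// Either pass the already-built string ...
	log.Print("unexpected path: " + path)
	// ... or keep the f-variant with an explicit verb.
	log.Printf("unexpected path: %s", path)
}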


@ -21,11 +21,11 @@ import (
func getLockedMemoryLimit() uint64 {
handle := windows.CurrentProcess()
var min, max uintptr
var low, high uintptr
var flag uint32
windows.GetProcessWorkingSetSizeEx(handle, &min, &max, &flag)
windows.GetProcessWorkingSetSizeEx(handle, &low, &high, &flag)
return uint64(max)
return uint64(high)
}
func (t *Telegraf) Run() error {


@ -1585,7 +1585,7 @@ func (c *Config) setLocalMissingTomlFieldTracker(counter map[string]int) {
// All other elements are subtables of their respective plugin and
// should just be hit once anyway. Therefore, we mark them with a
// high number to handle them correctly later.
pt := reflect.PtrTo(t)
pt := reflect.PointerTo(t)
root := pt.Implements(reflect.TypeOf((*telegraf.Input)(nil)).Elem())
root = root || pt.Implements(reflect.TypeOf((*telegraf.ServiceInput)(nil)).Elem())
root = root || pt.Implements(reflect.TypeOf((*telegraf.Output)(nil)).Elem())
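reflect.PtrTo is deprecated in favor of reflect.PointerTo (same behavior, the name added in Go 1.18), so this swap only silences the deprecation warning raised by staticcheck. A minimal illustration (hypothetical example):

package main

import (
	"fmt"
	"reflect"
)

type Input struct{}

func main() {
	t := reflect.TypeOf(Input{})
	// PointerTo returns the pointer type *Input, exactly like the
	// deprecated PtrTo did.
	fmt.Println(reflect.PointerTo(t)) // *main.Input
}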

go.mod

@ -1,6 +1,6 @@
module github.com/influxdata/telegraf
go 1.22.0
go 1.23.0
require (
cloud.google.com/go/bigquery v1.62.0


@ -85,10 +85,6 @@ func TestFindNestedTextFile(t *testing.T) {
}
func TestMatch_ErrPermission(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("Skipping Unix only test")
}
tests := []struct {
input string
expected []string
@ -106,6 +102,7 @@ func TestMatch_ErrPermission(t *testing.T) {
}
func TestWindowsSeparator(t *testing.T) {
//nolint:staticcheck // Silence linter for now as we plan to reenable tests for Windows later
if runtime.GOOS != "windows" {
t.Skip("Skipping Windows only test")
}


@ -123,8 +123,8 @@ func SnakeCase(in string) string {
// RandomSleep will sleep for a random amount of time up to max.
// If the shutdown channel is closed, it will return before it has finished sleeping.
func RandomSleep(max time.Duration, shutdown chan struct{}) {
sleepDuration := RandomDuration(max)
func RandomSleep(limit time.Duration, shutdown chan struct{}) {
sleepDuration := RandomDuration(limit)
if sleepDuration == 0 {
return
}
@ -140,12 +140,12 @@ func RandomSleep(max time.Duration, shutdown chan struct{}) {
}
// RandomDuration returns a random duration between 0 and max.
func RandomDuration(max time.Duration) time.Duration {
if max == 0 {
func RandomDuration(limit time.Duration) time.Duration {
if limit == 0 {
return 0
}
return time.Duration(rand.Int63n(max.Nanoseconds())) //nolint:gosec // G404: not security critical
return time.Duration(rand.Int63n(limit.Nanoseconds())) //nolint:gosec // G404: not security critical
}
// SleepContext sleeps until the context is closed or the duration is reached.
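The recurring renames of min/max parameters and locals throughout this commit (limit, low, high, vmin, vmax, ...) are presumably there to stop shadowing the min and max builtins introduced in Go 1.21, which stricter lint settings now flag. A compressed sketch of the issue (hypothetical example):

package main

import (
	"fmt"
	"time"
)

// Naming the parameter 'max' would shadow the builtin inside the body;
// after the rename the builtin remains usable.
func halfCapped(limit time.Duration) time.Duration {
	return max(time.Millisecond, limit/2) // builtin max, Go 1.21+
}

func main() {
	fmt.Println(halfCapped(10 * time.Second)) // 5s
}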


@ -234,7 +234,7 @@ func TestCompressWithGzipErrorPropagationCopy(t *testing.T) {
rc := CompressWithGzip(r)
n, err := io.Copy(io.Discard, rc)
require.Greater(t, n, int64(0))
require.Positive(t, n)
require.ErrorIs(t, err, expected)
require.NoError(t, rc.Close())
}
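The assertion rewrites scattered through the tests (require.Positive instead of require.Greater(..., 0), require.NotEmpty instead of require.NotZero(len(...)), require.ErrorContains instead of require.Error plus require.Contains) match what the testifylint checks in the newer golangci-lint prefer; the shorter forms also yield clearer failure messages. A small sketch of the preferred style (hypothetical test, assuming the usual testify dependency):

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestAssertionStyle(t *testing.T) {
	n := int64(3)
	items := []string{"a"}
	err := errors.New("bad secret or packet")

	require.Positive(t, n)                      // not require.Greater(t, n, int64(0))
	require.NotEmpty(t, items)                  // not require.NotZero(t, len(items))
	require.ErrorContains(t, err, "bad secret") // not require.Error + require.Contains
}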


@ -9,13 +9,13 @@ import (
func TestBackoffFunc(t *testing.T) {
b := 250 * time.Millisecond
max := 1100 * time.Millisecond
limit := 1100 * time.Millisecond
f := makeBackoffFunc(b, max)
f := makeBackoffFunc(b, limit)
require.Equal(t, b, f(0, 0))
require.Equal(t, b*2, f(1, 0))
require.Equal(t, b*4, f(2, 0))
require.Equal(t, max, f(3, 0)) // would be 2000 but that's greater than max
require.Equal(t, limit, f(3, 0)) // would be 2000 but that's greater than max
f = makeBackoffFunc(b, 0) // max = 0 means no max
require.Equal(t, b*8, f(3, 0)) // with no max, it's 2000


@ -161,7 +161,7 @@ func TestURLs(t *testing.T) {
require.NoError(t, err)
default:
w.WriteHeader(http.StatusNotFound)
t.Fatalf("unexpected path: " + r.URL.Path)
t.Fatalf("unexpected path: %s", r.URL.Path)
}
})


@ -165,6 +165,7 @@ func (b *Beanstalkd) gatherTubeStats(connection *textproto.Conn, tube string, ac
}
func runQuery(connection *textproto.Conn, cmd string, result interface{}) error {
//nolint:govet // Keep dynamic command as the passed string is constant
requestID, err := connection.Cmd(cmd)
if err != nil {
return err


@ -314,21 +314,18 @@ func (cms *CloudWatchMetricStreams) composeMetrics(data Data) {
// Rename Statistics to match the CloudWatch API if in API Compatability mode
if cms.APICompatability {
max, ok := fields["max"]
if ok {
fields["maximum"] = max
if v, ok := fields["max"]; ok {
fields["maximum"] = v
delete(fields, "max")
}
min, ok := fields["min"]
if ok {
fields["minimum"] = min
if v, ok := fields["min"]; ok {
fields["minimum"] = v
delete(fields, "min")
}
count, ok := fields["count"]
if ok {
fields["samplecount"] = count
if v, ok := fields["count"]; ok {
fields["samplecount"] = v
delete(fields, "count")
}
}


@ -78,9 +78,9 @@ func TestConfigsUsed(t *testing.T) {
dfltFiles = []string{cntFname, maxFname}
count := 1234321
max := 9999999
limit := 9999999
require.NoError(t, os.WriteFile(cntFile.Name(), []byte(strconv.Itoa(count)), 0640))
require.NoError(t, os.WriteFile(maxFile.Name(), []byte(strconv.Itoa(max)), 0640))
require.NoError(t, os.WriteFile(maxFile.Name(), []byte(strconv.Itoa(limit)), 0640))
c := &Conntrack{}
require.NoError(t, c.Init())
acc := &testutil.Accumulator{}
@ -94,7 +94,7 @@ func TestConfigsUsed(t *testing.T) {
acc.AssertContainsFields(t, inputName,
map[string]interface{}{
fix(cntFname): float64(count),
fix(maxFname): float64(max),
fix(maxFname): float64(limit),
})
}


@ -215,7 +215,7 @@ func (monitor *DirectoryMonitor) read(filePath string) {
// Handle a file read error. We don't halt execution but do document, log, and move the problematic file.
if err != nil {
monitor.Log.Errorf("Error while reading file: '" + filePath + "'. " + err.Error())
monitor.Log.Errorf("Error while reading file: %q: %v", filePath, err)
monitor.filesDropped.Incr(1)
monitor.filesDroppedDir.Incr(1)
if monitor.ErrorDirectory != "" {
@ -343,7 +343,7 @@ func (monitor *DirectoryMonitor) moveFile(srcPath string, dstBaseDir string) {
dstPath := filepath.Join(dstBaseDir, basePath)
err := os.MkdirAll(filepath.Dir(dstPath), 0750)
if err != nil {
monitor.Log.Errorf("Error creating directory hierarchy for " + srcPath + ". Error: " + err.Error())
monitor.Log.Errorf("Error creating directory hierarchy for %q: %v", srcPath, err)
}
inputFile, err := os.Open(srcPath)


@ -312,6 +312,7 @@ func TestTruncate(t *testing.T) {
}
func TestRemoveCarriageReturns(t *testing.T) {
//nolint:staticcheck // Silence linter for now as we plan to reenable tests for Windows later
if runtime.GOOS == "windows" {
// Test that all carriage returns are removed
for _, test := range crTests {


@ -198,6 +198,7 @@ func getTestdataDir() string {
var chunks []string
var testDirectory string
//nolint:staticcheck // Silence linter for now as we plan to reenable tests for Windows later
if runtime.GOOS == "windows" {
chunks = strings.Split(dir, "\\")
testDirectory = strings.Join(chunks[:], "\\") + "\\testdata"


@ -269,7 +269,7 @@ func startMultipleItemGCSServer(t *testing.T) *httptest.Server {
require.NoError(t, err)
} else {
w.WriteHeader(http.StatusNotFound)
t.Fatalf("unexpected path: " + r.URL.Path)
t.Fatalf("unexpected path: %s", r.URL.Path)
}
default:
@ -399,7 +399,7 @@ func serveJSONText(w http.ResponseWriter, jsonText []byte) {
func failPath(path string, t *testing.T, w http.ResponseWriter) {
w.WriteHeader(http.StatusNotFound)
t.Fatalf("unexpected path: " + path)
t.Fatalf("unexpected path: %s", path)
}
func parseJSONFromFile(t *testing.T, jsonFilePath string) map[string]interface{} {


@ -376,7 +376,7 @@ func (h *HTTPResponse) httpGather(cl client) (map[string]interface{}, map[string
// Set result in case of a body read error
func (h *HTTPResponse) setBodyReadError(errorMsg string, bodyBytes []byte, fields map[string]interface{}, tags map[string]string) {
h.Log.Debugf(errorMsg)
h.Log.Debug(errorMsg)
setResult("body_read_error", fields, tags)
fields["content_length"] = len(bodyBytes)
if h.ResponseStringMatch != "" {


@ -348,11 +348,11 @@ func readMaxFD(reader fileInfoProvider) (uint64, error) {
if err != nil {
return 0, fmt.Errorf("cannot open file %q: %w", fileMaxPath, err)
}
max, err := strconv.ParseUint(strings.Trim(string(buf), "\n "), 10, 64)
limit, err := strconv.ParseUint(strings.Trim(string(buf), "\n "), 10, 64)
if err != nil {
return 0, fmt.Errorf("cannot parse file content of %q: %w", fileMaxPath, err)
}
return max, nil
return limit, nil
}
func checkFiles(paths []string, fileInfo fileInfoProvider) error {


@ -145,43 +145,38 @@ func TestResolveEntities(t *testing.T) {
t.Run("uncore event found in core entity", func(t *testing.T) {
mQuals := []string{"config1=0x23h"}
mOptions, err := ia.NewOptions().SetAttrModifiers(mQuals).Build()
require.NoError(t, err)
eventName := "uncore event 1"
testCase := test{event: &eventWithQuals{name: eventName, qualifiers: mQuals},
options: mOptions,
perfEvent: &ia.PerfEvent{Name: eventName, Uncore: true}}
testCase := test{
event: &eventWithQuals{name: eventName, qualifiers: mQuals},
perfEvent: &ia.PerfEvent{Name: eventName, Uncore: true},
}
matcher := ia.NewNameMatcher(eventName)
mTransformer.On("Transform", nil, matcher).Return([]*ia.PerfEvent{testCase.perfEvent}, nil).Once()
mCoreEntity := &CoreEventEntity{parsedEvents: []*eventWithQuals{testCase.event}, allEvents: false}
err = mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil)
require.Error(t, err)
require.Contains(t, err.Error(), fmt.Sprintf("uncore event %q found in core entity", eventName))
err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil)
require.ErrorContains(t, err, fmt.Sprintf("uncore event %q found in core entity", eventName))
mTransformer.AssertExpectations(t)
})
t.Run("core event found in uncore entity", func(t *testing.T) {
mQuals := []string{"config1=0x23h"}
mOptions, err := ia.NewOptions().SetAttrModifiers(mQuals).Build()
require.NoError(t, err)
eventName := "core event 1"
testCase := test{event: &eventWithQuals{name: eventName, qualifiers: mQuals},
options: mOptions,
perfEvent: &ia.PerfEvent{Name: eventName, Uncore: false}}
testCase := test{
event: &eventWithQuals{name: eventName, qualifiers: mQuals},
perfEvent: &ia.PerfEvent{Name: eventName, Uncore: false},
}
matcher := ia.NewNameMatcher(eventName)
mTransformer.On("Transform", nil, matcher).Return([]*ia.PerfEvent{testCase.perfEvent}, nil).Once()
mUncoreEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{testCase.event}, allEvents: false}
err = mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity})
err := mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity})
require.Error(t, err)
require.Contains(t, err.Error(), fmt.Sprintf("core event %q found in uncore entity", eventName))
require.ErrorContains(t, err, fmt.Sprintf("core event %q found in uncore entity", eventName))
mTransformer.AssertExpectations(t)
})


@ -294,7 +294,6 @@ type packageMetric[T numeric] struct {
fetchFn func(packageID int) (T, error)
}
//nolint:revive // Confusing-naming caused by a generic type that implements this interface method.
func (m *packageMetric[T]) fields() (map[string]interface{}, error) {
val, err := m.fetchFn(m.packageID)
if err != nil {
@ -306,7 +305,6 @@ func (m *packageMetric[T]) fields() (map[string]interface{}, error) {
}, nil
}
//nolint:revive // Confusing-naming caused by a generic type that implements this interface method.
func (m *packageMetric[T]) tags() map[string]string {
return map[string]string{
"package_id": strconv.Itoa(m.packageID),


@ -537,10 +537,10 @@ func checkForDuplicates(values []int, valuesToCheck []int) bool {
return false
}
func makeRange(min, max int) []int {
a := make([]int, max-min+1)
func makeRange(low, high int) []int {
a := make([]int, high-low+1)
for i := range a {
a[i] = min + i
a[i] = low + i
}
return a
}


@ -650,7 +650,7 @@ func TestJolokia2_ClientAuthRequest(t *testing.T) {
require.EqualValuesf(t, "sally", username, "Expected to post with username %s, but was %s", "sally", username)
require.EqualValuesf(t, "seashore", password, "Expected to post with password %s, but was %s", "seashore", password)
require.NotZero(t, len(requests), "Expected to post a request body, but was empty.")
require.NotEmpty(t, requests, "Expected to post a request body, but was empty.")
request := requests[0]["mbean"]
require.EqualValuesf(t, "hello:foo=bar", request, "Expected to query mbean %s, but was %s", "hello:foo=bar", request)


@ -113,7 +113,7 @@ func TestJolokia2_ClientProxyAuthRequest(t *testing.T) {
require.NoError(t, plugin.Gather(&acc))
require.EqualValuesf(t, "sally", username, "Expected to post with username %s, but was %s", "sally", username)
require.EqualValuesf(t, "seashore", password, "Expected to post with password %s, but was %s", "seashore", password)
require.NotZero(t, len(requests), "Expected to post a request body, but was empty.")
require.NotEmpty(t, requests, "Expected to post a request body, but was empty.")
request := requests[0]
expected := "hello:foo=bar"


@ -629,7 +629,7 @@ func TestExponentialBackoff(t *testing.T) {
var err error
backoff := 10 * time.Millisecond
max := 3
limit := 3
// get an unused port by listening on next available port, then closing it
listener, err := net.Listen("tcp", "127.0.0.1:0")
@ -650,7 +650,7 @@ func TestExponentialBackoff(t *testing.T) {
ReadConfig: kafka.ReadConfig{
Config: kafka.Config{
MetadataRetryMax: max,
MetadataRetryMax: limit,
MetadataRetryBackoff: config.Duration(backoff),
MetadataRetryType: "exponential",
},
@ -670,7 +670,7 @@ func TestExponentialBackoff(t *testing.T) {
t.Logf("elapsed %d", elapsed)
var expectedRetryDuration time.Duration
for i := 0; i < max; i++ {
for i := 0; i < limit; i++ {
expectedRetryDuration += backoff * time.Duration(math.Pow(2, float64(i)))
}
t.Logf("expected > %d", expectedRetryDuration)


@ -1,10 +1,12 @@
package net
import (
"os"
"path/filepath"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/inputs/system"
"github.com/influxdata/telegraf/testutil"
"github.com/shirou/gopsutil/v3/net"
@ -13,9 +15,7 @@ import (
func TestNetIOStats(t *testing.T) {
var mps system.MockPS
var err error
defer mps.AssertExpectations(t)
var acc testutil.Accumulator
netio := net.IOCountersStat{
Name: "eth0",
@ -42,43 +42,47 @@ func TestNetIOStats(t *testing.T) {
}
mps.On("NetProto").Return(netprotos, nil)
require.NoError(t, os.Setenv("HOST_SYS", filepath.Join("testdata", "general", "sys")))
t.Setenv("HOST_SYS", filepath.Join("testdata", "general", "sys"))
err = (&NetIOStats{ps: &mps, skipChecks: true}).Gather(&acc)
require.NoError(t, err)
plugin := &NetIOStats{ps: &mps, skipChecks: true}
ntags := map[string]string{
"interface": "eth0",
}
var acc testutil.Accumulator
require.NoError(t, plugin.Gather(&acc))
fields1 := map[string]interface{}{
"bytes_sent": uint64(1123),
"bytes_recv": uint64(8734422),
"packets_sent": uint64(781),
"packets_recv": uint64(23456),
"err_in": uint64(832),
"err_out": uint64(8),
"drop_in": uint64(7),
"drop_out": uint64(1),
"speed": int64(100),
expected := []telegraf.Metric{
metric.New(
"net",
map[string]string{"interface": "eth0"},
map[string]interface{}{
"bytes_sent": uint64(1123),
"bytes_recv": uint64(8734422),
"packets_sent": uint64(781),
"packets_recv": uint64(23456),
"err_in": uint64(832),
"err_out": uint64(8),
"drop_in": uint64(7),
"drop_out": uint64(1),
"speed": int64(100),
},
time.Unix(0, 0),
telegraf.Counter,
),
metric.New(
"net",
map[string]string{"interface": "all"},
map[string]interface{}{
"udp_noports": int64(892592),
"udp_indatagrams": int64(4655),
},
time.Unix(0, 0),
),
}
acc.AssertContainsTaggedFields(t, "net", fields1, ntags)
fields2 := map[string]interface{}{
"udp_noports": int64(892592),
"udp_indatagrams": int64(4655),
}
ntags = map[string]string{
"interface": "all",
}
acc.AssertContainsTaggedFields(t, "net", fields2, ntags)
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
}
func TestNetIOStatsSpeedUnsupported(t *testing.T) {
var mps system.MockPS
var err error
defer mps.AssertExpectations(t)
var acc testutil.Accumulator
netio := net.IOCountersStat{
Name: "eth1",
@ -105,43 +109,47 @@ func TestNetIOStatsSpeedUnsupported(t *testing.T) {
}
mps.On("NetProto").Return(netprotos, nil)
require.NoError(t, os.Setenv("HOST_SYS", filepath.Join("testdata", "general", "sys")))
t.Setenv("HOST_SYS", filepath.Join("testdata", "general", "sys"))
err = (&NetIOStats{ps: &mps, skipChecks: true}).Gather(&acc)
require.NoError(t, err)
plugin := &NetIOStats{ps: &mps, skipChecks: true}
ntags := map[string]string{
"interface": "eth1",
}
var acc testutil.Accumulator
require.NoError(t, plugin.Gather(&acc))
fields1 := map[string]interface{}{
"bytes_sent": uint64(1123),
"bytes_recv": uint64(8734422),
"packets_sent": uint64(781),
"packets_recv": uint64(23456),
"err_in": uint64(832),
"err_out": uint64(8),
"drop_in": uint64(7),
"drop_out": uint64(1),
"speed": int64(-1),
expected := []telegraf.Metric{
metric.New(
"net",
map[string]string{"interface": "eth1"},
map[string]interface{}{
"bytes_sent": uint64(1123),
"bytes_recv": uint64(8734422),
"packets_sent": uint64(781),
"packets_recv": uint64(23456),
"err_in": uint64(832),
"err_out": uint64(8),
"drop_in": uint64(7),
"drop_out": uint64(1),
"speed": int64(-1),
},
time.Unix(0, 0),
telegraf.Counter,
),
metric.New(
"net",
map[string]string{"interface": "all"},
map[string]interface{}{
"udp_noports": int64(892592),
"udp_indatagrams": int64(4655),
},
time.Unix(0, 0),
),
}
acc.AssertContainsTaggedFields(t, "net", fields1, ntags)
fields2 := map[string]interface{}{
"udp_noports": int64(892592),
"udp_indatagrams": int64(4655),
}
ntags = map[string]string{
"interface": "all",
}
acc.AssertContainsTaggedFields(t, "net", fields2, ntags)
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
}
func TestNetIOStatsNoSpeedFile(t *testing.T) {
var mps system.MockPS
var err error
defer mps.AssertExpectations(t)
var acc testutil.Accumulator
netio := net.IOCountersStat{
Name: "eth2",
@ -168,34 +176,40 @@ func TestNetIOStatsNoSpeedFile(t *testing.T) {
}
mps.On("NetProto").Return(netprotos, nil)
require.NoError(t, os.Setenv("HOST_SYS", filepath.Join("testdata", "general", "sys")))
t.Setenv("HOST_SYS", filepath.Join("testdata", "general", "sys"))
err = (&NetIOStats{ps: &mps, skipChecks: true}).Gather(&acc)
require.NoError(t, err)
plugin := &NetIOStats{ps: &mps, skipChecks: true}
ntags := map[string]string{
"interface": "eth2",
}
var acc testutil.Accumulator
require.NoError(t, plugin.Gather(&acc))
fields1 := map[string]interface{}{
"bytes_sent": uint64(1123),
"bytes_recv": uint64(8734422),
"packets_sent": uint64(781),
"packets_recv": uint64(23456),
"err_in": uint64(832),
"err_out": uint64(8),
"drop_in": uint64(7),
"drop_out": uint64(1),
"speed": int64(-1),
expected := []telegraf.Metric{
metric.New(
"net",
map[string]string{"interface": "eth2"},
map[string]interface{}{
"bytes_sent": uint64(1123),
"bytes_recv": uint64(8734422),
"packets_sent": uint64(781),
"packets_recv": uint64(23456),
"err_in": uint64(832),
"err_out": uint64(8),
"drop_in": uint64(7),
"drop_out": uint64(1),
"speed": int64(-1),
},
time.Unix(0, 0),
telegraf.Counter,
),
metric.New(
"net",
map[string]string{"interface": "all"},
map[string]interface{}{
"udp_noports": int64(892592),
"udp_indatagrams": int64(4655),
},
time.Unix(0, 0),
),
}
acc.AssertContainsTaggedFields(t, "net", fields1, ntags)
fields2 := map[string]interface{}{
"udp_noports": int64(892592),
"udp_indatagrams": int64(4655),
}
ntags = map[string]string{
"interface": "all",
}
acc.AssertContainsTaggedFields(t, "net", fields2, ntags)
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
}


@ -209,6 +209,8 @@ func decodeTCPFlags(b []byte) (interface{}, error) {
results = append(results, ".")
}
}
//nolint:gosec // False positive (b[1] is not out of range - it is ensured by above checks)
return strings.Join(results, "") + mapTCPFlags(b[1]), nil
}


@ -151,11 +151,11 @@ func processPingOutput(out string) (statistics, error) {
if len(approxs) != 4 {
return stats, err
}
min, err := strconv.Atoi(approxs[1])
low, err := strconv.Atoi(approxs[1])
if err != nil {
return stats, err
}
max, err := strconv.Atoi(approxs[2])
high, err := strconv.Atoi(approxs[2])
if err != nil {
return stats, err
}
@ -165,8 +165,8 @@ func processPingOutput(out string) (statistics, error) {
}
stats.avg = avg
stats.min = min
stats.max = max
stats.min = low
stats.max = high
return stats, err
}


@ -9,6 +9,7 @@ import (
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
@ -25,9 +26,7 @@ func TestProcesses(t *testing.T) {
readProcFile: tester.testProcFile,
}
var acc testutil.Accumulator
err := processes.Gather(&acc)
require.NoError(t, err)
require.NoError(t, processes.Gather(&acc))
require.True(t, acc.HasInt64Field("processes", "running"))
require.True(t, acc.HasInt64Field("processes", "sleeping"))
@ -35,7 +34,7 @@ func TestProcesses(t *testing.T) {
require.True(t, acc.HasInt64Field("processes", "total"))
total, ok := acc.Get("processes")
require.True(t, ok)
require.Greater(t, total.Fields["total"].(int64), int64(0))
require.Positive(t, total.Fields["total"])
}
func TestFromPS(t *testing.T) {
@ -46,8 +45,7 @@ func TestFromPS(t *testing.T) {
}
var acc testutil.Accumulator
err := processes.Gather(&acc)
require.NoError(t, err)
require.NoError(t, processes.Gather(&acc))
fields := getEmptyFields()
fields["blocked"] = int64(3)
@ -68,8 +66,7 @@ func TestFromPSError(t *testing.T) {
}
var acc testutil.Accumulator
err := processes.Gather(&acc)
require.Error(t, err)
require.Error(t, processes.Gather(&acc))
}
func TestFromProcFiles(t *testing.T) {
@ -84,8 +81,7 @@ func TestFromProcFiles(t *testing.T) {
}
var acc testutil.Accumulator
err := processes.Gather(&acc)
require.NoError(t, err)
require.NoError(t, processes.Gather(&acc))
fields := getEmptyFields()
fields["sleeping"] = tester.calls
@ -107,8 +103,7 @@ func TestFromProcFilesWithSpaceInCmd(t *testing.T) {
}
var acc testutil.Accumulator
err := processes.Gather(&acc)
require.NoError(t, err)
require.NoError(t, processes.Gather(&acc))
fields := getEmptyFields()
fields["sleeping"] = tester.calls
@ -141,8 +136,7 @@ func TestParkedProcess(t *testing.T) {
}
var acc testutil.Accumulator
err := plugin.Gather(&acc)
require.NoError(t, err)
require.NoError(t, plugin.Gather(&acc))
expected := []telegraf.Metric{
testutil.MustMetric(
@ -164,13 +158,12 @@ func TestParkedProcess(t *testing.T) {
telegraf.Gauge,
),
}
actual := acc.GetTelegrafMetrics()
for _, a := range actual {
a.RemoveField("total")
a.RemoveField("total_threads")
options := []cmp.Option{
testutil.IgnoreTime(),
testutil.IgnoreFields("total", "total_threads"),
}
testutil.RequireMetricsEqual(t, expected, actual,
testutil.IgnoreTime())
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), options...)
}
func testExecPS(out string) func(_ bool) ([]byte, error) {


@ -995,12 +995,12 @@ func parseThermalThrottle(acc telegraf.Accumulator, fields map[string]interface{
}
func parseWearLeveling(acc telegraf.Accumulator, fields map[string]interface{}, tags map[string]string, str string) error {
var min, max, avg int64
var vmin, vmax, avg int64
if _, err := fmt.Sscanf(str, "min: %d, max: %d, avg: %d", &min, &max, &avg); err != nil {
if _, err := fmt.Sscanf(str, "min: %d, max: %d, avg: %d", &vmin, &vmax, &avg); err != nil {
return err
}
values := []int64{min, max, avg}
values := []int64{vmin, vmax, avg}
for i, submetricName := range []string{"Min", "Max", "Avg"} {
fields["raw_value"] = values[i]
tags["name"] = "Wear_Leveling_" + submetricName


@ -146,15 +146,5 @@ func (rs *RunningStats) Percentile(n float64) float64 {
}
i := float64(len(rs.perc)) * n / float64(100)
return rs.perc[clamp(i, 0, len(rs.perc)-1)]
}
func clamp(i float64, min int, max int) int {
if i < float64(min) {
return min
}
if i > float64(max) {
return max
}
return int(i)
return rs.perc[max(0, min(int(i), len(rs.perc)-1))]
}
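The hand-written clamp helper can go away because min and max are builtins since Go 1.21, so the bounds check collapses into a single expression. A rough standalone equivalent of the new indexing logic (hypothetical example):

package main

import "fmt"

// clampIndex keeps i within [0, n-1] using the Go 1.21+ builtins,
// mirroring the expression that replaced the old clamp helper.
func clampIndex(i float64, n int) int {
	return max(0, min(int(i), n-1))
}

func main() {
	fmt.Println(clampIndex(-2.5, 10), clampIndex(4.2, 10), clampIndex(99, 10)) // 0 4 9
}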


@ -6,11 +6,14 @@ import (
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/nwaples/tacplus"
"github.com/stretchr/testify/require"
"github.com/testcontainers/testcontainers-go/wait"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
@ -49,7 +52,7 @@ func (t testRequestHandler) HandleAuthorRequest(_ context.Context, a *tacplus.Au
return &tacplus.AuthorResponse{Status: tacplus.AuthorStatusFail}
}
func (t testRequestHandler) HandleAcctRequest(_ context.Context, _ *tacplus.AcctRequest, _ *tacplus.ServerSession) *tacplus.AcctReply {
func (testRequestHandler) HandleAcctRequest(context.Context, *tacplus.AcctRequest, *tacplus.ServerSession) *tacplus.AcctReply {
return &tacplus.AcctReply{Status: tacplus.AcctStatusSuccess}
}
@ -105,7 +108,6 @@ func TestTacacsInit(t *testing.T) {
}
err := plugin.Init()
if tt.errContains == "" {
require.NoError(t, err)
if tt.requestAddr == "" {
@ -145,8 +147,9 @@ func TestTacacsLocal(t *testing.T) {
}
go func() {
err = srv.Serve(l)
require.NoError(t, err, "local srv.Serve failed to start serving on "+srvLocal)
if err := srv.Serve(l); err != nil {
t.Logf("local srv.Serve failed to start serving on %s", srvLocal)
}
}()
var testset = []struct {
@ -190,16 +193,6 @@ func TestTacacsLocal(t *testing.T) {
requestAddr: "127.0.0.1",
errContains: "error on new tacacs authentication start request to " + srvLocal + " : bad secret or packet",
},
{
name: "unreachable",
testingTimeout: config.Duration(time.Nanosecond * 1000),
serverToTest: []string{"unreachable.test:49"},
usedUsername: config.NewSecret([]byte(`testusername`)),
usedPassword: config.NewSecret([]byte(`testpassword`)),
usedSecret: config.NewSecret([]byte(`testsecret`)),
requestAddr: "127.0.0.1",
errContains: "error on new tacacs authentication start request to unreachable.test:49 : dial tcp",
},
}
for _, tt := range testset {
@ -221,23 +214,97 @@ func TestTacacsLocal(t *testing.T) {
if tt.errContains == "" {
require.Empty(t, acc.Errors)
require.True(t, acc.HasMeasurement("tacacs"))
require.True(t, acc.HasTag("tacacs", "source"))
require.Equal(t, srvLocal, acc.TagValue("tacacs", "source"))
require.True(t, acc.HasInt64Field("tacacs", "responsetime_ms"))
require.True(t, acc.HasStringField("tacacs", "response_status"))
require.Equal(t, tt.reqRespStatus, acc.Metrics[0].Fields["response_status"])
expected := []telegraf.Metric{
metric.New(
"tacacs",
map[string]string{"source": srvLocal},
map[string]interface{}{
"responsetime_ms": int64(0),
"response_status": tt.reqRespStatus,
},
time.Unix(0, 0),
),
}
options := []cmp.Option{
testutil.IgnoreTime(),
testutil.IgnoreFields("responsetime_ms"),
}
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), options...)
} else {
require.Len(t, acc.Errors, 1)
require.ErrorContains(t, acc.FirstError(), tt.errContains)
require.False(t, acc.HasTag("tacacs", "source"))
require.False(t, acc.HasInt64Field("tacacs", "responsetime_ms"))
require.False(t, acc.HasStringField("tacacs", "response_status"))
require.Empty(t, acc.GetTelegrafMetrics())
}
})
}
}
func TestTacacsLocalTimeout(t *testing.T) {
testHandler := tacplus.ServerConnHandler{
Handler: &testRequestHandler{
"testusername": {
password: "testpassword",
},
},
ConnConfig: tacplus.ConnConfig{
Secret: []byte(`testsecret`),
Mux: true,
},
}
l, err := net.Listen("tcp", "localhost:0")
require.NoError(t, err, "local net listen failed to start listening")
srvLocal := l.Addr().String()
srv := &tacplus.Server{
ServeConn: func(nc net.Conn) {
testHandler.Serve(nc)
},
}
go func() {
if err := srv.Serve(l); err != nil {
t.Logf("local srv.Serve failed to start serving on %s", srvLocal)
}
}()
// Initialize the plugin
plugin := &Tacacs{
ResponseTimeout: config.Duration(time.Microsecond),
Servers: []string{"unreachable.test:49"},
Username: config.NewSecret([]byte(`testusername`)),
Password: config.NewSecret([]byte(`testpassword`)),
Secret: config.NewSecret([]byte(`testsecret`)),
RequestAddr: "127.0.0.1",
Log: &testutil.Logger{},
}
require.NoError(t, plugin.Init())
// Try to connect, this will return a metric with the timeout...
var acc testutil.Accumulator
require.NoError(t, plugin.Gather(&acc))
expected := []telegraf.Metric{
metric.New(
"tacacs",
map[string]string{"source": "unreachable.test:49"},
map[string]interface{}{
"response_status": string("Timeout"),
"responsetime_ms": int64(0),
},
time.Unix(0, 0),
),
}
options := []cmp.Option{
testutil.IgnoreTime(),
testutil.IgnoreFields("responsetime_ms"),
}
require.Empty(t, acc.Errors)
testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), options...)
}
func TestTacacsIntegration(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
@ -250,8 +317,7 @@ func TestTacacsIntegration(t *testing.T) {
wait.ForLog("Starting server..."),
),
}
err := container.Start()
require.NoError(t, err, "failed to start container")
require.NoError(t, container.Start(), "failed to start container")
defer container.Terminate()
port := container.Ports["49"]
@ -302,14 +368,22 @@ func TestTacacsIntegration(t *testing.T) {
require.NoError(t, plugin.Gather(&acc))
require.NoError(t, acc.FirstError())
require.True(t, acc.HasMeasurement("tacacs"))
require.True(t, acc.HasStringField("tacacs", "response_status"))
require.True(t, acc.HasInt64Field("tacacs", "responsetime_ms"))
require.True(t, acc.HasTag("tacacs", "source"))
require.Equal(t, tt.reqRespStatus, acc.Metrics[0].Fields["response_status"])
require.Equal(t, container.Address+":"+port, acc.TagValue("tacacs", "source"))
expected := []telegraf.Metric{
metric.New(
"tacacs",
map[string]string{"source": container.Address + ":" + port},
map[string]interface{}{
"responsetime_ms": int64(0),
"response_status": tt.reqRespStatus,
},
time.Unix(0, 0),
),
}
options := []cmp.Option{
testutil.IgnoreTime(),
testutil.IgnoreFields("responsetime_ms"),
}
testutil.RequireMetricsStructureEqual(t, expected, acc.GetTelegrafMetrics(), options...)
})
}
}


@ -38,7 +38,8 @@ func TestInvalidMetricFormat(t *testing.T) {
}
func TestNameCollisions(t *testing.T) {
require.NoError(t, os.Setenv("HOST_SYS", filepath.Join("testcases", "with_name", "sys")))
t.Setenv("HOST_SYS", filepath.Join("testcases", "with_name", "sys"))
plugin := &Temperature{Log: &testutil.Logger{}}
require.NoError(t, plugin.Init())
@ -87,7 +88,7 @@ func TestCases(t *testing.T) {
}
// Prepare the environment
require.NoError(t, os.Setenv("HOST_SYS", filepath.Join(testcasePath, "sys")))
t.Setenv("HOST_SYS", filepath.Join(testcasePath, "sys"))
// Configure the plugin
cfg := config.NewConfig()
@ -120,7 +121,7 @@ func TestCases(t *testing.T) {
}
// Prepare the environment
require.NoError(t, os.Setenv("HOST_SYS", filepath.Join(testcasePath, "sys")))
t.Setenv("HOST_SYS", filepath.Join(testcasePath, "sys"))
// Configure the plugin
cfg := config.NewConfig()
@ -221,7 +222,7 @@ func TestRegression(t *testing.T) {
}
// Prepare the environment
require.NoError(t, os.Setenv("HOST_SYS", filepath.Join(testcasePath, "sys")))
t.Setenv("HOST_SYS", filepath.Join(testcasePath, "sys"))
// Use the v1.28.x code to compare against
var acc testutil.Accumulator
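Replacing require.NoError(t, os.Setenv(...)) with t.Setenv is the form the updated test linters push for: t.Setenv restores the previous value automatically when the test finishes and marks the test as incompatible with t.Parallel. A minimal sketch (hypothetical test):

package example

import (
	"os"
	"path/filepath"
	"testing"
)

func TestHostSysOverride(t *testing.T) {
	// Set HOST_SYS for this test only; the old value is restored via
	// an implicit t.Cleanup when the test returns.
	t.Setenv("HOST_SYS", filepath.Join("testdata", "sys"))

	if os.Getenv("HOST_SYS") == "" {
		t.Fatal("expected HOST_SYS to be set")
	}
}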


@ -580,7 +580,7 @@ func TestWinPerfCountersCollectRawIntegration(t *testing.T) {
require.True(t, ok, "Expected presence of %s field", expectedCounter)
valInt64, ok := val.(int64)
require.Truef(t, ok, "Expected int64, got %T", val)
require.Greaterf(t, valInt64, int64(0), "Expected > 0, got %d, for %#v", valInt64, metric)
require.Positivef(t, valInt64, "Value not positive for metric %#v", metric)
}
// Test *Array way
@ -604,6 +604,6 @@ func TestWinPerfCountersCollectRawIntegration(t *testing.T) {
require.True(t, ok, "Expected presence of %s field", expectedCounter)
valInt64, ok := val.(int64)
require.Truef(t, ok, "Expected int64, got %T", val)
require.Greaterf(t, valInt64, int64(0), "Expected > 0, got %d, for %#v", valInt64, metric)
require.Positivef(t, valInt64, "Value not positive for metric %#v", metric)
}
}


@ -134,21 +134,21 @@ func NewBinaryAnnotations(annotations []BinaryAnnotation, endpoint Endpoint) []t
}
func minMax(span Span) (time.Time, time.Time) {
min := now().UTC()
max := time.Time{}.UTC()
low := now().UTC()
high := time.Time{}.UTC()
for _, annotation := range span.Annotations() {
ts := annotation.Timestamp()
if !ts.IsZero() && ts.Before(min) {
min = ts
if !ts.IsZero() && ts.Before(low) {
low = ts
}
if !ts.IsZero() && ts.After(max) {
max = ts
if !ts.IsZero() && ts.After(high) {
high = ts
}
}
if max.IsZero() {
max = min
if high.IsZero() {
high = low
}
return min, max
return low, high
}
func guessTimestamp(span Span) time.Time {
@ -157,8 +157,8 @@ func guessTimestamp(span Span) time.Time {
return ts
}
min, _ := minMax(span)
return min
low, _ := minMax(span)
return low
}
func convertDuration(span Span) time.Duration {
@ -166,8 +166,8 @@ func convertDuration(span Span) time.Duration {
if duration != 0 {
return duration
}
min, max := minMax(span)
return max.Sub(min)
low, high := minMax(span)
return high.Sub(low)
}
func parentID(span Span) (string, error) {


@ -380,19 +380,19 @@ func translate(m telegraf.Metric, prefix string) (*azureMonitorMetric, error) {
dimensionValues = append(dimensionValues, tag.Value)
}
min, err := getFloatField(m, "min")
vmin, err := getFloatField(m, "min")
if err != nil {
return nil, err
}
max, err := getFloatField(m, "max")
vmax, err := getFloatField(m, "max")
if err != nil {
return nil, err
}
sum, err := getFloatField(m, "sum")
vsum, err := getFloatField(m, "sum")
if err != nil {
return nil, err
}
count, err := getIntField(m, "count")
vcount, err := getIntField(m, "count")
if err != nil {
return nil, err
}
@ -417,10 +417,10 @@ func translate(m telegraf.Metric, prefix string) (*azureMonitorMetric, error) {
Series: []*azureMonitorSeries{
{
DimensionValues: dimensionValues,
Min: min,
Max: max,
Sum: sum,
Count: count,
Min: vmin,
Max: vmax,
Sum: vsum,
Count: vcount,
},
},
},


@ -69,20 +69,20 @@ func (f *statisticField) buildDatum() []types.MetricDatum {
if f.hasAllFields() {
// If we have all required fields, we build datum with StatisticValues
min := f.values[statisticTypeMin]
max := f.values[statisticTypeMax]
sum := f.values[statisticTypeSum]
count := f.values[statisticTypeCount]
vmin := f.values[statisticTypeMin]
vmax := f.values[statisticTypeMax]
vsum := f.values[statisticTypeSum]
vcount := f.values[statisticTypeCount]
datum := types.MetricDatum{
MetricName: aws.String(strings.Join([]string{f.metricName, f.fieldName}, "_")),
Dimensions: BuildDimensions(f.tags),
Timestamp: aws.Time(f.timestamp),
StatisticValues: &types.StatisticSet{
Minimum: aws.Float64(min),
Maximum: aws.Float64(max),
Sum: aws.Float64(sum),
SampleCount: aws.Float64(count),
Minimum: aws.Float64(vmin),
Maximum: aws.Float64(vmax),
Sum: aws.Float64(vsum),
SampleCount: aws.Float64(vcount),
},
StorageResolution: aws.Int32(int32(f.storageResolution)),
}


@ -15,7 +15,7 @@ import (
)
func TestSqlite(t *testing.T) {
dbfile := filepath.Join(os.TempDir(), "db")
dbfile := filepath.Join(t.TempDir(), "db")
defer os.Remove(dbfile)
// Use the plugin to write to the database
address :=
@ -27,9 +27,8 @@ func TestSqlite(t *testing.T) {
p.DataSourceName = address
require.NoError(t, p.Connect())
require.NoError(t, p.Write(
testMetrics,
))
defer p.Close()
require.NoError(t, p.Write(testMetrics))
//read directly from the database
db, err := gosql.Open("sqlite", address)
@ -41,7 +40,7 @@ func TestSqlite(t *testing.T) {
require.Equal(t, 1, countMetricOne)
var countMetricTwo int
require.NoError(t, db.QueryRow("select count(*) from metric_one").Scan(&countMetricTwo))
require.NoError(t, db.QueryRow("select count(*) from metric_two").Scan(&countMetricTwo))
require.Equal(t, 1, countMetricTwo)
var rows *gosql.Rows


@ -278,30 +278,30 @@ const (
var ErrBadThresholdFormat = errors.New("bad threshold format")
// Handles all cases from https://nagios-plugins.org/doc/guidelines.html#THRESHOLDFORMAT
func parseThreshold(threshold string) (min float64, max float64, err error) {
func parseThreshold(threshold string) (vmin, vmax float64, err error) {
thresh := strings.Split(threshold, ":")
switch len(thresh) {
case 1:
max, err = strconv.ParseFloat(thresh[0], 64)
vmax, err = strconv.ParseFloat(thresh[0], 64)
if err != nil {
return 0, 0, ErrBadThresholdFormat
}
return 0, max, nil
return 0, vmax, nil
case 2:
if thresh[0] == "~" {
min = MinFloat64
vmin = MinFloat64
} else {
min, err = strconv.ParseFloat(thresh[0], 64)
vmin, err = strconv.ParseFloat(thresh[0], 64)
if err != nil {
min = 0
vmin = 0
}
}
if thresh[1] == "" {
max = MaxFloat64
vmax = MaxFloat64
} else {
max, err = strconv.ParseFloat(thresh[1], 64)
vmax, err = strconv.ParseFloat(thresh[1], 64)
if err != nil {
return 0, 0, ErrBadThresholdFormat
}
@ -310,7 +310,7 @@ func parseThreshold(threshold string) (min float64, max float64, err error) {
return 0, 0, ErrBadThresholdFormat
}
return min, max, err
return vmin, vmax, err
}
func init() {


@ -518,9 +518,9 @@ func TestParseThreshold(t *testing.T) {
}
for i := range tests {
min, max, err := parseThreshold(tests[i].input)
require.InDelta(t, tests[i].eMin, min, testutil.DefaultDelta)
require.InDelta(t, tests[i].eMax, max, testutil.DefaultDelta)
vmin, vmax, err := parseThreshold(tests[i].input)
require.InDelta(t, tests[i].eMin, vmin, testutil.DefaultDelta)
require.InDelta(t, tests[i].eMax, vmax, testutil.DefaultDelta)
require.Equal(t, tests[i].eErr, err)
}
}


@ -19,7 +19,7 @@ tftp 69/udp`
func TestReadServicesFile(t *testing.T) {
readServicesFile()
require.NotZero(t, len(services))
require.NotEmpty(t, services)
}
func TestFakeServices(t *testing.T) {


@ -3755,7 +3755,7 @@ func parseMetricsFrom(t *testing.T, lines []string, header string) (metrics []te
parser := &influx.Parser{}
require.NoError(t, parser.Init())
require.NotZero(t, len(lines), "Expected some lines to parse from .star file, found none")
require.NotEmpty(t, lines, "Expected some lines to parse from .star file, found none")
startIdx := -1
endIdx := len(lines)
for i := range lines {
@ -3782,7 +3782,7 @@ func parseMetricsFrom(t *testing.T, lines []string, header string) (metrics []te
// parses error message out of line protocol following a header
func parseErrorMessage(t *testing.T, lines []string, header string) string {
require.NotZero(t, len(lines), "Expected some lines to parse from .star file, found none")
require.NotEmpty(t, lines, "Expected some lines to parse from .star file, found none")
startIdx := -1
for i := range lines {
if strings.TrimLeft(lines[i], "# ") == header {


@ -299,7 +299,7 @@ func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metr
case "min":
return func(ms []telegraf.Metric, fields []string) map[string]float64 {
min := func(agg map[string]float64, val float64, field string) {
vmin := func(agg map[string]float64, val float64, field string) {
// If this field has not been set, set it to the maximum float64
_, ok := agg[field]
if !ok {
@ -311,12 +311,12 @@ func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metr
agg[field] = val
}
}
return aggregator(ms, fields, min)
return aggregator(ms, fields, vmin)
}, nil
case "max":
return func(ms []telegraf.Metric, fields []string) map[string]float64 {
max := func(agg map[string]float64, val float64, field string) {
vmax := func(agg map[string]float64, val float64, field string) {
// If this field has not been set, set it to the minimum float64
_, ok := agg[field]
if !ok {
@ -328,7 +328,7 @@ func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metr
agg[field] = val
}
}
return aggregator(ms, fields, max)
return aggregator(ms, fields, vmax)
}, nil
case "mean":


@ -1,4 +1,4 @@
FROM golang:1.22.6
FROM golang:1.23.0
RUN chmod -R 755 "$GOPATH"


@ -2,10 +2,10 @@
set -eux
GO_VERSION="1.22.6"
GO_VERSION="1.23.0"
GO_ARCH="linux-amd64"
# from https://golang.org/dl
GO_VERSION_SHA="999805bed7d9039ec3da1a53bfbcafc13e367da52aa823cb60b68ba22d44c616"
GO_VERSION_SHA="905a297f19ead44780548933e0ff1a1b86e8327bb459e92f9c0012569f76f5e3"
# Download Go and verify Go tarball
setup_go () {


@ -3,9 +3,9 @@
set -eux
ARCH=$(uname -m)
GO_VERSION="1.22.6"
GO_VERSION_SHA_arm64="ebac39fd44fc22feed1bb519af431c84c55776e39b30f4fd62930da9c0cfd1e3" # from https://golang.org/dl
GO_VERSION_SHA_amd64="9c3c0124b01b5365f73a1489649f78f971ecf84844ad9ca58fde133096ddb61b" # from https://golang.org/dl
GO_VERSION="1.23.0"
GO_VERSION_SHA_arm64="b770812aef17d7b2ea406588e2b97689e9557aac7e646fe76218b216e2c51406" # from https://golang.org/dl
GO_VERSION_SHA_amd64="ffd070acf59f054e8691b838f274d540572db0bd09654af851e4e76ab88403dc" # from https://golang.org/dl
if [ "$ARCH" = 'arm64' ]; then
GO_ARCH="darwin-arm64"


@ -2,7 +2,7 @@
set -eux
GO_VERSION="1.22.6"
GO_VERSION="1.23.0"
setup_go () {
choco upgrade golang --allow-downgrade --version=${GO_VERSION}


@ -255,6 +255,7 @@ func extractPluginInfo(file *ast.File, pluginType string, declarations map[strin
return registeredNames, nil
}
//nolint:staticcheck // Use deprecated ast.Package for now
func extractPackageDeclarations(pkg *ast.Package) map[string]string {
declarations := make(map[string]string)
@ -286,6 +287,7 @@ func extractPackageDeclarations(pkg *ast.Package) map[string]string {
return declarations
}
//nolint:staticcheck // Use deprecated ast.Package for now
func extractRegisteredNames(pkg *ast.Package, pluginType string) []string {
var registeredNames []string


@ -32,9 +32,8 @@ func firstSection(t *T, root ast.Node) error {
n = n.NextSibling()
t.assertKind(ast.KindParagraph, n)
length := len(n.Text(t.markdown))
min := 30
if length < min {
t.assertNodef(n, "short first section. Please add short description of plugin. length %d, minimum %d", length, min)
if length < 30 {
t.assertNodef(n, "short first section. Please add short description of plugin. length %d, minimum 30", length)
}
return nil