chore: Update go to v1.23.0 (#15766)

Co-authored-by: Pawel Zak <pawel.zak.pawel@gmail.com>
Sven Rebhan 2024-08-30 21:23:51 +02:00 committed by GitHub
parent 9e3e22094a
commit b2967cb515
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
52 changed files with 373 additions and 303 deletions

View File

@@ -8,7 +8,7 @@ executors:
 working_directory: '/go/src/github.com/influxdata/telegraf'
 resource_class: large
 docker:
-- image: 'quay.io/influxdb/telegraf-ci:1.22.6'
+- image: 'quay.io/influxdb/telegraf-ci:1.23.0'
 environment:
 GOFLAGS: -p=4
 mac:
@@ -106,10 +106,13 @@ jobs:
 - run: 'make check-deps'
 - run:
 name: "Install golangci-lint"
-command: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.59.1
+command: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.60.3
 - run:
 name: "golangci-lint/Linux"
-command: GOGC=80 GOMEMLIMIT=6656MiB /go/bin/golangci-lint run --verbose
+# There are only 4 vCPUs available for this executor, so use only 4 instead of the default number
+# (the OS may report the number of CPUs on the host instead of the number of CPUs available to the guest).
+command: GOGC=80 GOMEMLIMIT=6144MiB /go/bin/golangci-lint run --verbose --timeout=30m --concurrency 4
+no_output_timeout: 30m
 lint-macos:
 executor: telegraf-ci
 steps:
@@ -117,10 +120,12 @@ jobs:
 - check-changed-files-or-halt
 - run:
 name: "Install golangci-lint"
-command: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.59.1
+command: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.60.3
 - run:
 name: "golangci-lint/macOS"
-command: GOGC=80 GOMEMLIMIT=6656MiB GOOS=darwin /go/bin/golangci-lint run --verbose --timeout=30m
+# There are only 4 vCPUs available for this executor, so use only 4 instead of the default number
+# (the OS may report the number of CPUs on the host instead of the number of CPUs available to the guest).
+command: GOGC=80 GOMEMLIMIT=6144MiB GOOS=darwin /go/bin/golangci-lint run --verbose --timeout=30m --concurrency 4
 no_output_timeout: 30m
 lint-windows:
 executor: telegraf-ci
@@ -129,10 +134,12 @@ jobs:
 - check-changed-files-or-halt
 - run:
 name: "Install golangci-lint"
-command: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.59.1
+command: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.60.3
 - run:
 name: "golangci-lint/Windows"
-command: GOGC=80 GOMEMLIMIT=6656MiB GOOS=windows /go/bin/golangci-lint run --verbose --timeout=30m
+# There are only 4 vCPUs available for this executor, so use only 4 instead of the default number
+# (the OS may report the number of CPUs on the host instead of the number of CPUs available to the guest).
+command: GOGC=80 GOMEMLIMIT=6144MiB GOOS=windows /go/bin/golangci-lint run --verbose --timeout=30m --concurrency 4
 no_output_timeout: 30m
 test-go-linux:
 executor: telegraf-ci
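Note on the new lint step: GOGC, GOMEMLIMIT and --concurrency are front-ends for Go runtime knobs, tuned here for a 4-vCPU executor. A minimal sketch of the roughly equivalent in-process calls (the 80 / 6144 MiB / 4 values simply mirror the CI settings above and are not part of the commit):

package main

import (
	"runtime"
	"runtime/debug"
)

func main() {
	debug.SetGCPercent(80)           // what GOGC=80 sets for the process
	debug.SetMemoryLimit(6144 << 20) // what GOMEMLIMIT=6144MiB sets, in bytes
	runtime.GOMAXPROCS(4)            // caps parallelism, similar in spirit to --concurrency 4
}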

View File

@@ -11,7 +11,7 @@ jobs:
 steps:
 - uses: actions/setup-go@v5
 with:
-go-version: '1.22.6'
+go-version: '1.23.0'
 - uses: actions/checkout@v4
 with:
 fetch-depth: 0

View File

@@ -11,7 +11,6 @@ linters:
 - errcheck
 - errname
 - errorlint
-- exportloopref
 - gocheckcompilerdirectives
 - gocritic
 - goprintffuncname
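Dropping exportloopref follows from the Go 1.22 loop-variable change (each iteration now declares a fresh variable), which is presumably why golangci-lint v1.60 retires the linter. An illustrative sketch, not taken from the repository:

package main

import "fmt"

// pointers is safe under Go 1.22+ semantics: &v refers to a per-iteration
// variable, the very pattern exportloopref used to police.
func pointers(values []int) []*int {
	out := make([]*int, 0, len(values))
	for _, v := range values {
		out = append(out, &v)
	}
	return out
}

func main() {
	for _, p := range pointers([]int{1, 2, 3}) {
		fmt.Println(*p) // prints 1 2 3, not 3 3 3
	}
}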

View File

@@ -180,7 +180,7 @@ vet:
 .PHONY: lint-install
 lint-install:
 @echo "Installing golangci-lint"
-go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.59.1
+go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.60.3
 @echo "Installing markdownlint"
 npm install -g markdownlint-cli
@@ -257,8 +257,8 @@ plugins/parsers/influx/machine.go: plugins/parsers/influx/machine.go.rl
 .PHONY: ci
 ci:
-docker build -t quay.io/influxdb/telegraf-ci:1.22.6 - < scripts/ci.docker
-docker push quay.io/influxdb/telegraf-ci:1.22.6
+docker build -t quay.io/influxdb/telegraf-ci:1.23.0 - < scripts/ci.docker
+docker push quay.io/influxdb/telegraf-ci:1.23.0
 .PHONY: install
 install: $(buildbin)

View File

@@ -419,7 +419,7 @@ func (t *Telegraf) runAgent(ctx context.Context, reloadConfig bool) error {
 log.Printf("I! Found %d secrets...", c.NumberSecrets)
 msg := fmt.Sprintf("Insufficient lockable memory %dkb when %dkb is required.", available, required)
 msg += " Please increase the limit for Telegraf in your Operating System!"
-log.Printf("W! " + color.RedString(msg))
+log.Print("W! " + color.RedString(msg))
 }
 }
 ag := agent.NewAgent(c)

View File

@@ -21,11 +21,11 @@ import (
 func getLockedMemoryLimit() uint64 {
 handle := windows.CurrentProcess()
-var min, max uintptr
+var low, high uintptr
 var flag uint32
-windows.GetProcessWorkingSetSizeEx(handle, &min, &max, &flag)
-return uint64(max)
+windows.GetProcessWorkingSetSizeEx(handle, &low, &high, &flag)
+return uint64(high)
 }
 func (t *Telegraf) Run() error {
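The min/max renames here (and the low/high, limit, vmin/vmax variants in later files) all look like the same mechanical fix: since Go 1.21 min and max are predeclared functions, and the updated linters flag identifiers that shadow them. A hedged sketch of the pattern, not code from this repository:

package main

import "fmt"

func main() {
	// var min, max uintptr // would shadow the predeclared min/max and get flagged
	var low, high uintptr = 4096, 65536 // renamed, as in getLockedMemoryLimit above

	// With nothing shadowed, the Go 1.21 builtins stay available.
	fmt.Println(min(low, high), max(low, high)) // 4096 65536
}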

View File

@@ -1585,7 +1585,7 @@ func (c *Config) setLocalMissingTomlFieldTracker(counter map[string]int) {
 // All other elements are subtables of their respective plugin and
 // should just be hit once anyway. Therefore, we mark them with a
 // high number to handle them correctly later.
-pt := reflect.PtrTo(t)
+pt := reflect.PointerTo(t)
 root := pt.Implements(reflect.TypeOf((*telegraf.Input)(nil)).Elem())
 root = root || pt.Implements(reflect.TypeOf((*telegraf.ServiceInput)(nil)).Elem())
 root = root || pt.Implements(reflect.TypeOf((*telegraf.Output)(nil)).Elem())
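reflect.PtrTo is deprecated in favor of reflect.PointerTo; the two return the same pointer type. A minimal standalone example (the plugin type is made up for illustration):

package main

import (
	"fmt"
	"reflect"
)

type plugin struct{}

func main() {
	t := reflect.TypeOf(plugin{})
	pt := reflect.PointerTo(t) // replaces the deprecated reflect.PtrTo(t)
	fmt.Println(pt)            // *main.plugin
}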

go.mod
View File

@@ -1,6 +1,6 @@
 module github.com/influxdata/telegraf
-go 1.22.0
+go 1.23.0
 require (
 cloud.google.com/go/bigquery v1.62.0

View File

@@ -85,10 +85,6 @@ func TestFindNestedTextFile(t *testing.T) {
 }
 func TestMatch_ErrPermission(t *testing.T) {
-if runtime.GOOS == "windows" {
-t.Skip("Skipping Unix only test")
-}
 tests := []struct {
 input string
 expected []string
@@ -106,6 +102,7 @@ func TestMatch_ErrPermission(t *testing.T) {
 }
 func TestWindowsSeparator(t *testing.T) {
+//nolint:staticcheck // Silence linter for now as we plan to reenable tests for Windows later
 if runtime.GOOS != "windows" {
 t.Skip("Skipping Windows only test")
 }

View File

@@ -123,8 +123,8 @@ func SnakeCase(in string) string {
 // RandomSleep will sleep for a random amount of time up to max.
 // If the shutdown channel is closed, it will return before it has finished sleeping.
-func RandomSleep(max time.Duration, shutdown chan struct{}) {
-sleepDuration := RandomDuration(max)
+func RandomSleep(limit time.Duration, shutdown chan struct{}) {
+sleepDuration := RandomDuration(limit)
 if sleepDuration == 0 {
 return
 }
@@ -140,12 +140,12 @@ func RandomSleep(max time.Duration, shutdown chan struct{}) {
 }
 // RandomDuration returns a random duration between 0 and max.
-func RandomDuration(max time.Duration) time.Duration {
-if max == 0 {
+func RandomDuration(limit time.Duration) time.Duration {
+if limit == 0 {
 return 0
 }
-return time.Duration(rand.Int63n(max.Nanoseconds())) //nolint:gosec // G404: not security critical
+return time.Duration(rand.Int63n(limit.Nanoseconds())) //nolint:gosec // G404: not security critical
 }
 // SleepContext sleeps until the context is closed or the duration is reached.

View File

@@ -234,7 +234,7 @@ func TestCompressWithGzipErrorPropagationCopy(t *testing.T) {
 rc := CompressWithGzip(r)
 n, err := io.Copy(io.Discard, rc)
-require.Greater(t, n, int64(0))
+require.Positive(t, n)
 require.ErrorIs(t, err, expected)
 require.NoError(t, rc.Close())
 }
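This and the NotZero(len(...)) changes in later files follow the assertion style the updated testifylint rules prefer: use the dedicated helper instead of spelling the comparison out. A hypothetical test showing both forms used in this commit:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestPreferredAssertions(t *testing.T) {
	n := int64(42)
	items := []string{"a"}

	require.Positive(t, n)     // instead of require.Greater(t, n, int64(0))
	require.NotEmpty(t, items) // instead of require.NotZero(t, len(items))
}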

View File

@@ -9,13 +9,13 @@ import (
 func TestBackoffFunc(t *testing.T) {
 b := 250 * time.Millisecond
-max := 1100 * time.Millisecond
-f := makeBackoffFunc(b, max)
+limit := 1100 * time.Millisecond
+f := makeBackoffFunc(b, limit)
 require.Equal(t, b, f(0, 0))
 require.Equal(t, b*2, f(1, 0))
 require.Equal(t, b*4, f(2, 0))
-require.Equal(t, max, f(3, 0)) // would be 2000 but that's greater than max
+require.Equal(t, limit, f(3, 0)) // would be 2000 but that's greater than max
 f = makeBackoffFunc(b, 0) // max = 0 means no max
 require.Equal(t, b*8, f(3, 0)) // with no max, it's 2000

View File

@@ -161,7 +161,7 @@ func TestURLs(t *testing.T) {
 require.NoError(t, err)
 default:
 w.WriteHeader(http.StatusNotFound)
-t.Fatalf("unexpected path: " + r.URL.Path)
+t.Fatalf("unexpected path: %s", r.URL.Path)
 }
 })
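The Fatalf changes in this and the GCS test below avoid building the format string by concatenation: printf-style checks flag non-constant format strings, and any % characters in the path would be misread as formatting verbs. A hypothetical example of the safe form:

package example

import "testing"

func TestUnexpectedPath(t *testing.T) {
	path := "/expected"
	if path != "/expected" {
		t.Fatalf("unexpected path: %s", path) // not: t.Fatalf("unexpected path: " + path)
	}
}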

View File

@@ -165,6 +165,7 @@ func (b *Beanstalkd) gatherTubeStats(connection *textproto.Conn, tube string, ac
 }
 func runQuery(connection *textproto.Conn, cmd string, result interface{}) error {
+//nolint:govet // Keep dynamic command as the passed string is constant
 requestID, err := connection.Cmd(cmd)
 if err != nil {
 return err

View File

@@ -314,21 +314,18 @@ func (cms *CloudWatchMetricStreams) composeMetrics(data Data) {
 // Rename Statistics to match the CloudWatch API if in API Compatability mode
 if cms.APICompatability {
-max, ok := fields["max"]
-if ok {
-fields["maximum"] = max
+if v, ok := fields["max"]; ok {
+fields["maximum"] = v
 delete(fields, "max")
 }
-min, ok := fields["min"]
-if ok {
-fields["minimum"] = min
+if v, ok := fields["min"]; ok {
+fields["minimum"] = v
 delete(fields, "min")
 }
-count, ok := fields["count"]
-if ok {
-fields["samplecount"] = count
+if v, ok := fields["count"]; ok {
+fields["samplecount"] = v
 delete(fields, "count")
 }
 }

View File

@@ -78,9 +78,9 @@ func TestConfigsUsed(t *testing.T) {
 dfltFiles = []string{cntFname, maxFname}
 count := 1234321
-max := 9999999
+limit := 9999999
 require.NoError(t, os.WriteFile(cntFile.Name(), []byte(strconv.Itoa(count)), 0640))
-require.NoError(t, os.WriteFile(maxFile.Name(), []byte(strconv.Itoa(max)), 0640))
+require.NoError(t, os.WriteFile(maxFile.Name(), []byte(strconv.Itoa(limit)), 0640))
 c := &Conntrack{}
 require.NoError(t, c.Init())
 acc := &testutil.Accumulator{}
@@ -94,7 +94,7 @@ func TestConfigsUsed(t *testing.T) {
 acc.AssertContainsFields(t, inputName,
 map[string]interface{}{
 fix(cntFname): float64(count),
-fix(maxFname): float64(max),
+fix(maxFname): float64(limit),
 })
 }

View File

@@ -215,7 +215,7 @@ func (monitor *DirectoryMonitor) read(filePath string) {
 // Handle a file read error. We don't halt execution but do document, log, and move the problematic file.
 if err != nil {
-monitor.Log.Errorf("Error while reading file: '" + filePath + "'. " + err.Error())
+monitor.Log.Errorf("Error while reading file: %q: %v", filePath, err)
 monitor.filesDropped.Incr(1)
 monitor.filesDroppedDir.Incr(1)
 if monitor.ErrorDirectory != "" {
@@ -343,7 +343,7 @@ func (monitor *DirectoryMonitor) moveFile(srcPath string, dstBaseDir string) {
 dstPath := filepath.Join(dstBaseDir, basePath)
 err := os.MkdirAll(filepath.Dir(dstPath), 0750)
 if err != nil {
-monitor.Log.Errorf("Error creating directory hierarchy for " + srcPath + ". Error: " + err.Error())
+monitor.Log.Errorf("Error creating directory hierarchy for %q: %v", srcPath, err)
 }
 inputFile, err := os.Open(srcPath)

View File

@@ -312,6 +312,7 @@ func TestTruncate(t *testing.T) {
 }
 func TestRemoveCarriageReturns(t *testing.T) {
+//nolint:staticcheck // Silence linter for now as we plan to reenable tests for Windows later
 if runtime.GOOS == "windows" {
 // Test that all carriage returns are removed
 for _, test := range crTests {

View File

@@ -198,6 +198,7 @@ func getTestdataDir() string {
 var chunks []string
 var testDirectory string
+//nolint:staticcheck // Silence linter for now as we plan to reenable tests for Windows later
 if runtime.GOOS == "windows" {
 chunks = strings.Split(dir, "\\")
 testDirectory = strings.Join(chunks[:], "\\") + "\\testdata"

View File

@@ -269,7 +269,7 @@ func startMultipleItemGCSServer(t *testing.T) *httptest.Server {
 require.NoError(t, err)
 } else {
 w.WriteHeader(http.StatusNotFound)
-t.Fatalf("unexpected path: " + r.URL.Path)
+t.Fatalf("unexpected path: %s", r.URL.Path)
 }
 default:
@@ -399,7 +399,7 @@ func serveJSONText(w http.ResponseWriter, jsonText []byte) {
 func failPath(path string, t *testing.T, w http.ResponseWriter) {
 w.WriteHeader(http.StatusNotFound)
-t.Fatalf("unexpected path: " + path)
+t.Fatalf("unexpected path: %s", path)
 }
 func parseJSONFromFile(t *testing.T, jsonFilePath string) map[string]interface{} {

View File

@@ -376,7 +376,7 @@ func (h *HTTPResponse) httpGather(cl client) (map[string]interface{}, map[string
 // Set result in case of a body read error
 func (h *HTTPResponse) setBodyReadError(errorMsg string, bodyBytes []byte, fields map[string]interface{}, tags map[string]string) {
-h.Log.Debugf(errorMsg)
+h.Log.Debug(errorMsg)
 setResult("body_read_error", fields, tags)
 fields["content_length"] = len(bodyBytes)
 if h.ResponseStringMatch != "" {

View File

@@ -348,11 +348,11 @@ func readMaxFD(reader fileInfoProvider) (uint64, error) {
 if err != nil {
 return 0, fmt.Errorf("cannot open file %q: %w", fileMaxPath, err)
 }
-max, err := strconv.ParseUint(strings.Trim(string(buf), "\n "), 10, 64)
+limit, err := strconv.ParseUint(strings.Trim(string(buf), "\n "), 10, 64)
 if err != nil {
 return 0, fmt.Errorf("cannot parse file content of %q: %w", fileMaxPath, err)
 }
-return max, nil
+return limit, nil
 }
 func checkFiles(paths []string, fileInfo fileInfoProvider) error {

View File

@@ -145,43 +145,38 @@ func TestResolveEntities(t *testing.T) {
 t.Run("uncore event found in core entity", func(t *testing.T) {
 mQuals := []string{"config1=0x23h"}
-mOptions, err := ia.NewOptions().SetAttrModifiers(mQuals).Build()
-require.NoError(t, err)
 eventName := "uncore event 1"
-testCase := test{event: &eventWithQuals{name: eventName, qualifiers: mQuals},
-options: mOptions,
-perfEvent: &ia.PerfEvent{Name: eventName, Uncore: true}}
+testCase := test{
+event: &eventWithQuals{name: eventName, qualifiers: mQuals},
+perfEvent: &ia.PerfEvent{Name: eventName, Uncore: true},
+}
 matcher := ia.NewNameMatcher(eventName)
 mTransformer.On("Transform", nil, matcher).Return([]*ia.PerfEvent{testCase.perfEvent}, nil).Once()
 mCoreEntity := &CoreEventEntity{parsedEvents: []*eventWithQuals{testCase.event}, allEvents: false}
-err = mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil)
-require.Error(t, err)
-require.Contains(t, err.Error(), fmt.Sprintf("uncore event %q found in core entity", eventName))
+err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil)
+require.ErrorContains(t, err, fmt.Sprintf("uncore event %q found in core entity", eventName))
 mTransformer.AssertExpectations(t)
 })
 t.Run("core event found in uncore entity", func(t *testing.T) {
 mQuals := []string{"config1=0x23h"}
-mOptions, err := ia.NewOptions().SetAttrModifiers(mQuals).Build()
-require.NoError(t, err)
 eventName := "core event 1"
-testCase := test{event: &eventWithQuals{name: eventName, qualifiers: mQuals},
-options: mOptions,
-perfEvent: &ia.PerfEvent{Name: eventName, Uncore: false}}
+testCase := test{
+event: &eventWithQuals{name: eventName, qualifiers: mQuals},
+perfEvent: &ia.PerfEvent{Name: eventName, Uncore: false},
+}
 matcher := ia.NewNameMatcher(eventName)
 mTransformer.On("Transform", nil, matcher).Return([]*ia.PerfEvent{testCase.perfEvent}, nil).Once()
 mUncoreEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{testCase.event}, allEvents: false}
-err = mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity})
-require.Error(t, err)
-require.Contains(t, err.Error(), fmt.Sprintf("core event %q found in uncore entity", eventName))
+err := mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity})
+require.ErrorContains(t, err, fmt.Sprintf("core event %q found in uncore entity", eventName))
 mTransformer.AssertExpectations(t)
 })

View File

@@ -294,7 +294,6 @@ type packageMetric[T numeric] struct {
 fetchFn func(packageID int) (T, error)
 }
-//nolint:revive // Confusing-naming caused by a generic type that implements this interface method.
 func (m *packageMetric[T]) fields() (map[string]interface{}, error) {
 val, err := m.fetchFn(m.packageID)
 if err != nil {
@@ -306,7 +305,6 @@ func (m *packageMetric[T]) fields() (map[string]interface{}, error) {
 }, nil
 }
-//nolint:revive // Confusing-naming caused by a generic type that implements this interface method.
 func (m *packageMetric[T]) tags() map[string]string {
 return map[string]string{
 "package_id": strconv.Itoa(m.packageID),

View File

@@ -537,10 +537,10 @@ func checkForDuplicates(values []int, valuesToCheck []int) bool {
 return false
 }
-func makeRange(min, max int) []int {
-a := make([]int, max-min+1)
+func makeRange(low, high int) []int {
+a := make([]int, high-low+1)
 for i := range a {
-a[i] = min + i
+a[i] = low + i
 }
 return a
 }

View File

@@ -650,7 +650,7 @@ func TestJolokia2_ClientAuthRequest(t *testing.T) {
 require.EqualValuesf(t, "sally", username, "Expected to post with username %s, but was %s", "sally", username)
 require.EqualValuesf(t, "seashore", password, "Expected to post with password %s, but was %s", "seashore", password)
-require.NotZero(t, len(requests), "Expected to post a request body, but was empty.")
+require.NotEmpty(t, requests, "Expected to post a request body, but was empty.")
 request := requests[0]["mbean"]
 require.EqualValuesf(t, "hello:foo=bar", request, "Expected to query mbean %s, but was %s", "hello:foo=bar", request)

View File

@@ -113,7 +113,7 @@ func TestJolokia2_ClientProxyAuthRequest(t *testing.T) {
 require.NoError(t, plugin.Gather(&acc))
 require.EqualValuesf(t, "sally", username, "Expected to post with username %s, but was %s", "sally", username)
 require.EqualValuesf(t, "seashore", password, "Expected to post with password %s, but was %s", "seashore", password)
-require.NotZero(t, len(requests), "Expected to post a request body, but was empty.")
+require.NotEmpty(t, requests, "Expected to post a request body, but was empty.")
 request := requests[0]
 expected := "hello:foo=bar"

View File

@@ -629,7 +629,7 @@ func TestExponentialBackoff(t *testing.T) {
 var err error
 backoff := 10 * time.Millisecond
-max := 3
+limit := 3
 // get an unused port by listening on next available port, then closing it
 listener, err := net.Listen("tcp", "127.0.0.1:0")
@@ -650,7 +650,7 @@ func TestExponentialBackoff(t *testing.T) {
 ReadConfig: kafka.ReadConfig{
 Config: kafka.Config{
-MetadataRetryMax: max,
+MetadataRetryMax: limit,
 MetadataRetryBackoff: config.Duration(backoff),
 MetadataRetryType: "exponential",
 },
@@ -670,7 +670,7 @@ func TestExponentialBackoff(t *testing.T) {
 t.Logf("elapsed %d", elapsed)
 var expectedRetryDuration time.Duration
-for i := 0; i < max; i++ {
+for i := 0; i < limit; i++ {
 expectedRetryDuration += backoff * time.Duration(math.Pow(2, float64(i)))
 }
 t.Logf("expected > %d", expectedRetryDuration)

View File

@@ -1,10 +1,12 @@
 package net
 import (
-"os"
 "path/filepath"
 "testing"
+"time"
+"github.com/influxdata/telegraf"
+"github.com/influxdata/telegraf/metric"
 "github.com/influxdata/telegraf/plugins/inputs/system"
 "github.com/influxdata/telegraf/testutil"
 "github.com/shirou/gopsutil/v3/net"
@@ -13,9 +15,7 @@ import (
 func TestNetIOStats(t *testing.T) {
 var mps system.MockPS
-var err error
 defer mps.AssertExpectations(t)
-var acc testutil.Accumulator
 netio := net.IOCountersStat{
 Name: "eth0",
@@ -42,43 +42,47 @@ func TestNetIOStats(t *testing.T) {
 }
 mps.On("NetProto").Return(netprotos, nil)
-require.NoError(t, os.Setenv("HOST_SYS", filepath.Join("testdata", "general", "sys")))
-err = (&NetIOStats{ps: &mps, skipChecks: true}).Gather(&acc)
-require.NoError(t, err)
-ntags := map[string]string{
-"interface": "eth0",
-}
-fields1 := map[string]interface{}{
-"bytes_sent": uint64(1123),
-"bytes_recv": uint64(8734422),
-"packets_sent": uint64(781),
-"packets_recv": uint64(23456),
-"err_in": uint64(832),
-"err_out": uint64(8),
-"drop_in": uint64(7),
-"drop_out": uint64(1),
-"speed": int64(100),
+t.Setenv("HOST_SYS", filepath.Join("testdata", "general", "sys"))
+plugin := &NetIOStats{ps: &mps, skipChecks: true}
+var acc testutil.Accumulator
+require.NoError(t, plugin.Gather(&acc))
+expected := []telegraf.Metric{
+metric.New(
+"net",
+map[string]string{"interface": "eth0"},
+map[string]interface{}{
+"bytes_sent": uint64(1123),
+"bytes_recv": uint64(8734422),
+"packets_sent": uint64(781),
+"packets_recv": uint64(23456),
+"err_in": uint64(832),
+"err_out": uint64(8),
+"drop_in": uint64(7),
+"drop_out": uint64(1),
+"speed": int64(100),
+},
+time.Unix(0, 0),
+telegraf.Counter,
+),
+metric.New(
+"net",
+map[string]string{"interface": "all"},
+map[string]interface{}{
+"udp_noports": int64(892592),
+"udp_indatagrams": int64(4655),
+},
+time.Unix(0, 0),
+),
 }
-acc.AssertContainsTaggedFields(t, "net", fields1, ntags)
-fields2 := map[string]interface{}{
-"udp_noports": int64(892592),
-"udp_indatagrams": int64(4655),
-}
-ntags = map[string]string{
-"interface": "all",
-}
-acc.AssertContainsTaggedFields(t, "net", fields2, ntags)
+testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
 }
 func TestNetIOStatsSpeedUnsupported(t *testing.T) {
 var mps system.MockPS
-var err error
 defer mps.AssertExpectations(t)
-var acc testutil.Accumulator
 netio := net.IOCountersStat{
 Name: "eth1",
@@ -105,43 +109,47 @@ func TestNetIOStatsSpeedUnsupported(t *testing.T) {
 }
 mps.On("NetProto").Return(netprotos, nil)
-require.NoError(t, os.Setenv("HOST_SYS", filepath.Join("testdata", "general", "sys")))
-err = (&NetIOStats{ps: &mps, skipChecks: true}).Gather(&acc)
-require.NoError(t, err)
-ntags := map[string]string{
-"interface": "eth1",
-}
-fields1 := map[string]interface{}{
-"bytes_sent": uint64(1123),
-"bytes_recv": uint64(8734422),
-"packets_sent": uint64(781),
-"packets_recv": uint64(23456),
-"err_in": uint64(832),
-"err_out": uint64(8),
-"drop_in": uint64(7),
-"drop_out": uint64(1),
-"speed": int64(-1),
+t.Setenv("HOST_SYS", filepath.Join("testdata", "general", "sys"))
+plugin := &NetIOStats{ps: &mps, skipChecks: true}
+var acc testutil.Accumulator
+require.NoError(t, plugin.Gather(&acc))
+expected := []telegraf.Metric{
+metric.New(
+"net",
+map[string]string{"interface": "eth1"},
+map[string]interface{}{
+"bytes_sent": uint64(1123),
+"bytes_recv": uint64(8734422),
+"packets_sent": uint64(781),
+"packets_recv": uint64(23456),
+"err_in": uint64(832),
+"err_out": uint64(8),
+"drop_in": uint64(7),
+"drop_out": uint64(1),
+"speed": int64(-1),
+},
+time.Unix(0, 0),
+telegraf.Counter,
+),
+metric.New(
+"net",
+map[string]string{"interface": "all"},
+map[string]interface{}{
+"udp_noports": int64(892592),
+"udp_indatagrams": int64(4655),
+},
+time.Unix(0, 0),
+),
 }
-acc.AssertContainsTaggedFields(t, "net", fields1, ntags)
-fields2 := map[string]interface{}{
-"udp_noports": int64(892592),
-"udp_indatagrams": int64(4655),
-}
-ntags = map[string]string{
-"interface": "all",
-}
-acc.AssertContainsTaggedFields(t, "net", fields2, ntags)
+testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
 }
 func TestNetIOStatsNoSpeedFile(t *testing.T) {
 var mps system.MockPS
-var err error
 defer mps.AssertExpectations(t)
-var acc testutil.Accumulator
 netio := net.IOCountersStat{
 Name: "eth2",
@@ -168,34 +176,40 @@ func TestNetIOStatsNoSpeedFile(t *testing.T) {
 }
 mps.On("NetProto").Return(netprotos, nil)
-require.NoError(t, os.Setenv("HOST_SYS", filepath.Join("testdata", "general", "sys")))
-err = (&NetIOStats{ps: &mps, skipChecks: true}).Gather(&acc)
-require.NoError(t, err)
-ntags := map[string]string{
-"interface": "eth2",
-}
-fields1 := map[string]interface{}{
-"bytes_sent": uint64(1123),
-"bytes_recv": uint64(8734422),
-"packets_sent": uint64(781),
-"packets_recv": uint64(23456),
-"err_in": uint64(832),
-"err_out": uint64(8),
-"drop_in": uint64(7),
-"drop_out": uint64(1),
-"speed": int64(-1),
+t.Setenv("HOST_SYS", filepath.Join("testdata", "general", "sys"))
+plugin := &NetIOStats{ps: &mps, skipChecks: true}
+var acc testutil.Accumulator
+require.NoError(t, plugin.Gather(&acc))
+expected := []telegraf.Metric{
+metric.New(
+"net",
+map[string]string{"interface": "eth2"},
+map[string]interface{}{
+"bytes_sent": uint64(1123),
+"bytes_recv": uint64(8734422),
+"packets_sent": uint64(781),
+"packets_recv": uint64(23456),
+"err_in": uint64(832),
+"err_out": uint64(8),
+"drop_in": uint64(7),
+"drop_out": uint64(1),
+"speed": int64(-1),
+},
+time.Unix(0, 0),
+telegraf.Counter,
+),
+metric.New(
+"net",
+map[string]string{"interface": "all"},
+map[string]interface{}{
+"udp_noports": int64(892592),
+"udp_indatagrams": int64(4655),
+},
+time.Unix(0, 0),
+),
 }
-acc.AssertContainsTaggedFields(t, "net", fields1, ntags)
-fields2 := map[string]interface{}{
-"udp_noports": int64(892592),
-"udp_indatagrams": int64(4655),
-}
-ntags = map[string]string{
-"interface": "all",
-}
-acc.AssertContainsTaggedFields(t, "net", fields2, ntags)
+testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
 }

View File

@@ -209,6 +209,8 @@ func decodeTCPFlags(b []byte) (interface{}, error) {
 results = append(results, ".")
 }
 }
+//nolint:gosec // False positive (b[1] is not out of range - it is ensured by above checks)
 return strings.Join(results, "") + mapTCPFlags(b[1]), nil
 }

View File

@@ -151,11 +151,11 @@ func processPingOutput(out string) (statistics, error) {
 if len(approxs) != 4 {
 return stats, err
 }
-min, err := strconv.Atoi(approxs[1])
+low, err := strconv.Atoi(approxs[1])
 if err != nil {
 return stats, err
 }
-max, err := strconv.Atoi(approxs[2])
+high, err := strconv.Atoi(approxs[2])
 if err != nil {
 return stats, err
 }
@@ -165,8 +165,8 @@
 }
 stats.avg = avg
-stats.min = min
-stats.max = max
+stats.min = low
+stats.max = high
 return stats, err
 }

View File

@@ -9,6 +9,7 @@ import (
 "testing"
 "time"
+"github.com/google/go-cmp/cmp"
 "github.com/stretchr/testify/require"
 "github.com/influxdata/telegraf"
@@ -25,9 +26,7 @@ func TestProcesses(t *testing.T) {
 readProcFile: tester.testProcFile,
 }
 var acc testutil.Accumulator
-err := processes.Gather(&acc)
-require.NoError(t, err)
+require.NoError(t, processes.Gather(&acc))
 require.True(t, acc.HasInt64Field("processes", "running"))
 require.True(t, acc.HasInt64Field("processes", "sleeping"))
@@ -35,7 +34,7 @@
 require.True(t, acc.HasInt64Field("processes", "total"))
 total, ok := acc.Get("processes")
 require.True(t, ok)
-require.Greater(t, total.Fields["total"].(int64), int64(0))
+require.Positive(t, total.Fields["total"])
 }
 func TestFromPS(t *testing.T) {
@@ -46,8 +45,7 @@ func TestFromPS(t *testing.T) {
 }
 var acc testutil.Accumulator
-err := processes.Gather(&acc)
-require.NoError(t, err)
+require.NoError(t, processes.Gather(&acc))
 fields := getEmptyFields()
 fields["blocked"] = int64(3)
@@ -68,8 +66,7 @@ func TestFromPSError(t *testing.T) {
 }
 var acc testutil.Accumulator
-err := processes.Gather(&acc)
-require.Error(t, err)
+require.Error(t, processes.Gather(&acc))
 }
 func TestFromProcFiles(t *testing.T) {
@@ -84,8 +81,7 @@ func TestFromProcFiles(t *testing.T) {
 }
 var acc testutil.Accumulator
-err := processes.Gather(&acc)
-require.NoError(t, err)
+require.NoError(t, processes.Gather(&acc))
 fields := getEmptyFields()
 fields["sleeping"] = tester.calls
@@ -107,8 +103,7 @@ func TestFromProcFilesWithSpaceInCmd(t *testing.T) {
 }
 var acc testutil.Accumulator
-err := processes.Gather(&acc)
-require.NoError(t, err)
+require.NoError(t, processes.Gather(&acc))
 fields := getEmptyFields()
 fields["sleeping"] = tester.calls
@@ -141,8 +136,7 @@ func TestParkedProcess(t *testing.T) {
 }
 var acc testutil.Accumulator
-err := plugin.Gather(&acc)
-require.NoError(t, err)
+require.NoError(t, plugin.Gather(&acc))
 expected := []telegraf.Metric{
 testutil.MustMetric(
@@ -164,13 +158,12 @@ func TestParkedProcess(t *testing.T) {
 telegraf.Gauge,
 ),
 }
-actual := acc.GetTelegrafMetrics()
-for _, a := range actual {
-a.RemoveField("total")
-a.RemoveField("total_threads")
+options := []cmp.Option{
+testutil.IgnoreTime(),
+testutil.IgnoreFields("total", "total_threads"),
 }
-testutil.RequireMetricsEqual(t, expected, actual,
-testutil.IgnoreTime())
+testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), options...)
 }
 func testExecPS(out string) func(_ bool) ([]byte, error) {

View File

@@ -995,12 +995,12 @@ func parseThermalThrottle(acc telegraf.Accumulator, fields map[string]interface{
 }
 func parseWearLeveling(acc telegraf.Accumulator, fields map[string]interface{}, tags map[string]string, str string) error {
-var min, max, avg int64
-if _, err := fmt.Sscanf(str, "min: %d, max: %d, avg: %d", &min, &max, &avg); err != nil {
+var vmin, vmax, avg int64
+if _, err := fmt.Sscanf(str, "min: %d, max: %d, avg: %d", &vmin, &vmax, &avg); err != nil {
 return err
 }
-values := []int64{min, max, avg}
+values := []int64{vmin, vmax, avg}
 for i, submetricName := range []string{"Min", "Max", "Avg"} {
 fields["raw_value"] = values[i]
 tags["name"] = "Wear_Leveling_" + submetricName

View File

@@ -146,15 +146,5 @@ func (rs *RunningStats) Percentile(n float64) float64 {
 }
 i := float64(len(rs.perc)) * n / float64(100)
-return rs.perc[clamp(i, 0, len(rs.perc)-1)]
-}
-func clamp(i float64, min int, max int) int {
-if i < float64(min) {
-return min
-}
-if i > float64(max) {
-return max
-}
-return int(i)
+return rs.perc[max(0, min(int(i), len(rs.perc)-1))]
 }
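The clamp helper can go because Go 1.21 ships built-in min and max, so clamping the percentile index becomes a one-liner, mirroring the new Percentile code. A small self-contained sketch of the same idiom:

package main

import "fmt"

func main() {
	perc := []float64{0.1, 0.5, 0.9, 1.5}
	i := 7.3 // a computed, possibly out-of-range position

	idx := max(0, min(int(i), len(perc)-1)) // built-ins replace the removed clamp()
	fmt.Println(perc[idx])                  // 1.5
}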

View File

@@ -6,11 +6,14 @@ import (
 "testing"
 "time"
+"github.com/google/go-cmp/cmp"
 "github.com/nwaples/tacplus"
 "github.com/stretchr/testify/require"
 "github.com/testcontainers/testcontainers-go/wait"
+"github.com/influxdata/telegraf"
 "github.com/influxdata/telegraf/config"
+"github.com/influxdata/telegraf/metric"
 "github.com/influxdata/telegraf/testutil"
 )
@@ -49,7 +52,7 @@ func (t testRequestHandler) HandleAuthorRequest(_ context.Context, a *tacplus.Au
 return &tacplus.AuthorResponse{Status: tacplus.AuthorStatusFail}
 }
-func (t testRequestHandler) HandleAcctRequest(_ context.Context, _ *tacplus.AcctRequest, _ *tacplus.ServerSession) *tacplus.AcctReply {
+func (testRequestHandler) HandleAcctRequest(context.Context, *tacplus.AcctRequest, *tacplus.ServerSession) *tacplus.AcctReply {
 return &tacplus.AcctReply{Status: tacplus.AcctStatusSuccess}
 }
@@ -105,7 +108,6 @@ func TestTacacsInit(t *testing.T) {
 }
 err := plugin.Init()
 if tt.errContains == "" {
 require.NoError(t, err)
 if tt.requestAddr == "" {
@@ -145,8 +147,9 @@ func TestTacacsLocal(t *testing.T) {
 }
 go func() {
-err = srv.Serve(l)
-require.NoError(t, err, "local srv.Serve failed to start serving on "+srvLocal)
+if err := srv.Serve(l); err != nil {
+t.Logf("local srv.Serve failed to start serving on %s", srvLocal)
+}
 }()
 var testset = []struct {
@@ -190,16 +193,6 @@
 requestAddr: "127.0.0.1",
 errContains: "error on new tacacs authentication start request to " + srvLocal + " : bad secret or packet",
 },
-{
-name: "unreachable",
-testingTimeout: config.Duration(time.Nanosecond * 1000),
-serverToTest: []string{"unreachable.test:49"},
-usedUsername: config.NewSecret([]byte(`testusername`)),
-usedPassword: config.NewSecret([]byte(`testpassword`)),
-usedSecret: config.NewSecret([]byte(`testsecret`)),
-requestAddr: "127.0.0.1",
-errContains: "error on new tacacs authentication start request to unreachable.test:49 : dial tcp",
-},
 }
 for _, tt := range testset {
@@ -221,23 +214,97 @@
 if tt.errContains == "" {
 require.Empty(t, acc.Errors)
-require.True(t, acc.HasMeasurement("tacacs"))
-require.True(t, acc.HasTag("tacacs", "source"))
-require.Equal(t, srvLocal, acc.TagValue("tacacs", "source"))
-require.True(t, acc.HasInt64Field("tacacs", "responsetime_ms"))
-require.True(t, acc.HasStringField("tacacs", "response_status"))
-require.Equal(t, tt.reqRespStatus, acc.Metrics[0].Fields["response_status"])
+expected := []telegraf.Metric{
+metric.New(
+"tacacs",
+map[string]string{"source": srvLocal},
+map[string]interface{}{
+"responsetime_ms": int64(0),
+"response_status": tt.reqRespStatus,
+},
+time.Unix(0, 0),
+),
+}
+options := []cmp.Option{
+testutil.IgnoreTime(),
+testutil.IgnoreFields("responsetime_ms"),
+}
+testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), options...)
 } else {
 require.Len(t, acc.Errors, 1)
 require.ErrorContains(t, acc.FirstError(), tt.errContains)
-require.False(t, acc.HasTag("tacacs", "source"))
-require.False(t, acc.HasInt64Field("tacacs", "responsetime_ms"))
-require.False(t, acc.HasStringField("tacacs", "response_status"))
+require.Empty(t, acc.GetTelegrafMetrics())
 }
 })
 }
 }
+func TestTacacsLocalTimeout(t *testing.T) {
+testHandler := tacplus.ServerConnHandler{
+Handler: &testRequestHandler{
+"testusername": {
+password: "testpassword",
+},
+},
+ConnConfig: tacplus.ConnConfig{
+Secret: []byte(`testsecret`),
+Mux: true,
+},
+}
+l, err := net.Listen("tcp", "localhost:0")
+require.NoError(t, err, "local net listen failed to start listening")
+srvLocal := l.Addr().String()
+srv := &tacplus.Server{
+ServeConn: func(nc net.Conn) {
+testHandler.Serve(nc)
+},
+}
+go func() {
+if err := srv.Serve(l); err != nil {
+t.Logf("local srv.Serve failed to start serving on %s", srvLocal)
+}
+}()
+// Initialize the plugin
+plugin := &Tacacs{
+ResponseTimeout: config.Duration(time.Microsecond),
+Servers: []string{"unreachable.test:49"},
+Username: config.NewSecret([]byte(`testusername`)),
+Password: config.NewSecret([]byte(`testpassword`)),
+Secret: config.NewSecret([]byte(`testsecret`)),
+RequestAddr: "127.0.0.1",
+Log: &testutil.Logger{},
+}
+require.NoError(t, plugin.Init())
+// Try to connect, this will return a metric with the timeout...
+var acc testutil.Accumulator
+require.NoError(t, plugin.Gather(&acc))
+expected := []telegraf.Metric{
+metric.New(
+"tacacs",
+map[string]string{"source": "unreachable.test:49"},
+map[string]interface{}{
+"response_status": string("Timeout"),
+"responsetime_ms": int64(0),
+},
+time.Unix(0, 0),
+),
+}
+options := []cmp.Option{
+testutil.IgnoreTime(),
+testutil.IgnoreFields("responsetime_ms"),
+}
+require.Empty(t, acc.Errors)
+testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), options...)
+}
 func TestTacacsIntegration(t *testing.T) {
 if testing.Short() {
 t.Skip("Skipping integration test in short mode")
@@ -250,8 +317,7 @@ func TestTacacsIntegration(t *testing.T) {
 wait.ForLog("Starting server..."),
 ),
 }
-err := container.Start()
-require.NoError(t, err, "failed to start container")
+require.NoError(t, container.Start(), "failed to start container")
 defer container.Terminate()
 port := container.Ports["49"]
@@ -302,14 +368,22 @@
 require.NoError(t, plugin.Gather(&acc))
 require.NoError(t, acc.FirstError())
-require.True(t, acc.HasMeasurement("tacacs"))
-require.True(t, acc.HasStringField("tacacs", "response_status"))
-require.True(t, acc.HasInt64Field("tacacs", "responsetime_ms"))
-require.True(t, acc.HasTag("tacacs", "source"))
-require.Equal(t, tt.reqRespStatus, acc.Metrics[0].Fields["response_status"])
-require.Equal(t, container.Address+":"+port, acc.TagValue("tacacs", "source"))
+expected := []telegraf.Metric{
+metric.New(
+"tacacs",
+map[string]string{"source": container.Address + ":" + port},
+map[string]interface{}{
+"responsetime_ms": int64(0),
+"response_status": tt.reqRespStatus,
+},
+time.Unix(0, 0),
+),
+}
+options := []cmp.Option{
+testutil.IgnoreTime(),
+testutil.IgnoreFields("responsetime_ms"),
+}
+testutil.RequireMetricsStructureEqual(t, expected, acc.GetTelegrafMetrics(), options...)
 })
 }
 }

View File

@@ -38,7 +38,8 @@ func TestInvalidMetricFormat(t *testing.T) {
 }
 func TestNameCollisions(t *testing.T) {
-require.NoError(t, os.Setenv("HOST_SYS", filepath.Join("testcases", "with_name", "sys")))
+t.Setenv("HOST_SYS", filepath.Join("testcases", "with_name", "sys"))
 plugin := &Temperature{Log: &testutil.Logger{}}
 require.NoError(t, plugin.Init())
@@ -87,7 +88,7 @@ func TestCases(t *testing.T) {
 }
 // Prepare the environment
-require.NoError(t, os.Setenv("HOST_SYS", filepath.Join(testcasePath, "sys")))
+t.Setenv("HOST_SYS", filepath.Join(testcasePath, "sys"))
 // Configure the plugin
 cfg := config.NewConfig()
@@ -120,7 +121,7 @@
 }
 // Prepare the environment
-require.NoError(t, os.Setenv("HOST_SYS", filepath.Join(testcasePath, "sys")))
+t.Setenv("HOST_SYS", filepath.Join(testcasePath, "sys"))
 // Configure the plugin
 cfg := config.NewConfig()
@@ -221,7 +222,7 @@ func TestRegression(t *testing.T) {
 }
 // Prepare the environment
-require.NoError(t, os.Setenv("HOST_SYS", filepath.Join(testcasePath, "sys")))
+t.Setenv("HOST_SYS", filepath.Join(testcasePath, "sys"))
 // Use the v1.28.x code to compare against
 var acc testutil.Accumulator
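The os.Setenv calls become t.Setenv because the testing helper restores the previous value when the test finishes and refuses to run under t.Parallel, so no require.NoError wrapper or manual cleanup is needed. A hypothetical example:

package example

import (
	"os"
	"path/filepath"
	"testing"
)

func TestHostSys(t *testing.T) {
	// t.Setenv registers a cleanup that restores the old HOST_SYS value.
	t.Setenv("HOST_SYS", filepath.Join("testdata", "sys"))

	if os.Getenv("HOST_SYS") == "" {
		t.Fatal("HOST_SYS should be set for the duration of the test")
	}
}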

View File

@@ -580,7 +580,7 @@ func TestWinPerfCountersCollectRawIntegration(t *testing.T) {
 require.True(t, ok, "Expected presence of %s field", expectedCounter)
 valInt64, ok := val.(int64)
 require.Truef(t, ok, "Expected int64, got %T", val)
-require.Greaterf(t, valInt64, int64(0), "Expected > 0, got %d, for %#v", valInt64, metric)
+require.Positivef(t, valInt64, "Value not positive for metric %#v", metric)
 }
 // Test *Array way
@@ -604,6 +604,6 @@
 require.True(t, ok, "Expected presence of %s field", expectedCounter)
 valInt64, ok := val.(int64)
 require.Truef(t, ok, "Expected int64, got %T", val)
-require.Greaterf(t, valInt64, int64(0), "Expected > 0, got %d, for %#v", valInt64, metric)
+require.Positivef(t, valInt64, "Value not positive for metric %#v", metric)
 }
 }

View File

@@ -134,21 +134,21 @@ func NewBinaryAnnotations(annotations []BinaryAnnotation, endpoint Endpoint) []t
 }
 func minMax(span Span) (time.Time, time.Time) {
-min := now().UTC()
-max := time.Time{}.UTC()
+low := now().UTC()
+high := time.Time{}.UTC()
 for _, annotation := range span.Annotations() {
 ts := annotation.Timestamp()
-if !ts.IsZero() && ts.Before(min) {
-min = ts
+if !ts.IsZero() && ts.Before(low) {
+low = ts
 }
-if !ts.IsZero() && ts.After(max) {
-max = ts
+if !ts.IsZero() && ts.After(high) {
+high = ts
 }
 }
-if max.IsZero() {
-max = min
+if high.IsZero() {
+high = low
 }
-return min, max
+return low, high
 }
 func guessTimestamp(span Span) time.Time {
@@ -157,8 +157,8 @@
 return ts
 }
-min, _ := minMax(span)
-return min
+low, _ := minMax(span)
+return low
 }
 func convertDuration(span Span) time.Duration {
@@ -166,8 +166,8 @@
 if duration != 0 {
 return duration
 }
-min, max := minMax(span)
-return max.Sub(min)
+low, high := minMax(span)
+return high.Sub(low)
 }
 func parentID(span Span) (string, error) {

View File

@@ -380,19 +380,19 @@ func translate(m telegraf.Metric, prefix string) (*azureMonitorMetric, error) {
 dimensionValues = append(dimensionValues, tag.Value)
 }
-min, err := getFloatField(m, "min")
+vmin, err := getFloatField(m, "min")
 if err != nil {
 return nil, err
 }
-max, err := getFloatField(m, "max")
+vmax, err := getFloatField(m, "max")
 if err != nil {
 return nil, err
 }
-sum, err := getFloatField(m, "sum")
+vsum, err := getFloatField(m, "sum")
 if err != nil {
 return nil, err
 }
-count, err := getIntField(m, "count")
+vcount, err := getIntField(m, "count")
 if err != nil {
 return nil, err
 }
@@ -417,10 +417,10 @@
 Series: []*azureMonitorSeries{
 {
 DimensionValues: dimensionValues,
-Min: min,
-Max: max,
-Sum: sum,
-Count: count,
+Min: vmin,
+Max: vmax,
+Sum: vsum,
+Count: vcount,
 },
 },
 },

View File

@@ -69,20 +69,20 @@ func (f *statisticField) buildDatum() []types.MetricDatum {
 if f.hasAllFields() {
 // If we have all required fields, we build datum with StatisticValues
-min := f.values[statisticTypeMin]
-max := f.values[statisticTypeMax]
-sum := f.values[statisticTypeSum]
-count := f.values[statisticTypeCount]
+vmin := f.values[statisticTypeMin]
+vmax := f.values[statisticTypeMax]
+vsum := f.values[statisticTypeSum]
+vcount := f.values[statisticTypeCount]
 datum := types.MetricDatum{
 MetricName: aws.String(strings.Join([]string{f.metricName, f.fieldName}, "_")),
 Dimensions: BuildDimensions(f.tags),
 Timestamp: aws.Time(f.timestamp),
 StatisticValues: &types.StatisticSet{
-Minimum: aws.Float64(min),
-Maximum: aws.Float64(max),
-Sum: aws.Float64(sum),
-SampleCount: aws.Float64(count),
+Minimum: aws.Float64(vmin),
+Maximum: aws.Float64(vmax),
+Sum: aws.Float64(vsum),
+SampleCount: aws.Float64(vcount),
 },
 StorageResolution: aws.Int32(int32(f.storageResolution)),
 }

View File

@@ -15,7 +15,7 @@ import (
 )
 func TestSqlite(t *testing.T) {
-dbfile := filepath.Join(os.TempDir(), "db")
+dbfile := filepath.Join(t.TempDir(), "db")
 defer os.Remove(dbfile)
 // Use the plugin to write to the database address :=
@@ -27,9 +27,8 @@
 p.DataSourceName = address
 require.NoError(t, p.Connect())
-require.NoError(t, p.Write(
-testMetrics,
-))
+defer p.Close()
+require.NoError(t, p.Write(testMetrics))
 //read directly from the database
 db, err := gosql.Open("sqlite", address)
@@ -41,7 +40,7 @@
 require.Equal(t, 1, countMetricOne)
 var countMetricTwo int
-require.NoError(t, db.QueryRow("select count(*) from metric_one").Scan(&countMetricTwo))
+require.NoError(t, db.QueryRow("select count(*) from metric_two").Scan(&countMetricTwo))
 require.Equal(t, 1, countMetricTwo)
 var rows *gosql.Rows

View File

@@ -278,30 +278,30 @@ const (
 var ErrBadThresholdFormat = errors.New("bad threshold format")
 // Handles all cases from https://nagios-plugins.org/doc/guidelines.html#THRESHOLDFORMAT
-func parseThreshold(threshold string) (min float64, max float64, err error) {
+func parseThreshold(threshold string) (vmin, vmax float64, err error) {
 thresh := strings.Split(threshold, ":")
 switch len(thresh) {
 case 1:
-max, err = strconv.ParseFloat(thresh[0], 64)
+vmax, err = strconv.ParseFloat(thresh[0], 64)
 if err != nil {
 return 0, 0, ErrBadThresholdFormat
 }
-return 0, max, nil
+return 0, vmax, nil
 case 2:
 if thresh[0] == "~" {
-min = MinFloat64
+vmin = MinFloat64
 } else {
-min, err = strconv.ParseFloat(thresh[0], 64)
+vmin, err = strconv.ParseFloat(thresh[0], 64)
 if err != nil {
-min = 0
+vmin = 0
 }
 }
 if thresh[1] == "" {
-max = MaxFloat64
+vmax = MaxFloat64
 } else {
-max, err = strconv.ParseFloat(thresh[1], 64)
+vmax, err = strconv.ParseFloat(thresh[1], 64)
 if err != nil {
 return 0, 0, ErrBadThresholdFormat
 }
@@ -310,7 +310,7 @@ func parseThreshold(threshold string) (min float64, max float64, err error) {
 return 0, 0, ErrBadThresholdFormat
 }
-return min, max, err
+return vmin, vmax, err
 }
 func init() {

View File

@@ -518,9 +518,9 @@ func TestParseThreshold(t *testing.T) {
 }
 for i := range tests {
-min, max, err := parseThreshold(tests[i].input)
-require.InDelta(t, tests[i].eMin, min, testutil.DefaultDelta)
-require.InDelta(t, tests[i].eMax, max, testutil.DefaultDelta)
+vmin, vmax, err := parseThreshold(tests[i].input)
+require.InDelta(t, tests[i].eMin, vmin, testutil.DefaultDelta)
+require.InDelta(t, tests[i].eMax, vmax, testutil.DefaultDelta)
 require.Equal(t, tests[i].eErr, err)
 }
 }

View File

@@ -19,7 +19,7 @@ tftp 69/udp`
 func TestReadServicesFile(t *testing.T) {
 readServicesFile()
-require.NotZero(t, len(services))
+require.NotEmpty(t, services)
 }
 func TestFakeServices(t *testing.T) {

View File

@@ -3755,7 +3755,7 @@ func parseMetricsFrom(t *testing.T, lines []string, header string) (metrics []te
 parser := &influx.Parser{}
 require.NoError(t, parser.Init())
-require.NotZero(t, len(lines), "Expected some lines to parse from .star file, found none")
+require.NotEmpty(t, lines, "Expected some lines to parse from .star file, found none")
 startIdx := -1
 endIdx := len(lines)
 for i := range lines {
@@ -3782,7 +3782,7 @@
 // parses error message out of line protocol following a header
 func parseErrorMessage(t *testing.T, lines []string, header string) string {
-require.NotZero(t, len(lines), "Expected some lines to parse from .star file, found none")
+require.NotEmpty(t, lines, "Expected some lines to parse from .star file, found none")
 startIdx := -1
 for i := range lines {
 if strings.TrimLeft(lines[i], "# ") == header {

View File

@@ -299,7 +299,7 @@ func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metr
 case "min":
 return func(ms []telegraf.Metric, fields []string) map[string]float64 {
-min := func(agg map[string]float64, val float64, field string) {
+vmin := func(agg map[string]float64, val float64, field string) {
 // If this field has not been set, set it to the maximum float64
 _, ok := agg[field]
 if !ok {
@@ -311,12 +311,12 @@ func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metr
 agg[field] = val
 }
 }
-return aggregator(ms, fields, min)
+return aggregator(ms, fields, vmin)
 }, nil
 case "max":
 return func(ms []telegraf.Metric, fields []string) map[string]float64 {
-max := func(agg map[string]float64, val float64, field string) {
+vmax := func(agg map[string]float64, val float64, field string) {
 // If this field has not been set, set it to the minimum float64
 _, ok := agg[field]
 if !ok {
@@ -328,7 +328,7 @@ func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metr
 agg[field] = val
 }
 }
-return aggregator(ms, fields, max)
+return aggregator(ms, fields, vmax)
 }, nil
 case "mean":

View File

@@ -1,4 +1,4 @@
-FROM golang:1.22.6
+FROM golang:1.23.0
 RUN chmod -R 755 "$GOPATH"

View File

@@ -2,10 +2,10 @@
 set -eux
-GO_VERSION="1.22.6"
+GO_VERSION="1.23.0"
 GO_ARCH="linux-amd64"
 # from https://golang.org/dl
-GO_VERSION_SHA="999805bed7d9039ec3da1a53bfbcafc13e367da52aa823cb60b68ba22d44c616"
+GO_VERSION_SHA="905a297f19ead44780548933e0ff1a1b86e8327bb459e92f9c0012569f76f5e3"
 # Download Go and verify Go tarball
 setup_go () {

View File

@@ -3,9 +3,9 @@
 set -eux
 ARCH=$(uname -m)
-GO_VERSION="1.22.6"
-GO_VERSION_SHA_arm64="ebac39fd44fc22feed1bb519af431c84c55776e39b30f4fd62930da9c0cfd1e3" # from https://golang.org/dl
-GO_VERSION_SHA_amd64="9c3c0124b01b5365f73a1489649f78f971ecf84844ad9ca58fde133096ddb61b" # from https://golang.org/dl
+GO_VERSION="1.23.0"
+GO_VERSION_SHA_arm64="b770812aef17d7b2ea406588e2b97689e9557aac7e646fe76218b216e2c51406" # from https://golang.org/dl
+GO_VERSION_SHA_amd64="ffd070acf59f054e8691b838f274d540572db0bd09654af851e4e76ab88403dc" # from https://golang.org/dl
 if [ "$ARCH" = 'arm64' ]; then
 GO_ARCH="darwin-arm64"

View File

@@ -2,7 +2,7 @@
 set -eux
-GO_VERSION="1.22.6"
+GO_VERSION="1.23.0"
 setup_go () {
 choco upgrade golang --allow-downgrade --version=${GO_VERSION}

View File

@@ -255,6 +255,7 @@ func extractPluginInfo(file *ast.File, pluginType string, declarations map[strin
 return registeredNames, nil
 }
+//nolint:staticcheck // Use deprecated ast.Package for now
 func extractPackageDeclarations(pkg *ast.Package) map[string]string {
 declarations := make(map[string]string)
@@ -286,6 +287,7 @@ func extractPackageDeclarations(pkg *ast.Package) map[string]string {
 return declarations
 }
+//nolint:staticcheck // Use deprecated ast.Package for now
 func extractRegisteredNames(pkg *ast.Package, pluginType string) []string {
 var registeredNames []string

View File

@@ -32,9 +32,8 @@ func firstSection(t *T, root ast.Node) error {
 n = n.NextSibling()
 t.assertKind(ast.KindParagraph, n)
 length := len(n.Text(t.markdown))
-min := 30
-if length < min {
-t.assertNodef(n, "short first section. Please add short description of plugin. length %d, minimum %d", length, min)
+if length < 30 {
+t.assertNodef(n, "short first section. Please add short description of plugin. length %d, minimum 30", length)
 }
 return nil