chore(linters): Enable `usetesting` linter (#16456)
Co-authored-by: pzak <pzak>
Co-authored-by: Zak <pawel.zak@intel.com>
parent d231442040
commit 9bf9fb3a4d
@@ -359,10 +359,9 @@ linters-settings:
     - suite-thelper
     - useless-assert
+  usetesting:
+    os-create-temp: false
+    os-mkdir-temp: false
+    context-background: false
+    context-todo: false
+    # Enable/disable `os.TempDir()` detections.
+    # Default: false
+    os-temp-dir: true
 
 issues:
   # List of regexps of issue texts to exclude.
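For context, the `usetesting` linter flags test code that builds its own contexts and temporary files/directories instead of using the helpers on `*testing.T`. A minimal sketch of the rewrite this commit applies throughout the test suite (the `TestExample` name and file contents are illustrative only, not part of the change; `t.Context()` requires Go 1.24+):

package example

import (
	"context"
	"os"
	"path/filepath"
	"testing"
	"time"
)

func TestExample(t *testing.T) {
	// Before: ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	// After: the context is tied to the test's lifetime and cancelled automatically on test end.
	ctx, cancel := context.WithTimeout(t.Context(), time.Second)
	defer cancel()
	_ = ctx

	// Before: dir, err := os.MkdirTemp("", "example"); defer os.RemoveAll(dir)
	// After: t.TempDir() is created per test and removed automatically when the test finishes.
	dir := t.TempDir()
	if err := os.WriteFile(filepath.Join(dir, "state.json"), []byte("{}"), 0600); err != nil {
		t.Fatal(err)
	}
}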
@@ -213,7 +213,7 @@ func TestCases(t *testing.T) {
 
 // Setup the agent and run the agent in "once" mode
 agent := NewAgent(cfg)
-ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)
 defer cancel()
 actual, err := collect(ctx, agent, 0)
 require.NoError(t, err)
@@ -1093,11 +1093,10 @@ func TestConfigPluginIDsSame(t *testing.T) {
 
 func TestPersisterInputStoreLoad(t *testing.T) {
 // Reserve a temporary state file
-file, err := os.CreateTemp("", "telegraf_state-*.json")
+file, err := os.CreateTemp(t.TempDir(), "telegraf_state-*.json")
 require.NoError(t, err)
 filename := file.Name()
 require.NoError(t, file.Close())
-defer os.Remove(filename)
 
 // Load the plugins
 cstore := config.NewConfig()
@@ -260,7 +260,7 @@ func TestAuthConfig_Start(t *testing.T) {
 defer ticker.Stop()
 
 c.wg.Add(1)
-ctx, cancel := context.WithCancel(context.Background())
+ctx, cancel := context.WithCancel(t.Context())
 go c.authRenewal(ctx, ticker, testutil.Logger{Name: "cookie_auth"})
 
 srv.checkAuthCount(t, tt.firstAuthCount)
@@ -136,7 +136,7 @@ func TestIntegration(t *testing.T) {
 
 // Write metrics
 for _, x := range metrics {
-require.NoError(t, client.write(exchange, queueName, []byte(x)))
+require.NoError(t, client.write(t.Context(), exchange, queueName, []byte(x)))
 }
 
 // Verify that the metrics were actually written

@@ -369,7 +369,7 @@ func TestStartupErrorBehaviorRetry(t *testing.T) {
 
 // Write metrics
 for _, x := range metrics {
-require.NoError(t, client.write(exchange, queueName, []byte(x)))
+require.NoError(t, client.write(t.Context(), exchange, queueName, []byte(x)))
 }
 
 // Verify that the metrics were actually collected

@@ -429,7 +429,7 @@ func (p *producer) close() {
 p.conn.Close()
 }
 
-func (p *producer) write(exchange, key string, payload []byte) error {
+func (p *producer) write(testContext context.Context, exchange, key string, payload []byte) error {
 msg := amqp091.Publishing{
 DeliveryMode: amqp091.Persistent,
 Timestamp:    time.Now(),

@@ -437,7 +437,7 @@ func (p *producer) write(exchange, key string, payload []byte) error {
 Body:         payload,
 }
 
-ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+ctx, cancel := context.WithTimeout(testContext, 3*time.Second)
 defer cancel()
 
 return p.channel.PublishWithContext(ctx, exchange, key, true, false, msg)
@@ -173,7 +173,7 @@ func TestApcupsdGather(t *testing.T) {
 
 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
-ctx, cancel := context.WithCancel(context.Background())
+ctx, cancel := context.WithCancel(t.Context())
 
 lAddr, err := listen(ctx, t, tt.out())
 if err != nil {
@@ -24,16 +24,14 @@ const (
 )
 
 func TestBcacheGeneratesMetrics(t *testing.T) {
-tmpDir, err := os.MkdirTemp("", "telegraf-bcache")
-require.NoError(t, err)
-defer os.RemoveAll(tmpDir)
+tmpDir := t.TempDir()
 
 testBcachePath := tmpDir + "/telegraf-bcache/sys/fs/bcache"
 testBcacheUUIDPath := testBcachePath + "/663955a3-765a-4737-a9fd-8250a7a78411"
 testBcacheDevPath := tmpDir + "/telegraf/sys/devices/virtual/block/bcache0"
 testBcacheBackingDevPath := tmpDir + "/telegraf/sys/devices/virtual/block/md10"
 
-err = os.MkdirAll(testBcacheUUIDPath, 0750)
+err := os.MkdirAll(testBcacheUUIDPath, 0750)
 require.NoError(t, err)
 
 err = os.MkdirAll(testBcacheDevPath, 0750)
@@ -1,7 +1,6 @@
 package cisco_telemetry_mdt
 
 import (
-"context"
 "encoding/binary"
 "errors"
 "io"

@@ -1190,7 +1189,7 @@ func TestGRPCDialoutError(t *testing.T) {
 conn, err := grpc.NewClient(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials()))
 require.NoError(t, err)
 client := mdtdialout.NewGRPCMdtDialoutClient(conn)
-stream, err := client.MdtDialout(context.Background())
+stream, err := client.MdtDialout(t.Context())
 require.NoError(t, err)
 
 args := &mdtdialout.MdtDialoutArgs{Errors: "foobar"}

@@ -1223,9 +1222,9 @@ func TestGRPCDialoutMultiple(t *testing.T) {
 addr := c.listener.Addr()
 conn, err := grpc.NewClient(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials()))
 require.NoError(t, err)
-require.True(t, conn.WaitForStateChange(context.Background(), connectivity.Connecting))
+require.True(t, conn.WaitForStateChange(t.Context(), connectivity.Connecting))
 client := mdtdialout.NewGRPCMdtDialoutClient(conn)
-stream, err := client.MdtDialout(context.TODO())
+stream, err := client.MdtDialout(t.Context())
 require.NoError(t, err)
 
 data, err := proto.Marshal(tel)

@@ -1235,9 +1234,9 @@ func TestGRPCDialoutMultiple(t *testing.T) {
 
 conn2, err := grpc.NewClient(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials()))
 require.NoError(t, err)
-require.True(t, conn.WaitForStateChange(context.Background(), connectivity.Connecting))
+require.True(t, conn.WaitForStateChange(t.Context(), connectivity.Connecting))
 client2 := mdtdialout.NewGRPCMdtDialoutClient(conn2)
-stream2, err := client2.MdtDialout(context.TODO())
+stream2, err := client2.MdtDialout(t.Context())
 require.NoError(t, err)
 
 tel.EncodingPath = "type:model/parallel/path"

@@ -1310,7 +1309,7 @@ func TestGRPCDialoutKeepalive(t *testing.T) {
 conn, err := grpc.NewClient(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials()))
 require.NoError(t, err)
 client := mdtdialout.NewGRPCMdtDialoutClient(conn)
-stream, err := client.MdtDialout(context.Background())
+stream, err := client.MdtDialout(t.Context())
 require.NoError(t, err)
 
 tel := mockTelemetryMessage()
@@ -169,7 +169,7 @@ func TestServeHTTP(t *testing.T) {
 WriteTimeout: config.Duration(time.Millisecond * 10),
 }
 
-pubPush.ctx, pubPush.cancel = context.WithCancel(context.Background())
+pubPush.ctx, pubPush.cancel = context.WithCancel(t.Context())
 
 if test.full {
 // fill buffer with fake message
@@ -36,9 +36,7 @@ func TestNoFilesFound(t *testing.T) {
 
 func TestDefaultsUsed(t *testing.T) {
 defer restoreDflts(dfltFiles, dfltDirs)
-tmpdir, err := os.MkdirTemp("", "tmp1")
-require.NoError(t, err)
-defer os.Remove(tmpdir)
+tmpdir := t.TempDir()
 
 tmpFile, err := os.CreateTemp(tmpdir, "ip_conntrack_count")
 require.NoError(t, err)

@@ -61,9 +59,7 @@ func TestDefaultsUsed(t *testing.T) {
 
 func TestConfigsUsed(t *testing.T) {
 defer restoreDflts(dfltFiles, dfltDirs)
-tmpdir, err := os.MkdirTemp("", "tmp1")
-require.NoError(t, err)
-defer os.Remove(tmpdir)
+tmpdir := t.TempDir()
 
 cntFile, err := os.CreateTemp(tmpdir, "nf_conntrack_count")
 require.NoError(t, err)
@@ -1,7 +1,6 @@
 package dcos
 
 import (
-"context"
 "fmt"
 "net/http"
 "net/http/httptest"

@@ -61,13 +60,12 @@ func TestLogin(t *testing.T) {
 u, err := url.Parse(ts.URL)
 require.NoError(t, err)
 
-ctx := context.Background()
 sa := &serviceAccount{
 accountID:  "telegraf",
 privateKey: key,
 }
 client := newClusterClient(u, defaultResponseTimeout, 1, nil)
-auth, err := client.login(ctx, sa)
+auth, err := client.login(t.Context(), sa)
 
 require.Equal(t, tt.expectedError, err)

@@ -135,9 +133,8 @@ func TestGetSummary(t *testing.T) {
 u, err := url.Parse(ts.URL)
 require.NoError(t, err)
 
-ctx := context.Background()
 client := newClusterClient(u, defaultResponseTimeout, 1, nil)
-summary, err := client.getSummary(ctx)
+summary, err := client.getSummary(t.Context())
 
 require.Equal(t, tt.expectedError, err)
 require.Equal(t, tt.expectedValue, summary)

@@ -176,9 +173,8 @@ func TestGetNodeMetrics(t *testing.T) {
 u, err := url.Parse(ts.URL)
 require.NoError(t, err)
 
-ctx := context.Background()
 client := newClusterClient(u, defaultResponseTimeout, 1, nil)
-m, err := client.getNodeMetrics(ctx, "foo")
+m, err := client.getNodeMetrics(t.Context(), "foo")
 
 require.Equal(t, tt.expectedError, err)
 require.Equal(t, tt.expectedValue, m)

@@ -217,9 +213,8 @@ func TestGetContainerMetrics(t *testing.T) {
 u, err := url.Parse(ts.URL)
 require.NoError(t, err)
 
-ctx := context.Background()
 client := newClusterClient(u, defaultResponseTimeout, 1, nil)
-m, err := client.getContainerMetrics(ctx, "foo", "bar")
+m, err := client.getContainerMetrics(t.Context(), "foo", "bar")
 
 require.Equal(t, tt.expectedError, err)
 require.Equal(t, tt.expectedValue, m)
@@ -852,6 +852,9 @@ func createSocketForTest(t *testing.T, dirPath string) (string, net.Listener) {
 var pathToSocket string
 
 if len(dirPath) == 0 {
+// The Maximum length of the socket path is 104/108 characters, path created with t.TempDir() is too long for some cases
+// (it combines test name with subtest name and some random numbers in the path). Therefore, in this case, it is safer to stick with `os.MkdirTemp()`.
+//nolint:usetesting // Ignore "os.MkdirTemp() could be replaced by t.TempDir() in createSocketForTest" finding.
 dirPath, err = os.MkdirTemp("", "dpdk-test-socket")
 require.NoError(t, err)
 pathToSocket = filepath.Join(dirPath, dpdkSocketTemplateName)
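The 104/108-character limit referenced in these comments comes from the fixed size of the sun_path buffer in sockaddr_un (104 bytes on BSD/macOS, 108 on Linux), which is why several socket-based tests deliberately keep `os.MkdirTemp("", ...)` / `os.TempDir()` and silence the linter. A hedged sketch of a guard such a test could use to decide whether a candidate path is safe; the `maxSocketPath` constant and `checkSocketPath` helper are illustrative only and not part of this change:

package example

import "fmt"

// maxSocketPath is a conservative bound for sun_path: 104 bytes on BSD/macOS, 108 on Linux.
const maxSocketPath = 104

// checkSocketPath reports whether a candidate Unix socket path still fits into sockaddr_un.
func checkSocketPath(path string) error {
	if len(path) >= maxSocketPath {
		return fmt.Errorf("socket path %q is %d bytes; limit is %d", path, len(path), maxSocketPath)
	}
	return nil
}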
@@ -582,14 +582,13 @@ func setupIntegrationTest(t *testing.T) (*testutil.Container, error) {
 return &container, err
 }
 
-_, err = bulkRequest.Do(context.Background())
+_, err = bulkRequest.Do(t.Context())
 if err != nil {
 return &container, err
 }
 
 // force elastic to refresh indexes to get new batch data
-ctx := context.Background()
-_, err = e.esClient.Refresh().Do(ctx)
+_, err = e.esClient.Refresh().Do(t.Context())
 if err != nil {
 return &container, err
 }

@@ -700,7 +699,7 @@ func TestElasticsearchQueryIntegration_getMetricFields(t *testing.T) {
 tests = append(tests, test{
 "getMetricFields " + d.queryName,
 e,
-args{context.Background(), d.testAggregationQueryInput},
+args{t.Context(), d.testAggregationQueryInput},
 d.testAggregationQueryInput.mapMetricFields,
 d.wantGetMetricFieldsErr,
 })
@@ -18,7 +18,7 @@ func TestShimUSR1SignalingWorks(t *testing.T) {
 stdinReader, stdinWriter := io.Pipe()
 stdoutReader, stdoutWriter := io.Pipe()
 
-ctx, cancel := context.WithCancel(context.Background())
+ctx, cancel := context.WithCancel(t.Context())
 defer cancel()
 metricProcessed, exited := runInputPlugin(t, 20*time.Minute, stdinReader, stdoutWriter, nil)
@@ -134,17 +134,19 @@ func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) {
 var randomNumber int64
 var sockets [5]net.Listener
 
-_globmask := filepath.Join(os.TempDir(), "test-haproxy*.sock")
-_badmask := filepath.Join(os.TempDir(), "test-fail-haproxy*.sock")
+// The Maximum length of the socket path is 104/108 characters, path created with t.TempDir() is too long for some cases
+// (it combines test name with subtest name and some random numbers in the path). Therefore, in this case, it is safer to stick with `os.MkdirTemp()`.
+//nolint:usetesting // Ignore "os.TempDir() could be replaced by t.TempDir() in TestHaproxyGeneratesMetricsUsingSocket" finding.
+tempDir := os.TempDir()
+_globmask := filepath.Join(tempDir, "test-haproxy*.sock")
+_badmask := filepath.Join(tempDir, "test-fail-haproxy*.sock")
 
 for i := 0; i < 5; i++ {
 require.NoError(t, binary.Read(rand.Reader, binary.LittleEndian, &randomNumber))
-sockname := filepath.Join(os.TempDir(), fmt.Sprintf("test-haproxy%d.sock", randomNumber))
+sockname := filepath.Join(tempDir, fmt.Sprintf("test-haproxy%d.sock", randomNumber))
 
 sock, err := net.Listen("unix", sockname)
-if err != nil {
-t.Fatal("Cannot initialize socket ")
-}
+require.NoError(t, err, "Cannot initialize socket")
 
 sockets[i] = sock
 defer sock.Close() //nolint:revive,gocritic // done on purpose, closing will be executed properly
@@ -488,6 +488,9 @@ func TestConnectionOverUnixSocket(t *testing.T) {
 }
 }))
 
+// The Maximum length of the socket path is 104/108 characters, path created with t.TempDir() is too long for some cases
+// (it combines test name with subtest name and some random numbers in the path). Therefore, in this case, it is safer to stick with `os.MkdirTemp()`.
+//nolint:usetesting // Ignore "os.TempDir() could be replaced by t.TempDir() in TestConnectionOverUnixSocket" finding.
 unixListenAddr := filepath.Join(os.TempDir(), fmt.Sprintf("httptestserver.%d.sock", rand.Intn(1_000_000)))
 t.Cleanup(func() { os.Remove(unixListenAddr) })
@@ -736,10 +736,10 @@ func TestServerHeaders(t *testing.T) {
 func TestUnixSocket(t *testing.T) {
 listener, err := newTestHTTPListenerV2()
 require.NoError(t, err)
-file, err := os.CreateTemp("", "*.socket")
+file, err := os.CreateTemp(t.TempDir(), "*.socket")
 require.NoError(t, err)
 require.NoError(t, file.Close())
 defer os.Remove(file.Name())
 
 socketName := file.Name()
 if runtime.GOOS == "windows" {
 listener.ServiceAddress = "unix:///" + socketName
@@ -129,6 +129,9 @@ func (ts *tempSocket) Close() {
 }
 
 func newTempSocket(t *testing.T) *tempSocket {
+// The Maximum length of the socket path is 104/108 characters, path created with t.TempDir() is too long for some cases
+// (it combines test name with subtest name and some random numbers in the path). Therefore, in this case, it is safer to stick with `os.MkdirTemp()`.
+//nolint:usetesting // Ignore "os.MkdirTemp() could be replaced by t.TempDir() in newTempSocket" finding.
 dirPath, err := os.MkdirTemp("", "test-socket")
 require.NoError(t, err)

@@ -160,7 +163,7 @@ func (tlf *tempLogFile) close() {
 }
 
 func newTempLogFile(t *testing.T) *tempLogFile {
-file, err := os.CreateTemp("", "*.log")
+file, err := os.CreateTemp(t.TempDir(), "*.log")
 require.NoError(t, err)
 
 return &tempLogFile{
@@ -879,7 +879,7 @@ func Test_gatherRasMetrics(t *testing.T) {
 func Test_rasReader(t *testing.T) {
 file := rasReaderImpl{}
 // Create unique temporary file
-fileobj, err := os.CreateTemp("", "qat")
+fileobj, err := os.CreateTemp(t.TempDir(), "qat")
 require.NoError(t, err)
 
 t.Run("tests with existing file", func(t *testing.T) {

@@ -908,7 +908,7 @@ func Test_rasReader(t *testing.T) {
 expectedErrMsg string
 }{
 {"error if file does not exist", fileobj.Name(), "no such file or directory"},
-{"error if path does not point to regular file", os.TempDir(), "is a directory"},
+{"error if path does not point to regular file", t.TempDir(), "is a directory"},
 {"error if file does not exist", "/not/path/unreal/path", "no such file or directory"},
 }
@@ -303,7 +303,7 @@ func TestConsumerGroupHandlerLifecycle(t *testing.T) {
 }
 cg := newConsumerGroupHandler(acc, 1, &parser, testutil.Logger{})
 
-ctx, cancel := context.WithCancel(context.Background())
+ctx, cancel := context.WithCancel(t.Context())
 defer cancel()
 
 session := &FakeConsumerGroupSession{

@@ -337,7 +337,7 @@ func TestConsumerGroupHandlerConsumeClaim(t *testing.T) {
 require.NoError(t, parser.Init())
 cg := newConsumerGroupHandler(acc, 1, &parser, testutil.Logger{})
 
-ctx, cancel := context.WithCancel(context.Background())
+ctx, cancel := context.WithCancel(t.Context())
 defer cancel()
 
 session := &FakeConsumerGroupSession{ctx: ctx}

@@ -461,10 +461,9 @@ func TestConsumerGroupHandlerHandle(t *testing.T) {
 cg.maxMessageLen = tt.maxMessageLen
 cg.topicTag = tt.topicTag
 
-ctx := context.Background()
-session := &FakeConsumerGroupSession{ctx: ctx}
+session := &FakeConsumerGroupSession{ctx: t.Context()}
 
-require.NoError(t, cg.reserve(ctx))
+require.NoError(t, cg.reserve(t.Context()))
 err := cg.handle(session, tt.msg)
 if tt.expectedHandleError != "" {
 require.Error(t, err)
@@ -584,12 +583,11 @@ func TestKafkaRoundTripIntegration(t *testing.T) {
 
 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
-ctx := context.Background()
-kafkaContainer, err := kafkacontainer.Run(ctx, "confluentinc/confluent-local:7.5.0")
+kafkaContainer, err := kafkacontainer.Run(t.Context(), "confluentinc/confluent-local:7.5.0")
 require.NoError(t, err)
-defer kafkaContainer.Terminate(ctx) //nolint:errcheck // ignored
+defer kafkaContainer.Terminate(t.Context()) //nolint:errcheck // ignored
 
-brokers, err := kafkaContainer.Brokers(ctx)
+brokers, err := kafkaContainer.Brokers(t.Context())
 require.NoError(t, err)
 
 // Make kafka output

@@ -661,12 +659,11 @@ func TestKafkaTimestampSourceIntegration(t *testing.T) {
 
 for _, source := range []string{"metric", "inner", "outer"} {
 t.Run(source, func(t *testing.T) {
-ctx := context.Background()
-kafkaContainer, err := kafkacontainer.Run(ctx, "confluentinc/confluent-local:7.5.0")
+kafkaContainer, err := kafkacontainer.Run(t.Context(), "confluentinc/confluent-local:7.5.0")
 require.NoError(t, err)
-defer kafkaContainer.Terminate(ctx) //nolint:errcheck // ignored
+defer kafkaContainer.Terminate(t.Context()) //nolint:errcheck // ignored
 
-brokers, err := kafkaContainer.Brokers(ctx)
+brokers, err := kafkaContainer.Brokers(t.Context())
 require.NoError(t, err)
 
 // Make kafka output
@@ -726,21 +723,20 @@ func TestStartupErrorBehaviorErrorIntegration(t *testing.T) {
 }
 
 // Startup the container
-ctx := context.Background()
-container, err := kafkacontainer.Run(ctx, "confluentinc/confluent-local:7.5.0")
+container, err := kafkacontainer.Run(t.Context(), "confluentinc/confluent-local:7.5.0")
 require.NoError(t, err)
-defer container.Terminate(ctx) //nolint:errcheck // ignored
+defer container.Terminate(t.Context()) //nolint:errcheck // ignored
 
-brokers, err := container.Brokers(ctx)
+brokers, err := container.Brokers(t.Context())
 require.NoError(t, err)
 
 // Pause the container for simulating connectivity issues
 containerID := container.GetContainerID()
 provider, err := testcontainers.NewDockerProvider()
 require.NoError(t, err)
-require.NoError(t, provider.Client().ContainerPause(ctx, containerID))
+require.NoError(t, provider.Client().ContainerPause(t.Context(), containerID))
 //nolint:errcheck // Ignore the returned error as we cannot do anything about it anyway
-defer provider.Client().ContainerUnpause(ctx, containerID)
+defer provider.Client().ContainerUnpause(t.Context(), containerID)
 
 // Setup the plugin and connect to the broker
 plugin := &KafkaConsumer{
@@ -780,21 +776,20 @@ func TestStartupErrorBehaviorIgnoreIntegration(t *testing.T) {
 }
 
 // Startup the container
-ctx := context.Background()
-container, err := kafkacontainer.Run(ctx, "confluentinc/confluent-local:7.5.0")
+container, err := kafkacontainer.Run(t.Context(), "confluentinc/confluent-local:7.5.0")
 require.NoError(t, err)
-defer container.Terminate(ctx) //nolint:errcheck // ignored
+defer container.Terminate(t.Context()) //nolint:errcheck // ignored
 
-brokers, err := container.Brokers(ctx)
+brokers, err := container.Brokers(t.Context())
 require.NoError(t, err)
 
 // Pause the container for simulating connectivity issues
 containerID := container.GetContainerID()
 provider, err := testcontainers.NewDockerProvider()
 require.NoError(t, err)
-require.NoError(t, provider.Client().ContainerPause(ctx, containerID))
+require.NoError(t, provider.Client().ContainerPause(t.Context(), containerID))
 //nolint:errcheck // Ignore the returned error as we cannot do anything about it anyway
-defer provider.Client().ContainerUnpause(ctx, containerID)
+defer provider.Client().ContainerUnpause(t.Context(), containerID)
 
 // Setup the plugin and connect to the broker
 plugin := &KafkaConsumer{
@@ -840,21 +835,20 @@ func TestStartupErrorBehaviorRetryIntegration(t *testing.T) {
 }
 
 // Startup the container
-ctx := context.Background()
-container, err := kafkacontainer.Run(ctx, "confluentinc/confluent-local:7.5.0")
+container, err := kafkacontainer.Run(t.Context(), "confluentinc/confluent-local:7.5.0")
 require.NoError(t, err)
-defer container.Terminate(ctx) //nolint:errcheck // ignored
+defer container.Terminate(t.Context()) //nolint:errcheck // ignored
 
-brokers, err := container.Brokers(ctx)
+brokers, err := container.Brokers(t.Context())
 require.NoError(t, err)
 
 // Pause the container for simulating connectivity issues
 containerID := container.GetContainerID()
 provider, err := testcontainers.NewDockerProvider()
 require.NoError(t, err)
-require.NoError(t, provider.Client().ContainerPause(ctx, containerID))
+require.NoError(t, provider.Client().ContainerPause(t.Context(), containerID))
 //nolint:errcheck // Ignore the returned error as we cannot do anything about it anyway
-defer provider.Client().ContainerUnpause(ctx, containerID)
+defer provider.Client().ContainerUnpause(t.Context(), containerID)
 
 // Setup the plugin and connect to the broker
 plugin := &KafkaConsumer{
@@ -895,7 +889,7 @@ func TestStartupErrorBehaviorRetryIntegration(t *testing.T) {
 require.Equal(t, int64(2), model.StartupErrors.Get())
 
 // Unpause the container, now writes should succeed
-require.NoError(t, provider.Client().ContainerUnpause(ctx, containerID))
+require.NoError(t, provider.Client().ContainerUnpause(t.Context(), containerID))
 require.NoError(t, model.Gather(&acc))
 defer model.Stop()
 require.Equal(t, int64(2), model.StartupErrors.Get())
@@ -298,7 +298,7 @@ thp_collapse_alloc_failed 102214
 thp_split abcd`
 
 func makeFakeVMStatFile(t *testing.T, content []byte) string {
-tmpfile, err := os.CreateTemp("", "kernel_vmstat_test")
+tmpfile, err := os.CreateTemp(t.TempDir(), "kernel_vmstat_test")
 require.NoError(t, err)
 
 _, err = tmpfile.Write(content)
@@ -3,6 +3,7 @@ package leofs
 import (
 "os"
 "os/exec"
+"path/filepath"
 "runtime"
 "testing"

@@ -131,7 +132,7 @@ func testMain(t *testing.T, code, endpoint string, serverType serverType) {
 }
 
 // Build the fake snmpwalk for test
-src := os.TempDir() + "/test.go"
+src := filepath.Join(t.TempDir(), "test.go")
 require.NoError(t, os.WriteFile(src, []byte(code), 0600))
 defer os.Remove(src)
@@ -109,18 +109,7 @@ func TestGrokParseLogFiles(t *testing.T) {
 }
 
 func TestGrokParseLogFilesAppearLater(t *testing.T) {
-// TODO: t.TempDir will fail on Windows because it could not remove
-// test.a.log file. This seems like an issue with the tail package, it
-// is not closing the os.File properly on Stop.
-// === RUN TestGrokParseLogFilesAppearLater
-// 2022/04/16 11:05:13 D! [] Tail added for file: C:\Users\circleci\AppData\Local\Temp\TestGrokParseLogFilesAppearLater3687440534\001\test_a.log
-// 2022/04/16 11:05:13 D! [] Tail dropped for file: C:\Users\circleci\AppData\Local\Temp\TestGrokParseLogFilesAppearLater3687440534\001\test_a.log
-// testing.go:1090: TempDir RemoveAll cleanup:
-// CreateFile C:\Users\circleci\AppData\Local\Temp\TestGrokParseLogFilesAppearLater3687440534\001: Access is denied.
-// --- FAIL: TestGrokParseLogFilesAppearLater (1.68s)
-emptydir, err := os.MkdirTemp("", "TestGrokParseLogFilesAppearLater")
-require.NoError(t, err)
-defer os.RemoveAll(emptydir)
+emptydir := t.TempDir()
 
 logparser := &LogParser{
 Log: testutil.Logger{},
@@ -172,13 +172,11 @@ disk I/O size ios % cum % | ios % cum %
 `
 
 func TestLustre2GeneratesHealth(t *testing.T) {
-tmpDir, err := os.MkdirTemp("", "telegraf-lustre")
-require.NoError(t, err)
-defer os.RemoveAll(tmpDir)
+tmpDir := t.TempDir()
 
 rootdir := tmpDir + "/telegraf"
 sysdir := rootdir + "/sys/fs/lustre/"
-err = os.MkdirAll(sysdir, 0750)
+err := os.MkdirAll(sysdir, 0750)
 require.NoError(t, err)
 
 err = os.WriteFile(sysdir+"health_check", []byte("healthy\n"), 0640)

@@ -202,16 +200,14 @@ func TestLustre2GeneratesHealth(t *testing.T) {
 }
 
 func TestLustre2GeneratesMetrics(t *testing.T) {
-tmpDir, err := os.MkdirTemp("", "telegraf-lustre")
-require.NoError(t, err)
-defer os.RemoveAll(tmpDir)
+tmpDir := t.TempDir()
 
 rootdir := tmpDir + "/telegraf"
 tempdir := rootdir + "/proc/fs/lustre/"
 ostName := "OST0001"
 
 mdtdir := tempdir + "/mdt/"
-err = os.MkdirAll(mdtdir+"/"+ostName, 0750)
+err := os.MkdirAll(mdtdir+"/"+ostName, 0750)
 require.NoError(t, err)
 
 osddir := tempdir + "/osd-ldiskfs/"

@@ -273,16 +269,14 @@ func TestLustre2GeneratesMetrics(t *testing.T) {
 }
 
 func TestLustre2GeneratesClientMetrics(t *testing.T) {
-tmpDir, err := os.MkdirTemp("", "telegraf-lustre-client")
-require.NoError(t, err)
-defer os.RemoveAll(tmpDir)
+tmpDir := t.TempDir()
 
 rootdir := tmpDir + "/telegraf"
 tempdir := rootdir + "/proc/fs/lustre/"
 ostName := "OST0001"
 clientName := "10.2.4.27@o2ib1"
 mdtdir := tempdir + "/mdt/"
-err = os.MkdirAll(mdtdir+"/"+ostName+"/exports/"+clientName, 0750)
+err := os.MkdirAll(mdtdir+"/"+ostName+"/exports/"+clientName, 0750)
 require.NoError(t, err)
 
 obddir := tempdir + "/obdfilter/"
@@ -338,9 +332,7 @@ func TestLustre2GeneratesClientMetrics(t *testing.T) {
 }
 
 func TestLustre2GeneratesJobstatsMetrics(t *testing.T) {
-tmpDir, err := os.MkdirTemp("", "telegraf-lustre-jobstats")
-require.NoError(t, err)
-defer os.RemoveAll(tmpDir)
+tmpDir := t.TempDir()
 
 rootdir := tmpDir + "/telegraf"
 tempdir := rootdir + "/proc/fs/lustre/"

@@ -348,7 +340,7 @@ func TestLustre2GeneratesJobstatsMetrics(t *testing.T) {
 jobNames := []string{"cluster-testjob1", "testjob2"}
 
 mdtdir := tempdir + "/mdt/"
-err = os.MkdirAll(mdtdir+"/"+ostName, 0750)
+err := os.MkdirAll(mdtdir+"/"+ostName, 0750)
 require.NoError(t, err)
 
 obddir := tempdir + "/obdfilter/"

@@ -499,16 +491,14 @@ func TestLustre2CanParseConfiguration(t *testing.T) {
 }
 
 func TestLustre2GeneratesBrwstatsMetrics(t *testing.T) {
-tmpdir, err := os.MkdirTemp("", "telegraf-lustre-brwstats")
-require.NoError(t, err)
-defer os.RemoveAll(tmpdir)
+tmpdir := t.TempDir()
 
 rootdir := tmpdir + "/telegraf"
 tempdir := rootdir + "/proc/fs/lustre"
 ostname := "OST0001"
 
 osddir := tempdir + "/osd-ldiskfs/"
-err = os.MkdirAll(osddir+"/"+ostname, 0750)
+err := os.MkdirAll(osddir+"/"+ostname, 0750)
 require.NoError(t, err)
 
 err = os.WriteFile(osddir+"/"+ostname+"/brw_stats", []byte(brwstatsProcContents), 0640)
@@ -574,9 +564,7 @@ func TestLustre2GeneratesBrwstatsMetrics(t *testing.T) {
 }
 
 func TestLustre2GeneratesEvictionMetrics(t *testing.T) {
-rootdir, err := os.MkdirTemp("", "telegraf-lustre-evictions")
-require.NoError(t, err)
-defer os.RemoveAll(rootdir)
+rootdir := t.TempDir()
 
 // setup files in mock sysfs
 type fileEntry struct {

@@ -600,7 +588,7 @@ func TestLustre2GeneratesEvictionMetrics(t *testing.T) {
 // gather metrics
 m := &Lustre2{rootdir: rootdir}
 var acc testutil.Accumulator
-err = m.Gather(&acc)
+err := m.Gather(&acc)
 require.NoError(t, err)
 
 // compare with expectations
@@ -191,7 +191,7 @@ func TestSubscribeClientIntegration(t *testing.T) {
 err = o.connect()
 require.NoError(t, err, "Connection failed")
 
-ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
+ctx, cancel := context.WithTimeout(t.Context(), time.Second*10)
 defer cancel()
 res, err := o.startStreamValues(ctx)
 require.Equal(t, opcua.Connected, o.State())

@@ -334,7 +334,7 @@ func TestSubscribeClientIntegrationAdditionalFields(t *testing.T) {
 
 require.NoError(t, o.connect(), "Connection failed")
 
-ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
+ctx, cancel := context.WithTimeout(t.Context(), time.Second*10)
 defer cancel()
 res, err := o.startStreamValues(ctx)
 require.NoError(t, err)
@@ -2,7 +2,6 @@ package opensearch_query
 
 import (
 "bufio"
-"context"
 "encoding/json"
 "fmt"
 "os"

@@ -609,7 +608,7 @@ func setupIntegrationTest(t *testing.T, image string) (*testutil.Container, *Ope
 }
 
 e = indexer.Add(
-context.Background(),
+t.Context(),
 opensearchutil.BulkIndexerItem{
 Index:  testindex,
 Action: "index",

@@ -624,7 +623,7 @@ func setupIntegrationTest(t *testing.T, image string) (*testutil.Container, *Ope
 return &container, o, err
 }
 
-if err := indexer.Close(context.Background()); err != nil {
+if err := indexer.Close(t.Context()); err != nil {
 return &container, o, err
 }
@@ -44,7 +44,7 @@ func TestOpenTelemetry(t *testing.T) {
 defer plugin.Stop()
 
 // Setup the OpenTelemetry exporter
-ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ctx, cancel := context.WithTimeout(t.Context(), time.Second)
 defer cancel()
 
 exporter, err := otlpmetricgrpc.New(ctx,

@@ -196,7 +196,7 @@ func TestCases(t *testing.T) {
 
 // Send all data to the plugin
 addr := plugin.listener.Addr().String()
-ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ctx, cancel := context.WithTimeout(t.Context(), time.Second)
 defer cancel()
 
 grpcClient, err := grpc.NewClient(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
@@ -5,7 +5,6 @@ package powerdns
 import (
 "fmt"
 "net"
-"os"
 "path/filepath"
 "testing"

@@ -37,7 +36,7 @@ func serverSocket(l net.Listener) {
 func TestPowerdnsGeneratesMetrics(t *testing.T) {
 // We create a fake server to return test data
 randomNumber := int64(5239846799706671610)
-sockname := filepath.Join(os.TempDir(), fmt.Sprintf("pdns%d.controlsocket", randomNumber))
+sockname := filepath.Join(t.TempDir(), fmt.Sprintf("pdns%d.controlsocket", randomNumber))
 socket, err := net.Listen("unix", sockname)
 if err != nil {
 t.Fatal("Cannot initialize server on port ")
@@ -38,7 +38,7 @@ func TestChildPattern(t *testing.T) {
 
 // Spawn two child processes and get their PIDs
 expected := make([]pid, 0, 2)
-ctx, cancel := context.WithCancel(context.Background())
+ctx, cancel := context.WithCancel(t.Context())
 defer cancel()
 
 // First process
@@ -1,7 +1,6 @@
 package radius
 
 import (
-"context"
 "errors"
 "net"
 "path/filepath"

@@ -79,7 +78,7 @@ func TestRadiusLocal(t *testing.T) {
 require.Equal(t, radius.CodeAccessAccept.String(), acc.TagValue("radius", "response_code"))
 require.True(t, acc.HasInt64Field("radius", "responsetime_ms"))
 
-if err := server.Shutdown(context.Background()); err != nil {
+if err := server.Shutdown(t.Context()); err != nil {
 require.NoError(t, err, "failed to properly shutdown local radius server")
 }
 }

@@ -149,7 +148,7 @@ func TestRadiusNASIP(t *testing.T) {
 require.Equal(t, radius.CodeAccessAccept.String(), acc.TagValue("radius", "response_code"))
 require.True(t, acc.HasInt64Field("radius", "responsetime_ms"))
 
-if err := server.Shutdown(context.Background()); err != nil {
+if err := server.Shutdown(t.Context()); err != nil {
 require.NoError(t, err, "failed to properly shutdown local radius server")
 }
 }
@@ -3,7 +3,6 @@ package redis_sentinel
 import (
 "bufio"
 "bytes"
-"context"
 "fmt"
 "os"
 "testing"

@@ -26,11 +25,10 @@ func TestRedisSentinelConnectIntegration(t *testing.T) {
 t.Skip("Skipping integration test in short mode")
 }
 
-ctx := context.Background()
-net, err := network.New(ctx)
+net, err := network.New(t.Context())
 require.NoError(t, err)
 defer func() {
-require.NoError(t, net.Remove(ctx), "terminating network failed")
+require.NoError(t, net.Remove(t.Context()), "terminating network failed")
 }()
 
 redis := createRedisContainer(net.Name)
@@ -130,10 +130,14 @@ func TestSocketListener(t *testing.T) {
 }
 
 // Create a socket
+// The Maximum length of the socket path is 104/108 characters, path created with t.TempDir() is too long for some cases
+// (it combines test name with subtest name and some random numbers in the path).
+// Therefore, in this case, it is safer to stick with `os.MkdirTemp()`.
+//nolint:usetesting // Ignore "os.CreateTemp("", ...) could be replaced by os.CreateTemp(t.TempDir(), ...) in TestSocketListener" finding.
 sock, err := os.CreateTemp("", "sock-")
 require.NoError(t, err)
-defer sock.Close()
 defer os.Remove(sock.Name())
+defer sock.Close()
 serverAddr = sock.Name()
 }

@@ -277,10 +281,10 @@ func TestLargeReadBufferUnixgram(t *testing.T) {
 require.NoError(t, bufsize.UnmarshalText([]byte("100KiB")))
 
 // Create a socket
-sock, err := os.CreateTemp("", "sock-")
+sock, err := os.CreateTemp(t.TempDir(), "sock-")
 require.NoError(t, err)
 defer sock.Close()
 defer os.Remove(sock.Name())
 
 var serverAddr = sock.Name()
 
 // Setup plugin with a sufficient read buffer
@@ -194,10 +194,9 @@ func TestGrokParseLogFilesWithMultiline(t *testing.T) {
 }
 
 func TestGrokParseLogFilesWithMultilineTimeout(t *testing.T) {
-tmpfile, err := os.CreateTemp("", "")
+tmpfile, err := os.CreateTemp(t.TempDir(), "")
 require.NoError(t, err)
 defer tmpfile.Close()
-defer os.Remove(tmpfile.Name())
 
 // This seems necessary in order to get the test to read the following lines.
 _, err = tmpfile.WriteString("[04/Jun/2016:12:41:48 +0100] INFO HelloExample: This is fluff\r\n")

@@ -605,10 +604,9 @@ func TestCharacterEncoding(t *testing.T) {
 }
 
 func TestTailEOF(t *testing.T) {
-tmpfile, err := os.CreateTemp("", "")
+tmpfile, err := os.CreateTemp(t.TempDir(), "")
 require.NoError(t, err)
 defer tmpfile.Close()
-defer os.Remove(tmpfile.Name())
 _, err = tmpfile.WriteString("cpu usage_idle=100\r\n")
 require.NoError(t, err)
 require.NoError(t, tmpfile.Sync())

@@ -645,10 +643,9 @@ func TestTailEOF(t *testing.T) {
 
 func TestCSVBehavior(t *testing.T) {
 // Prepare the input file
-input, err := os.CreateTemp("", "")
+input, err := os.CreateTemp(t.TempDir(), "")
 require.NoError(t, err)
 defer input.Close()
-defer os.Remove(input.Name())
 // Write header
 _, err = input.WriteString("a,b\n")
 require.NoError(t, err)
@@ -24,7 +24,7 @@ import (
 func TestBadServer(t *testing.T) {
 // Create and start a server without interactions
 server := &mockServer{}
-ctx, cancel := context.WithCancel(context.Background())
+ctx, cancel := context.WithCancel(t.Context())
 addr, err := server.listen(ctx)
 require.NoError(t, err)
 defer cancel()

@@ -78,7 +78,7 @@ func TestCases(t *testing.T) {
 require.NoError(t, err)
 
 // Start the server
-ctx, cancel := context.WithCancel(context.Background())
+ctx, cancel := context.WithCancel(t.Context())
 addr, err := server.listen(ctx)
 require.NoError(t, err)
 defer cancel()
@@ -224,21 +224,19 @@ func TestMaxQuery(t *testing.T) {
 
 v := defaultVSphere()
 v.MaxQueryMetrics = 256
-ctx := context.Background()
-c, err := newClient(ctx, s.URL, v)
+c, err := newClient(t.Context(), s.URL, v)
 require.NoError(t, err)
 require.Equal(t, 256, v.MaxQueryMetrics)
 
 om := object.NewOptionManager(c.client.Client, *c.client.Client.ServiceContent.Setting)
-err = om.Update(ctx, []types.BaseOptionValue{&types.OptionValue{
+err = om.Update(t.Context(), []types.BaseOptionValue{&types.OptionValue{
 Key:   "config.vpxd.stats.maxQueryMetrics",
 Value: "42",
 }})
 require.NoError(t, err)
 
 v.MaxQueryMetrics = 256
-ctx = context.Background()
-c2, err := newClient(ctx, s.URL, v)
+c2, err := newClient(t.Context(), s.URL, v)
 require.NoError(t, err)
 require.Equal(t, 42, v.MaxQueryMetrics)
 c.close()
@@ -271,58 +269,56 @@ func TestFinder(t *testing.T) {
 defer s.Close()
 
 v := defaultVSphere()
-ctx := context.Background()
-
-c, err := newClient(ctx, s.URL, v)
+c, err := newClient(t.Context(), s.URL, v)
 require.NoError(t, err)
 
 f := finder{c}
 
 var dc []mo.Datacenter
-err = f.find(ctx, "Datacenter", "/DC0", &dc)
+err = f.find(t.Context(), "Datacenter", "/DC0", &dc)
 require.NoError(t, err)
 require.Len(t, dc, 1)
 require.Equal(t, "DC0", dc[0].Name)
 
 var host []mo.HostSystem
-err = f.find(ctx, "HostSystem", "/DC0/host/DC0_H0/DC0_H0", &host)
+err = f.find(t.Context(), "HostSystem", "/DC0/host/DC0_H0/DC0_H0", &host)
 require.NoError(t, err)
 require.Len(t, host, 1)
 require.Equal(t, "DC0_H0", host[0].Name)
 
 host = make([]mo.HostSystem, 0)
-err = f.find(ctx, "HostSystem", "/DC0/host/DC0_C0/DC0_C0_H0", &host)
+err = f.find(t.Context(), "HostSystem", "/DC0/host/DC0_C0/DC0_C0_H0", &host)
 require.NoError(t, err)
 require.Len(t, host, 1)
 require.Equal(t, "DC0_C0_H0", host[0].Name)
 
 resourcepool := make([]mo.ResourcePool, 0)
-err = f.find(ctx, "ResourcePool", "/DC0/host/DC0_C0/Resources/DC0_C0_RP0", &resourcepool)
+err = f.find(t.Context(), "ResourcePool", "/DC0/host/DC0_C0/Resources/DC0_C0_RP0", &resourcepool)
 require.NoError(t, err)
 require.Len(t, host, 1)
 require.Equal(t, "DC0_C0_H0", host[0].Name)
 
 host = make([]mo.HostSystem, 0)
-err = f.find(ctx, "HostSystem", "/DC0/host/DC0_C0/*", &host)
+err = f.find(t.Context(), "HostSystem", "/DC0/host/DC0_C0/*", &host)
 require.NoError(t, err)
 require.Len(t, host, 3)
 
 var vm []mo.VirtualMachine
-testLookupVM(ctx, t, &f, "/DC0/vm/DC0_H0_VM0", 1, "")
-testLookupVM(ctx, t, &f, "/DC0/vm/DC0_C0*", 2, "")
-testLookupVM(ctx, t, &f, "/DC0/*/DC0_H0_VM0", 1, "DC0_H0_VM0")
-testLookupVM(ctx, t, &f, "/DC0/*/DC0_H0_*", 2, "")
-testLookupVM(ctx, t, &f, "/DC0/**/DC0_H0_VM*", 2, "")
-testLookupVM(ctx, t, &f, "/DC0/**", 4, "")
-testLookupVM(ctx, t, &f, "/DC1/**", 4, "")
-testLookupVM(ctx, t, &f, "/**", 8, "")
-testLookupVM(ctx, t, &f, "/**/vm/**", 8, "")
-testLookupVM(ctx, t, &f, "/*/host/**/*DC*", 8, "")
-testLookupVM(ctx, t, &f, "/*/host/**/*DC*VM*", 8, "")
-testLookupVM(ctx, t, &f, "/*/host/**/*DC*/*/*DC*", 4, "")
+testLookupVM(t.Context(), t, &f, "/DC0/vm/DC0_H0_VM0", 1, "")
+testLookupVM(t.Context(), t, &f, "/DC0/vm/DC0_C0*", 2, "")
+testLookupVM(t.Context(), t, &f, "/DC0/*/DC0_H0_VM0", 1, "DC0_H0_VM0")
+testLookupVM(t.Context(), t, &f, "/DC0/*/DC0_H0_*", 2, "")
+testLookupVM(t.Context(), t, &f, "/DC0/**/DC0_H0_VM*", 2, "")
+testLookupVM(t.Context(), t, &f, "/DC0/**", 4, "")
+testLookupVM(t.Context(), t, &f, "/DC1/**", 4, "")
+testLookupVM(t.Context(), t, &f, "/**", 8, "")
+testLookupVM(t.Context(), t, &f, "/**/vm/**", 8, "")
+testLookupVM(t.Context(), t, &f, "/*/host/**/*DC*", 8, "")
+testLookupVM(t.Context(), t, &f, "/*/host/**/*DC*VM*", 8, "")
+testLookupVM(t.Context(), t, &f, "/*/host/**/*DC*/*/*DC*", 4, "")
 
 vm = make([]mo.VirtualMachine, 0)
-err = f.findAll(ctx, "VirtualMachine", []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"}, nil, &vm)
+err = f.findAll(t.Context(), "VirtualMachine", []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"}, nil, &vm)
 require.NoError(t, err)
 require.Len(t, vm, 4)
@@ -333,7 +329,7 @@ func TestFinder(t *testing.T) {
 resType: "VirtualMachine",
 }
 vm = make([]mo.VirtualMachine, 0)
-require.NoError(t, rf.findAll(ctx, &vm))
+require.NoError(t, rf.findAll(t.Context(), &vm))
 require.Len(t, vm, 3)
 
 rf = resourceFilter{

@@ -343,7 +339,7 @@ func TestFinder(t *testing.T) {
 resType: "VirtualMachine",
 }
 vm = make([]mo.VirtualMachine, 0)
-require.NoError(t, rf.findAll(ctx, &vm))
+require.NoError(t, rf.findAll(t.Context(), &vm))
 require.Empty(t, vm)
 
 rf = resourceFilter{

@@ -353,7 +349,7 @@ func TestFinder(t *testing.T) {
 resType: "VirtualMachine",
 }
 vm = make([]mo.VirtualMachine, 0)
-require.NoError(t, rf.findAll(ctx, &vm))
+require.NoError(t, rf.findAll(t.Context(), &vm))
 require.Empty(t, vm)
 
 rf = resourceFilter{

@@ -363,7 +359,7 @@ func TestFinder(t *testing.T) {
 resType: "VirtualMachine",
 }
 vm = make([]mo.VirtualMachine, 0)
-require.NoError(t, rf.findAll(ctx, &vm))
+require.NoError(t, rf.findAll(t.Context(), &vm))
 require.Len(t, vm, 8)
 
 rf = resourceFilter{

@@ -373,7 +369,7 @@ func TestFinder(t *testing.T) {
 resType: "VirtualMachine",
 }
 vm = make([]mo.VirtualMachine, 0)
-require.NoError(t, rf.findAll(ctx, &vm))
+require.NoError(t, rf.findAll(t.Context(), &vm))
 require.Len(t, vm, 4)
 }
@@ -387,30 +383,27 @@ func TestFolders(t *testing.T) {
 defer m.Remove()
 defer s.Close()
 
-ctx := context.Background()
-
 v := defaultVSphere()
 
-c, err := newClient(ctx, s.URL, v)
+c, err := newClient(t.Context(), s.URL, v)
 require.NoError(t, err)
 
 f := finder{c}
 
 var folder []mo.Folder
-err = f.find(ctx, "Folder", "/F0", &folder)
+err = f.find(t.Context(), "Folder", "/F0", &folder)
 require.NoError(t, err)
 require.Len(t, folder, 1)
 require.Equal(t, "F0", folder[0].Name)
 
 var dc []mo.Datacenter
-err = f.find(ctx, "Datacenter", "/F0/DC1", &dc)
+err = f.find(t.Context(), "Datacenter", "/F0/DC1", &dc)
 require.NoError(t, err)
 require.Len(t, dc, 1)
 require.Equal(t, "DC1", dc[0].Name)
 
-testLookupVM(ctx, t, &f, "/F0/DC0/vm/**/F*", 0, "")
-testLookupVM(ctx, t, &f, "/F0/DC1/vm/**/F*/*VM*", 4, "")
-testLookupVM(ctx, t, &f, "/F0/DC1/vm/**/F*/**", 4, "")
+testLookupVM(t.Context(), t, &f, "/F0/DC0/vm/**/F*", 0, "")
+testLookupVM(t.Context(), t, &f, "/F0/DC1/vm/**/F*/*VM*", 4, "")
+testLookupVM(t.Context(), t, &f, "/F0/DC1/vm/**/F*/**", 4, "")
 }
 
 func TestVsanCmmds(t *testing.T) {
@@ -420,18 +413,16 @@ func TestVsanCmmds(t *testing.T) {
 defer s.Close()
 
 v := defaultVSphere()
-ctx := context.Background()
-
-c, err := newClient(ctx, s.URL, v)
+c, err := newClient(t.Context(), s.URL, v)
 require.NoError(t, err)
 
 f := finder{c}
 var clusters []mo.ClusterComputeResource
-err = f.findAll(ctx, "ClusterComputeResource", []string{"/**"}, nil, &clusters)
+err = f.findAll(t.Context(), "ClusterComputeResource", []string{"/**"}, nil, &clusters)
 require.NoError(t, err)
 
 clusterObj := object.NewClusterComputeResource(c.client.Client, clusters[0].Reference())
-_, err = getCmmdsMap(ctx, c.client.Client, clusterObj)
+_, err = getCmmdsMap(t.Context(), c.client.Client, clusterObj)
 require.Error(t, err)
 }
@@ -472,13 +463,13 @@ func TestDisconnectedServerBehavior(t *testing.T) {
 require.NoError(t, err)
 v := defaultVSphere()
 v.DisconnectedServersBehavior = "error"
-_, err = newEndpoint(context.Background(), v, u, v.Log)
+_, err = newEndpoint(t.Context(), v, u, v.Log)
 require.Error(t, err)
 v.DisconnectedServersBehavior = "ignore"
-_, err = newEndpoint(context.Background(), v, u, v.Log)
+_, err = newEndpoint(t.Context(), v, u, v.Log)
 require.NoError(t, err)
 v.DisconnectedServersBehavior = "something else"
-_, err = newEndpoint(context.Background(), v, u, v.Log)
+_, err = newEndpoint(t.Context(), v, u, v.Log)
 require.Error(t, err)
 require.Equal(t, `"something else" is not a valid value for disconnected_servers_behavior`, err.Error())
 }
@@ -520,7 +511,7 @@ func testCollection(t *testing.T, excludeClusters bool) {
 require.Emptyf(t, acc.Errors, "Errors found: %s", acc.Errors)
 require.NotEmpty(t, acc.Metrics, "No metrics were collected")
 cache := make(map[string]string)
-client, err := v.endpoints[0].clientFactory.getClient(context.Background())
+client, err := v.endpoints[0].clientFactory.getClient(t.Context())
 require.NoError(t, err)
 hostCache := make(map[string]string)
 for _, m := range acc.Metrics {

@@ -534,17 +525,17 @@ func testCollection(t *testing.T, excludeClusters bool) {
 // We have to follow the host parent path to locate a cluster. Look up the host!
 finder := finder{client}
 var hosts []mo.HostSystem
-err := finder.find(context.Background(), "HostSystem", "/**/"+hostName, &hosts)
+err := finder.find(t.Context(), "HostSystem", "/**/"+hostName, &hosts)
 require.NoError(t, err)
 require.NotEmpty(t, hosts)
 hostMoid = hosts[0].Reference().Value
 hostCache[hostName] = hostMoid
 }
-if isInCluster(v, client, cache, "HostSystem", hostMoid) { // If the VM lives in a cluster
+if isInCluster(t, v, client, cache, "HostSystem", hostMoid) { // If the VM lives in a cluster
 mustContainAll(t, m.Tags, []string{"clustername"})
 }
 } else if strings.HasPrefix(m.Measurement, "vsphere.host.") {
-if isInCluster(v, client, cache, "HostSystem", m.Tags["moid"]) { // If the host lives in a cluster
+if isInCluster(t, v, client, cache, "HostSystem", m.Tags["moid"]) { // If the host lives in a cluster
 mustContainAll(t, m.Tags, []string{"esxhostname", "clustername", "moid", "dcname"})
 } else {
 mustContainAll(t, m.Tags, []string{"esxhostname", "moid", "dcname"})

@@ -558,13 +549,12 @@ func testCollection(t *testing.T, excludeClusters bool) {
 require.Empty(t, mustHaveMetrics, "Some metrics were not found")
 }
 
-func isInCluster(v *VSphere, client *client, cache map[string]string, resourceKind, moid string) bool {
-ctx := context.Background()
+func isInCluster(t *testing.T, v *VSphere, client *client, cache map[string]string, resourceKind, moid string) bool {
 ref := types.ManagedObjectReference{
 Type:  resourceKind,
 Value: moid,
 }
-_, ok := v.endpoints[0].getAncestorName(ctx, client, "ClusterComputeResource", cache, ref)
+_, ok := v.endpoints[0].getAncestorName(t.Context(), client, "ClusterComputeResource", cache, ref)
 return ok
 }
@@ -39,11 +39,9 @@ var _ telegraf.Input = &X509Cert{}
 func TestGatherRemoteIntegration(t *testing.T) {
 t.Skip("Skipping network-dependent test due to race condition when test-all")
 
-tmpfile, err := os.CreateTemp("", "example")
+tmpfile, err := os.CreateTemp(t.TempDir(), "example")
 require.NoError(t, err)
 
-defer os.Remove(tmpfile.Name())
-
 _, err = tmpfile.WriteString(pki.ReadServerCert())
 require.NoError(t, err)

@@ -164,7 +162,7 @@ func TestGatherLocal(t *testing.T) {
 
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-f, err := os.CreateTemp("", "x509_cert")
+f, err := os.CreateTemp(t.TempDir(), "x509_cert")
 require.NoError(t, err)
 
 _, err = f.WriteString(test.content)

@@ -176,8 +174,6 @@ func TestGatherLocal(t *testing.T) {
 
 require.NoError(t, f.Close())
 
-defer os.Remove(f.Name())
-
 sc := X509Cert{
 Sources: []string{f.Name()},
 Log:     testutil.Logger{},

@@ -197,7 +193,7 @@ func TestTags(t *testing.T) {
 func TestTags(t *testing.T) {
 cert := fmt.Sprintf("%s\n%s", pki.ReadServerCert(), pki.ReadCACert())
 
-f, err := os.CreateTemp("", "x509_cert")
+f, err := os.CreateTemp(t.TempDir(), "x509_cert")
 require.NoError(t, err)
 
 _, err = f.WriteString(cert)
|
@ -246,16 +242,13 @@ func TestTags(t *testing.T) {
|
|||
func TestGatherExcludeRootCerts(t *testing.T) {
|
||||
cert := fmt.Sprintf("%s\n%s", pki.ReadServerCert(), pki.ReadCACert())
|
||||
|
||||
f, err := os.CreateTemp("", "x509_cert")
|
||||
f, err := os.CreateTemp(t.TempDir(), "x509_cert")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = f.WriteString(cert)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, f.Close())
|
||||
|
||||
defer os.Remove(f.Name())
|
||||
|
||||
sc := X509Cert{
|
||||
Sources: []string{f.Name()},
|
||||
ExcludeRootCerts: true,
|
||||
|
|
@ -283,16 +276,13 @@ func TestGatherChain(t *testing.T) {
|
|||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
f, err := os.CreateTemp("", "x509_cert")
|
||||
f, err := os.CreateTemp(t.TempDir(), "x509_cert")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = f.WriteString(test.content)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, f.Close())
|
||||
|
||||
defer os.Remove(f.Name())
|
||||
|
||||
sc := X509Cert{
|
||||
Sources: []string{f.Name()},
|
||||
Log: testutil.Logger{},
|
||||
|
|
@ -501,9 +491,7 @@ func TestCertificateSerialNumberRetainsLeadingZeroes(t *testing.T) {
|
|||
func TestClassification(t *testing.T) {
|
||||
start := time.Now()
|
||||
end := time.Now().AddDate(0, 0, 1)
|
||||
tmpDir, err := os.MkdirTemp("", "telegraf-x509-*")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create the CA certificate
|
||||
caPriv, err := rsa.GenerateKey(rand.Reader, 4096)
|
||||
|
|
|
|||
|
|
@@ -220,12 +220,9 @@ scatter_sg_table_retry 4 99221
 `
 
 func TestZfsPoolMetrics(t *testing.T) {
-tmpDir, err := os.MkdirTemp("", "telegraf-zfs-pool")
-require.NoError(t, err)
-defer os.RemoveAll(tmpDir)
-
+tmpDir := t.TempDir()
 testKstatPath := tmpDir + "/telegraf/proc/spl/kstat/zfs"
-err = os.MkdirAll(testKstatPath, 0750)
+err := os.MkdirAll(testKstatPath, 0750)
 require.NoError(t, err)
 
 err = os.MkdirAll(testKstatPath+"/HOME", 0750)

@@ -279,12 +276,10 @@ func TestZfsPoolMetrics(t *testing.T) {
 }
 
 func TestZfsGeneratesMetrics(t *testing.T) {
-tmpDir, err := os.MkdirTemp("", "telegraf-zfs-generates")
-require.NoError(t, err)
-defer os.RemoveAll(tmpDir)
+tmpDir := t.TempDir()
 
 testKstatPath := tmpDir + "/telegraf/proc/spl/kstat/zfs"
-err = os.MkdirAll(testKstatPath, 0750)
+err := os.MkdirAll(testKstatPath, 0750)
 require.NoError(t, err)
 
 err = os.MkdirAll(testKstatPath, 0750)
@@ -178,7 +178,7 @@ func TestCreateAzureDataExplorerTable(t *testing.T) {
 log.SetOutput(os.Stderr)
 }()
 
-err := plugin.createAzureDataExplorerTable(context.Background(), "test1")
+err := plugin.createAzureDataExplorerTable(t.Context(), "test1")
 
 output := buf.String()
@@ -281,8 +281,6 @@ func TestTemplateManagementEmptyTemplateIntegration(t *testing.T) {
 fmt.Sprintf("http://%s:%s", container.Address, container.Ports[servicePort]),
 }
 
-ctx := context.Background()
-
 e := &Elasticsearch{
 URLs:      urls,
 IndexName: "test-%Y.%m.%d",

@@ -294,7 +292,7 @@ func TestTemplateManagementEmptyTemplateIntegration(t *testing.T) {
 Log: testutil.Logger{},
 }
 
-err := e.manageTemplate(ctx)
+err := e.manageTemplate(t.Context())
 require.Error(t, err)
 }

@@ -322,7 +320,7 @@ func TestUseOpTypeCreate(t *testing.T) {
 Log: testutil.Logger{},
 }
 
-ctx, cancel := context.WithTimeout(context.Background(), time.Duration(e.Timeout))
+ctx, cancel := context.WithTimeout(t.Context(), time.Duration(e.Timeout))
 defer cancel()
 
 metrics := []telegraf.Metric{

@@ -365,7 +363,7 @@ func TestTemplateManagementIntegration(t *testing.T) {
 Log: testutil.Logger{},
 }
 
-ctx, cancel := context.WithTimeout(context.Background(), time.Duration(e.Timeout))
+ctx, cancel := context.WithTimeout(t.Context(), time.Duration(e.Timeout))
 defer cancel()
 
 err := e.Connect()
@ -39,8 +39,7 @@ func TestEmulatorIntegration(t *testing.T) {

// Setup the Azure Event Hub emulator environment
// See https://learn.microsoft.com/en-us/azure/event-hubs/test-locally-with-event-hub-emulator
ctx := context.Background()
azuriteContainer, err := azurite.Run(ctx, "mcr.microsoft.com/azure-storage/azurite:3.28.0")
azuriteContainer, err := azurite.Run(t.Context(), "mcr.microsoft.com/azure-storage/azurite:3.28.0")
require.NoError(t, err, "failed to start Azurite container")
defer func() {
if err := testcontainers.TerminateContainer(azuriteContainer); err != nil {

@ -48,10 +47,10 @@ func TestEmulatorIntegration(t *testing.T) {
}
}()

blobPort, err := azuriteContainer.MappedPort(ctx, azurite.BlobPort)
blobPort, err := azuriteContainer.MappedPort(t.Context(), azurite.BlobPort)
require.NoError(t, err)

metadataPort, err := azuriteContainer.MappedPort(ctx, azurite.TablePort)
metadataPort, err := azuriteContainer.MappedPort(t.Context(), azurite.TablePort)
require.NoError(t, err)

cfgfile, err := filepath.Abs(filepath.Join("testdata", "Config.json"))

@ -169,8 +168,7 @@ func TestReconnectIntegration(t *testing.T) {

// Setup the Azure Event Hub emulator environment
// See https://learn.microsoft.com/en-us/azure/event-hubs/test-locally-with-event-hub-emulator
ctx := context.Background()
azuriteContainer, err := azurite.Run(ctx, "mcr.microsoft.com/azure-storage/azurite:3.28.0")
azuriteContainer, err := azurite.Run(t.Context(), "mcr.microsoft.com/azure-storage/azurite:3.28.0")
require.NoError(t, err, "failed to start Azurite container")
defer func() {
if err := testcontainers.TerminateContainer(azuriteContainer); err != nil {

@ -178,10 +176,10 @@ func TestReconnectIntegration(t *testing.T) {
}
}()

blobPort, err := azuriteContainer.MappedPort(ctx, azurite.BlobPort)
blobPort, err := azuriteContainer.MappedPort(t.Context(), azurite.BlobPort)
require.NoError(t, err)

metadataPort, err := azuriteContainer.MappedPort(ctx, azurite.TablePort)
metadataPort, err := azuriteContainer.MappedPort(t.Context(), azurite.TablePort)
require.NoError(t, err)

cfgfile, err := filepath.Abs(filepath.Join("testdata", "Config.json"))
@ -3,7 +3,6 @@ package influxdb_test
import (
"bytes"
"compress/gzip"
"context"
"fmt"
"io"
"log"

@ -257,11 +256,9 @@ func TestHTTP_CreateDatabase(t *testing.T) {
}
})

ctx := context.Background()

client, err := influxdb.NewHTTPClient(tt.config)
require.NoError(t, err)
err = client.CreateDatabase(ctx, client.Database())
err = client.CreateDatabase(t.Context(), client.Database())
if tt.errFunc != nil {
tt.errFunc(t, err)
} else {

@ -502,8 +499,6 @@ func TestHTTP_Write(t *testing.T) {
log.SetOutput(&b)
}

ctx := context.Background()

m := metric.New(
"cpu",
map[string]string{},

@ -516,7 +511,7 @@ func TestHTTP_Write(t *testing.T) {

client, err := influxdb.NewHTTPClient(tt.config)
require.NoError(t, err)
err = client.Write(ctx, metrics)
err = client.Write(t.Context(), metrics)
if tt.errFunc != nil {
tt.errFunc(t, err)
} else {

@ -552,8 +547,6 @@ func TestHTTP_WritePathPrefix(t *testing.T) {
u, err := url.Parse(fmt.Sprintf("http://%s/x/y/z", ts.Listener.Addr().String()))
require.NoError(t, err)

ctx := context.Background()

m := metric.New(
"cpu",
map[string]string{},

@ -572,9 +565,9 @@ func TestHTTP_WritePathPrefix(t *testing.T) {

client, err := influxdb.NewHTTPClient(cfg)
require.NoError(t, err)
err = client.CreateDatabase(ctx, cfg.Database)
err = client.CreateDatabase(t.Context(), cfg.Database)
require.NoError(t, err)
err = client.Write(ctx, metrics)
err = client.Write(t.Context(), metrics)
require.NoError(t, err)
}

@ -621,8 +614,6 @@ func TestHTTP_WriteContentEncodingGzip(t *testing.T) {
u, err := url.Parse(fmt.Sprintf("http://%s/", ts.Listener.Addr().String()))
require.NoError(t, err)

ctx := context.Background()

m := metric.New(
"cpu",
map[string]string{},

@ -643,7 +634,7 @@ func TestHTTP_WriteContentEncodingGzip(t *testing.T) {

client, err := influxdb.NewHTTPClient(cfg)
require.NoError(t, err)
err = client.Write(ctx, metrics)
err = client.Write(t.Context(), metrics)
require.NoError(t, err)
}

@ -706,11 +697,9 @@ func TestHTTP_UnixSocket(t *testing.T) {
}
})

ctx := context.Background()

client, err := influxdb.NewHTTPClient(tt.config)
require.NoError(t, err)
err = client.CreateDatabase(ctx, tt.config.Database)
err = client.CreateDatabase(t.Context(), tt.config.Database)
if tt.errFunc != nil {
tt.errFunc(t, err)
} else {

@ -787,10 +776,9 @@ func TestHTTP_WriteDatabaseTagWorksOnRetry(t *testing.T) {
),
}

ctx := context.Background()
err = client.Write(ctx, metrics)
err = client.Write(t.Context(), metrics)
require.NoError(t, err)
err = client.Write(ctx, metrics)
err = client.Write(t.Context(), metrics)
require.NoError(t, err)
}

@ -1015,8 +1003,7 @@ func TestDBRPTags(t *testing.T) {
client, err := influxdb.NewHTTPClient(tt.config)
require.NoError(t, err)

ctx := context.Background()
err = client.Write(ctx, tt.metrics)
err = client.Write(t.Context(), tt.metrics)
require.NoError(t, err)
})
}
@ -104,8 +104,7 @@ func TestUDP_Simple(t *testing.T) {
client, err := influxdb.NewUDPClient(config)
require.NoError(t, err)

ctx := context.Background()
err = client.Write(ctx, []telegraf.Metric{
err = client.Write(t.Context(), []telegraf.Metric{
getMetric(),
getMetric(),
})

@ -129,8 +128,7 @@ func TestUDP_DialError(t *testing.T) {
client, err := influxdb.NewUDPClient(config)
require.NoError(t, err)

ctx := context.Background()
err = client.Write(ctx, []telegraf.Metric{getMetric()})
err = client.Write(t.Context(), []telegraf.Metric{getMetric()})
require.Error(t, err)
}

@ -157,8 +155,7 @@ func TestUDP_WriteError(t *testing.T) {
client, err := influxdb.NewUDPClient(config)
require.NoError(t, err)

ctx := context.Background()
err = client.Write(ctx, []telegraf.Metric{getMetric()})
err = client.Write(t.Context(), []telegraf.Metric{getMetric()})
require.Error(t, err)
require.True(t, closed)
}

@ -222,8 +219,7 @@ func TestUDP_ErrorLogging(t *testing.T) {
client, err := influxdb.NewUDPClient(tt.config)
require.NoError(t, err)

ctx := context.Background()
err = client.Write(ctx, tt.metrics)
err = client.Write(t.Context(), tt.metrics)
require.NoError(t, err)
require.Contains(t, b.String(), tt.logContains)
})

@ -265,8 +261,7 @@ func TestUDP_WriteWithRealConn(t *testing.T) {
client, err := influxdb.NewUDPClient(config)
require.NoError(t, err)

ctx := context.Background()
err = client.Write(ctx, metrics)
err = client.Write(t.Context(), metrics)
require.NoError(t, err)

wg.Wait()
@ -1,7 +1,6 @@
package kafka

import (
"context"
"testing"
"time"

@ -25,12 +24,11 @@ func TestConnectAndWriteIntegration(t *testing.T) {
t.Skip("Skipping integration test in short mode")
}

ctx := context.Background()
kafkaContainer, err := kafkacontainer.Run(ctx, "confluentinc/confluent-local:7.5.0")
kafkaContainer, err := kafkacontainer.Run(t.Context(), "confluentinc/confluent-local:7.5.0")
require.NoError(t, err)
defer kafkaContainer.Terminate(ctx) //nolint:errcheck // ignored
defer kafkaContainer.Terminate(t.Context()) //nolint:errcheck // ignored

brokers, err := kafkaContainer.Brokers(ctx)
brokers, err := kafkaContainer.Brokers(t.Context())
require.NoError(t, err)

// Setup the plugin
@ -1,7 +1,6 @@
package nats

import (
"context"
_ "embed"
"fmt"
"path/filepath"

@ -132,9 +131,9 @@ func TestConnectAndWriteIntegration(t *testing.T) {
require.NoError(t, err)

if tc.nats.Jetstream != nil {
stream, err := tc.nats.jetstreamClient.Stream(context.Background(), tc.nats.Jetstream.Name)
stream, err := tc.nats.jetstreamClient.Stream(t.Context(), tc.nats.Jetstream.Name)
require.NoError(t, err)
si, err := stream.Info(context.Background())
si, err := stream.Info(t.Context())
require.NoError(t, err)

tc.streamConfigCompareFunc(t, si)
@ -300,7 +300,7 @@ func TestTemplateManagementIntegration(t *testing.T) {
Log: testutil.Logger{},
}

ctx, cancel := context.WithTimeout(context.Background(), time.Duration(e.Timeout))
ctx, cancel := context.WithTimeout(t.Context(), time.Duration(e.Timeout))
defer cancel()
var err error
e.indexTmpl, err = template.New("index").Parse(e.IndexName)
@ -300,7 +300,7 @@ func TestTemplateManagementIntegrationV2(t *testing.T) {
Log: testutil.Logger{},
}

ctx, cancel := context.WithTimeout(context.Background(), time.Duration(e.Timeout))
ctx, cancel := context.WithTimeout(t.Context(), time.Duration(e.Timeout))
defer cancel()
var err error
e.indexTmpl, err = template.New("index").Parse(e.IndexName)
@ -113,7 +113,7 @@ func newMockOtelService(t *testing.T) *mockOtelService {

grpcClient, err := grpc.NewClient(listener.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()))
require.NoError(t, err)
require.True(t, grpcClient.WaitForStateChange(context.Background(), connectivity.Connecting))
require.True(t, grpcClient.WaitForStateChange(t.Context(), connectivity.Connecting))
mockOtelService.grpcClient = grpcClient

return mockOtelService
@ -630,12 +630,12 @@ func TestWriteIntegration_sequentialTempError(t *testing.T) {

conf := p.db.Config().ConnConfig
conf.Logger = nil
c, err := pgx.ConnectConfig(context.Background(), conf)
c, err := pgx.ConnectConfig(t.Context(), conf)
if err != nil {
t.Error(err)
return true
}
_, err = c.Exec(context.Background(), "SELECT pg_terminate_backend($1)", pid)
_, err = c.Exec(t.Context(), "SELECT pg_terminate_backend($1)", pid)
if err != nil {
t.Error(err)
}

@ -683,12 +683,12 @@ func TestWriteIntegration_concurrentTempError(t *testing.T) {

conf := p.db.Config().ConnConfig
conf.Logger = nil
c, err := pgx.ConnectConfig(context.Background(), conf)
c, err := pgx.ConnectConfig(t.Context(), conf)
if err != nil {
t.Error(err)
return true
}
_, err = c.Exec(context.Background(), "SELECT pg_terminate_backend($1)", pid)
_, err = c.Exec(t.Context(), "SELECT pg_terminate_backend($1)", pid)
if err != nil {
t.Error(err)
}
@ -1,7 +1,6 @@
package quix

import (
"context"
"crypto/rand"
"encoding/json"
"net/http"

@ -111,12 +110,11 @@ func TestConnectAndWriteIntegration(t *testing.T) {
topic := "telegraf"

// Setup a kafka container
ctx := context.Background()
kafkaContainer, err := kafkacontainer.Run(ctx, "confluentinc/confluent-local:7.5.0")
kafkaContainer, err := kafkacontainer.Run(t.Context(), "confluentinc/confluent-local:7.5.0")
require.NoError(t, err)
defer kafkaContainer.Terminate(ctx) //nolint:errcheck // ignored
defer kafkaContainer.Terminate(t.Context()) //nolint:errcheck // ignored

brokers, err := kafkaContainer.Brokers(ctx)
brokers, err := kafkaContainer.Brokers(t.Context())
require.NoError(t, err)

// Setup broker config distributed via HTTP
@ -133,15 +133,15 @@ func TestCases(t *testing.T) {
require.NoError(t, err)

// // Check the metric nevertheless as we might get some metrics despite errors.
actual := getAllRecords(address)
actual := getAllRecords(t.Context(), address)
require.ElementsMatch(t, expected, actual)
})
}
}

func getAllRecords(address string) []string {
func getAllRecords(testContext context.Context, address string) []string {
client := redis.NewClient(&redis.Options{Addr: address})
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
ctx, cancel := context.WithTimeout(testContext, 10*time.Second)
defer cancel()

var records []string
@ -30,9 +30,7 @@ func TestStaticFileCreation(t *testing.T) {
}
expected := "test,source=localhost value=42i 1719410485000000000\n"

tmpdir, err := os.MkdirTemp("", "telegraf-remotefile-*")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()

// Setup the plugin including the serializer
plugin := &File{

@ -77,9 +75,7 @@ func TestStaticFileAppend(t *testing.T) {
expected := "test,source=remotehost value=23i 1719410465000000000\n"
expected += "test,source=localhost value=42i 1719410485000000000\n"

tmpdir, err := os.MkdirTemp("", "telegraf-remotefile-*")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()

// Create a file where we want to append to
f, err := os.OpenFile(filepath.Join(tmpdir, "test"), os.O_CREATE|os.O_WRONLY, 0600)

@ -174,9 +170,7 @@ func TestDynamicFiles(t *testing.T) {
},
}

tmpdir, err := os.MkdirTemp("", "telegraf-remotefile-*")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()

// Setup the plugin including the serializer
plugin := &File{

@ -242,9 +236,7 @@ func TestCustomTemplateFunctions(t *testing.T) {

expectedFilename := fmt.Sprintf("test-%d", time.Now().Year())

tmpdir, err := os.MkdirTemp("", "telegraf-remotefile-*")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()

// Setup the plugin including the serializer
plugin := &File{

@ -297,9 +289,7 @@ func TestCSVSerialization(t *testing.T) {
"test-b.csv": "timestamp,measurement,source,value\n1587686400,test,b,23\n",
}

tmpdir, err := os.MkdirTemp("", "telegraf-remotefile-*")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()

// Setup the plugin including the serializer
plugin := &File{

@ -357,9 +347,7 @@ func TestForgettingFiles(t *testing.T) {
),
}

tmpdir, err := os.MkdirTemp("", "telegraf-remotefile-*")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()

// Setup the plugin including the serializer
plugin := &File{

@ -467,9 +455,7 @@ func TestTrackingMetrics(t *testing.T) {
}

// Prepare the output filesystem
tmpdir, err := os.MkdirTemp("", "telegraf-remotefile-*")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()

// Setup the plugin including the serializer
plugin := &File{
@ -100,7 +100,7 @@ func TestWrite(t *testing.T) {
mockMetric.reqs = nil
mockMetric.resps = append(mockMetric.resps[:0], expectedResponse)

c, err := monitoring.NewMetricClient(context.Background(), clientOpt)
c, err := monitoring.NewMetricClient(t.Context(), clientOpt)
if err != nil {
t.Fatal(err)
}

@ -128,7 +128,7 @@ func TestWriteResourceTypeAndLabels(t *testing.T) {
mockMetric.reqs = nil
mockMetric.resps = append(mockMetric.resps[:0], expectedResponse)

c, err := monitoring.NewMetricClient(context.Background(), clientOpt)
c, err := monitoring.NewMetricClient(t.Context(), clientOpt)
if err != nil {
t.Fatal(err)
}

@ -161,7 +161,7 @@ func TestWriteTagsAsResourceLabels(t *testing.T) {
mockMetric.reqs = nil
mockMetric.resps = append(mockMetric.resps[:0], expectedResponse)

c, err := monitoring.NewMetricClient(context.Background(), clientOpt)
c, err := monitoring.NewMetricClient(t.Context(), clientOpt)
if err != nil {
t.Fatal(err)
}

@ -225,7 +225,7 @@ func TestWriteMetricTypesOfficial(t *testing.T) {
mockMetric.reqs = nil
mockMetric.resps = append(mockMetric.resps[:0], expectedResponse)

c, err := monitoring.NewMetricClient(context.Background(), clientOpt)
c, err := monitoring.NewMetricClient(t.Context(), clientOpt)
if err != nil {
t.Fatal(err)
}

@ -301,7 +301,7 @@ func TestWriteMetricTypesPath(t *testing.T) {
mockMetric.reqs = nil
mockMetric.resps = append(mockMetric.resps[:0], expectedResponse)

c, err := monitoring.NewMetricClient(context.Background(), clientOpt)
c, err := monitoring.NewMetricClient(t.Context(), clientOpt)
if err != nil {
t.Fatal(err)
}

@ -361,7 +361,7 @@ func TestWriteAscendingTime(t *testing.T) {
mockMetric.reqs = nil
mockMetric.resps = append(mockMetric.resps[:0], expectedResponse)

c, err := monitoring.NewMetricClient(context.Background(), clientOpt)
c, err := monitoring.NewMetricClient(t.Context(), clientOpt)
if err != nil {
t.Fatal(err)
}

@ -437,7 +437,7 @@ func TestWriteBatchable(t *testing.T) {
mockMetric.reqs = nil
mockMetric.resps = append(mockMetric.resps[:0], expectedResponse)

c, err := monitoring.NewMetricClient(context.Background(), clientOpt)
c, err := monitoring.NewMetricClient(t.Context(), clientOpt)
if err != nil {
t.Fatal(err)
}

@ -630,7 +630,7 @@ func TestWriteIgnoredErrors(t *testing.T) {
mockMetric.err = tt.err
mockMetric.reqs = nil

c, err := monitoring.NewMetricClient(context.Background(), clientOpt)
c, err := monitoring.NewMetricClient(t.Context(), clientOpt)
if err != nil {
t.Fatal(err)
}

@ -707,7 +707,7 @@ func TestGetStackdriverLabels(t *testing.T) {
}

func TestGetStackdriverIntervalEndpoints(t *testing.T) {
c, err := monitoring.NewMetricClient(context.Background(), clientOpt)
c, err := monitoring.NewMetricClient(t.Context(), clientOpt)
if err != nil {
t.Fatal(err)
}
@ -124,7 +124,7 @@ func TestParse(t *testing.T) {
cases := []testCase{singleMetric, multiMetric}

for _, tc := range cases {
buf, err := writeValueList(tc.vl)
buf, err := writeValueList(t.Context(), tc.vl)
require.NoError(t, err)
bytes, err := buf.Bytes()
require.NoError(t, err)

@ -139,7 +139,7 @@ func TestParse(t *testing.T) {
}

func TestParseMultiValueSplit(t *testing.T) {
buf, err := writeValueList(multiMetric.vl)
buf, err := writeValueList(t.Context(), multiMetric.vl)
require.NoError(t, err)
bytes, err := buf.Bytes()
require.NoError(t, err)

@ -153,7 +153,7 @@ func TestParseMultiValueSplit(t *testing.T) {
}

func TestParseMultiValueJoin(t *testing.T) {
buf, err := writeValueList(multiMetric.vl)
buf, err := writeValueList(t.Context(), multiMetric.vl)
require.NoError(t, err)
bytes, err := buf.Bytes()
require.NoError(t, err)

@ -167,7 +167,7 @@ func TestParseMultiValueJoin(t *testing.T) {
}

func TestParse_DefaultTags(t *testing.T) {
buf, err := writeValueList(singleMetric.vl)
buf, err := writeValueList(t.Context(), singleMetric.vl)
require.NoError(t, err)
bytes, err := buf.Bytes()
require.NoError(t, err)

@ -192,7 +192,7 @@ func TestParse_SignSecurityLevel(t *testing.T) {
require.NoError(t, parser.Init())

// Signed data
buf, err := writeValueList(singleMetric.vl)
buf, err := writeValueList(t.Context(), singleMetric.vl)
require.NoError(t, err)
buf.Sign("user0", "bar")
bytes, err := buf.Bytes()

@ -203,7 +203,7 @@ func TestParse_SignSecurityLevel(t *testing.T) {
assertEqualMetrics(t, singleMetric.expected, metrics)

// Encrypted data
buf, err = writeValueList(singleMetric.vl)
buf, err = writeValueList(t.Context(), singleMetric.vl)
require.NoError(t, err)
buf.Encrypt("user0", "bar")
bytes, err = buf.Bytes()

@ -214,7 +214,7 @@ func TestParse_SignSecurityLevel(t *testing.T) {
assertEqualMetrics(t, singleMetric.expected, metrics)

// Plain text data skipped
buf, err = writeValueList(singleMetric.vl)
buf, err = writeValueList(t.Context(), singleMetric.vl)
require.NoError(t, err)
bytes, err = buf.Bytes()
require.NoError(t, err)

@ -224,7 +224,7 @@ func TestParse_SignSecurityLevel(t *testing.T) {
require.Empty(t, metrics)

// Wrong password error
buf, err = writeValueList(singleMetric.vl)
buf, err = writeValueList(t.Context(), singleMetric.vl)
require.NoError(t, err)
buf.Sign("x", "y")
bytes, err = buf.Bytes()

@ -242,7 +242,7 @@ func TestParse_EncryptSecurityLevel(t *testing.T) {
require.NoError(t, parser.Init())

// Signed data skipped
buf, err := writeValueList(singleMetric.vl)
buf, err := writeValueList(t.Context(), singleMetric.vl)
require.NoError(t, err)
buf.Sign("user0", "bar")
bytes, err := buf.Bytes()

@ -253,7 +253,7 @@ func TestParse_EncryptSecurityLevel(t *testing.T) {
require.Empty(t, metrics)

// Encrypted data
buf, err = writeValueList(singleMetric.vl)
buf, err = writeValueList(t.Context(), singleMetric.vl)
require.NoError(t, err)
buf.Encrypt("user0", "bar")
bytes, err = buf.Bytes()

@ -264,7 +264,7 @@ func TestParse_EncryptSecurityLevel(t *testing.T) {
assertEqualMetrics(t, singleMetric.expected, metrics)

// Plain text data skipped
buf, err = writeValueList(singleMetric.vl)
buf, err = writeValueList(t.Context(), singleMetric.vl)
require.NoError(t, err)
bytes, err = buf.Bytes()
require.NoError(t, err)

@ -274,7 +274,7 @@ func TestParse_EncryptSecurityLevel(t *testing.T) {
require.Empty(t, metrics)

// Wrong password error
buf, err = writeValueList(singleMetric.vl)
buf, err = writeValueList(t.Context(), singleMetric.vl)
require.NoError(t, err)
buf.Sign("x", "y")
bytes, err = buf.Bytes()

@ -285,7 +285,7 @@ func TestParse_EncryptSecurityLevel(t *testing.T) {
}

func TestParseLine(t *testing.T) {
buf, err := writeValueList(singleMetric.vl)
buf, err := writeValueList(t.Context(), singleMetric.vl)
require.NoError(t, err)
bytes, err := buf.Bytes()
require.NoError(t, err)

@ -299,12 +299,11 @@ func TestParseLine(t *testing.T) {
assertEqualMetrics(t, singleMetric.expected, []telegraf.Metric{m})
}

func writeValueList(valueLists []api.ValueList) (*network.Buffer, error) {
func writeValueList(testContext context.Context, valueLists []api.ValueList) (*network.Buffer, error) {
buffer := network.NewBuffer(0)

ctx := context.Background()
for i := range valueLists {
err := buffer.Write(ctx, &valueLists[i])
err := buffer.Write(testContext, &valueLists[i])
if err != nil {
return nil, err
}

@ -381,7 +380,7 @@ func TestBenchmarkData(t *testing.T) {
),
}

buf, err := writeValueList(benchmarkData)
buf, err := writeValueList(t.Context(), benchmarkData)
require.NoError(t, err)
bytes, err := buf.Bytes()
require.NoError(t, err)

@ -395,7 +394,7 @@ func TestBenchmarkData(t *testing.T) {
}

func BenchmarkParsing(b *testing.B) {
buf, err := writeValueList(benchmarkData)
buf, err := writeValueList(b.Context(), benchmarkData)
require.NoError(b, err)
bytes, err := buf.Bytes()
require.NoError(b, err)
@ -18,7 +18,7 @@ func TestSimpleReverseDNSLookup(t *testing.T) {
answer, err := d.Lookup("127.0.0.1")
require.NoError(t, err)
require.Equal(t, []string{"localhost"}, answer)
err = blockAllWorkers(d)
err = blockAllWorkers(t.Context(), d)
require.NoError(t, err)

// do another request with no workers available.

@ -137,6 +137,6 @@ func (*localResolver) LookupAddr(context.Context, string) (names []string, err e

// blockAllWorkers is a test function that eats up all the worker pool space to
// make sure workers are done running and there's no room to acquire a new worker.
func blockAllWorkers(d *ReverseDNSCache) error {
return d.sem.Acquire(context.Background(), int64(d.maxWorkers))
func blockAllWorkers(testContext context.Context, d *ReverseDNSCache) error {
return d.sem.Acquire(testContext, int64(d.maxWorkers))
}
@ -36,7 +36,7 @@ func TestInitFail(t *testing.T) {
name: "invalid password",
plugin: &Jose{
ID: "test",
Path: os.TempDir(),
Path: t.TempDir(),
Password: config.NewSecret([]byte("@{unresolvable:secret}")),
},
expected: "getting password failed",

@ -58,9 +58,7 @@ func TestSetListGet(t *testing.T) {
}

// Create a temporary directory we can use to store the secrets
testdir, err := os.MkdirTemp("", "jose-*")
require.NoError(t, err)
defer os.RemoveAll(testdir)
testdir := t.TempDir()

// Initialize the plugin
plugin := &Jose{

@ -110,9 +108,7 @@ func TestResolver(t *testing.T) {
secretVal := "I won't tell"

// Create a temporary directory we can use to store the secrets
testdir, err := os.MkdirTemp("", "jose-*")
require.NoError(t, err)
defer os.RemoveAll(testdir)
testdir := t.TempDir()

// Initialize the plugin
plugin := &Jose{

@ -138,9 +134,7 @@ func TestResolverInvalid(t *testing.T) {
secretVal := "I won't tell"

// Create a temporary directory we can use to store the secrets
testdir, err := os.MkdirTemp("", "jose-*")
require.NoError(t, err)
defer os.RemoveAll(testdir)
testdir := t.TempDir()

// Initialize the plugin
plugin := &Jose{

@ -164,9 +158,7 @@ func TestGetNonExistent(t *testing.T) {
secretVal := "I won't tell"

// Create a temporary directory we can use to store the secrets
testdir, err := os.MkdirTemp("", "jose-*")
require.NoError(t, err)
defer os.RemoveAll(testdir)
testdir := t.TempDir()

// Initialize the plugin
plugin := &Jose{

@ -178,7 +170,7 @@ func TestGetNonExistent(t *testing.T) {
require.NoError(t, plugin.Set(secretKey, secretVal))

// Get the resolver
_, err = plugin.Get("foo")
_, err := plugin.Get("foo")
require.EqualError(t, err, "The specified item could not be found in the keyring")
}

@ -187,9 +179,7 @@ func TestGetInvalidPassword(t *testing.T) {
secretVal := "I won't tell"

// Create a temporary directory we can use to store the secrets
testdir, err := os.MkdirTemp("", "jose-*")
require.NoError(t, err)
defer os.RemoveAll(testdir)
testdir := t.TempDir()

// Initialize the stored secrets
creator := &Jose{

@ -208,6 +198,6 @@ func TestGetInvalidPassword(t *testing.T) {
Path: testdir,
}
require.NoError(t, plugin.Init())
_, err = plugin.Get(secretKey)
_, err := plugin.Get(secretKey)
require.ErrorContains(t, err, "integrity check failed")
}