From 9415d8e7e917d8eb8cb6e9809a1e7bfac2d64f8a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Tue, 1 Oct 2024 20:49:53 +0200 Subject: [PATCH] chore(linters): Enable `import-alias-naming` and `redundant-import-alias` rules for revive (#15836) --- .golangci.yml | 4 + config/config_test.go | 8 +- internal/internal.go | 4 +- plugins/aggregators/histogram/histogram.go | 26 +- .../aggregators/histogram/histogram_test.go | 40 +- plugins/common/auth/basic_auth_test.go | 3 +- plugins/common/aws/credentials.go | 42 +- plugins/common/http/config.go | 4 +- plugins/common/kafka/config.go | 67 ++- plugins/common/proxy/connect.go | 14 +- plugins/common/proxy/dialer.go | 6 +- plugins/common/shim/config_test.go | 8 +- plugins/common/shim/processor_test.go | 4 +- plugins/common/socket/socket.go | 4 +- plugins/inputs/aerospike/aerospike.go | 4 +- plugins/inputs/apcupsd/apcupsd.go | 6 +- plugins/inputs/azure_monitor/azure_monitor.go | 3 +- .../azure_monitor/azure_monitor_test.go | 4 +- plugins/inputs/beat/beat.go | 4 +- .../cisco_telemetry_mdt.go | 12 +- .../cisco_telemetry_mdt_test.go | 382 +++++++++--------- .../inputs/cloud_pubsub/subscription_gcp.go | 3 +- .../cloud_pubsub_push/cloud_pubsub_push.go | 4 +- plugins/inputs/cloudwatch/cloudwatch.go | 58 +-- plugins/inputs/cloudwatch/cloudwatch_test.go | 42 +- .../cloudwatch_metric_streams.go | 4 +- plugins/inputs/couchbase/couchbase.go | 14 +- plugins/inputs/cpu/cpu.go | 16 +- plugins/inputs/cpu/cpu_test.go | 26 +- .../inputs/ctrlx_datalayer/ctrlx_datalayer.go | 8 +- .../ctrlx_datalayer/ctrlx_datalayer_test.go | 4 +- plugins/inputs/disk/disk_test.go | 42 +- plugins/inputs/docker/client.go | 20 +- plugins/inputs/docker/docker.go | 60 +-- plugins/inputs/docker/docker_test.go | 42 +- plugins/inputs/docker_log/docker_log.go | 48 +-- plugins/inputs/dpdk/dpdk_connector.go | 4 +- plugins/inputs/elasticsearch/elasticsearch.go | 18 +- .../elasticsearch_query.go | 6 +- .../elasticsearch_query_test.go | 8 +- plugins/inputs/ethtool/namespace_linux.go | 8 +- .../eventhub_consumer/eventhub_consumer.go | 34 +- plugins/inputs/exec/run_notwinodws.go | 7 +- plugins/inputs/exec/run_windows.go | 7 +- plugins/inputs/execd/execd_test.go | 4 +- plugins/inputs/github/github.go | 26 +- plugins/inputs/gnmi/gnmi.go | 38 +- plugins/inputs/gnmi/gnmi_test.go | 362 ++++++++--------- plugins/inputs/gnmi/handler.go | 18 +- plugins/inputs/gnmi/path.go | 10 +- plugins/inputs/gnmi/update_fields.go | 16 +- .../google_cloud_storage_test.go | 4 +- plugins/inputs/http/http.go | 4 +- plugins/inputs/http/http_test.go | 4 +- .../http_listener_v2/http_listener_v2.go | 4 +- plugins/inputs/infiniband/infiniband_test.go | 6 +- .../influxdb_listener/influxdb_listener.go | 4 +- .../influxdb_v2_listener.go | 4 +- .../intel_baseband/sock_connector_test.go | 2 +- .../inputs/intel_powerstat/intel_powerstat.go | 34 +- .../jti_openconfig_telemetry.go | 4 +- .../kafka_consumer/kafka_consumer_test.go | 12 +- plugins/inputs/kibana/kibana.go | 6 +- .../kinesis_consumer/kinesis_consumer.go | 4 +- plugins/inputs/kube_inventory/daemonset.go | 4 +- .../inputs/kube_inventory/daemonset_test.go | 38 +- plugins/inputs/kube_inventory/deployment.go | 2 +- .../inputs/kube_inventory/deployment_test.go | 2 +- .../inputs/kube_inventory/endpoint_test.go | 2 +- plugins/inputs/kube_inventory/statefulset.go | 2 +- .../inputs/kube_inventory/statefulset_test.go | 2 +- plugins/inputs/kubernetes/kubernetes.go | 2 +- plugins/inputs/ldap/ldap.go | 4 +- plugins/inputs/ldap/ldap_test.go | 6 +- 
plugins/inputs/logstash/logstash.go | 24 +- plugins/inputs/memcached/memcached.go | 4 +- plugins/inputs/mesos/mesos.go | 4 +- plugins/inputs/mongodb/mongodb.go | 4 +- plugins/inputs/mysql/mysql.go | 4 +- .../opensearch_query/opensearch_query.go | 4 +- plugins/inputs/openstack/openstack.go | 4 +- .../opentelemetry/opentelemetry_test.go | 3 +- plugins/inputs/p4runtime/p4runtime.go | 34 +- .../p4runtime/p4runtime_fake_client_test.go | 56 +-- plugins/inputs/p4runtime/p4runtime_test.go | 224 +++++----- plugins/inputs/prometheus/prometheus.go | 8 +- .../riemann_listener/riemann_listener.go | 16 +- plugins/inputs/syslog/syslog_test.go | 4 +- plugins/inputs/vault/vault.go | 6 +- plugins/inputs/vault/vault_test.go | 14 +- plugins/inputs/vsphere/vsphere_test.go | 4 +- plugins/inputs/x509_cert/x509_cert.go | 4 +- plugins/inputs/x509_cert/x509_cert_test.go | 6 +- .../gen-go/zipkincore/zipkinCore-consts.go | 3 +- plugins/inputs/zookeeper/zookeeper.go | 4 +- .../mocks/diagnostics_message_listener.go | 2 +- .../azure_data_explorer_test.go | 8 +- plugins/outputs/cloud_pubsub/topic_gcp.go | 3 +- plugins/outputs/cloud_pubsub/topic_stubbed.go | 4 +- plugins/outputs/cloudwatch/cloudwatch.go | 8 +- .../cloudwatch_logs/cloudwatch_logs.go | 4 +- .../cloudwatch_logs/cloudwatch_logs_test.go | 52 +-- plugins/outputs/dynatrace/dynatrace.go | 28 +- plugins/outputs/exec/exec_test.go | 6 +- plugins/outputs/execd/execd_test.go | 10 +- plugins/outputs/graphite/graphite.go | 4 +- plugins/outputs/graylog/graylog.go | 4 +- plugins/outputs/graylog/graylog_test_linux.go | 10 +- plugins/outputs/health/health.go | 4 +- plugins/outputs/http/http.go | 16 +- plugins/outputs/http/http_test.go | 14 +- plugins/outputs/kinesis/kinesis.go | 4 +- plugins/outputs/mqtt/mqtt_test.go | 18 +- .../prometheus_client/prometheus_client.go | 38 +- .../outputs/prometheus_client/v1/collector.go | 12 +- .../outputs/prometheus_client/v2/collector.go | 10 +- .../outputs/socket_writer/socket_writer.go | 4 +- plugins/outputs/stomp/stomp.go | 4 +- plugins/outputs/syslog/syslog.go | 4 +- plugins/outputs/timestream/timestream.go | 6 +- plugins/outputs/timestream/timestream_test.go | 18 +- plugins/outputs/wavefront/wavefront.go | 18 +- plugins/outputs/wavefront/wavefront_test.go | 24 +- plugins/processors/dedup/dedup.go | 4 +- plugins/processors/execd/README.md | 4 +- plugins/processors/execd/execd_test.go | 10 +- plugins/secretstores/http/http.go | 4 +- testutil/container.go | 8 +- testutil/metric.go | 35 +- 129 files changed, 1305 insertions(+), 1293 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 80661461d..e5a4f7c95 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -270,6 +270,9 @@ linters-settings: - name: get-return - name: identical-branches - name: if-return + - name: import-alias-naming + arguments: + - "^[a-z][a-z0-9_]*[a-z0-9]+$" - name: import-shadowing - name: increment-decrement - name: indent-error-flow @@ -285,6 +288,7 @@ linters-settings: - name: range-val-in-closure - name: receiver-naming - name: redefines-builtin-id + - name: redundant-import-alias - name: string-of-int - name: struct-tag - name: superfluous-else diff --git a/config/config_test.go b/config/config_test.go index 3cb412daf..5aa47a7ad 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -18,7 +18,6 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/telegraf/plugins/parsers/json_v2" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" @@ -33,10 +32,11 @@ import ( 
"github.com/influxdata/telegraf/plugins/parsers" _ "github.com/influxdata/telegraf/plugins/parsers/all" // Blank import to have all parsers for testing "github.com/influxdata/telegraf/plugins/parsers/json" + "github.com/influxdata/telegraf/plugins/parsers/json_v2" "github.com/influxdata/telegraf/plugins/processors" "github.com/influxdata/telegraf/plugins/serializers" _ "github.com/influxdata/telegraf/plugins/serializers/all" // Blank import to have all serializers for testing - promserializer "github.com/influxdata/telegraf/plugins/serializers/prometheus" + serializers_prometheus "github.com/influxdata/telegraf/plugins/serializers/prometheus" "github.com/influxdata/telegraf/testutil" ) @@ -687,7 +687,7 @@ func TestConfig_SerializerInterfaceNewFormat(t *testing.T) { // Ignore all unexported fields and fields not relevant for functionality options := []cmp.Option{ cmpopts.IgnoreUnexported(stype), - cmpopts.IgnoreUnexported(reflect.Indirect(reflect.ValueOf(promserializer.MetricTypes{})).Interface()), + cmpopts.IgnoreUnexported(reflect.Indirect(reflect.ValueOf(serializers_prometheus.MetricTypes{})).Interface()), cmpopts.IgnoreTypes(sync.Mutex{}, regexp.Regexp{}), cmpopts.IgnoreInterfaces(struct{ telegraf.Logger }{}), } @@ -779,7 +779,7 @@ func TestConfig_SerializerInterfaceOldFormat(t *testing.T) { // Ignore all unexported fields and fields not relevant for functionality options := []cmp.Option{ cmpopts.IgnoreUnexported(stype), - cmpopts.IgnoreUnexported(reflect.Indirect(reflect.ValueOf(promserializer.MetricTypes{})).Interface()), + cmpopts.IgnoreUnexported(reflect.Indirect(reflect.ValueOf(serializers_prometheus.MetricTypes{})).Interface()), cmpopts.IgnoreTypes(sync.Mutex{}, regexp.Regexp{}), cmpopts.IgnoreInterfaces(struct{ telegraf.Logger }{}), } diff --git a/internal/internal.go b/internal/internal.go index 6f3ded575..d82ce04d6 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -4,7 +4,7 @@ import ( "bufio" "compress/gzip" "context" - cryptoRand "crypto/rand" + crypto_rand "crypto/rand" "errors" "fmt" "io" @@ -94,7 +94,7 @@ func ReadLines(filename string) ([]string, error) { // RandomString returns a random string of alphanumeric characters func RandomString(n int) (string, error) { var bytes = make([]byte, n) - _, err := cryptoRand.Read(bytes) + _, err := crypto_rand.Read(bytes) if err != nil { return "", err } diff --git a/plugins/aggregators/histogram/histogram.go b/plugins/aggregators/histogram/histogram.go index fff10f959..0144220e2 100644 --- a/plugins/aggregators/histogram/histogram.go +++ b/plugins/aggregators/histogram/histogram.go @@ -8,7 +8,7 @@ import ( "time" "github.com/influxdata/telegraf" - telegrafConfig "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/aggregators" ) @@ -29,18 +29,18 @@ const bucketNegInf = "-Inf" // HistogramAggregator is aggregator with histogram configs and particular histograms for defined metrics type HistogramAggregator struct { - Configs []config `toml:"config"` - ResetBuckets bool `toml:"reset"` - Cumulative bool `toml:"cumulative"` - ExpirationInterval telegrafConfig.Duration `toml:"expiration_interval"` - PushOnlyOnUpdate bool `toml:"push_only_on_update"` + Configs []bucketConfig `toml:"config"` + ResetBuckets bool `toml:"reset"` + Cumulative bool `toml:"cumulative"` + ExpirationInterval config.Duration `toml:"expiration_interval"` + PushOnlyOnUpdate bool `toml:"push_only_on_update"` buckets bucketsByMetrics cache map[uint64]metricHistogramCollection } -// config 
is the config, which contains name, field of metric and histogram buckets. -type config struct { +// bucketConfig is the config, which contains name, field of metric and histogram buckets. +type bucketConfig struct { Metric string `toml:"measurement_name"` Fields []string `toml:"fields"` Buckets buckets `toml:"buckets"` @@ -239,9 +239,9 @@ func (h *HistogramAggregator) getBuckets(metric string, field string) []float64 return buckets } - for _, config := range h.Configs { - if config.Metric == metric { - if !isBucketExists(field, config) { + for _, cfg := range h.Configs { + if cfg.Metric == metric { + if !isBucketExists(field, cfg) { continue } @@ -249,7 +249,7 @@ func (h *HistogramAggregator) getBuckets(metric string, field string) []float64 h.buckets[metric] = make(bucketsByFields) } - h.buckets[metric][field] = sortBuckets(config.Buckets) + h.buckets[metric][field] = sortBuckets(cfg.Buckets) } } @@ -257,7 +257,7 @@ func (h *HistogramAggregator) getBuckets(metric string, field string) []float64 } // isBucketExists checks if buckets exists for the passed field -func isBucketExists(field string, cfg config) bool { +func isBucketExists(field string, cfg bucketConfig) bool { if len(cfg.Fields) == 0 { return true } diff --git a/plugins/aggregators/histogram/histogram_test.go b/plugins/aggregators/histogram/histogram_test.go index f87cc4584..ec31a578f 100644 --- a/plugins/aggregators/histogram/histogram_test.go +++ b/plugins/aggregators/histogram/histogram_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" - telegrafConfig "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" ) @@ -17,16 +17,16 @@ type fields map[string]interface{} type tags map[string]string // NewTestHistogram creates new test histogram aggregation with specified config -func NewTestHistogram(cfg []config, reset bool, cumulative bool, pushOnlyOnUpdate bool) telegraf.Aggregator { +func NewTestHistogram(cfg []bucketConfig, reset bool, cumulative bool, pushOnlyOnUpdate bool) telegraf.Aggregator { return NewTestHistogramWithExpirationInterval(cfg, reset, cumulative, pushOnlyOnUpdate, 0) } func NewTestHistogramWithExpirationInterval( - cfg []config, + cfg []bucketConfig, reset bool, cumulative bool, pushOnlyOnUpdate bool, - expirationInterval telegrafConfig.Duration, + expirationInterval config.Duration, ) telegraf.Aggregator { htm := NewHistogramAggregator() htm.Configs = cfg @@ -85,8 +85,8 @@ func BenchmarkApply(b *testing.B) { // TestHistogram tests metrics for one period and for one field func TestHistogram(t *testing.T) { - var cfg []config - cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) + var cfg []bucketConfig + cfg = append(cfg, bucketConfig{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) histogram := NewTestHistogram(cfg, false, true, false) acc := &testutil.Accumulator{} @@ -107,8 +107,8 @@ func TestHistogram(t *testing.T) { // TestHistogram tests metrics for one period, for one field and push only on histogram update func TestHistogramPushOnUpdate(t *testing.T) { - var cfg []config - cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) + var cfg []bucketConfig + cfg = append(cfg, bucketConfig{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: 
[]float64{0.0, 10.0, 20.0, 30.0, 40.0}}) histogram := NewTestHistogram(cfg, false, true, true) acc := &testutil.Accumulator{} @@ -143,8 +143,8 @@ func TestHistogramPushOnUpdate(t *testing.T) { // TestHistogramNonCumulative tests metrics for one period and for one field func TestHistogramNonCumulative(t *testing.T) { - var cfg []config - cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) + var cfg []bucketConfig + cfg = append(cfg, bucketConfig{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) histogram := NewTestHistogram(cfg, false, false, false) acc := &testutil.Accumulator{} @@ -165,8 +165,8 @@ func TestHistogramNonCumulative(t *testing.T) { // TestHistogramWithReset tests metrics for one period and for one field, with reset between metrics adding func TestHistogramWithReset(t *testing.T) { - var cfg []config - cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) + var cfg []bucketConfig + cfg = append(cfg, bucketConfig{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) histogram := NewTestHistogram(cfg, true, true, false) acc := &testutil.Accumulator{} @@ -187,7 +187,7 @@ func TestHistogramWithReset(t *testing.T) { // TestHistogramWithAllFields tests two metrics for one period and for all fields func TestHistogramWithAllFields(t *testing.T) { - cfg := []config{ + cfg := []bucketConfig{ {Metric: "first_metric_name", Buckets: []float64{0.0, 15.5, 20.0, 30.0, 40.0}}, {Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}}, } @@ -266,7 +266,7 @@ func TestHistogramWithAllFields(t *testing.T) { // TestHistogramWithAllFieldsNonCumulative tests two metrics for one period and for all fields func TestHistogramWithAllFieldsNonCumulative(t *testing.T) { - cfg := []config{ + cfg := []bucketConfig{ {Metric: "first_metric_name", Buckets: []float64{0.0, 15.5, 20.0, 30.0, 40.0}}, {Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}}, } @@ -370,8 +370,8 @@ func TestHistogramWithAllFieldsNonCumulative(t *testing.T) { // TestHistogramWithTwoPeriodsAndAllFields tests two metrics getting added with a push/reset in between (simulates // getting added in different periods) for all fields func TestHistogramWithTwoPeriodsAndAllFields(t *testing.T) { - var cfg []config - cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) + var cfg []bucketConfig + cfg = append(cfg, bucketConfig{Metric: "first_metric_name", Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) histogram := NewTestHistogram(cfg, false, true, false) acc := &testutil.Accumulator{} @@ -415,8 +415,8 @@ func TestWrongBucketsOrder(t *testing.T) { } }() - var cfg []config - cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 90.0, 20.0, 30.0, 40.0}}) + var cfg []bucketConfig + cfg = append(cfg, bucketConfig{Metric: "first_metric_name", Buckets: []float64{0.0, 90.0, 20.0, 30.0, 40.0}}) histogram := NewTestHistogram(cfg, false, true, false) histogram.Add(firstMetric2) } @@ -431,11 +431,11 @@ func TestHistogramMetricExpiration(t *testing.T) { timeNow = time.Now }() - cfg := []config{ + cfg := []bucketConfig{ {Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}, {Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}}, 
} - histogram := NewTestHistogramWithExpirationInterval(cfg, false, true, false, telegrafConfig.Duration(30)) + histogram := NewTestHistogramWithExpirationInterval(cfg, false, true, false, config.Duration(30)) acc := &testutil.Accumulator{} diff --git a/plugins/common/auth/basic_auth_test.go b/plugins/common/auth/basic_auth_test.go index 781f36ab8..04f313f2d 100644 --- a/plugins/common/auth/basic_auth_test.go +++ b/plugins/common/auth/basic_auth_test.go @@ -1,9 +1,10 @@ package auth import ( - "github.com/stretchr/testify/require" "net/http/httptest" "testing" + + "github.com/stretchr/testify/require" ) func TestBasicAuth_VerifyWithCredentials(t *testing.T) { diff --git a/plugins/common/aws/credentials.go b/plugins/common/aws/credentials.go index 8d2c36b6e..e93f1f226 100644 --- a/plugins/common/aws/credentials.go +++ b/plugins/common/aws/credentials.go @@ -3,10 +3,10 @@ package aws import ( "context" - awsV2 "github.com/aws/aws-sdk-go-v2/aws" - configV2 "github.com/aws/aws-sdk-go-v2/config" - credentialsV2 "github.com/aws/aws-sdk-go-v2/credentials" - stscredsV2 "github.com/aws/aws-sdk-go-v2/credentials/stscreds" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/credentials/stscreds" "github.com/aws/aws-sdk-go-v2/service/sts" ) @@ -24,61 +24,61 @@ type CredentialConfig struct { WebIdentityTokenFile string `toml:"web_identity_token_file"` } -func (c *CredentialConfig) Credentials() (awsV2.Config, error) { +func (c *CredentialConfig) Credentials() (aws.Config, error) { if c.RoleARN != "" { return c.configWithAssumeCredentials() } return c.configWithRootCredentials() } -func (c *CredentialConfig) configWithRootCredentials() (awsV2.Config, error) { - options := []func(*configV2.LoadOptions) error{ - configV2.WithRegion(c.Region), +func (c *CredentialConfig) configWithRootCredentials() (aws.Config, error) { + options := []func(*config.LoadOptions) error{ + config.WithRegion(c.Region), } if c.Profile != "" { - options = append(options, configV2.WithSharedConfigProfile(c.Profile)) + options = append(options, config.WithSharedConfigProfile(c.Profile)) } if c.Filename != "" { - options = append(options, configV2.WithSharedCredentialsFiles([]string{c.Filename})) + options = append(options, config.WithSharedCredentialsFiles([]string{c.Filename})) } if c.AccessKey != "" || c.SecretKey != "" { - provider := credentialsV2.NewStaticCredentialsProvider(c.AccessKey, c.SecretKey, c.Token) - options = append(options, configV2.WithCredentialsProvider(provider)) + provider := credentials.NewStaticCredentialsProvider(c.AccessKey, c.SecretKey, c.Token) + options = append(options, config.WithCredentialsProvider(provider)) } - return configV2.LoadDefaultConfig(context.Background(), options...) + return config.LoadDefaultConfig(context.Background(), options...) 
} -func (c *CredentialConfig) configWithAssumeCredentials() (awsV2.Config, error) { +func (c *CredentialConfig) configWithAssumeCredentials() (aws.Config, error) { // To generate credentials using assumeRole, we need to create AWS STS client with the default AWS endpoint, defaultConfig, err := c.configWithRootCredentials() if err != nil { - return awsV2.Config{}, err + return aws.Config{}, err } - var provider awsV2.CredentialsProvider + var provider aws.CredentialsProvider stsService := sts.NewFromConfig(defaultConfig) if c.WebIdentityTokenFile != "" { - provider = stscredsV2.NewWebIdentityRoleProvider( + provider = stscreds.NewWebIdentityRoleProvider( stsService, c.RoleARN, - stscredsV2.IdentityTokenFile(c.WebIdentityTokenFile), - func(opts *stscredsV2.WebIdentityRoleOptions) { + stscreds.IdentityTokenFile(c.WebIdentityTokenFile), + func(opts *stscreds.WebIdentityRoleOptions) { if c.RoleSessionName != "" { opts.RoleSessionName = c.RoleSessionName } }, ) } else { - provider = stscredsV2.NewAssumeRoleProvider(stsService, c.RoleARN, func(opts *stscredsV2.AssumeRoleOptions) { + provider = stscreds.NewAssumeRoleProvider(stsService, c.RoleARN, func(opts *stscreds.AssumeRoleOptions) { if c.RoleSessionName != "" { opts.RoleSessionName = c.RoleSessionName } }) } - defaultConfig.Credentials = awsV2.NewCredentialsCache(provider) + defaultConfig.Credentials = aws.NewCredentialsCache(provider) return defaultConfig, nil } diff --git a/plugins/common/http/config.go b/plugins/common/http/config.go index c94a625a6..8df45b9f9 100644 --- a/plugins/common/http/config.go +++ b/plugins/common/http/config.go @@ -12,7 +12,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/cookie" - oauthConfig "github.com/influxdata/telegraf/plugins/common/oauth" + "github.com/influxdata/telegraf/plugins/common/oauth" "github.com/influxdata/telegraf/plugins/common/proxy" "github.com/influxdata/telegraf/plugins/common/tls" ) @@ -27,7 +27,7 @@ type HTTPClientConfig struct { proxy.HTTPProxy tls.ClientConfig - oauthConfig.OAuth2Config + oauth.OAuth2Config cookie.CookieAuthConfig } diff --git a/plugins/common/kafka/config.go b/plugins/common/kafka/config.go index eba226412..f4ee6b58a 100644 --- a/plugins/common/kafka/config.go +++ b/plugins/common/kafka/config.go @@ -9,7 +9,7 @@ import ( "github.com/IBM/sarama" "github.com/influxdata/telegraf" - tgConf "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" ) @@ -19,10 +19,9 @@ type ReadConfig struct { } // SetConfig on the sarama.Config object from the ReadConfig struct. -func (k *ReadConfig) SetConfig(config *sarama.Config, log telegraf.Logger) error { - config.Consumer.Return.Errors = true - - return k.Config.SetConfig(config, log) +func (k *ReadConfig) SetConfig(cfg *sarama.Config, log telegraf.Logger) error { + cfg.Consumer.Return.Errors = true + return k.Config.SetConfig(cfg, log) } // WriteConfig for kafka clients meaning to write to kafka @@ -36,18 +35,18 @@ type WriteConfig struct { } // SetConfig on the sarama.Config object from the WriteConfig struct. 
-func (k *WriteConfig) SetConfig(config *sarama.Config, log telegraf.Logger) error { - config.Producer.Return.Successes = true - config.Producer.Idempotent = k.IdempotentWrites - config.Producer.Retry.Max = k.MaxRetry +func (k *WriteConfig) SetConfig(cfg *sarama.Config, log telegraf.Logger) error { + cfg.Producer.Return.Successes = true + cfg.Producer.Idempotent = k.IdempotentWrites + cfg.Producer.Retry.Max = k.MaxRetry if k.MaxMessageBytes > 0 { - config.Producer.MaxMessageBytes = k.MaxMessageBytes + cfg.Producer.MaxMessageBytes = k.MaxMessageBytes } - config.Producer.RequiredAcks = sarama.RequiredAcks(k.RequiredAcks) - if config.Producer.Idempotent { - config.Net.MaxOpenRequests = 1 + cfg.Producer.RequiredAcks = sarama.RequiredAcks(k.RequiredAcks) + if cfg.Producer.Idempotent { + cfg.Net.MaxOpenRequests = 1 } - return k.Config.SetConfig(config, log) + return k.Config.SetConfig(cfg, log) } // Config common to all Kafka clients. @@ -59,12 +58,12 @@ type Config struct { ClientID string `toml:"client_id"` CompressionCodec int `toml:"compression_codec"` EnableTLS *bool `toml:"enable_tls"` - KeepAlivePeriod *tgConf.Duration `toml:"keep_alive_period"` + KeepAlivePeriod *config.Duration `toml:"keep_alive_period"` MetadataRetryMax int `toml:"metadata_retry_max"` MetadataRetryType string `toml:"metadata_retry_type"` - MetadataRetryBackoff tgConf.Duration `toml:"metadata_retry_backoff"` - MetadataRetryMaxDuration tgConf.Duration `toml:"metadata_retry_max_duration"` + MetadataRetryBackoff config.Duration `toml:"metadata_retry_backoff"` + MetadataRetryMaxDuration config.Duration `toml:"metadata_retry_max_duration"` // Disable full metadata fetching MetadataFull *bool `toml:"metadata_full"` @@ -83,26 +82,26 @@ func makeBackoffFunc(backoff, maxDuration time.Duration) BackoffFunc { } // SetConfig on the sarama.Config object from the Config struct. -func (k *Config) SetConfig(config *sarama.Config, log telegraf.Logger) error { +func (k *Config) SetConfig(cfg *sarama.Config, log telegraf.Logger) error { if k.Version != "" { version, err := sarama.ParseKafkaVersion(k.Version) if err != nil { return err } - config.Version = version + cfg.Version = version } if k.ClientID != "" { - config.ClientID = k.ClientID + cfg.ClientID = k.ClientID } else { - config.ClientID = "Telegraf" + cfg.ClientID = "Telegraf" } - config.Producer.Compression = sarama.CompressionCodec(k.CompressionCodec) + cfg.Producer.Compression = sarama.CompressionCodec(k.CompressionCodec) if k.EnableTLS != nil && *k.EnableTLS { - config.Net.TLS.Enable = true + cfg.Net.TLS.Enable = true } tlsConfig, err := k.ClientConfig.TLSConfig() @@ -111,33 +110,33 @@ func (k *Config) SetConfig(config *sarama.Config, log telegraf.Logger) error { } if tlsConfig != nil { - config.Net.TLS.Config = tlsConfig + cfg.Net.TLS.Config = tlsConfig // To maintain backwards compatibility, if the enable_tls option is not // set TLS is enabled if a non-default TLS config is used. 
if k.EnableTLS == nil { - config.Net.TLS.Enable = true + cfg.Net.TLS.Enable = true } } if k.KeepAlivePeriod != nil { // Defaults to OS setting (15s currently) - config.Net.KeepAlive = time.Duration(*k.KeepAlivePeriod) + cfg.Net.KeepAlive = time.Duration(*k.KeepAlivePeriod) } if k.MetadataFull != nil { // Defaults to true in Sarama - config.Metadata.Full = *k.MetadataFull + cfg.Metadata.Full = *k.MetadataFull } if k.MetadataRetryMax != 0 { - config.Metadata.Retry.Max = k.MetadataRetryMax + cfg.Metadata.Retry.Max = k.MetadataRetryMax } if k.MetadataRetryBackoff != 0 { - // If config.Metadata.Retry.BackoffFunc is set, sarama ignores - // config.Metadata.Retry.Backoff - config.Metadata.Retry.Backoff = time.Duration(k.MetadataRetryBackoff) + // If cfg.Metadata.Retry.BackoffFunc is set, sarama ignores + // cfg.Metadata.Retry.Backoff + cfg.Metadata.Retry.Backoff = time.Duration(k.MetadataRetryBackoff) } switch strings.ToLower(k.MetadataRetryType) { @@ -145,15 +144,15 @@ func (k *Config) SetConfig(config *sarama.Config, log telegraf.Logger) error { return errors.New("invalid metadata retry type") case "exponential": if k.MetadataRetryBackoff == 0 { - k.MetadataRetryBackoff = tgConf.Duration(250 * time.Millisecond) + k.MetadataRetryBackoff = config.Duration(250 * time.Millisecond) log.Warnf("metadata_retry_backoff is 0, using %s", time.Duration(k.MetadataRetryBackoff)) } - config.Metadata.Retry.BackoffFunc = makeBackoffFunc( + cfg.Metadata.Retry.BackoffFunc = makeBackoffFunc( time.Duration(k.MetadataRetryBackoff), time.Duration(k.MetadataRetryMaxDuration), ) case "constant", "": } - return k.SetSASLConfig(config) + return k.SetSASLConfig(cfg) } diff --git a/plugins/common/proxy/connect.go b/plugins/common/proxy/connect.go index 6c95ddf95..d5543854e 100644 --- a/plugins/common/proxy/connect.go +++ b/plugins/common/proxy/connect.go @@ -8,12 +8,12 @@ import ( "net/http" "net/url" - netProxy "golang.org/x/net/proxy" + "golang.org/x/net/proxy" ) // httpConnectProxy proxies (only?) TCP over a HTTP tunnel using the CONNECT method type httpConnectProxy struct { - forward netProxy.Dialer + forward proxy.Dialer url *url.URL } @@ -25,7 +25,7 @@ func (c *httpConnectProxy) DialContext(ctx context.Context, network, addr string var proxyConn net.Conn var err error - if dialer, ok := c.forward.(netProxy.ContextDialer); ok { + if dialer, ok := c.forward.(proxy.ContextDialer); ok { proxyConn, err = dialer.DialContext(ctx, "tcp", c.url.Host) } else { shim := contextDialerShim{c.forward} @@ -93,14 +93,14 @@ func (c *httpConnectProxy) Dial(network, addr string) (net.Conn, error) { return c.DialContext(context.Background(), network, addr) } -func newHTTPConnectProxy(proxyURL *url.URL, forward netProxy.Dialer) (netProxy.Dialer, error) { +func newHTTPConnectProxy(proxyURL *url.URL, forward proxy.Dialer) (proxy.Dialer, error) { return &httpConnectProxy{forward, proxyURL}, nil } func init() { // Register new proxy types - netProxy.RegisterDialerType("http", newHTTPConnectProxy) - netProxy.RegisterDialerType("https", newHTTPConnectProxy) + proxy.RegisterDialerType("http", newHTTPConnectProxy) + proxy.RegisterDialerType("https", newHTTPConnectProxy) } // contextDialerShim allows cancellation of the dial from a context even if the underlying @@ -108,7 +108,7 @@ func init() { // unless a new proxy type is added that doesn't implement `proxy.ContextDialer`, as all the // standard library dialers implement `proxy.ContextDialer`. 
type contextDialerShim struct { - dialer netProxy.Dialer + dialer proxy.Dialer } func (cd *contextDialerShim) Dial(network, addr string) (net.Conn, error) { diff --git a/plugins/common/proxy/dialer.go b/plugins/common/proxy/dialer.go index 844d12ac7..dd5051b21 100644 --- a/plugins/common/proxy/dialer.go +++ b/plugins/common/proxy/dialer.go @@ -5,11 +5,11 @@ import ( "net" "time" - netProxy "golang.org/x/net/proxy" + "golang.org/x/net/proxy" ) type ProxiedDialer struct { - dialer netProxy.Dialer + dialer proxy.Dialer } func (pd *ProxiedDialer) Dial(network, addr string) (net.Conn, error) { @@ -17,7 +17,7 @@ func (pd *ProxiedDialer) Dial(network, addr string) (net.Conn, error) { } func (pd *ProxiedDialer) DialContext(ctx context.Context, network, addr string) (net.Conn, error) { - if contextDialer, ok := pd.dialer.(netProxy.ContextDialer); ok { + if contextDialer, ok := pd.dialer.(proxy.ContextDialer); ok { return contextDialer.DialContext(ctx, network, addr) } diff --git a/plugins/common/shim/config_test.go b/plugins/common/shim/config_test.go index 6bb807496..077ba30ac 100644 --- a/plugins/common/shim/config_test.go +++ b/plugins/common/shim/config_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" - tgConfig "github.com/influxdata/telegraf/config" + cfg "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/processors" ) @@ -61,9 +61,9 @@ func TestLoadingProcessorWithConfig(t *testing.T) { } type testDurationInput struct { - Duration tgConfig.Duration `toml:"duration"` - Size tgConfig.Size `toml:"size"` - Hex int64 `toml:"hex"` + Duration cfg.Duration `toml:"duration"` + Size cfg.Size `toml:"size"` + Hex int64 `toml:"hex"` } func (i *testDurationInput) SampleConfig() string { diff --git a/plugins/common/shim/processor_test.go b/plugins/common/shim/processor_test.go index 83d135d8f..195d72572 100644 --- a/plugins/common/shim/processor_test.go +++ b/plugins/common/shim/processor_test.go @@ -13,7 +13,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/parsers/influx" - influxSerializer "github.com/influxdata/telegraf/plugins/serializers/influx" + serializers_influx "github.com/influxdata/telegraf/plugins/serializers/influx" ) func TestProcessorShim(t *testing.T) { @@ -52,7 +52,7 @@ func testSendAndReceive(t *testing.T, fieldKey string, fieldValue string) { wg.Done() }() - serializer := &influxSerializer.Serializer{} + serializer := &serializers_influx.Serializer{} require.NoError(t, serializer.Init()) parser := influx.Parser{} diff --git a/plugins/common/socket/socket.go b/plugins/common/socket/socket.go index 3582c16eb..482301c3c 100644 --- a/plugins/common/socket/socket.go +++ b/plugins/common/socket/socket.go @@ -12,7 +12,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - tlsint "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" ) type CallbackData func(net.Addr, []byte) @@ -34,7 +34,7 @@ type Config struct { SocketMode string `toml:"socket_mode"` ContentEncoding string `toml:"content_encoding"` MaxDecompressionSize config.Size `toml:"max_decompression_size"` - tlsint.ServerConfig + common_tls.ServerConfig } type Socket struct { diff --git a/plugins/inputs/aerospike/aerospike.go b/plugins/inputs/aerospike/aerospike.go index 3dc552062..26a567707 100644 --- 
a/plugins/inputs/aerospike/aerospike.go +++ b/plugins/inputs/aerospike/aerospike.go @@ -14,7 +14,7 @@ import ( as "github.com/aerospike/aerospike-client-go/v5" "github.com/influxdata/telegraf" - tlsint "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -30,7 +30,7 @@ type Aerospike struct { EnableTLS bool `toml:"enable_tls"` EnableSSL bool `toml:"enable_ssl" deprecated:"1.7.0;1.35.0;use 'enable_tls' instead"` TLSName string `toml:"tls_name"` - tlsint.ClientConfig + common_tls.ClientConfig initialized bool tlsConfig *tls.Config diff --git a/plugins/inputs/apcupsd/apcupsd.go b/plugins/inputs/apcupsd/apcupsd.go index 299b97362..cf5e8c02c 100644 --- a/plugins/inputs/apcupsd/apcupsd.go +++ b/plugins/inputs/apcupsd/apcupsd.go @@ -9,7 +9,7 @@ import ( "strings" "time" - apcupsdClient "github.com/mdlayher/apcupsd" + "github.com/mdlayher/apcupsd" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" @@ -97,8 +97,8 @@ func (h *ApcUpsd) Gather(acc telegraf.Accumulator) error { return nil } -func fetchStatus(ctx context.Context, addr *url.URL) (*apcupsdClient.Status, error) { - client, err := apcupsdClient.DialContext(ctx, addr.Scheme, addr.Host) +func fetchStatus(ctx context.Context, addr *url.URL) (*apcupsd.Status, error) { + client, err := apcupsd.DialContext(ctx, addr.Scheme, addr.Host) if err != nil { return nil, err } diff --git a/plugins/inputs/azure_monitor/azure_monitor.go b/plugins/inputs/azure_monitor/azure_monitor.go index 495b67ec7..610007a29 100644 --- a/plugins/inputs/azure_monitor/azure_monitor.go +++ b/plugins/inputs/azure_monitor/azure_monitor.go @@ -4,10 +4,11 @@ package azure_monitor import ( _ "embed" "fmt" + "sync" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "sync" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" diff --git a/plugins/inputs/azure_monitor/azure_monitor_test.go b/plugins/inputs/azure_monitor/azure_monitor_test.go index 4bada8386..5e14e73e4 100644 --- a/plugins/inputs/azure_monitor/azure_monitor_test.go +++ b/plugins/inputs/azure_monitor/azure_monitor_test.go @@ -5,11 +5,11 @@ import ( "encoding/json" "errors" "fmt" - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" "os" "testing" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/monitor/armmonitor" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" "github.com/influxdata/toml" diff --git a/plugins/inputs/beat/beat.go b/plugins/inputs/beat/beat.go index 5992bf9ec..3296f95cc 100644 --- a/plugins/inputs/beat/beat.go +++ b/plugins/inputs/beat/beat.go @@ -14,7 +14,7 @@ import ( "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" + parsers_json "github.com/influxdata/telegraf/plugins/parsers/json" ) //go:embed sample.conf @@ -180,7 +180,7 @@ func (beat *Beat) Gather(accumulator telegraf.Accumulator) error { default: return fmt.Errorf("unknown stats-type %q", name) } - flattener := jsonparser.JSONFlattener{} + flattener := parsers_json.JSONFlattener{} err := flattener.FullFlattenJSON("", stats, 
true, true) if err != nil { return err diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go index ec0976162..80ff47832 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go @@ -16,7 +16,7 @@ import ( "sync" "time" - dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout" + mdtdialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout" telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -28,7 +28,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" - internaltls "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -65,7 +65,7 @@ type CiscoTelemetryMDT struct { Log telegraf.Logger // GRPC TLS settings - internaltls.ServerConfig + common_tls.ServerConfig // Internal listener / client handle grpcServer *grpc.Server @@ -83,7 +83,7 @@ type CiscoTelemetryMDT struct { wg sync.WaitGroup // Though unused in the code, required by protoc-gen-go-grpc to maintain compatibility - dialout.UnimplementedGRPCMdtDialoutServer + mdtdialout.UnimplementedGRPCMdtDialoutServer } type NxPayloadXfromStructure struct { @@ -200,7 +200,7 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error { } c.grpcServer = grpc.NewServer(opts...) - dialout.RegisterGRPCMdtDialoutServer(c.grpcServer, c) + mdtdialout.RegisterGRPCMdtDialoutServer(c.grpcServer, c) c.wg.Add(1) go func() { @@ -312,7 +312,7 @@ func (c *CiscoTelemetryMDT) handleTCPClient(conn net.Conn) error { } // MdtDialout RPC server method for grpc-dialout transport -func (c *CiscoTelemetryMDT) MdtDialout(stream dialout.GRPCMdtDialout_MdtDialoutServer) error { +func (c *CiscoTelemetryMDT) MdtDialout(stream mdtdialout.GRPCMdtDialout_MdtDialoutServer) error { peerInCtx, peerOK := peer.FromContext(stream.Context()) if peerOK { c.Log.Debugf("Accepted Cisco MDT GRPC dialout connection from %s", peerInCtx.Addr) diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go index e648d8dc0..98d95f05f 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go @@ -10,8 +10,8 @@ import ( "testing" "time" - dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout" - telemetryBis "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" + mdtdialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout" + telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" @@ -36,55 +36,55 @@ func TestHandleTelemetryTwoSimple(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetryBis.Telemetry{ + tel := &telemetry.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "type:model/some/path", - NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetryBis.TelemetryField{ + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: 
&telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetry.TelemetryField{ { - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "keys", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "name", - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "str"}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str"}, }, { Name: "uint64", - ValueByType: &telemetryBis.TelemetryField_Uint64Value{Uint64Value: 1234}, + ValueByType: &telemetry.TelemetryField_Uint64Value{Uint64Value: 1234}, }, }, }, { Name: "content", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "bool", - ValueByType: &telemetryBis.TelemetryField_BoolValue{BoolValue: true}, + ValueByType: &telemetry.TelemetryField_BoolValue{BoolValue: true}, }, }, }, }, }, { - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "keys", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "name", - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "str2"}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str2"}, }, }, }, { Name: "content", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "bool", - ValueByType: &telemetryBis.TelemetryField_BoolValue{BoolValue: false}, + ValueByType: &telemetry.TelemetryField_BoolValue{BoolValue: false}, }, }, }, @@ -92,7 +92,7 @@ func TestHandleTelemetryTwoSimple(t *testing.T) { }, }, } - data, err := proto.Marshal(telemetry) + data, err := proto.Marshal(tel) require.NoError(t, err) c.handleTelemetry(data) @@ -142,55 +142,55 @@ func TestIncludeDeleteField(t *testing.T) { stateKey := "state" testCases := []struct { - telemetry *telemetryBis.Telemetry + telemetry *telemetry.Telemetry expected []telegraf.Metric }{{ - telemetry: &telemetryBis.Telemetry{ + telemetry: &telemetry.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: encodingPath.stringValue, - NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: source.stringValue}, - Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: subscription.stringValue}, - DataGpbkv: []*telemetryBis.TelemetryField{ + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: source.stringValue}, + Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: subscription.stringValue}, + DataGpbkv: []*telemetry.TelemetryField{ { - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "keys", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: name.name, - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: name.stringValue}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: name.stringValue}, }, { Name: index.name, - ValueByType: &telemetryBis.TelemetryField_Uint32Value{Uint32Value: index.uint32Value}, + ValueByType: &telemetry.TelemetryField_Uint32Value{Uint32Value: index.uint32Value}, }, { Name: ip.name, - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: ip.stringValue}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: ip.stringValue}, }, }, }, { Name: "content", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: stateKey, - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: ip.name, - ValueByType: 
&telemetryBis.TelemetryField_StringValue{StringValue: ip.stringValue}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: ip.stringValue}, }, { Name: prefixLength.name, - ValueByType: &telemetryBis.TelemetryField_Uint32Value{Uint32Value: prefixLength.uint32Value}, + ValueByType: &telemetry.TelemetryField_Uint32Value{Uint32Value: prefixLength.uint32Value}, }, { Name: origin.name, - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: origin.stringValue}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: origin.stringValue}, }, { Name: status.name, - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: status.stringValue}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: status.stringValue}, }, }, }, @@ -222,29 +222,29 @@ func TestIncludeDeleteField(t *testing.T) { )}, }, { - telemetry: &telemetryBis.Telemetry{ + telemetry: &telemetry.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: encodingPath.stringValue, - NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: source.stringValue}, - Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: subscription.stringValue}, - DataGpbkv: []*telemetryBis.TelemetryField{ + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: source.stringValue}, + Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: subscription.stringValue}, + DataGpbkv: []*telemetry.TelemetryField{ { Delete: true, - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "keys", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: name.name, - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: name.stringValue}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: name.stringValue}, }, { Name: index.name, - ValueByType: &telemetryBis.TelemetryField_Uint32Value{Uint32Value: index.uint32Value}, + ValueByType: &telemetry.TelemetryField_Uint32Value{Uint32Value: index.uint32Value}, }, { Name: ip.name, - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: ip.stringValue}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: ip.stringValue}, }, }, }, @@ -299,26 +299,26 @@ func TestHandleTelemetrySingleNested(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetryBis.Telemetry{ + tel := &telemetry.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "type:model/nested/path", - NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetryBis.TelemetryField{ + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetry.TelemetryField{ { - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "keys", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "nested", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "key", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "level", - ValueByType: &telemetryBis.TelemetryField_DoubleValue{DoubleValue: 3}, + ValueByType: &telemetry.TelemetryField_DoubleValue{DoubleValue: 3}, }, }, }, @@ -328,16 +328,16 @@ func TestHandleTelemetrySingleNested(t *testing.T) { }, { 
Name: "content", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "nested", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "value", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "foo", - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, @@ -349,7 +349,7 @@ func TestHandleTelemetrySingleNested(t *testing.T) { }, }, } - data, err := proto.Marshal(telemetry) + data, err := proto.Marshal(tel) require.NoError(t, err) c.handleTelemetry(data) @@ -376,49 +376,49 @@ func TestHandleEmbeddedTags(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetryBis.Telemetry{ + tel := &telemetry.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "type:model/extra", - NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetryBis.TelemetryField{ + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetry.TelemetryField{ { - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "keys", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "foo", - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, { Name: "content", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "list", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "name", - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "entry1"}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "entry1"}, }, { Name: "test", - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "foo"}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"}, }, }, }, { Name: "list", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "name", - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "entry2"}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "entry2"}, }, { Name: "test", - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, @@ -428,7 +428,7 @@ func TestHandleEmbeddedTags(t *testing.T) { }, }, } - data, err := proto.Marshal(telemetry) + data, err := proto.Marshal(tel) require.NoError(t, err) c.handleTelemetry(data) @@ -464,57 +464,57 @@ func TestHandleNXAPI(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetryBis.Telemetry{ + tel := &telemetry.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "show nxapi", - NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetryBis.TelemetryField{ + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: 
[]*telemetry.TelemetryField{ { - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "keys", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "foo", - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, { Name: "content", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "TABLE_nxapi", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "ROW_nxapi", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "index", - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "i1"}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "i1"}, }, { Name: "value", - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "foo"}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"}, }, }, }, { - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "index", - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "i2"}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "i2"}, }, { Name: "value", - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, @@ -532,7 +532,7 @@ func TestHandleNXAPI(t *testing.T) { }, }, } - data, err := proto.Marshal(telemetry) + data, err := proto.Marshal(tel) require.NoError(t, err) c.handleTelemetry(data) @@ -571,45 +571,45 @@ func TestHandleNXAPIXformNXAPI(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetryBis.Telemetry{ + tel := &telemetry.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "show processes cpu", - NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetryBis.TelemetryField{ + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetry.TelemetryField{ { - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "keys", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "foo", - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, { Name: "content", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "TABLE_process_cpu", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "ROW_process_cpu", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "index", - ValueByType: 
&telemetryBis.TelemetryField_StringValue{StringValue: "i1"}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "i1"}, }, { Name: "value", - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "foo"}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"}, }, }, }, @@ -627,7 +627,7 @@ func TestHandleNXAPIXformNXAPI(t *testing.T) { }, }, } - data, err := proto.Marshal(telemetry) + data, err := proto.Marshal(tel) require.NoError(t, err) c.handleTelemetry(data) @@ -655,57 +655,57 @@ func TestHandleNXXformMulti(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetryBis.Telemetry{ + tel := &telemetry.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "sys/lldp", - NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetryBis.TelemetryField{ + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetry.TelemetryField{ { - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "keys", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "foo", - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, { Name: "content", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "fooEntity", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "attributes", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "rn", - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "some-rn"}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "some-rn"}, }, { Name: "portIdV", - ValueByType: &telemetryBis.TelemetryField_Uint32Value{Uint32Value: 12}, + ValueByType: &telemetry.TelemetryField_Uint32Value{Uint32Value: 12}, }, { Name: "portDesc", - ValueByType: &telemetryBis.TelemetryField_Uint64Value{Uint64Value: 100}, + ValueByType: &telemetry.TelemetryField_Uint64Value{Uint64Value: 100}, }, { Name: "test", - ValueByType: &telemetryBis.TelemetryField_Uint64Value{Uint64Value: 281474976710655}, + ValueByType: &telemetry.TelemetryField_Uint64Value{Uint64Value: 281474976710655}, }, { Name: "subscriptionId", - ValueByType: &telemetryBis.TelemetryField_Uint64Value{Uint64Value: 2814749767106551}, + ValueByType: &telemetry.TelemetryField_Uint64Value{Uint64Value: 2814749767106551}, }, }, }, @@ -723,7 +723,7 @@ func TestHandleNXXformMulti(t *testing.T) { }, }, } - data, err := proto.Marshal(telemetry) + data, err := proto.Marshal(tel) require.NoError(t, err) c.handleTelemetry(data) @@ -748,45 +748,45 @@ func TestHandleNXDME(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetryBis.Telemetry{ + tel := &telemetry.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "sys/dme", - NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - 
Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetryBis.TelemetryField{ + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetry.TelemetryField{ { - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "keys", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "foo", - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, { Name: "content", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "fooEntity", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "attributes", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "rn", - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "some-rn"}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "some-rn"}, }, { Name: "value", - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "foo"}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"}, }, }, }, @@ -804,7 +804,7 @@ func TestHandleNXDME(t *testing.T) { }, }, } - data, err := proto.Marshal(telemetry) + data, err := proto.Marshal(tel) require.NoError(t, err) c.handleTelemetry(data) @@ -852,13 +852,13 @@ func TestTCPDialoutOverflow(t *testing.T) { require.Contains(t, acc.Errors, errors.New("dialout packet too long: 1000000000")) } -func mockTelemetryMicroburstMessage() *telemetryBis.Telemetry { +func mockTelemetryMicroburstMessage() *telemetry.Telemetry { data, err := os.ReadFile("./testdata/microburst") if err != nil { panic(err) } - newMessage := &telemetryBis.Telemetry{} + newMessage := &telemetry.Telemetry{} err = proto.Unmarshal(data, newMessage) if err != nil { panic(err) @@ -866,30 +866,30 @@ func mockTelemetryMicroburstMessage() *telemetryBis.Telemetry { return newMessage } -func mockTelemetryMessage() *telemetryBis.Telemetry { - return &telemetryBis.Telemetry{ +func mockTelemetryMessage() *telemetry.Telemetry { + return &telemetry.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "type:model/some/path", - NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetryBis.TelemetryField{ + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetry.TelemetryField{ { - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "keys", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "name", - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "str"}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str"}, }, }, }, { Name: "content", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "value", - ValueByType: 
&telemetryBis.TelemetryField_Sint64Value{Sint64Value: -1}, + ValueByType: &telemetry.TelemetryField_Sint64Value{Sint64Value: -1}, }, }, }, @@ -914,8 +914,8 @@ func TestGRPCDialoutMicroburst(t *testing.T) { err := c.Start(acc) require.NoError(t, err) - telemetry := mockTelemetryMicroburstMessage() - data, err := proto.Marshal(telemetry) + tel := mockTelemetryMicroburstMessage() + data, err := proto.Marshal(tel) require.NoError(t, err) c.handleTelemetry(data) @@ -954,7 +954,7 @@ func TestTCPDialoutMultiple(t *testing.T) { err := c.Start(acc) require.NoError(t, err) - telemetry := mockTelemetryMessage() + tel := mockTelemetryMessage() hdr := struct { MsgType uint16 @@ -968,7 +968,7 @@ func TestTCPDialoutMultiple(t *testing.T) { conn, err := net.Dial(addr.Network(), addr.String()) require.NoError(t, err) - data, err := proto.Marshal(telemetry) + data, err := proto.Marshal(tel) require.NoError(t, err) hdr.MsgLen = uint32(len(data)) require.NoError(t, binary.Write(conn, binary.BigEndian, hdr)) @@ -978,8 +978,8 @@ func TestTCPDialoutMultiple(t *testing.T) { conn2, err := net.Dial(addr.Network(), addr.String()) require.NoError(t, err) - telemetry.EncodingPath = "type:model/parallel/path" - data, err = proto.Marshal(telemetry) + tel.EncodingPath = "type:model/parallel/path" + data, err = proto.Marshal(tel) require.NoError(t, err) hdr.MsgLen = uint32(len(data)) require.NoError(t, binary.Write(conn2, binary.BigEndian, hdr)) @@ -991,8 +991,8 @@ func TestTCPDialoutMultiple(t *testing.T) { require.True(t, err == nil || errors.Is(err, io.EOF)) require.NoError(t, conn2.Close()) - telemetry.EncodingPath = "type:model/other/path" - data, err = proto.Marshal(telemetry) + tel.EncodingPath = "type:model/other/path" + data, err = proto.Marshal(tel) require.NoError(t, err) hdr.MsgLen = uint32(len(data)) require.NoError(t, binary.Write(conn, binary.BigEndian, hdr)) @@ -1049,11 +1049,11 @@ func TestGRPCDialoutError(t *testing.T) { addr := c.Address() conn, err := grpc.NewClient(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials())) require.NoError(t, err) - client := dialout.NewGRPCMdtDialoutClient(conn) + client := mdtdialout.NewGRPCMdtDialoutClient(conn) stream, err := client.MdtDialout(context.Background()) require.NoError(t, err) - args := &dialout.MdtDialoutArgs{Errors: "foobar"} + args := &mdtdialout.MdtDialoutArgs{Errors: "foobar"} require.NoError(t, stream.Send(args)) // Wait for the server to close @@ -1078,44 +1078,44 @@ func TestGRPCDialoutMultiple(t *testing.T) { acc := &testutil.Accumulator{} err := c.Start(acc) require.NoError(t, err) - telemetry := mockTelemetryMessage() + tel := mockTelemetryMessage() addr := c.Address() conn, err := grpc.NewClient(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials())) require.NoError(t, err) require.True(t, conn.WaitForStateChange(context.Background(), connectivity.Connecting)) - client := dialout.NewGRPCMdtDialoutClient(conn) + client := mdtdialout.NewGRPCMdtDialoutClient(conn) stream, err := client.MdtDialout(context.TODO()) require.NoError(t, err) - data, err := proto.Marshal(telemetry) + data, err := proto.Marshal(tel) require.NoError(t, err) - args := &dialout.MdtDialoutArgs{Data: data, ReqId: 456} + args := &mdtdialout.MdtDialoutArgs{Data: data, ReqId: 456} require.NoError(t, stream.Send(args)) conn2, err := grpc.NewClient(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials())) require.NoError(t, err) require.True(t, conn.WaitForStateChange(context.Background(), connectivity.Connecting)) - client2 := 
dialout.NewGRPCMdtDialoutClient(conn2) + client2 := mdtdialout.NewGRPCMdtDialoutClient(conn2) stream2, err := client2.MdtDialout(context.TODO()) require.NoError(t, err) - telemetry.EncodingPath = "type:model/parallel/path" - data, err = proto.Marshal(telemetry) + tel.EncodingPath = "type:model/parallel/path" + data, err = proto.Marshal(tel) require.NoError(t, err) - args = &dialout.MdtDialoutArgs{Data: data} + args = &mdtdialout.MdtDialoutArgs{Data: data} require.NoError(t, stream2.Send(args)) - require.NoError(t, stream2.Send(&dialout.MdtDialoutArgs{Errors: "testclose"})) + require.NoError(t, stream2.Send(&mdtdialout.MdtDialoutArgs{Errors: "testclose"})) _, err = stream2.Recv() require.True(t, err == nil || errors.Is(err, io.EOF)) require.NoError(t, conn2.Close()) - telemetry.EncodingPath = "type:model/other/path" - data, err = proto.Marshal(telemetry) + tel.EncodingPath = "type:model/other/path" + data, err = proto.Marshal(tel) require.NoError(t, err) - args = &dialout.MdtDialoutArgs{Data: data} + args = &mdtdialout.MdtDialoutArgs{Data: data} require.NoError(t, stream.Send(args)) - require.NoError(t, stream.Send(&dialout.MdtDialoutArgs{Errors: "testclose"})) + require.NoError(t, stream.Send(&mdtdialout.MdtDialoutArgs{Errors: "testclose"})) _, err = stream.Recv() require.True(t, err == nil || errors.Is(err, io.EOF)) @@ -1169,14 +1169,14 @@ func TestGRPCDialoutKeepalive(t *testing.T) { addr := c.Address() conn, err := grpc.NewClient(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials())) require.NoError(t, err) - client := dialout.NewGRPCMdtDialoutClient(conn) + client := mdtdialout.NewGRPCMdtDialoutClient(conn) stream, err := client.MdtDialout(context.Background()) require.NoError(t, err) - telemetry := mockTelemetryMessage() - data, err := proto.Marshal(telemetry) + tel := mockTelemetryMessage() + data, err := proto.Marshal(tel) require.NoError(t, err) - args := &dialout.MdtDialoutArgs{Data: data, ReqId: 456} + args := &mdtdialout.MdtDialoutArgs{Data: data, ReqId: 456} require.NoError(t, stream.Send(args)) c.Stop() @@ -1195,29 +1195,29 @@ func TestSourceFieldRewrite(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetryBis.Telemetry{ + tel := &telemetry.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "type:model/some/path", - NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetryBis.TelemetryField{ + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetry.TelemetryField{ { - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "keys", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "source", - ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "str"}, + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str"}, }, }, }, { Name: "content", - Fields: []*telemetryBis.TelemetryField{ + Fields: []*telemetry.TelemetryField{ { Name: "bool", - ValueByType: &telemetryBis.TelemetryField_BoolValue{BoolValue: false}, + ValueByType: &telemetry.TelemetryField_BoolValue{BoolValue: false}, }, }, }, @@ -1225,7 +1225,7 @@ func TestSourceFieldRewrite(t *testing.T) { }, }, } - data, err := proto.Marshal(telemetry) + data, err := proto.Marshal(tel) 
require.NoError(t, err) c.handleTelemetry(data) diff --git a/plugins/inputs/cloud_pubsub/subscription_gcp.go b/plugins/inputs/cloud_pubsub/subscription_gcp.go index f436d5219..86a76372f 100644 --- a/plugins/inputs/cloud_pubsub/subscription_gcp.go +++ b/plugins/inputs/cloud_pubsub/subscription_gcp.go @@ -1,9 +1,10 @@ package cloud_pubsub import ( - "cloud.google.com/go/pubsub" "context" "time" + + "cloud.google.com/go/pubsub" ) type ( diff --git a/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push.go b/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push.go index ff797520c..3634f9f83 100644 --- a/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push.go +++ b/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push.go @@ -15,7 +15,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" - tlsint "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -42,7 +42,7 @@ type PubSubPush struct { MaxUndeliveredMessages int `toml:"max_undelivered_messages"` - tlsint.ServerConfig + common_tls.ServerConfig telegraf.Parser server *http.Server diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index b7eb3310b..d84999c06 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -14,7 +14,7 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/aws" - cwClient "github.com/aws/aws-sdk-go-v2/service/cloudwatch" + "github.com/aws/aws-sdk-go-v2/service/cloudwatch" "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" "github.com/influxdata/telegraf" @@ -22,9 +22,9 @@ import ( "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/limiter" - internalMetric "github.com/influxdata/telegraf/metric" - internalaws "github.com/influxdata/telegraf/plugins/common/aws" - internalProxy "github.com/influxdata/telegraf/plugins/common/proxy" + "github.com/influxdata/telegraf/metric" + common_aws "github.com/influxdata/telegraf/plugins/common/aws" + "github.com/influxdata/telegraf/plugins/common/proxy" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -37,7 +37,7 @@ type CloudWatch struct { StatisticInclude []string `toml:"statistic_include"` Timeout config.Duration `toml:"timeout"` - internalProxy.HTTPProxy + proxy.HTTPProxy Period config.Duration `toml:"period"` Delay config.Duration `toml:"delay"` @@ -59,7 +59,7 @@ type CloudWatch struct { windowStart time.Time windowEnd time.Time - internalaws.CredentialConfig + common_aws.CredentialConfig } // Metric defines a simplified Cloudwatch metric. 
@@ -86,8 +86,8 @@ type metricCache struct { } type cloudwatchClient interface { - ListMetrics(context.Context, *cwClient.ListMetricsInput, ...func(*cwClient.Options)) (*cwClient.ListMetricsOutput, error) - GetMetricData(context.Context, *cwClient.GetMetricDataInput, ...func(*cwClient.Options)) (*cwClient.GetMetricDataOutput, error) + ListMetrics(context.Context, *cloudwatch.ListMetricsInput, ...func(*cloudwatch.Options)) (*cloudwatch.ListMetricsOutput, error) + GetMetricData(context.Context, *cloudwatch.GetMetricDataInput, ...func(*cloudwatch.Options)) (*cloudwatch.GetMetricDataOutput, error) } func (*CloudWatch) SampleConfig() string { @@ -178,7 +178,7 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { } func (c *CloudWatch) initializeCloudWatch() error { - proxy, err := c.HTTPProxy.Proxy() + proxyFunc, err := c.HTTPProxy.Proxy() if err != nil { return err } @@ -188,7 +188,7 @@ func (c *CloudWatch) initializeCloudWatch() error { return err } - c.client = cwClient.NewFromConfig(awsCreds, func(options *cwClient.Options) { + c.client = cloudwatch.NewFromConfig(awsCreds, func(options *cloudwatch.Options) { if c.CredentialConfig.EndpointURL != "" && c.CredentialConfig.Region != "" { options.BaseEndpoint = &c.CredentialConfig.EndpointURL } @@ -197,7 +197,7 @@ func (c *CloudWatch) initializeCloudWatch() error { options.HTTPClient = &http.Client{ // use values from DefaultTransport Transport: &http.Transport{ - Proxy: proxy, + Proxy: proxyFunc, DialContext: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, @@ -271,13 +271,13 @@ func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) { allMetrics, allAccounts := c.fetchNamespaceMetrics() for _, name := range m.MetricNames { - for i, metric := range allMetrics { - if isSelected(name, metric, m.Dimensions) { + for i, singleMetric := range allMetrics { + if isSelected(name, singleMetric, m.Dimensions) { for _, namespace := range c.Namespaces { metrics = append(metrics, types.Metric{ Namespace: aws.String(namespace), MetricName: aws.String(name), - Dimensions: metric.Dimensions, + Dimensions: singleMetric.Dimensions, }) } if c.IncludeLinkedAccounts { @@ -327,7 +327,7 @@ func (c *CloudWatch) fetchNamespaceMetrics() ([]types.Metric, []string) { metrics := []types.Metric{} var accounts []string for _, namespace := range c.Namespaces { - params := &cwClient.ListMetricsInput{ + params := &cloudwatch.ListMetricsInput{ Dimensions: []types.DimensionFilter{}, Namespace: aws.String(namespace), IncludeLinkedAccounts: &c.IncludeLinkedAccounts, @@ -379,9 +379,9 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) map[string dataQueries := map[string][]types.MetricDataQuery{} for i, filtered := range filteredMetrics { - for j, metric := range filtered.metrics { + for j, singleMetric := range filtered.metrics { id := strconv.Itoa(j) + "_" + strconv.Itoa(i) - dimension := ctod(metric.Dimensions) + dimension := ctod(singleMetric.Dimensions) var accountID *string if c.IncludeLinkedAccounts && len(filtered.accounts) > j { accountID = aws.String(filtered.accounts[j]) @@ -402,10 +402,10 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) map[string } queryID := statisticType + "_" + id c.queryDimensions[queryID] = dimension - dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], types.MetricDataQuery{ + dataQueries[*singleMetric.Namespace] = append(dataQueries[*singleMetric.Namespace], types.MetricDataQuery{ Id: aws.String(queryID), AccountId: accountID, - Label: 
aws.String(snakeCase(*metric.MetricName + "_" + statisticType)), + Label: aws.String(snakeCase(*singleMetric.MetricName + "_" + statisticType)), MetricStat: &types.MetricStat{ Metric: &filtered.metrics[j], Period: aws.Int32(int32(time.Duration(c.Period).Seconds())), @@ -436,7 +436,7 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) map[string // gatherMetrics gets metric data from Cloudwatch. func (c *CloudWatch) gatherMetrics( - params *cwClient.GetMetricDataInput, + params *cloudwatch.GetMetricDataInput, ) ([]types.MetricDataResult, error) { results := []types.MetricDataResult{} @@ -457,7 +457,7 @@ func (c *CloudWatch) gatherMetrics( } func (c *CloudWatch) aggregateMetrics(acc telegraf.Accumulator, metricDataResults map[string][]types.MetricDataResult) { - grouper := internalMetric.NewSeriesGrouper() + grouper := metric.NewSeriesGrouper() for namespace, results := range metricDataResults { namespace = sanitizeMeasurement(namespace) @@ -489,8 +489,8 @@ func (c *CloudWatch) aggregateMetrics(acc telegraf.Accumulator, metricDataResult } } - for _, metric := range grouper.Metrics() { - acc.AddMetric(metric) + for _, singleMetric := range grouper.Metrics() { + acc.AddMetric(singleMetric) } } @@ -532,8 +532,8 @@ func ctod(cDimensions []types.Dimension) *map[string]string { return &dimensions } -func (c *CloudWatch) getDataInputs(dataQueries []types.MetricDataQuery) *cwClient.GetMetricDataInput { - return &cwClient.GetMetricDataInput{ +func (c *CloudWatch) getDataInputs(dataQueries []types.MetricDataQuery) *cloudwatch.GetMetricDataInput { + return &cloudwatch.GetMetricDataInput{ StartTime: aws.Time(c.windowStart), EndTime: aws.Time(c.windowEnd), MetricDataQueries: dataQueries, @@ -554,16 +554,16 @@ func hasWildcard(dimensions []*Dimension) bool { return false } -func isSelected(name string, metric types.Metric, dimensions []*Dimension) bool { - if name != *metric.MetricName { +func isSelected(name string, cloudwatchMetric types.Metric, dimensions []*Dimension) bool { + if name != *cloudwatchMetric.MetricName { return false } - if len(metric.Dimensions) != len(dimensions) { + if len(cloudwatchMetric.Dimensions) != len(dimensions) { return false } for _, d := range dimensions { selected := false - for _, d2 := range metric.Dimensions { + for _, d2 := range cloudwatchMetric.Dimensions { if d.Name == *d2.Name { if d.Value == "" || d.valueMatcher.Match(*d2.Value) { selected = true diff --git a/plugins/inputs/cloudwatch/cloudwatch_test.go b/plugins/inputs/cloudwatch/cloudwatch_test.go index d80b057a1..cfcfad655 100644 --- a/plugins/inputs/cloudwatch/cloudwatch_test.go +++ b/plugins/inputs/cloudwatch/cloudwatch_test.go @@ -8,13 +8,13 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/aws" - cwClient "github.com/aws/aws-sdk-go-v2/service/cloudwatch" + "github.com/aws/aws-sdk-go-v2/service/cloudwatch" "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - internalaws "github.com/influxdata/telegraf/plugins/common/aws" + common_aws "github.com/influxdata/telegraf/plugins/common/aws" "github.com/influxdata/telegraf/plugins/common/proxy" "github.com/influxdata/telegraf/testutil" ) @@ -23,10 +23,10 @@ type mockGatherCloudWatchClient struct{} func (m *mockGatherCloudWatchClient) ListMetrics( _ context.Context, - params *cwClient.ListMetricsInput, - _ ...func(*cwClient.Options), -) (*cwClient.ListMetricsOutput, error) { - response := &cwClient.ListMetricsOutput{ + params 
*cloudwatch.ListMetricsInput, + _ ...func(*cloudwatch.Options), +) (*cloudwatch.ListMetricsOutput, error) { + response := &cloudwatch.ListMetricsOutput{ Metrics: []types.Metric{ { Namespace: params.Namespace, @@ -58,10 +58,10 @@ func (m *mockGatherCloudWatchClient) ListMetrics( func (m *mockGatherCloudWatchClient) GetMetricData( _ context.Context, - params *cwClient.GetMetricDataInput, - _ ...func(*cwClient.Options), -) (*cwClient.GetMetricDataOutput, error) { - return &cwClient.GetMetricDataOutput{ + params *cloudwatch.GetMetricDataInput, + _ ...func(*cloudwatch.Options), +) (*cloudwatch.GetMetricDataOutput, error) { + return &cloudwatch.GetMetricDataOutput{ MetricDataResults: []types.MetricDataResult{ { Id: aws.String("minimum_0_0"), @@ -167,7 +167,7 @@ func TestGather(t *testing.T) { require.NoError(t, err) internalDuration := config.Duration(duration) c := &CloudWatch{ - CredentialConfig: internalaws.CredentialConfig{ + CredentialConfig: common_aws.CredentialConfig{ Region: "us-east-1", }, Namespace: "AWS/ELB", @@ -204,7 +204,7 @@ func TestGatherDenseMetric(t *testing.T) { require.NoError(t, err) internalDuration := config.Duration(duration) c := &CloudWatch{ - CredentialConfig: internalaws.CredentialConfig{ + CredentialConfig: common_aws.CredentialConfig{ Region: "us-east-1", }, Namespace: "AWS/ELB", @@ -243,7 +243,7 @@ func TestMultiAccountGather(t *testing.T) { require.NoError(t, err) internalDuration := config.Duration(duration) c := &CloudWatch{ - CredentialConfig: internalaws.CredentialConfig{ + CredentialConfig: common_aws.CredentialConfig{ Region: "us-east-1", }, Namespace: "AWS/ELB", @@ -309,9 +309,9 @@ type mockSelectMetricsCloudWatchClient struct{} func (m *mockSelectMetricsCloudWatchClient) ListMetrics( _ context.Context, - _ *cwClient.ListMetricsInput, - _ ...func(*cwClient.Options), -) (*cwClient.ListMetricsOutput, error) { + _ *cloudwatch.ListMetricsInput, + _ ...func(*cloudwatch.Options), +) (*cloudwatch.ListMetricsOutput, error) { metrics := []types.Metric{} // 4 metrics are available metricNames := []string{"Latency", "RequestCount", "HealthyHostCount", "UnHealthyHostCount"} @@ -352,7 +352,7 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics( } } - result := &cwClient.ListMetricsOutput{ + result := &cloudwatch.ListMetricsOutput{ Metrics: metrics, } return result, nil @@ -360,9 +360,9 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics( func (m *mockSelectMetricsCloudWatchClient) GetMetricData( _ context.Context, - _ *cwClient.GetMetricDataInput, - _ ...func(*cwClient.Options), -) (*cwClient.GetMetricDataOutput, error) { + _ *cloudwatch.GetMetricDataInput, + _ ...func(*cloudwatch.Options), +) (*cloudwatch.GetMetricDataOutput, error) { return nil, nil } @@ -371,7 +371,7 @@ func TestSelectMetrics(t *testing.T) { require.NoError(t, err) internalDuration := config.Duration(duration) c := &CloudWatch{ - CredentialConfig: internalaws.CredentialConfig{ + CredentialConfig: common_aws.CredentialConfig{ Region: "us-east-1", }, Namespace: "AWS/ELB", diff --git a/plugins/inputs/cloudwatch_metric_streams/cloudwatch_metric_streams.go b/plugins/inputs/cloudwatch_metric_streams/cloudwatch_metric_streams.go index aef1c8d98..ef20ffa57 100644 --- a/plugins/inputs/cloudwatch_metric_streams/cloudwatch_metric_streams.go +++ b/plugins/inputs/cloudwatch_metric_streams/cloudwatch_metric_streams.go @@ -19,7 +19,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/choice" - tlsint 
"github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/selfstat" ) @@ -48,7 +48,7 @@ type CloudWatchMetricStreams struct { ageMin selfstat.Stat Log telegraf.Logger - tlsint.ServerConfig + common_tls.ServerConfig wg sync.WaitGroup close chan struct{} listener net.Listener diff --git a/plugins/inputs/couchbase/couchbase.go b/plugins/inputs/couchbase/couchbase.go index e13d73213..fc2ed00c4 100644 --- a/plugins/inputs/couchbase/couchbase.go +++ b/plugins/inputs/couchbase/couchbase.go @@ -10,7 +10,7 @@ import ( "sync" "time" - couchbaseClient "github.com/couchbase/go-couchbase" + "github.com/couchbase/go-couchbase" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" @@ -72,7 +72,7 @@ func (cb *Couchbase) Gather(acc telegraf.Accumulator) error { func (cb *Couchbase) gatherServer(acc telegraf.Accumulator, addr string) error { escapedAddr := regexpURI.ReplaceAllString(addr, "${1}") - client, err := couchbaseClient.Connect(addr) + client, err := couchbase.Connect(addr) if err != nil { return err } @@ -460,15 +460,15 @@ func (cb *Couchbase) Init() error { cb.client = &http.Client{ Timeout: 10 * time.Second, Transport: &http.Transport{ - MaxIdleConnsPerHost: couchbaseClient.MaxIdleConnsPerHost, + MaxIdleConnsPerHost: couchbase.MaxIdleConnsPerHost, TLSClientConfig: tlsConfig, }, } - couchbaseClient.SetSkipVerify(cb.ClientConfig.InsecureSkipVerify) - couchbaseClient.SetCertFile(cb.ClientConfig.TLSCert) - couchbaseClient.SetKeyFile(cb.ClientConfig.TLSKey) - couchbaseClient.SetRootFile(cb.ClientConfig.TLSCA) + couchbase.SetSkipVerify(cb.ClientConfig.InsecureSkipVerify) + couchbase.SetCertFile(cb.ClientConfig.TLSCert) + couchbase.SetKeyFile(cb.ClientConfig.TLSKey) + couchbase.SetRootFile(cb.ClientConfig.TLSCA) return nil } diff --git a/plugins/inputs/cpu/cpu.go b/plugins/inputs/cpu/cpu.go index 72c348bc0..1dbf51bdc 100644 --- a/plugins/inputs/cpu/cpu.go +++ b/plugins/inputs/cpu/cpu.go @@ -7,7 +7,7 @@ import ( "fmt" "time" - cpuUtil "github.com/shirou/gopsutil/v3/cpu" + "github.com/shirou/gopsutil/v3/cpu" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" @@ -19,8 +19,8 @@ var sampleConfig string type CPUStats struct { ps system.PS - lastStats map[string]cpuUtil.TimesStat - cpuInfo map[string]cpuUtil.InfoStat + lastStats map[string]cpu.TimesStat + cpuInfo map[string]cpu.InfoStat coreID bool physicalID bool @@ -119,7 +119,7 @@ func (c *CPUStats) Gather(acc telegraf.Accumulator) error { acc.AddGauge("cpu", fieldsG, tags, now) } - c.lastStats = make(map[string]cpuUtil.TimesStat) + c.lastStats = make(map[string]cpu.TimesStat) for _, cts := range times { c.lastStats[cts.CPU] = cts } @@ -129,12 +129,12 @@ func (c *CPUStats) Gather(acc telegraf.Accumulator) error { func (c *CPUStats) Init() error { if c.CoreTags { - cpuInfo, err := cpuUtil.Info() + cpuInfo, err := cpu.Info() if err == nil { c.coreID = cpuInfo[0].CoreID != "" c.physicalID = cpuInfo[0].PhysicalID != "" - c.cpuInfo = make(map[string]cpuUtil.InfoStat) + c.cpuInfo = make(map[string]cpu.InfoStat) for _, ci := range cpuInfo { c.cpuInfo[fmt.Sprintf("cpu%d", ci.CPU)] = ci } @@ -146,12 +146,12 @@ func (c *CPUStats) Init() error { return nil } -func totalCPUTime(t cpuUtil.TimesStat) float64 { +func totalCPUTime(t cpu.TimesStat) float64 { total := t.User + t.System + t.Nice + t.Iowait + t.Irq + t.Softirq + t.Steal + t.Idle return total } -func activeCPUTime(t 
cpuUtil.TimesStat) float64 { +func activeCPUTime(t cpu.TimesStat) float64 { active := totalCPUTime(t) - t.Idle return active } diff --git a/plugins/inputs/cpu/cpu_test.go b/plugins/inputs/cpu/cpu_test.go index 225be81c9..73090b558 100644 --- a/plugins/inputs/cpu/cpu_test.go +++ b/plugins/inputs/cpu/cpu_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - cpuUtil "github.com/shirou/gopsutil/v3/cpu" + "github.com/shirou/gopsutil/v3/cpu" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/plugins/inputs/system" @@ -24,7 +24,7 @@ func TestCPUStats(t *testing.T) { defer mps.AssertExpectations(t) var acc testutil.Accumulator - cts := cpuUtil.TimesStat{ + cts := cpu.TimesStat{ CPU: "cpu0", User: 8.8, System: 8.2, @@ -38,7 +38,7 @@ func TestCPUStats(t *testing.T) { GuestNice: 0.324, } - cts2 := cpuUtil.TimesStat{ + cts2 := cpu.TimesStat{ CPU: "cpu0", User: 24.9, // increased by 16.1 System: 10.9, // increased by 2.7 @@ -52,7 +52,7 @@ func TestCPUStats(t *testing.T) { GuestNice: 2.524, // increased by 2.2 } - mps.On("CPUTimes").Return([]cpuUtil.TimesStat{cts}, nil) + mps.On("CPUTimes").Return([]cpu.TimesStat{cts}, nil) cs := NewCPUStats(&mps) @@ -74,7 +74,7 @@ func TestCPUStats(t *testing.T) { assertContainsTaggedFloat(t, &acc, "time_guest_nice", 0.324, 0) mps2 := system.MockPS{} - mps2.On("CPUTimes").Return([]cpuUtil.TimesStat{cts2}, nil) + mps2.On("CPUTimes").Return([]cpu.TimesStat{cts2}, nil) cs.ps = &mps2 // Should have added cpu percentages too @@ -162,7 +162,7 @@ func TestCPUCountIncrease(t *testing.T) { cs := NewCPUStats(&mps) mps.On("CPUTimes").Return( - []cpuUtil.TimesStat{ + []cpu.TimesStat{ { CPU: "cpu0", }, @@ -172,7 +172,7 @@ func TestCPUCountIncrease(t *testing.T) { require.NoError(t, err) mps2.On("CPUTimes").Return( - []cpuUtil.TimesStat{ + []cpu.TimesStat{ { CPU: "cpu0", }, @@ -193,28 +193,28 @@ func TestCPUTimesDecrease(t *testing.T) { defer mps.AssertExpectations(t) var acc testutil.Accumulator - cts := cpuUtil.TimesStat{ + cts := cpu.TimesStat{ CPU: "cpu0", User: 18, Idle: 80, Iowait: 2, } - cts2 := cpuUtil.TimesStat{ + cts2 := cpu.TimesStat{ CPU: "cpu0", User: 38, // increased by 20 Idle: 40, // decreased by 40 Iowait: 1, // decreased by 1 } - cts3 := cpuUtil.TimesStat{ + cts3 := cpu.TimesStat{ CPU: "cpu0", User: 56, // increased by 18 Idle: 120, // increased by 80 Iowait: 3, // increased by 2 } - mps.On("CPUTimes").Return([]cpuUtil.TimesStat{cts}, nil) + mps.On("CPUTimes").Return([]cpu.TimesStat{cts}, nil) cs := NewCPUStats(&mps) @@ -228,7 +228,7 @@ func TestCPUTimesDecrease(t *testing.T) { assertContainsTaggedFloat(t, &acc, "time_iowait", 2, 0) mps2 := system.MockPS{} - mps2.On("CPUTimes").Return([]cpuUtil.TimesStat{cts2}, nil) + mps2.On("CPUTimes").Return([]cpu.TimesStat{cts2}, nil) cs.ps = &mps2 // CPU times decreased. 
An error should be raised @@ -236,7 +236,7 @@ func TestCPUTimesDecrease(t *testing.T) { require.Error(t, err) mps3 := system.MockPS{} - mps3.On("CPUTimes").Return([]cpuUtil.TimesStat{cts3}, nil) + mps3.On("CPUTimes").Return([]cpu.TimesStat{cts3}, nil) cs.ps = &mps3 err = cs.Gather(&acc) diff --git a/plugins/inputs/ctrlx_datalayer/ctrlx_datalayer.go b/plugins/inputs/ctrlx_datalayer/ctrlx_datalayer.go index 3e01ceb01..42c4628f9 100644 --- a/plugins/inputs/ctrlx_datalayer/ctrlx_datalayer.go +++ b/plugins/inputs/ctrlx_datalayer/ctrlx_datalayer.go @@ -22,9 +22,9 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/metric" - httpconfig "github.com/influxdata/telegraf/plugins/common/http" + common_http "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/inputs" - jsonParser "github.com/influxdata/telegraf/plugins/parsers/json" + parsers_json "github.com/influxdata/telegraf/plugins/parsers/json" ) // This plugin is based on the official ctrlX CORE API. Documentation can be found in OpenAPI format at: @@ -55,7 +55,7 @@ type CtrlXDataLayer struct { acc telegraf.Accumulator connection *http.Client tokenManager token.TokenManager - httpconfig.HTTPClientConfig + common_http.HTTPClientConfig } // convertTimestamp2UnixTime converts the given Data Layer timestamp of the payload to UnixTime. @@ -197,7 +197,7 @@ func (c *CtrlXDataLayer) createMetric(em *sseEventData, sub *subscription) (tele switch em.Type { case "object": - flattener := jsonParser.JSONFlattener{} + flattener := parsers_json.JSONFlattener{} err := flattener.FullFlattenJSON(fieldKey, em.Value, true, true) if err != nil { return nil, err diff --git a/plugins/inputs/ctrlx_datalayer/ctrlx_datalayer_test.go b/plugins/inputs/ctrlx_datalayer/ctrlx_datalayer_test.go index e8b744178..1d70022fa 100644 --- a/plugins/inputs/ctrlx_datalayer/ctrlx_datalayer_test.go +++ b/plugins/inputs/ctrlx_datalayer/ctrlx_datalayer_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/config" - httpconfig "github.com/influxdata/telegraf/plugins/common/http" + common_http "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" ) @@ -183,7 +183,7 @@ func initRunner(t *testing.T) (*CtrlXDataLayer, *httptest.Server) { url: server.URL, Username: config.NewSecret([]byte("user")), Password: config.NewSecret([]byte("password")), - HTTPClientConfig: httpconfig.HTTPClientConfig{ + HTTPClientConfig: common_http.HTTPClientConfig{ ClientConfig: tls.ClientConfig{ InsecureSkipVerify: true, }, diff --git a/plugins/inputs/disk/disk_test.go b/plugins/inputs/disk/disk_test.go index 2abe8ad2a..877ce416f 100644 --- a/plugins/inputs/disk/disk_test.go +++ b/plugins/inputs/disk/disk_test.go @@ -9,7 +9,7 @@ import ( "testing" "time" - diskUtil "github.com/shirou/gopsutil/v3/disk" + "github.com/shirou/gopsutil/v3/disk" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -30,7 +30,7 @@ func TestDiskUsage(t *testing.T) { var acc testutil.Accumulator var err error - psAll := []diskUtil.PartitionStat{ + psAll := []disk.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/", @@ -50,7 +50,7 @@ func TestDiskUsage(t *testing.T) { Opts: []string{"ro", "noatime", "nodiratime", "bind"}, }, } - duAll := []diskUtil.UsageStat{ + duAll := []disk.UsageStat{ { Path: "/", Fstype: "ext4", @@ -170,15 +170,15 @@ func TestDiskUsage(t 
*testing.T) { func TestDiskUsageHostMountPrefix(t *testing.T) { tests := []struct { name string - partitionStats []diskUtil.PartitionStat - usageStats []*diskUtil.UsageStat + partitionStats []disk.PartitionStat + usageStats []*disk.UsageStat hostMountPrefix string expectedTags map[string]string expectedFields map[string]interface{} }{ { name: "no host mount prefix", - partitionStats: []diskUtil.PartitionStat{ + partitionStats: []disk.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/", @@ -186,7 +186,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { Opts: []string{"ro"}, }, }, - usageStats: []*diskUtil.UsageStat{ + usageStats: []*disk.UsageStat{ { Path: "/", Total: 42, @@ -211,7 +211,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { }, { name: "host mount prefix", - partitionStats: []diskUtil.PartitionStat{ + partitionStats: []disk.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/hostfs/var", @@ -219,7 +219,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { Opts: []string{"ro"}, }, }, - usageStats: []*diskUtil.UsageStat{ + usageStats: []*disk.UsageStat{ { Path: "/hostfs/var", Total: 42, @@ -245,7 +245,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { }, { name: "host mount prefix exact match", - partitionStats: []diskUtil.PartitionStat{ + partitionStats: []disk.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/hostfs", @@ -253,7 +253,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { Opts: []string{"ro"}, }, }, - usageStats: []*diskUtil.UsageStat{ + usageStats: []*disk.UsageStat{ { Path: "/hostfs", Total: 42, @@ -310,7 +310,7 @@ func TestDiskStats(t *testing.T) { var acc testutil.Accumulator var err error - duAll := []*diskUtil.UsageStat{ + duAll := []*disk.UsageStat{ { Path: "/", Fstype: "ext4", @@ -342,7 +342,7 @@ func TestDiskStats(t *testing.T) { InodesUsed: 1000, }, } - duMountFiltered := []*diskUtil.UsageStat{ + duMountFiltered := []*disk.UsageStat{ { Path: "/", Fstype: "ext4", @@ -354,7 +354,7 @@ func TestDiskStats(t *testing.T) { InodesUsed: 1000, }, } - duOptFiltered := []*diskUtil.UsageStat{ + duOptFiltered := []*disk.UsageStat{ { Path: "/", Fstype: "ext4", @@ -377,7 +377,7 @@ func TestDiskStats(t *testing.T) { }, } - psAll := []*diskUtil.PartitionStat{ + psAll := []*disk.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/", @@ -398,7 +398,7 @@ func TestDiskStats(t *testing.T) { }, } - psMountFiltered := []*diskUtil.PartitionStat{ + psMountFiltered := []*disk.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/", @@ -406,7 +406,7 @@ func TestDiskStats(t *testing.T) { Opts: []string{"ro", "noatime", "nodiratime"}, }, } - psOptFiltered := []*diskUtil.PartitionStat{ + psOptFiltered := []*disk.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/", @@ -495,13 +495,13 @@ func TestDiskUsageIssues(t *testing.T) { tests := []struct { name string prefix string - du diskUtil.UsageStat + du disk.UsageStat expected []telegraf.Metric }{ { name: "success", prefix: "", - du: diskUtil.UsageStat{ + du: disk.UsageStat{ Total: 256, Free: 46, Used: 200, @@ -557,7 +557,7 @@ func TestDiskUsageIssues(t *testing.T) { { name: "issue 10297", prefix: "/host", - du: diskUtil.UsageStat{ + du: disk.UsageStat{ Total: 256, Free: 46, Used: 200, @@ -630,7 +630,7 @@ func TestDiskUsageIssues(t *testing.T) { t.Setenv("HOST_PROC", hostProcPrefix) t.Setenv("HOST_SYS", hostSysPrefix) - partitions, err := diskUtil.Partitions(true) + partitions, err := disk.Partitions(true) require.NoError(t, err) // Mock the disk usage diff --git a/plugins/inputs/docker/client.go b/plugins/inputs/docker/client.go 
index 5c9afc6f2..6ef50aeb6 100644 --- a/plugins/inputs/docker/client.go +++ b/plugins/inputs/docker/client.go @@ -9,7 +9,7 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/system" - dockerClient "github.com/docker/docker/client" + "github.com/docker/docker/client" ) var ( @@ -30,11 +30,11 @@ type Client interface { } func NewEnvClient() (Client, error) { - client, err := dockerClient.NewClientWithOpts(dockerClient.FromEnv) + dockerClient, err := client.NewClientWithOpts(client.FromEnv) if err != nil { return nil, err } - return &SocketClient{client}, nil + return &SocketClient{dockerClient}, nil } func NewClient(host string, tlsConfig *tls.Config) (Client, error) { @@ -43,20 +43,20 @@ func NewClient(host string, tlsConfig *tls.Config) (Client, error) { } httpClient := &http.Client{Transport: transport} - client, err := dockerClient.NewClientWithOpts( - dockerClient.WithHTTPHeaders(defaultHeaders), - dockerClient.WithHTTPClient(httpClient), - dockerClient.WithAPIVersionNegotiation(), - dockerClient.WithHost(host)) + dockerClient, err := client.NewClientWithOpts( + client.WithHTTPHeaders(defaultHeaders), + client.WithHTTPClient(httpClient), + client.WithAPIVersionNegotiation(), + client.WithHost(host)) if err != nil { return nil, err } - return &SocketClient{client}, nil + return &SocketClient{dockerClient}, nil } type SocketClient struct { - client *dockerClient.Client + client *client.Client } func (c *SocketClient) Info(ctx context.Context) (system.Info, error) { diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index b5e41c9ee..a8d8a71c0 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -17,7 +17,7 @@ import ( "github.com/Masterminds/semver/v3" "github.com/docker/docker/api/types" - typeContainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" @@ -25,8 +25,8 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal/choice" - dockerint "github.com/influxdata/telegraf/internal/docker" - tlsint "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/internal/docker" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -61,7 +61,7 @@ type Docker struct { Log telegraf.Logger - tlsint.ClientConfig + common_tls.ClientConfig newEnvClient func() (Client, error) newClient func(string, *tls.Config) (Client, error) @@ -218,7 +218,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { } // List containers - opts := typeContainer.ListOptions{ + opts := container.ListOptions{ Filters: filterArgs, } ctx, cancel := context.WithTimeout(context.Background(), time.Duration(d.Timeout)) @@ -235,13 +235,13 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { // Get container data var wg sync.WaitGroup wg.Add(len(containers)) - for _, container := range containers { + for _, cntnr := range containers { go func(c types.Container) { defer wg.Done() if err := d.gatherContainer(c, acc); err != nil { acc.AddError(err) } - }(container) + }(cntnr) } wg.Wait() @@ -468,12 +468,12 @@ func parseContainerName(containerNames []string) string { } func (d *Docker) gatherContainer( - container types.Container, + cntnr types.Container, acc telegraf.Accumulator, 
) error { - var v *typeContainer.StatsResponse + var v *container.StatsResponse - cname := parseContainerName(container.Names) + cname := parseContainerName(cntnr.Names) if cname == "" { return nil @@ -483,7 +483,7 @@ func (d *Docker) gatherContainer( return nil } - imageName, imageVersion := dockerint.ParseImage(container.Image) + imageName, imageVersion := docker.ParseImage(cntnr.Image) tags := map[string]string{ "engine_host": d.engineHost, @@ -494,13 +494,13 @@ func (d *Docker) gatherContainer( } if d.IncludeSourceTag { - tags["source"] = hostnameFromID(container.ID) + tags["source"] = hostnameFromID(cntnr.ID) } ctx, cancel := context.WithTimeout(context.Background(), time.Duration(d.Timeout)) defer cancel() - r, err := d.client.ContainerStats(ctx, container.ID, false) + r, err := d.client.ContainerStats(ctx, cntnr.ID, false) if errors.Is(err, context.DeadlineExceeded) { return errStatsTimeout } @@ -519,26 +519,26 @@ func (d *Docker) gatherContainer( daemonOSType := r.OSType // Add labels to tags - for k, label := range container.Labels { + for k, label := range cntnr.Labels { if d.labelFilter.Match(k) { tags[k] = label } } - return d.gatherContainerInspect(container, acc, tags, daemonOSType, v) + return d.gatherContainerInspect(cntnr, acc, tags, daemonOSType, v) } func (d *Docker) gatherContainerInspect( - container types.Container, + cntnr types.Container, acc telegraf.Accumulator, tags map[string]string, daemonOSType string, - v *typeContainer.StatsResponse, + v *container.StatsResponse, ) error { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(d.Timeout)) defer cancel() - info, err := d.client.ContainerInspect(ctx, container.ID) + info, err := d.client.ContainerInspect(ctx, cntnr.ID) if errors.Is(err, context.DeadlineExceeded) { return errInspectTimeout } @@ -566,7 +566,7 @@ func (d *Docker) gatherContainerInspect( "pid": info.State.Pid, "exitcode": info.State.ExitCode, "restart_count": info.RestartCount, - "container_id": container.ID, + "container_id": cntnr.ID, } finished, err := time.Parse(time.RFC3339, info.State.FinishedAt) @@ -599,13 +599,13 @@ func (d *Docker) gatherContainerInspect( } } - d.parseContainerStats(v, acc, tags, container.ID, daemonOSType) + d.parseContainerStats(v, acc, tags, cntnr.ID, daemonOSType) return nil } func (d *Docker) parseContainerStats( - stat *typeContainer.StatsResponse, + stat *container.StatsResponse, acc telegraf.Accumulator, tags map[string]string, id, daemonOSType string, @@ -781,7 +781,7 @@ func (d *Docker) parseContainerStats( } // Make a map of devices to their block io stats -func getDeviceStatMap(blkioStats typeContainer.BlkioStats) map[string]map[string]interface{} { +func getDeviceStatMap(blkioStats container.BlkioStats) map[string]map[string]interface{} { deviceStatMap := make(map[string]map[string]interface{}) for _, metric := range blkioStats.IoServiceBytesRecursive { @@ -844,7 +844,7 @@ func getDeviceStatMap(blkioStats typeContainer.BlkioStats) map[string]map[string func (d *Docker) gatherBlockIOMetrics( acc telegraf.Accumulator, - stat *typeContainer.StatsResponse, + stat *container.StatsResponse, tags map[string]string, tm time.Time, id string, @@ -921,24 +921,24 @@ func (d *Docker) gatherDiskUsage(acc telegraf.Accumulator, opts types.DiskUsageO acc.AddFields(duName, fields, tags, now) // Containers - for _, container := range du.Containers { + for _, cntnr := range du.Containers { fields := map[string]interface{}{ - "size_rw": container.SizeRw, - "size_root_fs": container.SizeRootFs, + "size_rw": 
cntnr.SizeRw, + "size_root_fs": cntnr.SizeRootFs, } - imageName, imageVersion := dockerint.ParseImage(container.Image) + imageName, imageVersion := docker.ParseImage(cntnr.Image) tags := map[string]string{ "engine_host": d.engineHost, "server_version": d.serverVersion, - "container_name": parseContainerName(container.Names), + "container_name": parseContainerName(cntnr.Names), "container_image": imageName, "container_version": imageVersion, } if d.IncludeSourceTag { - tags["source"] = hostnameFromID(container.ID) + tags["source"] = hostnameFromID(cntnr.ID) } acc.AddFields(duName, fields, tags, now) @@ -958,7 +958,7 @@ func (d *Docker) gatherDiskUsage(acc telegraf.Accumulator, opts types.DiskUsageO } if len(image.RepoTags) > 0 { - imageName, imageVersion := dockerint.ParseImage(image.RepoTags[0]) + imageName, imageVersion := docker.ParseImage(image.RepoTags[0]) tags["image_name"] = imageName tags["image_version"] = imageVersion } diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index ee39cf43d..0172a1a55 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -11,7 +11,7 @@ import ( "time" "github.com/docker/docker/api/types" - typeContainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/system" "github.com/stretchr/testify/require" @@ -23,8 +23,8 @@ import ( type MockClient struct { InfoF func() (system.Info, error) - ContainerListF func(options typeContainer.ListOptions) ([]types.Container, error) - ContainerStatsF func(containerID string) (typeContainer.StatsResponseReader, error) + ContainerListF func(options container.ListOptions) ([]types.Container, error) + ContainerStatsF func(containerID string) (container.StatsResponseReader, error) ContainerInspectF func() (types.ContainerJSON, error) ServiceListF func() ([]swarm.Service, error) TaskListF func() ([]swarm.Task, error) @@ -38,11 +38,11 @@ func (c *MockClient) Info(context.Context) (system.Info, error) { return c.InfoF() } -func (c *MockClient) ContainerList(_ context.Context, options typeContainer.ListOptions) ([]types.Container, error) { +func (c *MockClient) ContainerList(_ context.Context, options container.ListOptions) ([]types.Container, error) { return c.ContainerListF(options) } -func (c *MockClient) ContainerStats(_ context.Context, containerID string, _ bool) (typeContainer.StatsResponseReader, error) { +func (c *MockClient) ContainerStats(_ context.Context, containerID string, _ bool) (container.StatsResponseReader, error) { return c.ContainerStatsF(containerID) } @@ -78,10 +78,10 @@ var baseClient = MockClient{ InfoF: func() (system.Info, error) { return info, nil }, - ContainerListF: func(typeContainer.ListOptions) ([]types.Container, error) { + ContainerListF: func(container.ListOptions) ([]types.Container, error) { return containerList, nil }, - ContainerStatsF: func(s string) (typeContainer.StatsResponseReader, error) { + ContainerStatsF: func(s string) (container.StatsResponseReader, error) { return containerStats(s), nil }, ContainerInspectF: func() (types.ContainerJSON, error) { @@ -426,10 +426,10 @@ func TestDocker_WindowsMemoryContainerStats(t *testing.T) { InfoF: func() (system.Info, error) { return info, nil }, - ContainerListF: func(typeContainer.ListOptions) ([]types.Container, error) { + ContainerListF: func(container.ListOptions) ([]types.Container, error) { return containerList, nil }, - 
ContainerStatsF: func(string) (typeContainer.StatsResponseReader, error) { + ContainerStatsF: func(string) (container.StatsResponseReader, error) { return containerStatsWindows(), nil }, ContainerInspectF: func() (types.ContainerJSON, error) { @@ -561,7 +561,7 @@ func TestContainerLabels(t *testing.T) { newClientFunc := func(string, *tls.Config) (Client, error) { client := baseClient - client.ContainerListF = func(typeContainer.ListOptions) ([]types.Container, error) { + client.ContainerListF = func(container.ListOptions) ([]types.Container, error) { return []types.Container{tt.container}, nil } return &client, nil @@ -681,10 +681,10 @@ func TestContainerNames(t *testing.T) { newClientFunc := func(string, *tls.Config) (Client, error) { client := baseClient - client.ContainerListF = func(typeContainer.ListOptions) ([]types.Container, error) { + client.ContainerListF = func(container.ListOptions) ([]types.Container, error) { return containerList, nil } - client.ContainerStatsF = func(s string) (typeContainer.StatsResponseReader, error) { + client.ContainerStatsF = func(s string) (container.StatsResponseReader, error) { return containerStats(s), nil } @@ -891,7 +891,7 @@ func TestContainerStatus(t *testing.T) { acc testutil.Accumulator newClientFunc = func(string, *tls.Config) (Client, error) { client := baseClient - client.ContainerListF = func(typeContainer.ListOptions) ([]types.Container, error) { + client.ContainerListF = func(container.ListOptions) ([]types.Container, error) { return containerList[:1], nil } client.ContainerInspectF = func() (types.ContainerJSON, error) { @@ -1176,7 +1176,7 @@ func TestContainerStateFilter(t *testing.T) { newClientFunc := func(string, *tls.Config) (Client, error) { client := baseClient - client.ContainerListF = func(options typeContainer.ListOptions) ([]types.Container, error) { + client.ContainerListF = func(options container.ListOptions) ([]types.Container, error) { for k, v := range tt.expected { actual := options.Filters.Get(k) sort.Strings(actual) @@ -1212,15 +1212,15 @@ func TestContainerName(t *testing.T) { name: "container stats name is preferred", clientFunc: func(string, *tls.Config) (Client, error) { client := baseClient - client.ContainerListF = func(typeContainer.ListOptions) ([]types.Container, error) { + client.ContainerListF = func(container.ListOptions) ([]types.Container, error) { var containers []types.Container containers = append(containers, types.Container{ Names: []string{"/logspout/foo"}, }) return containers, nil } - client.ContainerStatsF = func(string) (typeContainer.StatsResponseReader, error) { - return typeContainer.StatsResponseReader{ + client.ContainerStatsF = func(string) (container.StatsResponseReader, error) { + return container.StatsResponseReader{ Body: io.NopCloser(strings.NewReader(`{"name": "logspout"}`)), }, nil } @@ -1232,15 +1232,15 @@ func TestContainerName(t *testing.T) { name: "container stats without name uses container list name", clientFunc: func(string, *tls.Config) (Client, error) { client := baseClient - client.ContainerListF = func(typeContainer.ListOptions) ([]types.Container, error) { + client.ContainerListF = func(container.ListOptions) ([]types.Container, error) { var containers []types.Container containers = append(containers, types.Container{ Names: []string{"/logspout"}, }) return containers, nil } - client.ContainerStatsF = func(string) (typeContainer.StatsResponseReader, error) { - return typeContainer.StatsResponseReader{ + client.ContainerStatsF = func(string) 
(container.StatsResponseReader, error) { + return container.StatsResponseReader{ Body: io.NopCloser(strings.NewReader(`{}`)), }, nil } @@ -1304,7 +1304,7 @@ func TestHostnameFromID(t *testing.T) { func Test_parseContainerStatsPerDeviceAndTotal(t *testing.T) { type args struct { - stat *typeContainer.StatsResponse + stat *container.StatsResponse tags map[string]string id string perDeviceInclude []string diff --git a/plugins/inputs/docker_log/docker_log.go b/plugins/inputs/docker_log/docker_log.go index 892cd0e10..dff33dd94 100644 --- a/plugins/inputs/docker_log/docker_log.go +++ b/plugins/inputs/docker_log/docker_log.go @@ -16,7 +16,7 @@ import ( "unicode" "github.com/docker/docker/api/types" - typeContainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/pkg/stdcopy" @@ -24,7 +24,7 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal/docker" - tlsint "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -53,7 +53,7 @@ type DockerLogs struct { ContainerStateExclude []string `toml:"container_state_exclude"` IncludeSourceTag bool `toml:"source_tag"` - tlsint.ClientConfig + common_tls.ClientConfig newEnvClient func() (Client, error) newClient func(string, *tls.Config) (Client, error) @@ -62,7 +62,7 @@ type DockerLogs struct { labelFilter filter.Filter containerFilter filter.Filter stateFilter filter.Filter - opts typeContainer.ListOptions + opts container.ListOptions wg sync.WaitGroup mu sync.Mutex containerList map[string]context.CancelFunc @@ -117,7 +117,7 @@ func (d *DockerLogs) Init() error { } if filterArgs.Len() != 0 { - d.opts = typeContainer.ListOptions{ + d.opts = container.ListOptions{ Filters: filterArgs, } } @@ -206,18 +206,18 @@ func (d *DockerLogs) Gather(acc telegraf.Accumulator) error { return err } - for _, container := range containers { - if d.containerInContainerList(container.ID) { + for _, cntnr := range containers { + if d.containerInContainerList(cntnr.ID) { continue } - containerName := d.matchedContainerName(container.Names) + containerName := d.matchedContainerName(cntnr.Names) if containerName == "" { continue } ctx, cancel := context.WithCancel(context.Background()) - d.addToContainerList(container.ID, cancel) + d.addToContainerList(cntnr.ID, cancel) // Start a new goroutine for every new container that has logs to collect d.wg.Add(1) @@ -229,15 +229,15 @@ func (d *DockerLogs) Gather(acc telegraf.Accumulator) error { if err != nil && !errors.Is(err, context.Canceled) { acc.AddError(err) } - }(container) + }(cntnr) } return nil } -func (d *DockerLogs) hasTTY(ctx context.Context, container types.Container) (bool, error) { +func (d *DockerLogs) hasTTY(ctx context.Context, cntnr types.Container) (bool, error) { ctx, cancel := context.WithTimeout(ctx, time.Duration(d.Timeout)) defer cancel() - c, err := d.client.ContainerInspect(ctx, container.ID) + c, err := d.client.ContainerInspect(ctx, cntnr.ID) if err != nil { return false, err } @@ -247,10 +247,10 @@ func (d *DockerLogs) hasTTY(ctx context.Context, container types.Container) (boo func (d *DockerLogs) tailContainerLogs( ctx context.Context, acc telegraf.Accumulator, - container types.Container, + cntnr types.Container, containerName string, ) error { - imageName, imageVersion := 
docker.ParseImage(container.Image) + imageName, imageVersion := docker.ParseImage(cntnr.Image) tags := map[string]string{ "container_name": containerName, "container_image": imageName, @@ -258,17 +258,17 @@ func (d *DockerLogs) tailContainerLogs( } if d.IncludeSourceTag { - tags["source"] = hostnameFromID(container.ID) + tags["source"] = hostnameFromID(cntnr.ID) } // Add matching container labels as tags - for k, label := range container.Labels { + for k, label := range cntnr.Labels { if d.labelFilter.Match(k) { tags[k] = label } } - hasTTY, err := d.hasTTY(ctx, container) + hasTTY, err := d.hasTTY(ctx, cntnr) if err != nil { return err } @@ -276,13 +276,13 @@ func (d *DockerLogs) tailContainerLogs( since := time.Time{}.Format(time.RFC3339Nano) if !d.FromBeginning { d.lastRecordMtx.Lock() - if ts, ok := d.lastRecord[container.ID]; ok { + if ts, ok := d.lastRecord[cntnr.ID]; ok { since = ts.Format(time.RFC3339Nano) } d.lastRecordMtx.Unlock() } - logOptions := typeContainer.LogsOptions{ + logOptions := container.LogsOptions{ ShowStdout: true, ShowStderr: true, Timestamps: true, @@ -291,7 +291,7 @@ func (d *DockerLogs) tailContainerLogs( Since: since, } - logReader, err := d.client.ContainerLogs(ctx, container.ID, logOptions) + logReader, err := d.client.ContainerLogs(ctx, cntnr.ID, logOptions) if err != nil { return err } @@ -304,17 +304,17 @@ func (d *DockerLogs) tailContainerLogs( // multiplexed. var last time.Time if hasTTY { - last, err = tailStream(acc, tags, container.ID, logReader, "tty") + last, err = tailStream(acc, tags, cntnr.ID, logReader, "tty") } else { - last, err = tailMultiplexed(acc, tags, container.ID, logReader) + last, err = tailMultiplexed(acc, tags, cntnr.ID, logReader) } if err != nil { return err } - if ts, ok := d.lastRecord[container.ID]; !ok || ts.Before(last) { + if ts, ok := d.lastRecord[cntnr.ID]; !ok || ts.Before(last) { d.lastRecordMtx.Lock() - d.lastRecord[container.ID] = last + d.lastRecord[cntnr.ID] = last d.lastRecordMtx.Unlock() } diff --git a/plugins/inputs/dpdk/dpdk_connector.go b/plugins/inputs/dpdk/dpdk_connector.go index b87c67eda..fb6e48cb9 100644 --- a/plugins/inputs/dpdk/dpdk_connector.go +++ b/plugins/inputs/dpdk/dpdk_connector.go @@ -11,7 +11,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" + parsers_json "github.com/influxdata/telegraf/plugins/parsers/json" ) const ( @@ -159,7 +159,7 @@ func (conn *dpdkConnector) processCommand(acc telegraf.Accumulator, log telegraf return } - jf := jsonparser.JSONFlattener{} + jf := parsers_json.JSONFlattener{} err = jf.FullFlattenJSON("", value, true, true) if err != nil { acc.AddError(fmt.Errorf("failed to flatten response: %w", err)) diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index 9341abc56..199f28271 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -18,9 +18,9 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - httpconfig "github.com/influxdata/telegraf/plugins/common/http" + common_http "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/inputs" - jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" + parsers_json "github.com/influxdata/telegraf/plugins/parsers/json" ) //go:embed sample.conf @@ -132,7 +132,7 @@ type Elasticsearch struct { Log 
telegraf.Logger `toml:"-"` client *http.Client - httpconfig.HTTPClientConfig + common_http.HTTPClientConfig serverInfo map[string]serverInfo serverInfoMutex sync.Mutex @@ -152,7 +152,7 @@ func NewElasticsearch() *Elasticsearch { return &Elasticsearch{ ClusterStatsOnlyFromMaster: true, ClusterHealthLevel: "indices", - HTTPClientConfig: httpconfig.HTTPClientConfig{ + HTTPClientConfig: common_http.HTTPClientConfig{ ResponseHeaderTimeout: config.Duration(5 * time.Second), Timeout: config.Duration(5 * time.Second), }, @@ -401,7 +401,7 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) er if s == nil { continue } - f := jsonparser.JSONFlattener{} + f := parsers_json.JSONFlattener{} // parse Json, ignoring strings and bools err := f.FlattenJSON("", s) if err != nil { @@ -523,7 +523,7 @@ func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator) } for p, s := range stats { - f := jsonparser.JSONFlattener{} + f := parsers_json.JSONFlattener{} // parse json, including bools and strings err := f.FullFlattenJSON("", s, true, true) if err != nil { @@ -557,7 +557,7 @@ func (e *Elasticsearch) gatherIndicesStats(url string, acc telegraf.Accumulator) // All Stats for m, s := range indicesStats.All { // parse Json, ignoring strings and bools - jsonParser := jsonparser.JSONFlattener{} + jsonParser := parsers_json.JSONFlattener{} err := jsonParser.FullFlattenJSON("_", s, true, true) if err != nil { return err @@ -639,7 +639,7 @@ func (e *Elasticsearch) gatherSingleIndexStats(name string, index indexStat, now "total": index.Total, } for m, s := range stats { - f := jsonparser.JSONFlattener{} + f := parsers_json.JSONFlattener{} // parse Json, getting strings and bools err := f.FullFlattenJSON("", s, true, true) if err != nil { @@ -652,7 +652,7 @@ func (e *Elasticsearch) gatherSingleIndexStats(name string, index indexStat, now for shardNumber, shards := range index.Shards { for _, shard := range shards { // Get Shard Stats - flattened := jsonparser.JSONFlattener{} + flattened := parsers_json.JSONFlattener{} err := flattened.FullFlattenJSON("", shard, true, true) if err != nil { return err diff --git a/plugins/inputs/elasticsearch_query/elasticsearch_query.go b/plugins/inputs/elasticsearch_query/elasticsearch_query.go index 74856d170..99c7323eb 100644 --- a/plugins/inputs/elasticsearch_query/elasticsearch_query.go +++ b/plugins/inputs/elasticsearch_query/elasticsearch_query.go @@ -16,7 +16,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - httpconfig "github.com/influxdata/telegraf/plugins/common/http" + common_http "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -35,7 +35,7 @@ type ElasticsearchQuery struct { Log telegraf.Logger `toml:"-"` httpclient *http.Client - httpconfig.HTTPClientConfig + common_http.HTTPClientConfig esClient *elastic5.Client } @@ -242,7 +242,7 @@ func init() { inputs.Add("elasticsearch_query", func() telegraf.Input { return &ElasticsearchQuery{ HealthCheckInterval: config.Duration(time.Second * 10), - HTTPClientConfig: httpconfig.HTTPClientConfig{ + HTTPClientConfig: common_http.HTTPClientConfig{ ResponseHeaderTimeout: config.Duration(5 * time.Second), Timeout: config.Duration(5 * time.Second), }, diff --git a/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go b/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go index e9da6bcaa..305c16998 100644 --- a/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go +++ 
b/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go @@ -19,7 +19,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - httpconfig "github.com/influxdata/telegraf/plugins/common/http" + common_http "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/testutil" ) @@ -538,7 +538,7 @@ func setupIntegrationTest(t *testing.T) (*testutil.Container, error) { ) e := &ElasticsearchQuery{ URLs: []string{url}, - HTTPClientConfig: httpconfig.HTTPClientConfig{ + HTTPClientConfig: common_http.HTTPClientConfig{ ResponseHeaderTimeout: config.Duration(30 * time.Second), Timeout: config.Duration(30 * time.Second), }, @@ -618,7 +618,7 @@ func TestElasticsearchQueryIntegration(t *testing.T) { URLs: []string{ fmt.Sprintf("http://%s:%s", container.Address, container.Ports[servicePort]), }, - HTTPClientConfig: httpconfig.HTTPClientConfig{ + HTTPClientConfig: common_http.HTTPClientConfig{ ResponseHeaderTimeout: config.Duration(30 * time.Second), Timeout: config.Duration(30 * time.Second), }, @@ -684,7 +684,7 @@ func TestElasticsearchQueryIntegration_getMetricFields(t *testing.T) { URLs: []string{ fmt.Sprintf("http://%s:%s", container.Address, container.Ports[servicePort]), }, - HTTPClientConfig: httpconfig.HTTPClientConfig{ + HTTPClientConfig: common_http.HTTPClientConfig{ ResponseHeaderTimeout: config.Duration(30 * time.Second), Timeout: config.Duration(30 * time.Second), }, diff --git a/plugins/inputs/ethtool/namespace_linux.go b/plugins/inputs/ethtool/namespace_linux.go index 68df8ff69..a83ba9b62 100644 --- a/plugins/inputs/ethtool/namespace_linux.go +++ b/plugins/inputs/ethtool/namespace_linux.go @@ -5,7 +5,7 @@ import ( "net" "runtime" - ethtoolLib "github.com/safchain/ethtool" + "github.com/safchain/ethtool" "github.com/vishvananda/netns" "github.com/influxdata/telegraf" @@ -24,7 +24,7 @@ type NamespacedResult struct { type NamespaceGoroutine struct { name string handle netns.NsHandle - ethtoolClient *ethtoolLib.Ethtool + ethtoolClient *ethtool.Ethtool c chan NamespacedAction Log telegraf.Logger } @@ -71,7 +71,7 @@ func (n *NamespaceGoroutine) Stats(intf NamespacedInterface) (map[string]uint64, func (n *NamespaceGoroutine) Get(intf NamespacedInterface) (map[string]uint64, error) { result, err := n.Do(func(n *NamespaceGoroutine) (interface{}, error) { - ecmd := ethtoolLib.EthtoolCmd{} + ecmd := ethtool.EthtoolCmd{} speed32, err := n.ethtoolClient.CmdGet(&ecmd, intf.Name) if err != nil { return nil, err @@ -134,7 +134,7 @@ func (n *NamespaceGoroutine) Start() error { } // Every namespace needs its own connection to ethtool - e, err := ethtoolLib.NewEthtool() + e, err := ethtool.NewEthtool() if err != nil { n.Log.Errorf("Could not create ethtool client for namespace %q: %s", n.name, err.Error()) started <- err diff --git a/plugins/inputs/eventhub_consumer/eventhub_consumer.go b/plugins/inputs/eventhub_consumer/eventhub_consumer.go index c7331b99b..48d28c3b9 100644 --- a/plugins/inputs/eventhub_consumer/eventhub_consumer.go +++ b/plugins/inputs/eventhub_consumer/eventhub_consumer.go @@ -9,7 +9,7 @@ import ( "sync" "time" - eventhubClient "github.com/Azure/azure-event-hubs-go/v3" + eventhub "github.com/Azure/azure-event-hubs-go/v3" "github.com/Azure/azure-event-hubs-go/v3/persist" "github.com/influxdata/telegraf" @@ -62,7 +62,7 @@ type EventHub struct { Log telegraf.Logger `toml:"-"` // Azure - hub *eventhubClient.Hub + hub *eventhub.Hub cancel context.CancelFunc wg sync.WaitGroup @@ -91,7 +91,7 @@ func (e *EventHub) Init() (err 
error) { } // Set hub options - hubOpts := []eventhubClient.HubOption{} + hubOpts := []eventhub.HubOption{} if e.PersistenceDir != "" { persister, err := persist.NewFilePersister(e.PersistenceDir) @@ -99,20 +99,20 @@ func (e *EventHub) Init() (err error) { return err } - hubOpts = append(hubOpts, eventhubClient.HubWithOffsetPersistence(persister)) + hubOpts = append(hubOpts, eventhub.HubWithOffsetPersistence(persister)) } if e.UserAgent != "" { - hubOpts = append(hubOpts, eventhubClient.HubWithUserAgent(e.UserAgent)) + hubOpts = append(hubOpts, eventhub.HubWithUserAgent(e.UserAgent)) } else { - hubOpts = append(hubOpts, eventhubClient.HubWithUserAgent(internal.ProductToken())) + hubOpts = append(hubOpts, eventhub.HubWithUserAgent(internal.ProductToken())) } // Create event hub connection if e.ConnectionString != "" { - e.hub, err = eventhubClient.NewHubFromConnectionString(e.ConnectionString, hubOpts...) + e.hub, err = eventhub.NewHubFromConnectionString(e.ConnectionString, hubOpts...) } else { - e.hub, err = eventhubClient.NewHubFromEnvironment(hubOpts...) + e.hub, err = eventhub.NewHubFromEnvironment(hubOpts...) } return err @@ -155,25 +155,25 @@ func (e *EventHub) Start(acc telegraf.Accumulator) error { return nil } -func (e *EventHub) configureReceiver() []eventhubClient.ReceiveOption { - receiveOpts := []eventhubClient.ReceiveOption{} +func (e *EventHub) configureReceiver() []eventhub.ReceiveOption { + receiveOpts := []eventhub.ReceiveOption{} if e.ConsumerGroup != "" { - receiveOpts = append(receiveOpts, eventhubClient.ReceiveWithConsumerGroup(e.ConsumerGroup)) + receiveOpts = append(receiveOpts, eventhub.ReceiveWithConsumerGroup(e.ConsumerGroup)) } if !e.FromTimestamp.IsZero() { - receiveOpts = append(receiveOpts, eventhubClient.ReceiveFromTimestamp(e.FromTimestamp)) + receiveOpts = append(receiveOpts, eventhub.ReceiveFromTimestamp(e.FromTimestamp)) } else if e.Latest { - receiveOpts = append(receiveOpts, eventhubClient.ReceiveWithLatestOffset()) + receiveOpts = append(receiveOpts, eventhub.ReceiveWithLatestOffset()) } if e.PrefetchCount != 0 { - receiveOpts = append(receiveOpts, eventhubClient.ReceiveWithPrefetchCount(e.PrefetchCount)) + receiveOpts = append(receiveOpts, eventhub.ReceiveWithPrefetchCount(e.PrefetchCount)) } if e.Epoch != 0 { - receiveOpts = append(receiveOpts, eventhubClient.ReceiveWithEpoch(e.Epoch)) + receiveOpts = append(receiveOpts, eventhub.ReceiveWithEpoch(e.Epoch)) } return receiveOpts @@ -182,7 +182,7 @@ func (e *EventHub) configureReceiver() []eventhubClient.ReceiveOption { // OnMessage handles an Event. When this function returns without error the // Event is immediately accepted and the offset is updated. If an error is // returned the Event is marked for redelivery. -func (e *EventHub) onMessage(ctx context.Context, event *eventhubClient.Event) error { +func (e *EventHub) onMessage(ctx context.Context, event *eventhub.Event) error { metrics, err := e.createMetrics(event) if err != nil { return err @@ -264,7 +264,7 @@ func deepCopyMetrics(in []telegraf.Metric) []telegraf.Metric { } // CreateMetrics returns the Metrics from the Event. 
-func (e *EventHub) createMetrics(event *eventhubClient.Event) ([]telegraf.Metric, error) { +func (e *EventHub) createMetrics(event *eventhub.Event) ([]telegraf.Metric, error) { metrics, err := e.parser.Parse(event.Data) if err != nil { return nil, err diff --git a/plugins/inputs/exec/run_notwinodws.go b/plugins/inputs/exec/run_notwinodws.go index c4604945c..adad0925b 100644 --- a/plugins/inputs/exec/run_notwinodws.go +++ b/plugins/inputs/exec/run_notwinodws.go @@ -6,12 +6,13 @@ import ( "bytes" "fmt" "os" - osExec "os/exec" + "os/exec" "syscall" "time" - "github.com/influxdata/telegraf/internal" "github.com/kballard/go-shellquote" + + "github.com/influxdata/telegraf/internal" ) func (c CommandRunner) Run( @@ -24,7 +25,7 @@ func (c CommandRunner) Run( return nil, nil, fmt.Errorf("exec: unable to parse command: %w", err) } - cmd := osExec.Command(splitCmd[0], splitCmd[1:]...) + cmd := exec.Command(splitCmd[0], splitCmd[1:]...) cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} if len(environments) > 0 { diff --git a/plugins/inputs/exec/run_windows.go b/plugins/inputs/exec/run_windows.go index 2d825a5ac..e88d268b4 100644 --- a/plugins/inputs/exec/run_windows.go +++ b/plugins/inputs/exec/run_windows.go @@ -6,12 +6,13 @@ import ( "bytes" "fmt" "os" - osExec "os/exec" + "os/exec" "syscall" "time" - "github.com/influxdata/telegraf/internal" "github.com/kballard/go-shellquote" + + "github.com/influxdata/telegraf/internal" ) func (c CommandRunner) Run( @@ -24,7 +25,7 @@ func (c CommandRunner) Run( return nil, nil, fmt.Errorf("exec: unable to parse command: %w", err) } - cmd := osExec.Command(splitCmd[0], splitCmd[1:]...) + cmd := exec.Command(splitCmd[0], splitCmd[1:]...) cmd.SysProcAttr = &syscall.SysProcAttr{ CreationFlags: syscall.CREATE_NEW_PROCESS_GROUP, } diff --git a/plugins/inputs/execd/execd_test.go b/plugins/inputs/execd/execd_test.go index 34a0f748d..6368e2d21 100644 --- a/plugins/inputs/execd/execd_test.go +++ b/plugins/inputs/execd/execd_test.go @@ -19,7 +19,7 @@ import ( "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/parsers/prometheus" - influxSerializer "github.com/influxdata/telegraf/plugins/serializers/influx" + serializers_influx "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/influxdata/telegraf/testutil" ) @@ -413,7 +413,7 @@ func TestMain(m *testing.M) { func runCounterProgram() error { envMetricName := os.Getenv("METRIC_NAME") - serializer := &influxSerializer.Serializer{} + serializer := &serializers_influx.Serializer{} if err := serializer.Init(); err != nil { return err } diff --git a/plugins/inputs/github/github.go b/plugins/inputs/github/github.go index a05477517..ea5a2c177 100644 --- a/plugins/inputs/github/github.go +++ b/plugins/inputs/github/github.go @@ -11,7 +11,7 @@ import ( "sync" "time" - githubLib "github.com/google/go-github/v32/github" + "github.com/google/go-github/v32/github" "golang.org/x/oauth2" "github.com/influxdata/telegraf" @@ -30,7 +30,7 @@ type GitHub struct { AdditionalFields []string `toml:"additional_fields"` EnterpriseBaseURL string `toml:"enterprise_base_url"` HTTPTimeout config.Duration `toml:"http_timeout"` - githubClient *githubLib.Client + githubClient *github.Client obfuscatedToken string @@ -40,7 +40,7 @@ type GitHub struct { } // Create GitHub Client -func (g *GitHub) createGitHubClient(ctx context.Context) (*githubLib.Client, error) { +func (g *GitHub) createGitHubClient(ctx context.Context) (*github.Client, error) { 
httpClient := &http.Client{ Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, @@ -65,11 +65,11 @@ func (g *GitHub) createGitHubClient(ctx context.Context) (*githubLib.Client, err return g.newGithubClient(httpClient) } -func (g *GitHub) newGithubClient(httpClient *http.Client) (*githubLib.Client, error) { +func (g *GitHub) newGithubClient(httpClient *http.Client) (*github.Client, error) { if g.EnterpriseBaseURL != "" { - return githubLib.NewEnterpriseClient(g.EnterpriseBaseURL, "", httpClient) + return github.NewEnterpriseClient(g.EnterpriseBaseURL, "", httpClient) } - return githubLib.NewClient(httpClient), nil + return github.NewClient(httpClient), nil } func (*GitHub) SampleConfig() string { @@ -148,8 +148,8 @@ func (g *GitHub) Gather(acc telegraf.Accumulator) error { return nil } -func (g *GitHub) handleRateLimit(response *githubLib.Response, err error) { - var rlErr *githubLib.RateLimitError +func (g *GitHub) handleRateLimit(response *github.Response, err error) { + var rlErr *github.RateLimitError if err == nil { g.RateLimit.Set(int64(response.Rate.Limit)) g.RateRemaining.Set(int64(response.Rate.Remaining)) @@ -168,7 +168,7 @@ func splitRepositoryName(repositoryName string) (owner, repository string, err e return splits[0], splits[1], nil } -func getLicense(rI *githubLib.Repository) string { +func getLicense(rI *github.Repository) string { if licenseName := rI.GetLicense().GetName(); licenseName != "" { return licenseName } @@ -176,7 +176,7 @@ func getLicense(rI *githubLib.Repository) string { return "None" } -func getTags(repositoryInfo *githubLib.Repository) map[string]string { +func getTags(repositoryInfo *github.Repository) map[string]string { return map[string]string{ "owner": repositoryInfo.GetOwner().GetLogin(), "name": repositoryInfo.GetName(), @@ -185,7 +185,7 @@ func getTags(repositoryInfo *githubLib.Repository) map[string]string { } } -func getFields(repositoryInfo *githubLib.Repository) map[string]interface{} { +func getFields(repositoryInfo *github.Repository) map[string]interface{} { return map[string]interface{}{ "stars": repositoryInfo.GetStargazersCount(), "subscribers": repositoryInfo.GetSubscribersCount(), @@ -198,9 +198,9 @@ func getFields(repositoryInfo *githubLib.Repository) map[string]interface{} { } func (g *GitHub) getPullRequestFields(ctx context.Context, owner, repo string) (map[string]interface{}, error) { - options := githubLib.SearchOptions{ + options := github.SearchOptions{ TextMatch: false, - ListOptions: githubLib.ListOptions{ + ListOptions: github.ListOptions{ PerPage: 100, Page: 1, }, diff --git a/plugins/inputs/gnmi/gnmi.go b/plugins/inputs/gnmi/gnmi.go index 5015bbeb6..9b5dac254 100644 --- a/plugins/inputs/gnmi/gnmi.go +++ b/plugins/inputs/gnmi/gnmi.go @@ -12,14 +12,14 @@ import ( "time" "github.com/google/gnxi/utils/xpath" - gnmiLib "github.com/openconfig/gnmi/proto/gnmi" + "github.com/openconfig/gnmi/proto/gnmi" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/choice" - internaltls "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/common/yangmodel" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -65,7 +65,7 @@ type GNMI struct { KeepaliveTimeout config.Duration `toml:"keepalive_timeout"` YangModelPaths []string `toml:"yang_model_paths"` Log telegraf.Logger `toml:"-"` - 
internaltls.ClientConfig + common_tls.ClientConfig // Internal state internalAliases map[*pathInfo]string @@ -85,7 +85,7 @@ type Subscription struct { HeartbeatInterval config.Duration `toml:"heartbeat_interval"` TagOnly bool `toml:"tag_only" deprecated:"1.25.0;1.35.0;please use 'tag_subscription's instead"` - fullPath *gnmiLib.Path + fullPath *gnmi.Path } // Tag Subscription for a gNMI client @@ -201,15 +201,15 @@ func (c *GNMI) Init() error { c.Log.Debugf("Internal alias mapping: %+v", c.internalAliases) // Warn about configures insecure cipher suites - insecure := internaltls.InsecureCiphers(c.ClientConfig.TLSCipherSuites) + insecure := common_tls.InsecureCiphers(c.ClientConfig.TLSCipherSuites) if len(insecure) > 0 { c.Log.Warnf("Configured insecure cipher suites: %s", strings.Join(insecure, ",")) } // Check the TLS configuration if _, err := c.ClientConfig.TLSConfig(); err != nil { - if errors.Is(err, internaltls.ErrCipherUnsupported) { - secure, insecure := internaltls.Ciphers() + if errors.Is(err, common_tls.ErrCipherUnsupported) { + secure, insecure := common_tls.Ciphers() c.Log.Info("Supported secure ciphers:") for _, name := range secure { c.Log.Infof(" %s", name) @@ -310,18 +310,18 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error { return nil } -func (s *Subscription) buildSubscription() (*gnmiLib.Subscription, error) { +func (s *Subscription) buildSubscription() (*gnmi.Subscription, error) { gnmiPath, err := parsePath(s.Origin, s.Path, "") if err != nil { return nil, err } - mode, ok := gnmiLib.SubscriptionMode_value[strings.ToUpper(s.SubscriptionMode)] + mode, ok := gnmi.SubscriptionMode_value[strings.ToUpper(s.SubscriptionMode)] if !ok { return nil, fmt.Errorf("invalid subscription mode %s", s.SubscriptionMode) } - return &gnmiLib.Subscription{ + return &gnmi.Subscription{ Path: gnmiPath, - Mode: gnmiLib.SubscriptionMode(mode), + Mode: gnmi.SubscriptionMode(mode), HeartbeatInterval: uint64(time.Duration(s.HeartbeatInterval).Nanoseconds()), SampleInterval: uint64(time.Duration(s.SampleInterval).Nanoseconds()), SuppressRedundant: s.SuppressRedundant, @@ -329,9 +329,9 @@ func (s *Subscription) buildSubscription() (*gnmiLib.Subscription, error) { } // Create a new gNMI SubscribeRequest -func (c *GNMI) newSubscribeRequest() (*gnmiLib.SubscribeRequest, error) { +func (c *GNMI) newSubscribeRequest() (*gnmi.SubscribeRequest, error) { // Create subscription objects - subscriptions := make([]*gnmiLib.Subscription, 0, len(c.Subscriptions)+len(c.TagSubscriptions)) + subscriptions := make([]*gnmi.Subscription, 0, len(c.Subscriptions)+len(c.TagSubscriptions)) for _, subscription := range c.TagSubscriptions { sub, err := subscription.buildSubscription() if err != nil { @@ -363,12 +363,12 @@ func (c *GNMI) newSubscribeRequest() (*gnmiLib.SubscribeRequest, error) { return nil, fmt.Errorf("unsupported encoding %s", c.Encoding) } - return &gnmiLib.SubscribeRequest{ - Request: &gnmiLib.SubscribeRequest_Subscribe{ - Subscribe: &gnmiLib.SubscriptionList{ + return &gnmi.SubscribeRequest{ + Request: &gnmi.SubscribeRequest_Subscribe{ + Subscribe: &gnmi.SubscriptionList{ Prefix: gnmiPath, - Mode: gnmiLib.SubscriptionList_STREAM, - Encoding: gnmiLib.Encoding(gnmiLib.Encoding_value[strings.ToUpper(c.Encoding)]), + Mode: gnmi.SubscriptionList_STREAM, + Encoding: gnmi.Encoding(gnmi.Encoding_value[strings.ToUpper(c.Encoding)]), Subscription: subscriptions, UpdatesOnly: c.UpdatesOnly, }, @@ -377,7 +377,7 @@ func (c *GNMI) newSubscribeRequest() (*gnmiLib.SubscribeRequest, error) { } // ParsePath from 
XPath-like string to gNMI path structure -func parsePath(origin, pathToParse, target string) (*gnmiLib.Path, error) { +func parsePath(origin, pathToParse, target string) (*gnmi.Path, error) { gnmiPath, err := xpath.ToGNMIPath(pathToParse) if err != nil { return nil, err diff --git a/plugins/inputs/gnmi/gnmi_test.go b/plugins/inputs/gnmi/gnmi_test.go index 57d01183c..49b3d3899 100644 --- a/plugins/inputs/gnmi/gnmi_test.go +++ b/plugins/inputs/gnmi/gnmi_test.go @@ -11,8 +11,8 @@ import ( "testing" "time" - gnmiLib "github.com/openconfig/gnmi/proto/gnmi" - gnmiExt "github.com/openconfig/gnmi/proto/gnmi_ext" + "github.com/openconfig/gnmi/proto/gnmi" + "github.com/openconfig/gnmi/proto/gnmi_ext" "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/metadata" @@ -22,7 +22,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" - jnprHeader "github.com/influxdata/telegraf/plugins/inputs/gnmi/extensions/jnpr_gnmi_extention" + "github.com/influxdata/telegraf/plugins/inputs/gnmi/extensions/jnpr_gnmi_extention" "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/testutil" ) @@ -34,12 +34,12 @@ func TestParsePath(t *testing.T) { require.NoError(t, err) require.Equal(t, "theorigin", parsed.Origin) require.Equal(t, "thetarget", parsed.Target) - require.Equal(t, []*gnmiLib.PathElem{{Name: "foo"}, {Name: "bar"}, + require.Equal(t, []*gnmi.PathElem{{Name: "foo"}, {Name: "bar"}, {Name: "bla", Key: map[string]string{"shoo": "woo", "shoop": "/woop/"}}, {Name: "z"}}, parsed.Elem) parsed, err = parsePath("", "", "") require.NoError(t, err) - require.Equal(t, &gnmiLib.Path{}, parsed) + require.Equal(t, &gnmi.Path{}, parsed) parsed, err = parsePath("", "/foo[[", "") require.Nil(t, parsed) @@ -47,23 +47,23 @@ func TestParsePath(t *testing.T) { } type MockServer struct { - SubscribeF func(gnmiLib.GNMI_SubscribeServer) error + SubscribeF func(gnmi.GNMI_SubscribeServer) error GRPCServer *grpc.Server } -func (s *MockServer) Capabilities(context.Context, *gnmiLib.CapabilityRequest) (*gnmiLib.CapabilityResponse, error) { +func (s *MockServer) Capabilities(context.Context, *gnmi.CapabilityRequest) (*gnmi.CapabilityResponse, error) { return nil, nil } -func (s *MockServer) Get(context.Context, *gnmiLib.GetRequest) (*gnmiLib.GetResponse, error) { +func (s *MockServer) Get(context.Context, *gnmi.GetRequest) (*gnmi.GetResponse, error) { return nil, nil } -func (s *MockServer) Set(context.Context, *gnmiLib.SetRequest) (*gnmiLib.SetResponse, error) { +func (s *MockServer) Set(context.Context, *gnmi.SetRequest) (*gnmi.SetResponse, error) { return nil, nil } -func (s *MockServer) Subscribe(server gnmiLib.GNMI_SubscribeServer) error { +func (s *MockServer) Subscribe(server gnmi.GNMI_SubscribeServer) error { return s.SubscribeF(server) } @@ -73,12 +73,12 @@ func TestWaitError(t *testing.T) { grpcServer := grpc.NewServer() gnmiServer := &MockServer{ - SubscribeF: func(gnmiLib.GNMI_SubscribeServer) error { + SubscribeF: func(gnmi.GNMI_SubscribeServer) error { return errors.New("testerror") }, GRPCServer: grpcServer, } - gnmiLib.RegisterGNMIServer(grpcServer, gnmiServer) + gnmi.RegisterGNMIServer(grpcServer, gnmiServer) plugin := &GNMI{ Log: testutil.Logger{}, @@ -115,7 +115,7 @@ func TestUsernamePassword(t *testing.T) { grpcServer := grpc.NewServer() gnmiServer := &MockServer{ - SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { + SubscribeF: func(server 
gnmi.GNMI_SubscribeServer) error { metadata, ok := metadata.FromIncomingContext(server.Context()) if !ok { return errors.New("failed to get metadata") @@ -135,7 +135,7 @@ func TestUsernamePassword(t *testing.T) { }, GRPCServer: grpcServer, } - gnmiLib.RegisterGNMIServer(grpcServer, gnmiServer) + gnmi.RegisterGNMIServer(grpcServer, gnmiServer) plugin := &GNMI{ Log: testutil.Logger{}, @@ -168,12 +168,12 @@ func TestUsernamePassword(t *testing.T) { require.ErrorContains(t, acc.Errors[0], "aborted gNMI subscription: rpc error: code = Unknown desc = success") } -func mockGNMINotification() *gnmiLib.Notification { - return &gnmiLib.Notification{ +func mockGNMINotification() *gnmi.Notification { + return &gnmi.Notification{ Timestamp: 1543236572000000000, - Prefix: &gnmiLib.Path{ + Prefix: &gnmi.Path{ Origin: "type", - Elem: []*gnmiLib.PathElem{ + Elem: []*gnmi.PathElem{ { Name: "model", Key: map[string]string{"foo": "bar"}, @@ -181,35 +181,35 @@ func mockGNMINotification() *gnmiLib.Notification { }, Target: "subscription", }, - Update: []*gnmiLib.Update{ + Update: []*gnmi.Update{ { - Path: &gnmiLib.Path{ - Elem: []*gnmiLib.PathElem{ + Path: &gnmi.Path{ + Elem: []*gnmi.PathElem{ {Name: "some"}, { Name: "path", Key: map[string]string{"name": "str", "uint64": "1234"}}, }, }, - Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_IntVal{IntVal: 5678}}, + Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_IntVal{IntVal: 5678}}, }, { - Path: &gnmiLib.Path{ - Elem: []*gnmiLib.PathElem{ + Path: &gnmi.Path{ + Elem: []*gnmi.PathElem{ {Name: "other"}, {Name: "path"}, }, }, - Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_StringVal{StringVal: "foobar"}}, + Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_StringVal{StringVal: "foobar"}}, }, { - Path: &gnmiLib.Path{ - Elem: []*gnmiLib.PathElem{ + Path: &gnmi.Path{ + Elem: []*gnmi.PathElem{ {Name: "other"}, {Name: "this"}, }, }, - Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_StringVal{StringVal: "that"}}, + Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_StringVal{StringVal: "that"}}, }, }, } @@ -238,20 +238,20 @@ func TestNotification(t *testing.T) { }, }, server: &MockServer{ - SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { + SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { notification := mockGNMINotification() - err := server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_Update{Update: notification}}) + err := server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) if err != nil { return err } - err = server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_SyncResponse{SyncResponse: true}}) + err = server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}}) if err != nil { return err } notification.Prefix.Elem[0].Key["foo"] = "bar2" notification.Update[0].Path.Elem[1].Key["name"] = "str2" - notification.Update[0].Val = &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_JsonVal{JsonVal: []byte{'"', '1', '2', '3', '"'}}} - return server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_Update{Update: notification}}) + notification.Update[0].Val = &gnmi.TypedValue{Value: &gnmi.TypedValue_JsonVal{JsonVal: []byte{'"', '1', '2', '3', '"'}}} + return server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) }, }, expected: []telegraf.Metric{ @@ -327,14 +327,14 @@ func TestNotification(t *testing.T) { }, }, server: &MockServer{ - SubscribeF: 
func(server gnmiLib.GNMI_SubscribeServer) error { - response := &gnmiLib.SubscribeResponse{ - Response: &gnmiLib.SubscribeResponse_Update{ - Update: &gnmiLib.Notification{ + SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + response := &gnmi.SubscribeResponse{ + Response: &gnmi.SubscribeResponse_Update{ + Update: &gnmi.Notification{ Timestamp: 1543236572000000000, - Prefix: &gnmiLib.Path{ + Prefix: &gnmi.Path{ Origin: "type", - Elem: []*gnmiLib.PathElem{ + Elem: []*gnmi.PathElem{ { Name: "state", }, @@ -351,11 +351,11 @@ func TestNotification(t *testing.T) { }, Target: "subscription", }, - Update: []*gnmiLib.Update{ + Update: []*gnmi.Update{ { - Path: &gnmiLib.Path{}, - Val: &gnmiLib.TypedValue{ - Value: &gnmiLib.TypedValue_IntVal{IntVal: 42}, + Path: &gnmi.Path{}, + Val: &gnmi.TypedValue{ + Value: &gnmi.TypedValue_IntVal{IntVal: 42}, }, }, }, @@ -403,17 +403,17 @@ func TestNotification(t *testing.T) { }, }, server: &MockServer{ - SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { - tagResponse := &gnmiLib.SubscribeResponse{ - Response: &gnmiLib.SubscribeResponse_Update{ - Update: &gnmiLib.Notification{ + SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + tagResponse := &gnmi.SubscribeResponse{ + Response: &gnmi.SubscribeResponse_Update{ + Update: &gnmi.Notification{ Timestamp: 1543236571000000000, - Prefix: &gnmiLib.Path{}, - Update: []*gnmiLib.Update{ + Prefix: &gnmi.Path{}, + Update: []*gnmi.Update{ { - Path: &gnmiLib.Path{ + Path: &gnmi.Path{ Origin: "", - Elem: []*gnmiLib.PathElem{ + Elem: []*gnmi.PathElem{ { Name: "interfaces", }, @@ -430,8 +430,8 @@ func TestNotification(t *testing.T) { }, Target: "", }, - Val: &gnmiLib.TypedValue{ - Value: &gnmiLib.TypedValue_StringVal{StringVal: "foo"}, + Val: &gnmi.TypedValue{ + Value: &gnmi.TypedValue_StringVal{StringVal: "foo"}, }, }, }, @@ -441,19 +441,19 @@ func TestNotification(t *testing.T) { if err := server.Send(tagResponse); err != nil { return err } - if err := server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_SyncResponse{SyncResponse: true}}); err != nil { + if err := server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}}); err != nil { return err } - taggedResponse := &gnmiLib.SubscribeResponse{ - Response: &gnmiLib.SubscribeResponse_Update{ - Update: &gnmiLib.Notification{ + taggedResponse := &gnmi.SubscribeResponse{ + Response: &gnmi.SubscribeResponse_Update{ + Update: &gnmi.Notification{ Timestamp: 1543236572000000000, - Prefix: &gnmiLib.Path{}, - Update: []*gnmiLib.Update{ + Prefix: &gnmi.Path{}, + Update: []*gnmi.Update{ { - Path: &gnmiLib.Path{ + Path: &gnmi.Path{ Origin: "", - Elem: []*gnmiLib.PathElem{ + Elem: []*gnmi.PathElem{ { Name: "interfaces", }, @@ -473,8 +473,8 @@ func TestNotification(t *testing.T) { }, Target: "", }, - Val: &gnmiLib.TypedValue{ - Value: &gnmiLib.TypedValue_IntVal{IntVal: 42}, + Val: &gnmi.TypedValue{ + Value: &gnmi.TypedValue_IntVal{IntVal: 42}, }, }, }, @@ -526,17 +526,17 @@ func TestNotification(t *testing.T) { }, }, server: &MockServer{ - SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { - tagResponse := &gnmiLib.SubscribeResponse{ - Response: &gnmiLib.SubscribeResponse_Update{ - Update: &gnmiLib.Notification{ + SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + tagResponse := &gnmi.SubscribeResponse{ + Response: &gnmi.SubscribeResponse_Update{ + Update: &gnmi.Notification{ Timestamp: 1543236571000000000, - Prefix: &gnmiLib.Path{}, - Update: []*gnmiLib.Update{ + Prefix: 
&gnmi.Path{}, + Update: []*gnmi.Update{ { - Path: &gnmiLib.Path{ + Path: &gnmi.Path{ Origin: "", - Elem: []*gnmiLib.PathElem{ + Elem: []*gnmi.PathElem{ { Name: "network-instances", }, @@ -570,8 +570,8 @@ func TestNotification(t *testing.T) { }, Target: "", }, - Val: &gnmiLib.TypedValue{ - Value: &gnmiLib.TypedValue_StringVal{StringVal: "EXAMPLE-PEER"}, + Val: &gnmi.TypedValue{ + Value: &gnmi.TypedValue_StringVal{StringVal: "EXAMPLE-PEER"}, }, }, }, @@ -581,19 +581,19 @@ func TestNotification(t *testing.T) { if err := server.Send(tagResponse); err != nil { return err } - if err := server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_SyncResponse{SyncResponse: true}}); err != nil { + if err := server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}}); err != nil { return err } - taggedResponse := &gnmiLib.SubscribeResponse{ - Response: &gnmiLib.SubscribeResponse_Update{ - Update: &gnmiLib.Notification{ + taggedResponse := &gnmi.SubscribeResponse{ + Response: &gnmi.SubscribeResponse_Update{ + Update: &gnmi.Notification{ Timestamp: 1543236572000000000, - Prefix: &gnmiLib.Path{}, - Update: []*gnmiLib.Update{ + Prefix: &gnmi.Path{}, + Update: []*gnmi.Update{ { - Path: &gnmiLib.Path{ + Path: &gnmi.Path{ Origin: "", - Elem: []*gnmiLib.PathElem{ + Elem: []*gnmi.PathElem{ { Name: "network-instances", }, @@ -627,8 +627,8 @@ func TestNotification(t *testing.T) { }, Target: "", }, - Val: &gnmiLib.TypedValue{ - Value: &gnmiLib.TypedValue_StringVal{StringVal: "ESTABLISHED"}, + Val: &gnmi.TypedValue{ + Value: &gnmi.TypedValue_StringVal{StringVal: "ESTABLISHED"}, }, }, }, @@ -674,17 +674,17 @@ func TestNotification(t *testing.T) { }, }, server: &MockServer{ - SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { - if err := server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_SyncResponse{SyncResponse: true}}); err != nil { + SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + if err := server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}}); err != nil { return err } - response := &gnmiLib.SubscribeResponse{ - Response: &gnmiLib.SubscribeResponse_Update{ - Update: &gnmiLib.Notification{ + response := &gnmi.SubscribeResponse{ + Response: &gnmi.SubscribeResponse_Update{ + Update: &gnmi.Notification{ Timestamp: 1668762813698611837, - Prefix: &gnmiLib.Path{ + Prefix: &gnmi.Path{ Origin: "openconfig", - Elem: []*gnmiLib.PathElem{ + Elem: []*gnmi.PathElem{ {Name: "interfaces"}, {Name: "interface", Key: map[string]string{"name": "Ethernet1"}}, {Name: "state"}, @@ -692,54 +692,54 @@ func TestNotification(t *testing.T) { }, Target: "OC-YANG", }, - Update: []*gnmiLib.Update{ + Update: []*gnmi.Update{ { - Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{{Name: "in-broadcast-pkts"}}}, - Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_UintVal{UintVal: 0}}, + Path: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "in-broadcast-pkts"}}}, + Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 0}}, }, { - Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{{Name: "in-discards"}}}, - Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_UintVal{UintVal: 0}}, + Path: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "in-discards"}}}, + Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 0}}, }, { - Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{{Name: "in-errors"}}}, - Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_UintVal{UintVal: 0}}, + Path: &gnmi.Path{Elem: 
[]*gnmi.PathElem{{Name: "in-errors"}}}, + Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 0}}, }, { - Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{{Name: "in-fcs-errors"}}}, - Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_UintVal{UintVal: 0}}, + Path: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "in-fcs-errors"}}}, + Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 0}}, }, { - Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{{Name: "in-unicast-pkts"}}}, - Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_UintVal{UintVal: 0}}, + Path: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "in-unicast-pkts"}}}, + Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 0}}, }, { - Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{{Name: "out-broadcast-pkts"}}}, - Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_UintVal{UintVal: 0}}, + Path: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "out-broadcast-pkts"}}}, + Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 0}}, }, { - Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{{Name: "out-discards"}}}, - Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_UintVal{UintVal: 0}}, + Path: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "out-discards"}}}, + Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 0}}, }, { - Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{{Name: "out-errors"}}}, - Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_UintVal{UintVal: 0}}, + Path: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "out-errors"}}}, + Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 0}}, }, { - Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{{Name: "out-multicast-pkts"}}}, - Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_UintVal{UintVal: 0}}, + Path: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "out-multicast-pkts"}}}, + Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 0}}, }, { - Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{{Name: "out-octets"}}}, - Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_UintVal{UintVal: 0}}, + Path: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "out-octets"}}}, + Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 0}}, }, { - Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{{Name: "out-pkts"}}}, - Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_UintVal{UintVal: 0}}, + Path: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "out-pkts"}}}, + Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 0}}, }, { - Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{{Name: "out-unicast-pkts"}}}, - Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_UintVal{UintVal: 0}}, + Path: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "out-unicast-pkts"}}}, + Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 0}}, }, }, }, @@ -791,90 +791,90 @@ func TestNotification(t *testing.T) { }, }, server: &MockServer{ - SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { - if err := server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_SyncResponse{SyncResponse: true}}); err != nil { + SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + if err := server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}}); err != nil { return err } - response := &gnmiLib.SubscribeResponse{ - Response: &gnmiLib.SubscribeResponse_Update{ - Update: &gnmiLib.Notification{ + response := &gnmi.SubscribeResponse{ + Response: &gnmi.SubscribeResponse_Update{ + Update: &gnmi.Notification{ Timestamp: 1668771585733542546, - Prefix: 
&gnmiLib.Path{ - Elem: []*gnmiLib.PathElem{ + Prefix: &gnmi.Path{ + Elem: []*gnmi.PathElem{ {Name: "openconfig-platform:components"}, {Name: "component", Key: map[string]string{"name": "TEMP 1"}}, {Name: "state"}, }, Target: "OC-YANG", }, - Update: []*gnmiLib.Update{ + Update: []*gnmi.Update{ { - Path: &gnmiLib.Path{ - Elem: []*gnmiLib.PathElem{ + Path: &gnmi.Path{ + Elem: []*gnmi.PathElem{ {Name: "temperature"}, {Name: "low-threshold"}, }}, - Val: &gnmiLib.TypedValue{ - Value: &gnmiLib.TypedValue_FloatVal{FloatVal: 0}, + Val: &gnmi.TypedValue{ + Value: &gnmi.TypedValue_FloatVal{FloatVal: 0}, }, }, { - Path: &gnmiLib.Path{ - Elem: []*gnmiLib.PathElem{ + Path: &gnmi.Path{ + Elem: []*gnmi.PathElem{ {Name: "temperature"}, {Name: "timestamp"}, }}, - Val: &gnmiLib.TypedValue{ - Value: &gnmiLib.TypedValue_StringVal{StringVal: "2022-11-18T11:39:26Z"}, + Val: &gnmi.TypedValue{ + Value: &gnmi.TypedValue_StringVal{StringVal: "2022-11-18T11:39:26Z"}, }, }, { - Path: &gnmiLib.Path{ - Elem: []*gnmiLib.PathElem{ + Path: &gnmi.Path{ + Elem: []*gnmi.PathElem{ {Name: "temperature"}, {Name: "warning-status"}, }}, - Val: &gnmiLib.TypedValue{ - Value: &gnmiLib.TypedValue_BoolVal{BoolVal: false}, + Val: &gnmi.TypedValue{ + Value: &gnmi.TypedValue_BoolVal{BoolVal: false}, }, }, { - Path: &gnmiLib.Path{ - Elem: []*gnmiLib.PathElem{ + Path: &gnmi.Path{ + Elem: []*gnmi.PathElem{ {Name: "name"}, }}, - Val: &gnmiLib.TypedValue{ - Value: &gnmiLib.TypedValue_StringVal{StringVal: "CPU On-board"}, + Val: &gnmi.TypedValue{ + Value: &gnmi.TypedValue_StringVal{StringVal: "CPU On-board"}, }, }, { - Path: &gnmiLib.Path{ - Elem: []*gnmiLib.PathElem{ + Path: &gnmi.Path{ + Elem: []*gnmi.PathElem{ {Name: "temperature"}, {Name: "critical-high-threshold"}, }}, - Val: &gnmiLib.TypedValue{ - Value: &gnmiLib.TypedValue_FloatVal{FloatVal: 94}, + Val: &gnmi.TypedValue{ + Value: &gnmi.TypedValue_FloatVal{FloatVal: 94}, }, }, { - Path: &gnmiLib.Path{ - Elem: []*gnmiLib.PathElem{ + Path: &gnmi.Path{ + Elem: []*gnmi.PathElem{ {Name: "temperature"}, {Name: "current"}, }}, - Val: &gnmiLib.TypedValue{ - Value: &gnmiLib.TypedValue_FloatVal{FloatVal: 29}, + Val: &gnmi.TypedValue{ + Value: &gnmi.TypedValue_FloatVal{FloatVal: 29}, }, }, { - Path: &gnmiLib.Path{ - Elem: []*gnmiLib.PathElem{ + Path: &gnmi.Path{ + Elem: []*gnmi.PathElem{ {Name: "temperature"}, {Name: "high-threshold"}, }}, - Val: &gnmiLib.TypedValue{ - Value: &gnmiLib.TypedValue_FloatVal{FloatVal: 90}, + Val: &gnmi.TypedValue{ + Value: &gnmi.TypedValue_FloatVal{FloatVal: 90}, }, }, }, @@ -923,48 +923,48 @@ func TestNotification(t *testing.T) { }, }, server: &MockServer{ - SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { - if err := server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_SyncResponse{SyncResponse: true}}); err != nil { + SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + if err := server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}}); err != nil { return err } - response := &gnmiLib.SubscribeResponse{ - Response: &gnmiLib.SubscribeResponse_Update{ - Update: &gnmiLib.Notification{ + response := &gnmi.SubscribeResponse{ + Response: &gnmi.SubscribeResponse_Update{ + Update: &gnmi.Notification{ Timestamp: 1668771585733542546, - Prefix: &gnmiLib.Path{ - Elem: []*gnmiLib.PathElem{ + Prefix: &gnmi.Path{ + Elem: []*gnmi.PathElem{ {Name: "openconfig-platform:components"}, {Name: "component", Key: map[string]string{"name": "CHASSIS0:FPC0"}}, {Name: "state"}, }, Target: "OC-YANG", }, - 
Update: []*gnmiLib.Update{ + Update: []*gnmi.Update{ { - Path: &gnmiLib.Path{ - Elem: []*gnmiLib.PathElem{ + Path: &gnmi.Path{ + Elem: []*gnmi.PathElem{ {Name: "type"}, }}, - Val: &gnmiLib.TypedValue{ - Value: &gnmiLib.TypedValue_StringVal{StringVal: "LINECARD"}, + Val: &gnmi.TypedValue{ + Value: &gnmi.TypedValue_StringVal{StringVal: "LINECARD"}, }, }, }, }, }, - Extension: []*gnmiExt.Extension{{ - Ext: &gnmiExt.Extension_RegisteredExt{ - RegisteredExt: &gnmiExt.RegisteredExtension{ + Extension: []*gnmi_ext.Extension{{ + Ext: &gnmi_ext.Extension_RegisteredExt{ + RegisteredExt: &gnmi_ext.RegisteredExtension{ // Juniper Header Extension // EID_JUNIPER_TELEMETRY_HEADER = 1; Id: 1, - Msg: func(jnprExt *jnprHeader.GnmiJuniperTelemetryHeaderExtension) []byte { + Msg: func(jnprExt *jnpr_gnmi_extention.GnmiJuniperTelemetryHeaderExtension) []byte { b, err := proto.Marshal(jnprExt) if err != nil { return nil } return b - }(&jnprHeader.GnmiJuniperTelemetryHeaderExtension{ComponentId: 15, SubComponentId: 1, Component: "PICD"}), + }(&jnpr_gnmi_extention.GnmiJuniperTelemetryHeaderExtension{ComponentId: 15, SubComponentId: 1, Component: "PICD"}), }, }, }}, @@ -1001,7 +1001,7 @@ func TestNotification(t *testing.T) { grpcServer := grpc.NewServer() tt.server.GRPCServer = grpcServer - gnmiLib.RegisterGNMIServer(grpcServer, tt.server) + gnmi.RegisterGNMIServer(grpcServer, tt.server) var acc testutil.Accumulator require.NoError(t, tt.plugin.Init()) @@ -1051,13 +1051,13 @@ func TestRedial(t *testing.T) { grpcServer := grpc.NewServer() gnmiServer := &MockServer{ - SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { + SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { notification := mockGNMINotification() - return server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_Update{Update: notification}}) + return server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) }, GRPCServer: grpcServer, } - gnmiLib.RegisterGNMIServer(grpcServer, gnmiServer) + gnmi.RegisterGNMIServer(grpcServer, gnmiServer) var wg sync.WaitGroup wg.Add(1) @@ -1081,16 +1081,16 @@ func TestRedial(t *testing.T) { grpcServer = grpc.NewServer() gnmiServer = &MockServer{ - SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { + SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { notification := mockGNMINotification() notification.Prefix.Elem[0].Key["foo"] = "bar2" notification.Update[0].Path.Elem[1].Key["name"] = "str2" - notification.Update[0].Val = &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_BoolVal{BoolVal: false}} - return server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_Update{Update: notification}}) + notification.Update[0].Val = &gnmi.TypedValue{Value: &gnmi.TypedValue_BoolVal{BoolVal: false}} + return server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) }, GRPCServer: grpcServer, } - gnmiLib.RegisterGNMIServer(grpcServer, gnmiServer) + gnmi.RegisterGNMIServer(grpcServer, gnmiServer) wg.Add(1) go func() { @@ -1131,7 +1131,7 @@ func TestCases(t *testing.T) { require.NoError(t, err) var entries []json.RawMessage require.NoError(t, json.Unmarshal(buf, &entries)) - responses := make([]gnmiLib.SubscribeResponse, len(entries)) + responses := make([]gnmi.SubscribeResponse, len(entries)) for i, entry := range entries { require.NoError(t, protojson.Unmarshal(entry, &responses[i])) } @@ -1163,9 +1163,9 @@ func TestCases(t *testing.T) { require.Len(t, cfg.Inputs, 1) // Prepare the 
server response - responseFunction := func(server gnmiLib.GNMI_SubscribeServer) error { - sync := &gnmiLib.SubscribeResponse{ - Response: &gnmiLib.SubscribeResponse_SyncResponse{ + responseFunction := func(server gnmi.GNMI_SubscribeServer) error { + sync := &gnmi.SubscribeResponse{ + Response: &gnmi.SubscribeResponse_SyncResponse{ SyncResponse: true, }, } @@ -1187,7 +1187,7 @@ func TestCases(t *testing.T) { SubscribeF: responseFunction, GRPCServer: grpcServer, } - gnmiLib.RegisterGNMIServer(grpcServer, gnmiServer) + gnmi.RegisterGNMIServer(grpcServer, gnmiServer) // Setup the plugin plugin := cfg.Inputs[0].Input.(*GNMI) diff --git a/plugins/inputs/gnmi/handler.go b/plugins/inputs/gnmi/handler.go index 0b420e857..7ff683e8e 100644 --- a/plugins/inputs/gnmi/handler.go +++ b/plugins/inputs/gnmi/handler.go @@ -13,8 +13,8 @@ import ( "strings" "time" - gnmiLib "github.com/openconfig/gnmi/proto/gnmi" - gnmiExt "github.com/openconfig/gnmi/proto/gnmi_ext" + "github.com/openconfig/gnmi/proto/gnmi" + "github.com/openconfig/gnmi/proto/gnmi_ext" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" @@ -26,7 +26,7 @@ import ( "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/common/yangmodel" - jnprHeader "github.com/influxdata/telegraf/plugins/inputs/gnmi/extensions/jnpr_gnmi_extention" + "github.com/influxdata/telegraf/plugins/inputs/gnmi/extensions/jnpr_gnmi_extention" "github.com/influxdata/telegraf/selfstat" ) @@ -51,7 +51,7 @@ type handler struct { } // SubscribeGNMI and extract telemetry data -func (h *handler) subscribeGNMI(ctx context.Context, acc telegraf.Accumulator, tlscfg *tls.Config, request *gnmiLib.SubscribeRequest) error { +func (h *handler) subscribeGNMI(ctx context.Context, acc telegraf.Accumulator, tlscfg *tls.Config, request *gnmi.SubscribeRequest) error { var creds credentials.TransportCredentials if tlscfg != nil { creds = credentials.NewTLS(tlscfg) @@ -78,7 +78,7 @@ func (h *handler) subscribeGNMI(ctx context.Context, acc telegraf.Accumulator, t } defer client.Close() - subscribeClient, err := gnmiLib.NewGNMIClient(client).Subscribe(ctx) + subscribeClient, err := gnmi.NewGNMIClient(client).Subscribe(ctx) if err != nil { return fmt.Errorf("failed to setup subscription: %w", err) } @@ -99,7 +99,7 @@ func (h *handler) subscribeGNMI(ctx context.Context, acc telegraf.Accumulator, t defer h.log.Debugf("Connection to gNMI device %s closed", h.address) for ctx.Err() == nil { - var reply *gnmiLib.SubscribeResponse + var reply *gnmi.SubscribeResponse if reply, err = subscribeClient.Recv(); err != nil { if !errors.Is(err, io.EOF) && ctx.Err() == nil { connectStat.Set(0) @@ -117,7 +117,7 @@ func (h *handler) subscribeGNMI(ctx context.Context, acc telegraf.Accumulator, t h.log.Debugf("Got update_%v: %s", t, string(buf)) } } - if response, ok := reply.Response.(*gnmiLib.SubscribeResponse_Update); ok { + if response, ok := reply.Response.(*gnmi.SubscribeResponse_Update); ok { h.handleSubscribeResponseUpdate(acc, response, reply.GetExtension()) } } @@ -127,7 +127,7 @@ func (h *handler) subscribeGNMI(ctx context.Context, acc telegraf.Accumulator, t } // Handle SubscribeResponse_Update message from gNMI and parse contained telemetry data -func (h *handler) handleSubscribeResponseUpdate(acc telegraf.Accumulator, response *gnmiLib.SubscribeResponse_Update, extension []*gnmiExt.Extension) { +func (h *handler) handleSubscribeResponseUpdate(acc telegraf.Accumulator, 
response *gnmi.SubscribeResponse_Update, extension []*gnmi_ext.Extension) { grouper := metric.NewSeriesGrouper() timestamp := time.Unix(0, response.Update.Timestamp) @@ -144,7 +144,7 @@ func (h *handler) handleSubscribeResponseUpdate(acc telegraf.Accumulator, respon // Juniper Header extension // Decode it only if user requested it if choice.Contains("juniper_header", h.vendorExt) { - juniperHeader := &jnprHeader.GnmiJuniperTelemetryHeaderExtension{} + juniperHeader := &jnpr_gnmi_extention.GnmiJuniperTelemetryHeaderExtension{} if err := proto.Unmarshal(currentExt, juniperHeader); err != nil { h.log.Errorf("unmarshal gnmi Juniper Header extension failed: %v", err) } else { diff --git a/plugins/inputs/gnmi/path.go b/plugins/inputs/gnmi/path.go index cdb074be9..8f2f78224 100644 --- a/plugins/inputs/gnmi/path.go +++ b/plugins/inputs/gnmi/path.go @@ -3,7 +3,7 @@ package gnmi import ( "strings" - gnmiLib "github.com/openconfig/gnmi/proto/gnmi" + "github.com/openconfig/gnmi/proto/gnmi" ) type keySegment struct { @@ -41,7 +41,7 @@ func newInfoFromString(path string) *pathInfo { return info } -func newInfoFromPathWithoutKeys(path *gnmiLib.Path) *pathInfo { +func newInfoFromPathWithoutKeys(path *gnmi.Path) *pathInfo { info := &pathInfo{ origin: path.Origin, segments: make([]segment, 0, len(path.Elem)), @@ -57,7 +57,7 @@ func newInfoFromPathWithoutKeys(path *gnmiLib.Path) *pathInfo { return info } -func newInfoFromPath(paths ...*gnmiLib.Path) *pathInfo { +func newInfoFromPath(paths ...*gnmi.Path) *pathInfo { if len(paths) == 0 { return nil } @@ -101,7 +101,7 @@ func (pi *pathInfo) empty() bool { return len(pi.segments) == 0 } -func (pi *pathInfo) append(paths ...*gnmiLib.Path) *pathInfo { +func (pi *pathInfo) append(paths ...*gnmi.Path) *pathInfo { // Copy the existing info path := &pathInfo{ origin: pi.origin, @@ -209,7 +209,7 @@ func (pi *pathInfo) normalize() { pi.segments = segments } -func (pi *pathInfo) equalsPathNoKeys(path *gnmiLib.Path) bool { +func (pi *pathInfo) equalsPathNoKeys(path *gnmi.Path) bool { if len(pi.segments) != len(path.Elem) { return false } diff --git a/plugins/inputs/gnmi/update_fields.go b/plugins/inputs/gnmi/update_fields.go index a4298b6d8..faf82d5eb 100644 --- a/plugins/inputs/gnmi/update_fields.go +++ b/plugins/inputs/gnmi/update_fields.go @@ -6,8 +6,8 @@ import ( "strconv" "strings" - gnmiLib "github.com/openconfig/gnmi/proto/gnmi" - gnmiValue "github.com/openconfig/gnmi/value" + "github.com/openconfig/gnmi/proto/gnmi" + "github.com/openconfig/gnmi/value" ) type keyValuePair struct { @@ -20,27 +20,27 @@ type updateField struct { value interface{} } -func (h *handler) newFieldsFromUpdate(path *pathInfo, update *gnmiLib.Update) ([]updateField, error) { +func (h *handler) newFieldsFromUpdate(path *pathInfo, update *gnmi.Update) ([]updateField, error) { if update.Val == nil || update.Val.Value == nil { return []updateField{{path: path}}, nil } // Apply some special handling for special types switch v := update.Val.Value.(type) { - case *gnmiLib.TypedValue_AsciiVal: // not handled in ToScalar + case *gnmi.TypedValue_AsciiVal: // not handled in ToScalar return []updateField{{path, v.AsciiVal}}, nil - case *gnmiLib.TypedValue_JsonVal: // requires special path handling + case *gnmi.TypedValue_JsonVal: // requires special path handling return processJSON(path, v.JsonVal) - case *gnmiLib.TypedValue_JsonIetfVal: // requires special path handling + case *gnmi.TypedValue_JsonIetfVal: // requires special path handling return h.processJSONIETF(path, v.JsonIetfVal) } // Convert the 
protobuf "oneof" data to a Golang type. - value, err := gnmiValue.ToScalar(update.Val) + nativeType, err := value.ToScalar(update.Val) if err != nil { return nil, err } - return []updateField{{path, value}}, nil + return []updateField{{path, nativeType}}, nil } func processJSON(path *pathInfo, data []byte) ([]updateField, error) { diff --git a/plugins/inputs/google_cloud_storage/google_cloud_storage_test.go b/plugins/inputs/google_cloud_storage/google_cloud_storage_test.go index f3d94d8c4..3cedc8dff 100644 --- a/plugins/inputs/google_cloud_storage/google_cloud_storage_test.go +++ b/plugins/inputs/google_cloud_storage/google_cloud_storage_test.go @@ -14,7 +14,7 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" - jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" + parsers_json "github.com/influxdata/telegraf/plugins/parsers/json" "github.com/influxdata/telegraf/testutil" ) @@ -180,7 +180,7 @@ func TestRunGatherIterationWithPages(t *testing.T) { } func createParser() telegraf.Parser { - p := &jsonparser.Parser{ + p := &parsers_json.Parser{ MetricName: "cpu", Query: "metrics", TagKeys: []string{"tags_datacenter", "tags_host"}, diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go index c2b74e77c..3853f676c 100644 --- a/plugins/inputs/http/http.go +++ b/plugins/inputs/http/http.go @@ -16,7 +16,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" - httpconfig "github.com/influxdata/telegraf/plugins/common/http" + common_http "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -44,7 +44,7 @@ type HTTP struct { SuccessStatusCodes []int `toml:"success_status_codes"` Log telegraf.Logger `toml:"-"` - httpconfig.HTTPClientConfig + common_http.HTTPClientConfig client *http.Client parserFunc telegraf.ParserFunc diff --git a/plugins/inputs/http/http_test.go b/plugins/inputs/http/http_test.go index 2848228a0..19a500ce3 100644 --- a/plugins/inputs/http/http_test.go +++ b/plugins/inputs/http/http_test.go @@ -19,7 +19,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - httpconfig "github.com/influxdata/telegraf/plugins/common/http" + common_http "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/common/oauth" httpplugin "github.com/influxdata/telegraf/plugins/inputs/http" "github.com/influxdata/telegraf/plugins/parsers/csv" @@ -349,7 +349,7 @@ func TestOAuthClientCredentialsGrant(t *testing.T) { name: "success", plugin: &httpplugin.HTTP{ URLs: []string{u.String() + "/write"}, - HTTPClientConfig: httpconfig.HTTPClientConfig{ + HTTPClientConfig: common_http.HTTPClientConfig{ OAuth2Config: oauth.OAuth2Config{ ClientID: "howdy", ClientSecret: "secret", diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go index c00a64fdc..87386943b 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -27,7 +27,7 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/choice" - tlsint "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -69,7 +69,7 @@ type HTTPListenerV2 struct { BasicPassword string `toml:"basic_password"` 
HTTPHeaderTags map[string]string `toml:"http_header_tags"` - tlsint.ServerConfig + common_tls.ServerConfig tlsConf *tls.Config TimeFunc diff --git a/plugins/inputs/infiniband/infiniband_test.go b/plugins/inputs/infiniband/infiniband_test.go index ccaded0fc..bb9d5de3c 100644 --- a/plugins/inputs/infiniband/infiniband_test.go +++ b/plugins/inputs/infiniband/infiniband_test.go @@ -3,9 +3,11 @@ package infiniband import ( - "github.com/Mellanox/rdmamap" - "github.com/influxdata/telegraf/testutil" "testing" + + "github.com/Mellanox/rdmamap" + + "github.com/influxdata/telegraf/testutil" ) func TestInfiniband(t *testing.T) { diff --git a/plugins/inputs/influxdb_listener/influxdb_listener.go b/plugins/inputs/influxdb_listener/influxdb_listener.go index 30225ea13..2c54088b3 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener.go @@ -16,7 +16,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" - tlsint "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/parsers/influx/influx_upstream" @@ -35,7 +35,7 @@ const ( type InfluxDBListener struct { ServiceAddress string `toml:"service_address"` port int - tlsint.ServerConfig + common_tls.ServerConfig ReadTimeout config.Duration `toml:"read_timeout"` WriteTimeout config.Duration `toml:"write_timeout"` diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go index f53fa437d..612521f97 100644 --- a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go +++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go @@ -20,7 +20,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" - tlsint "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/parsers/influx/influx_upstream" @@ -52,7 +52,7 @@ const ( type InfluxDBV2Listener struct { ServiceAddress string `toml:"service_address"` port int - tlsint.ServerConfig + common_tls.ServerConfig MaxUndeliveredMetrics int `toml:"max_undelivered_metrics"` ReadTimeout config.Duration `toml:"read_timeout"` diff --git a/plugins/inputs/intel_baseband/sock_connector_test.go b/plugins/inputs/intel_baseband/sock_connector_test.go index 76b6eb2a6..a290af3ea 100644 --- a/plugins/inputs/intel_baseband/sock_connector_test.go +++ b/plugins/inputs/intel_baseband/sock_connector_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/influxdata/telegraf/plugins/inputs/intel_baseband/mock" + mocks "github.com/influxdata/telegraf/plugins/inputs/intel_baseband/mock" ) func TestWriteCommandToSocket(t *testing.T) { diff --git a/plugins/inputs/intel_powerstat/intel_powerstat.go b/plugins/inputs/intel_powerstat/intel_powerstat.go index 15604d91b..31a497cf5 100644 --- a/plugins/inputs/intel_powerstat/intel_powerstat.go +++ b/plugins/inputs/intel_powerstat/intel_powerstat.go @@ -13,8 +13,8 @@ import ( "strings" "time" - ptel "github.com/intel/powertelemetry" - cpuUtil 
"github.com/shirou/gopsutil/v3/cpu" + "github.com/intel/powertelemetry" + "github.com/shirou/gopsutil/v3/cpu" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" @@ -90,8 +90,8 @@ func (p *PowerStat) Start(_ telegraf.Accumulator) error { }) var err error - var initErr *ptel.MultiError - p.fetcher, err = ptel.New(opts...) + var initErr *powertelemetry.MultiError + p.fetcher, err = powertelemetry.New(opts...) if err != nil { if !errors.As(err, &initErr) { // Error caused by failing to get information about the CPU, or CPU is not supported. @@ -441,7 +441,7 @@ func (p *PowerStat) addPerCPUMsrMetrics(acc telegraf.Accumulator, cpuID, coreID, } // Read several time-related MSR offsets. - var moduleErr *ptel.ModuleNotInitializedError + var moduleErr *powertelemetry.ModuleNotInitializedError err := p.fetcher.UpdatePerCPUMetrics(cpuID) if err == nil { // Add time-related MSR offset metrics to the accumulator @@ -490,7 +490,7 @@ func (p *PowerStat) addCPUTimeRelatedMsrMetrics(acc telegraf.Accumulator, cpuID, // addCPUPerfMetrics takes an accumulator, and adds to it enabled metrics which rely on perf. func (p *PowerStat) addCPUPerfMetrics(acc telegraf.Accumulator) { - var moduleErr *ptel.ModuleNotInitializedError + var moduleErr *powertelemetry.ModuleNotInitializedError // Read events related to perf-related metrics. err := p.fetcher.ReadPerfEvents() @@ -921,7 +921,7 @@ func (p *PowerStat) addUncoreFrequencyInitialLimits(acc telegraf.Accumulator, pa } // Always add to the accumulator errors not related to module not initialized. - var moduleErr *ptel.ModuleNotInitializedError + var moduleErr *powertelemetry.ModuleNotInitializedError if !errors.As(err, &moduleErr) { acc.AddError(fmt.Errorf("failed to get initial uncore frequency limits for package ID %v and die ID %v: %w", packageID, dieID, err)) return @@ -961,7 +961,7 @@ func (p *PowerStat) addUncoreFrequencyCurrentValues(acc telegraf.Accumulator, pa } // Always add to the accumulator errors not related to module not initialized. - var moduleErr *ptel.ModuleNotInitializedError + var moduleErr *powertelemetry.ModuleNotInitializedError if !errors.As(err, &moduleErr) { acc.AddError(fmt.Errorf("failed to get current uncore frequency values for package ID %v and die ID %v: %w", packageID, dieID, err)) return @@ -1024,7 +1024,7 @@ func getUncoreFreqCurrentValues(fetcher metricFetcher, packageID, dieID int) (un // addMaxTurboFreqLimits fetches the max turbo frequency limits metric for a given package ID, and adds it to the accumulator. func (p *PowerStat) addMaxTurboFreqLimits(acc telegraf.Accumulator, packageID int) { - var moduleErr *ptel.ModuleNotInitializedError + var moduleErr *powertelemetry.ModuleNotInitializedError turboFreqList, err := p.fetcher.GetMaxTurboFreqList(packageID) if err != nil { @@ -1076,7 +1076,7 @@ func (p *PowerStat) addMaxTurboFreqLimits(acc telegraf.Accumulator, packageID in // isHybridCPU is a helper function that takes a slice of MaxTurboFreq structs and returns true if the CPU where these values belong to, // is a hybrid CPU. Otherwise, returns false. -func isHybridCPU(turboFreqList []ptel.MaxTurboFreq) bool { +func isHybridCPU(turboFreqList []powertelemetry.MaxTurboFreq) bool { for _, v := range turboFreqList { if v.Secondary { return true @@ -1089,7 +1089,7 @@ func isHybridCPU(turboFreqList []ptel.MaxTurboFreq) bool { // In case it is not, disableUnsupportedMetrics will disable the option to gather those metrics. // Error is returned if there is an issue with retrieving processor information. 
func (p *PowerStat) disableUnsupportedMetrics() error { - cpus, err := cpuUtil.Info() + cpus, err := cpu.Info() if err != nil { return fmt.Errorf("error occurred while parsing CPU information: %w", err) } @@ -1104,27 +1104,27 @@ func (p *PowerStat) disableUnsupportedMetrics() error { return fmt.Errorf("error occurred while parsing CPU model: %w", err) } - if err := ptel.CheckIfCPUC1StateResidencySupported(cpuModel); err != nil { + if err := powertelemetry.CheckIfCPUC1StateResidencySupported(cpuModel); err != nil { p.disableCPUMetric(cpuC1StateResidency) } - if err := ptel.CheckIfCPUC3StateResidencySupported(cpuModel); err != nil { + if err := powertelemetry.CheckIfCPUC3StateResidencySupported(cpuModel); err != nil { p.disableCPUMetric(cpuC3StateResidency) } - if err := ptel.CheckIfCPUC6StateResidencySupported(cpuModel); err != nil { + if err := powertelemetry.CheckIfCPUC6StateResidencySupported(cpuModel); err != nil { p.disableCPUMetric(cpuC6StateResidency) } - if err := ptel.CheckIfCPUC7StateResidencySupported(cpuModel); err != nil { + if err := powertelemetry.CheckIfCPUC7StateResidencySupported(cpuModel); err != nil { p.disableCPUMetric(cpuC7StateResidency) } - if err := ptel.CheckIfCPUTemperatureSupported(cpuModel); err != nil { + if err := powertelemetry.CheckIfCPUTemperatureSupported(cpuModel); err != nil { p.disableCPUMetric(cpuTemperature) } - if err := ptel.CheckIfCPUBaseFrequencySupported(cpuModel); err != nil { + if err := powertelemetry.CheckIfCPUBaseFrequencySupported(cpuModel); err != nil { p.disablePackageMetric(packageCPUBaseFrequency) } diff --git a/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry.go b/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry.go index 19d8c1510..b572d8d34 100644 --- a/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry.go +++ b/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry.go @@ -22,7 +22,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - internaltls "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" authentication "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/auth" telemetry "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/oc" @@ -43,7 +43,7 @@ type OpenConfigTelemetry struct { RetryDelay config.Duration `toml:"retry_delay"` EnableTLS bool `toml:"enable_tls"` KeepAlivePeriod config.Duration `toml:"keep_alive_period"` - internaltls.ClientConfig + common_tls.ClientConfig Log telegraf.Logger diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index 0c8ec4b8a..76747b967 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -18,10 +18,10 @@ import ( "github.com/influxdata/telegraf/plugins/common/kafka" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" - kafkaOutput "github.com/influxdata/telegraf/plugins/outputs/kafka" + outputs_kafka "github.com/influxdata/telegraf/plugins/outputs/kafka" "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/parsers/value" - influxSerializer "github.com/influxdata/telegraf/plugins/serializers/influx" + serializers_influx "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/influxdata/telegraf/testutil" ) @@ 
-500,10 +500,10 @@ func TestKafkaRoundTripIntegration(t *testing.T) { // Make kafka output t.Logf("rt: starting output plugin") creator := outputs.Outputs["kafka"] - output, ok := creator().(*kafkaOutput.Kafka) + output, ok := creator().(*outputs_kafka.Kafka) require.True(t, ok) - s := &influxSerializer.Serializer{} + s := &serializers_influx.Serializer{} require.NoError(t, s.Init()) output.SetSerializer(s) output.Brokers = brokers @@ -576,10 +576,10 @@ func TestKafkaTimestampSourceIntegration(t *testing.T) { // Make kafka output creator := outputs.Outputs["kafka"] - output, ok := creator().(*kafkaOutput.Kafka) + output, ok := creator().(*outputs_kafka.Kafka) require.True(t, ok) - s := &influxSerializer.Serializer{} + s := &serializers_influx.Serializer{} require.NoError(t, s.Init()) output.SetSerializer(s) output.Brokers = brokers diff --git a/plugins/inputs/kibana/kibana.go b/plugins/inputs/kibana/kibana.go index 5b8220a41..0ecbe4bd6 100644 --- a/plugins/inputs/kibana/kibana.go +++ b/plugins/inputs/kibana/kibana.go @@ -15,7 +15,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - httpconfig "github.com/influxdata/telegraf/plugins/common/http" + common_http "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -95,12 +95,12 @@ type Kibana struct { Log telegraf.Logger `toml:"-"` client *http.Client - httpconfig.HTTPClientConfig + common_http.HTTPClientConfig } func NewKibana() *Kibana { return &Kibana{ - HTTPClientConfig: httpconfig.HTTPClientConfig{ + HTTPClientConfig: common_http.HTTPClientConfig{ Timeout: config.Duration(5 * time.Second), }, } diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer.go b/plugins/inputs/kinesis_consumer/kinesis_consumer.go index 257e97e5a..60b094dfd 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer.go @@ -24,7 +24,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" - internalaws "github.com/influxdata/telegraf/plugins/common/aws" + common_aws "github.com/influxdata/telegraf/plugins/common/aws" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -65,7 +65,7 @@ type ( lastSeqNum *big.Int - internalaws.CredentialConfig + common_aws.CredentialConfig } checkpoint struct { diff --git a/plugins/inputs/kube_inventory/daemonset.go b/plugins/inputs/kube_inventory/daemonset.go index 33f13f5c9..40050955a 100644 --- a/plugins/inputs/kube_inventory/daemonset.go +++ b/plugins/inputs/kube_inventory/daemonset.go @@ -3,7 +3,7 @@ package kube_inventory import ( "context" - v1 "k8s.io/api/apps/v1" + apps "k8s.io/api/apps/v1" "github.com/influxdata/telegraf" ) @@ -19,7 +19,7 @@ func collectDaemonSets(ctx context.Context, acc telegraf.Accumulator, ki *Kubern } } -func (ki *KubernetesInventory) gatherDaemonSet(d *v1.DaemonSet, acc telegraf.Accumulator) { +func (ki *KubernetesInventory) gatherDaemonSet(d *apps.DaemonSet, acc telegraf.Accumulator) { fields := map[string]interface{}{ "generation": d.Generation, "current_number_scheduled": d.Status.CurrentNumberScheduled, diff --git a/plugins/inputs/kube_inventory/daemonset_test.go b/plugins/inputs/kube_inventory/daemonset_test.go index 0c6cc87a9..b88db1832 100644 --- a/plugins/inputs/kube_inventory/daemonset_test.go +++ b/plugins/inputs/kube_inventory/daemonset_test.go @@ -6,8 +6,8 @@ import ( "time" "github.com/stretchr/testify/require" - v1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apps 
"k8s.io/api/apps/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" @@ -29,7 +29,7 @@ func TestDaemonSet(t *testing.T) { name: "no daemon set", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/daemonsets/": &v1.DaemonSetList{}, + "/daemonsets/": &apps.DaemonSetList{}, }, }, hasError: false, @@ -38,10 +38,10 @@ func TestDaemonSet(t *testing.T) { name: "collect daemonsets", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/daemonsets/": &v1.DaemonSetList{ - Items: []v1.DaemonSet{ + "/daemonsets/": &apps.DaemonSetList{ + Items: []apps.DaemonSet{ { - Status: v1.DaemonSetStatus{ + Status: apps.DaemonSetStatus{ CurrentNumberScheduled: 3, DesiredNumberScheduled: 5, NumberAvailable: 2, @@ -50,7 +50,7 @@ func TestDaemonSet(t *testing.T) { NumberUnavailable: 1, UpdatedNumberScheduled: 2, }, - ObjectMeta: metav1.ObjectMeta{ + ObjectMeta: meta.ObjectMeta{ Generation: 11221, Namespace: "ns1", Name: "daemon1", @@ -58,10 +58,10 @@ func TestDaemonSet(t *testing.T) { "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: metav1.Time{Time: now}, + CreationTimestamp: meta.Time{Time: now}, }, - Spec: v1.DaemonSetSpec{ - Selector: &metav1.LabelSelector{ + Spec: apps.DaemonSetSpec{ + Selector: &meta.LabelSelector{ MatchLabels: map[string]string{ "select1": "s1", "select2": "s2", @@ -108,7 +108,7 @@ func TestDaemonSet(t *testing.T) { } require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) - items := ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items + items := ((v.handler.responseMap["/daemonsets/"]).(*apps.DaemonSetList)).Items for i := range items { ks.gatherDaemonSet(&items[i], acc) } @@ -131,10 +131,10 @@ func TestDaemonSetSelectorFilter(t *testing.T) { cli := &client{} responseMap := map[string]interface{}{ - "/daemonsets/": &v1.DaemonSetList{ - Items: []v1.DaemonSet{ + "/daemonsets/": &apps.DaemonSetList{ + Items: []apps.DaemonSet{ { - Status: v1.DaemonSetStatus{ + Status: apps.DaemonSetStatus{ CurrentNumberScheduled: 3, DesiredNumberScheduled: 5, NumberAvailable: 2, @@ -143,7 +143,7 @@ func TestDaemonSetSelectorFilter(t *testing.T) { NumberUnavailable: 1, UpdatedNumberScheduled: 2, }, - ObjectMeta: metav1.ObjectMeta{ + ObjectMeta: meta.ObjectMeta{ Generation: 11221, Namespace: "ns1", Name: "daemon1", @@ -151,10 +151,10 @@ func TestDaemonSetSelectorFilter(t *testing.T) { "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: metav1.Time{Time: time.Now()}, + CreationTimestamp: meta.Time{Time: time.Now()}, }, - Spec: v1.DaemonSetSpec{ - Selector: &metav1.LabelSelector{ + Spec: apps.DaemonSetSpec{ + Selector: &meta.LabelSelector{ MatchLabels: map[string]string{ "select1": "s1", "select2": "s2", @@ -269,7 +269,7 @@ func TestDaemonSetSelectorFilter(t *testing.T) { ks.SelectorExclude = v.exclude require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) - items := ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items + items := ((v.handler.responseMap["/daemonsets/"]).(*apps.DaemonSetList)).Items for i := range items { ks.gatherDaemonSet(&items[i], acc) } diff --git a/plugins/inputs/kube_inventory/deployment.go b/plugins/inputs/kube_inventory/deployment.go index 4d4a938db..68d33e852 100644 --- a/plugins/inputs/kube_inventory/deployment.go +++ b/plugins/inputs/kube_inventory/deployment.go @@ -3,7 +3,7 @@ package kube_inventory import ( "context" - v1 "k8s.io/api/apps/v1" + "k8s.io/api/apps/v1" "github.com/influxdata/telegraf" ) diff --git 
a/plugins/inputs/kube_inventory/deployment_test.go b/plugins/inputs/kube_inventory/deployment_test.go index ccd7c0d60..b854a6d39 100644 --- a/plugins/inputs/kube_inventory/deployment_test.go +++ b/plugins/inputs/kube_inventory/deployment_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/stretchr/testify/require" - v1 "k8s.io/api/apps/v1" + "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" diff --git a/plugins/inputs/kube_inventory/endpoint_test.go b/plugins/inputs/kube_inventory/endpoint_test.go index 936a64b72..f5be722c9 100644 --- a/plugins/inputs/kube_inventory/endpoint_test.go +++ b/plugins/inputs/kube_inventory/endpoint_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/influxdata/telegraf" diff --git a/plugins/inputs/kube_inventory/statefulset.go b/plugins/inputs/kube_inventory/statefulset.go index ee61d728f..77b2a56bb 100644 --- a/plugins/inputs/kube_inventory/statefulset.go +++ b/plugins/inputs/kube_inventory/statefulset.go @@ -3,7 +3,7 @@ package kube_inventory import ( "context" - v1 "k8s.io/api/apps/v1" + "k8s.io/api/apps/v1" "github.com/influxdata/telegraf" ) diff --git a/plugins/inputs/kube_inventory/statefulset_test.go b/plugins/inputs/kube_inventory/statefulset_test.go index 8caa0b14a..139d30824 100644 --- a/plugins/inputs/kube_inventory/statefulset_test.go +++ b/plugins/inputs/kube_inventory/statefulset_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/stretchr/testify/require" - v1 "k8s.io/api/apps/v1" + "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/influxdata/telegraf" diff --git a/plugins/inputs/kubernetes/kubernetes.go b/plugins/inputs/kubernetes/kubernetes.go index c0bd17b54..b692d39c3 100644 --- a/plugins/inputs/kubernetes/kubernetes.go +++ b/plugins/inputs/kubernetes/kubernetes.go @@ -12,7 +12,7 @@ import ( "sync" "time" - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" diff --git a/plugins/inputs/ldap/ldap.go b/plugins/inputs/ldap/ldap.go index eb1e61fbf..7a69871ca 100644 --- a/plugins/inputs/ldap/ldap.go +++ b/plugins/inputs/ldap/ldap.go @@ -13,7 +13,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - commontls "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -26,7 +26,7 @@ type LDAP struct { BindDn string `toml:"bind_dn"` BindPassword config.Secret `toml:"bind_password"` ReverseFieldNames bool `toml:"reverse_field_names"` - commontls.ClientConfig + common_tls.ClientConfig tlsCfg *tls.Config requests []request diff --git a/plugins/inputs/ldap/ldap_test.go b/plugins/inputs/ldap/ldap_test.go index c512ac25c..ad70654d5 100644 --- a/plugins/inputs/ldap/ldap_test.go +++ b/plugins/inputs/ldap/ldap_test.go @@ -12,7 +12,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" - commontls "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" ) @@ -312,7 +312,7 @@ func TestOpenLDAPStartTLSIntegration(t *testing.T) { Server: "starttls://" + container.Address + ":" + port, BindDn: "CN=manager,DC=example,DC=org", BindPassword: 
config.NewSecret([]byte("secret")), - ClientConfig: commontls.ClientConfig{ + ClientConfig: common_tls.ClientConfig{ TLSCA: pkiPaths.ClientCert, InsecureSkipVerify: true, }, @@ -420,7 +420,7 @@ func TestOpenLDAPLDAPSIntegration(t *testing.T) { Server: "ldaps://" + container.Address + ":" + port, BindDn: "CN=manager,DC=example,DC=org", BindPassword: config.NewSecret([]byte("secret")), - ClientConfig: commontls.ClientConfig{ + ClientConfig: common_tls.ClientConfig{ InsecureSkipVerify: true, }, } diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go index 51e792021..77251b32f 100644 --- a/plugins/inputs/logstash/logstash.go +++ b/plugins/inputs/logstash/logstash.go @@ -15,9 +15,9 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/choice" - httpconfig "github.com/influxdata/telegraf/plugins/common/http" + common_http "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/inputs" - jsonParser "github.com/influxdata/telegraf/plugins/parsers/json" + parsers_json "github.com/influxdata/telegraf/plugins/parsers/json" ) //go:embed sample.conf @@ -43,7 +43,7 @@ type Logstash struct { Log telegraf.Logger `toml:"-"` client *http.Client - httpconfig.HTTPClientConfig + common_http.HTTPClientConfig } type processStats struct { @@ -183,7 +183,7 @@ func (logstash *Logstash) gatherJVMStats(address string, accumulator telegraf.Ac "source": jvmStats.Host, } - flattener := jsonParser.JSONFlattener{} + flattener := parsers_json.JSONFlattener{} err = flattener.FlattenJSON("", jvmStats.JVM) if err != nil { return err @@ -209,7 +209,7 @@ func (logstash *Logstash) gatherProcessStats(address string, accumulator telegra "source": processStats.Host, } - flattener := jsonParser.JSONFlattener{} + flattener := parsers_json.JSONFlattener{} err = flattener.FlattenJSON("", processStats.Process) if err != nil { return err @@ -235,7 +235,7 @@ func (logstash *Logstash) gatherPluginsStats( for tag, value := range tags { pluginTags[tag] = value } - flattener := jsonParser.JSONFlattener{} + flattener := parsers_json.JSONFlattener{} err := flattener.FlattenJSON("", plugin.Events) if err != nil { return err @@ -264,7 +264,7 @@ func (logstash *Logstash) gatherPluginsStats( "with_errors": 9089 }, */ - flattener := jsonParser.JSONFlattener{} + flattener := parsers_json.JSONFlattener{} err := flattener.FlattenJSON("", plugin.BulkRequests) if err != nil { return err @@ -287,7 +287,7 @@ func (logstash *Logstash) gatherPluginsStats( "retryable_failures": 13733 } */ - flattener = jsonParser.JSONFlattener{} + flattener = parsers_json.JSONFlattener{} err = flattener.FlattenJSON("", plugin.Documents) if err != nil { return err @@ -325,7 +325,7 @@ func (logstash *Logstash) gatherQueueStats(queue pipelineQueue, tags map[string] } if queue.Type != "memory" { - flattener := jsonParser.JSONFlattener{} + flattener := parsers_json.JSONFlattener{} err := flattener.FlattenJSON("", queue.Capacity) if err != nil { return err @@ -368,7 +368,7 @@ func (logstash *Logstash) gatherPipelineStats(address string, accumulator telegr "source": pipelineStats.Host, } - flattener := jsonParser.JSONFlattener{} + flattener := parsers_json.JSONFlattener{} err = flattener.FlattenJSON("", pipelineStats.Pipeline.Events) if err != nil { return err @@ -414,7 +414,7 @@ func (logstash *Logstash) gatherPipelinesStats(address string, accumulator teleg "source": pipelinesStats.Host, } - flattener := jsonParser.JSONFlattener{} + 
flattener := parsers_json.JSONFlattener{} err := flattener.FlattenJSON("", pipeline.Events) if err != nil { return err @@ -520,7 +520,7 @@ func newLogstash() *Logstash { URL: "http://127.0.0.1:9600", Collect: []string{"pipelines", "process", "jvm"}, Headers: make(map[string]string), - HTTPClientConfig: httpconfig.HTTPClientConfig{ + HTTPClientConfig: common_http.HTTPClientConfig{ Timeout: config.Duration(5 * time.Second), }, } diff --git a/plugins/inputs/memcached/memcached.go b/plugins/inputs/memcached/memcached.go index fee4fcbee..6f050b278 100644 --- a/plugins/inputs/memcached/memcached.go +++ b/plugins/inputs/memcached/memcached.go @@ -15,7 +15,7 @@ import ( "golang.org/x/net/proxy" "github.com/influxdata/telegraf" - tlsint "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -27,7 +27,7 @@ type Memcached struct { Servers []string `toml:"servers"` UnixSockets []string `toml:"unix_sockets"` EnableTLS bool `toml:"enable_tls"` - tlsint.ClientConfig + common_tls.ClientConfig } var defaultTimeout = 5 * time.Second diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index 2829cd779..c4b57ffff 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -17,7 +17,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" + parsers_json "github.com/influxdata/telegraf/plugins/parsers/json" ) //go:embed sample.conf @@ -525,7 +525,7 @@ func (m *Mesos) gatherMainMetrics(u *url.URL, role Role, acc telegraf.Accumulato m.filterMetrics(role, &jsonOut) - jf := jsonparser.JSONFlattener{} + jf := parsers_json.JSONFlattener{} err = jf.FlattenJSON("", jsonOut) diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go index 06b98d039..65c323c18 100644 --- a/plugins/inputs/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -19,7 +19,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/choice" - tlsint "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -37,7 +37,7 @@ type MongoDB struct { GatherTopStat bool DisconnectedServersBehavior string ColStatsDbs []string - tlsint.ClientConfig + common_tls.ClientConfig Log telegraf.Logger `toml:"-"` diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 642b8ea0f..7da0255ee 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -20,8 +20,8 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - v1 "github.com/influxdata/telegraf/plugins/inputs/mysql/v1" - v2 "github.com/influxdata/telegraf/plugins/inputs/mysql/v2" + "github.com/influxdata/telegraf/plugins/inputs/mysql/v1" + "github.com/influxdata/telegraf/plugins/inputs/mysql/v2" ) //go:embed sample.conf diff --git a/plugins/inputs/opensearch_query/opensearch_query.go b/plugins/inputs/opensearch_query/opensearch_query.go index 9602f5b87..b9cefce59 100644 --- a/plugins/inputs/opensearch_query/opensearch_query.go +++ b/plugins/inputs/opensearch_query/opensearch_query.go @@ -18,7 +18,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - 
influxtls "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -37,7 +37,7 @@ type OpensearchQuery struct { Log telegraf.Logger `toml:"-"` - influxtls.ClientConfig + common_tls.ClientConfig osClient *opensearch.Client } diff --git a/plugins/inputs/openstack/openstack.go b/plugins/inputs/openstack/openstack.go index 35fd1a5d0..c4217ec34 100644 --- a/plugins/inputs/openstack/openstack.go +++ b/plugins/inputs/openstack/openstack.go @@ -44,7 +44,7 @@ import ( "github.com/gophercloud/gophercloud/v2/openstack/orchestration/v1/stacks" "github.com/influxdata/telegraf" - httpconfig "github.com/influxdata/telegraf/plugins/common/http" + common_http "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -74,7 +74,7 @@ type OpenStack struct { MeasureRequest bool `toml:"measure_openstack_requests"` AllTenants bool `toml:"query_all_tenants"` Log telegraf.Logger `toml:"-"` - httpconfig.HTTPClientConfig + common_http.HTTPClientConfig client *http.Client diff --git a/plugins/inputs/opentelemetry/opentelemetry_test.go b/plugins/inputs/opentelemetry/opentelemetry_test.go index 798c58bdc..ae6b198f2 100644 --- a/plugins/inputs/opentelemetry/opentelemetry_test.go +++ b/plugins/inputs/opentelemetry/opentelemetry_test.go @@ -27,7 +27,6 @@ import ( "github.com/influxdata/influxdb-observability/otel2influx" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - tmetric "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/testutil" @@ -86,7 +85,7 @@ func TestOpenTelemetry(t *testing.T) { exesuffix = ".exe" } expected := []telegraf.Metric{ - tmetric.New( + testutil.MustMetric( "measurement-counter", map[string]string{ "otel.library.name": "library-name", diff --git a/plugins/inputs/p4runtime/p4runtime.go b/plugins/inputs/p4runtime/p4runtime.go index f1dd9f4dc..accc0d86c 100644 --- a/plugins/inputs/p4runtime/p4runtime.go +++ b/plugins/inputs/p4runtime/p4runtime.go @@ -11,14 +11,14 @@ import ( "slices" "sync" - p4ConfigV1 "github.com/p4lang/p4runtime/go/p4/config/v1" - p4v1 "github.com/p4lang/p4runtime/go/p4/v1" + p4_config "github.com/p4lang/p4runtime/go/p4/config/v1" + p4 "github.com/p4lang/p4runtime/go/p4/v1" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "github.com/influxdata/telegraf" - internaltls "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -36,10 +36,10 @@ type P4runtime struct { CounterNamesInclude []string `toml:"counter_names_include"` Log telegraf.Logger `toml:"-"` EnableTLS bool `toml:"enable_tls"` - internaltls.ClientConfig + common_tls.ClientConfig conn *grpc.ClientConn - client p4v1.P4RuntimeClient + client p4.P4RuntimeClient wg sync.WaitGroup } @@ -75,7 +75,7 @@ func (p *P4runtime) Gather(acc telegraf.Accumulator) error { for _, counter := range filteredCounters { p.wg.Add(1) - go func(counter *p4ConfigV1.Counter) { + go func(counter *p4_config.Counter) { defer p.wg.Done() entries, err := p.readAllEntries(counter.Preamble.Id) if err != nil { @@ -136,10 +136,10 @@ func initConnection(endpoint string, tlscfg *tls.Config) (*grpc.ClientConn, erro return grpc.NewClient(endpoint, grpc.WithTransportCredentials(creds)) } 
-func (p *P4runtime) getP4Info() (*p4ConfigV1.P4Info, error) { - req := &p4v1.GetForwardingPipelineConfigRequest{ +func (p *P4runtime) getP4Info() (*p4_config.P4Info, error) { + req := &p4.GetForwardingPipelineConfigRequest{ DeviceId: p.DeviceID, - ResponseType: p4v1.GetForwardingPipelineConfigRequest_ALL, + ResponseType: p4.GetForwardingPipelineConfigRequest_ALL, } resp, err := p.client.GetForwardingPipelineConfig(context.Background(), req) if err != nil { @@ -165,12 +165,12 @@ func (p *P4runtime) getP4Info() (*p4ConfigV1.P4Info, error) { return p4info, nil } -func filterCounters(counters []*p4ConfigV1.Counter, counterNamesInclude []string) []*p4ConfigV1.Counter { +func filterCounters(counters []*p4_config.Counter, counterNamesInclude []string) []*p4_config.Counter { if len(counterNamesInclude) == 0 { return counters } - var filteredCounters []*p4ConfigV1.Counter + var filteredCounters []*p4_config.Counter for _, counter := range counters { if counter == nil { continue @@ -197,16 +197,16 @@ func (p *P4runtime) newP4RuntimeClient() error { return fmt.Errorf("cannot connect to the server: %w", err) } p.conn = conn - p.client = p4v1.NewP4RuntimeClient(conn) + p.client = p4.NewP4RuntimeClient(conn) return nil } -func (p *P4runtime) readAllEntries(counterID uint32) ([]*p4v1.Entity, error) { - readRequest := &p4v1.ReadRequest{ +func (p *P4runtime) readAllEntries(counterID uint32) ([]*p4.Entity, error) { + readRequest := &p4.ReadRequest{ DeviceId: p.DeviceID, - Entities: []*p4v1.Entity{{ - Entity: &p4v1.Entity_CounterEntry{ - CounterEntry: &p4v1.CounterEntry{ + Entities: []*p4.Entity{{ + Entity: &p4.Entity_CounterEntry{ + CounterEntry: &p4.CounterEntry{ CounterId: counterID}}}}} stream, err := p.client.Read(context.Background(), readRequest) diff --git a/plugins/inputs/p4runtime/p4runtime_fake_client_test.go b/plugins/inputs/p4runtime/p4runtime_fake_client_test.go index 3b037e223..5dc2413c4 100644 --- a/plugins/inputs/p4runtime/p4runtime_fake_client_test.go +++ b/plugins/inputs/p4runtime/p4runtime_fake_client_test.go @@ -3,49 +3,49 @@ package p4runtime import ( "context" - p4v1 "github.com/p4lang/p4runtime/go/p4/v1" + p4 "github.com/p4lang/p4runtime/go/p4/v1" "google.golang.org/grpc" ) type fakeP4RuntimeClient struct { writeFn func( ctx context.Context, - in *p4v1.WriteRequest, + in *p4.WriteRequest, opts ...grpc.CallOption, - ) (*p4v1.WriteResponse, error) + ) (*p4.WriteResponse, error) readFn func( - in *p4v1.ReadRequest, - ) (p4v1.P4Runtime_ReadClient, error) + in *p4.ReadRequest, + ) (p4.P4Runtime_ReadClient, error) setForwardingPipelineConfigFn func( ctx context.Context, - in *p4v1.SetForwardingPipelineConfigRequest, + in *p4.SetForwardingPipelineConfigRequest, opts ...grpc.CallOption, - ) (*p4v1.SetForwardingPipelineConfigResponse, error) + ) (*p4.SetForwardingPipelineConfigResponse, error) - getForwardingPipelineConfigFn func() (*p4v1.GetForwardingPipelineConfigResponse, error) + getForwardingPipelineConfigFn func() (*p4.GetForwardingPipelineConfigResponse, error) streamChannelFn func( ctx context.Context, opts ...grpc.CallOption, - ) (p4v1.P4Runtime_StreamChannelClient, error) + ) (p4.P4Runtime_StreamChannelClient, error) capabilitiesFn func( ctx context.Context, - in *p4v1.CapabilitiesRequest, + in *p4.CapabilitiesRequest, opts ...grpc.CallOption, - ) (*p4v1.CapabilitiesResponse, error) + ) (*p4.CapabilitiesResponse, error) } -// fakeP4RuntimeClient implements the p4v1.P4RuntimeClient interface -var _ p4v1.P4RuntimeClient = &fakeP4RuntimeClient{} +// fakeP4RuntimeClient implements the 
v1.P4RuntimeClient interface +var _ p4.P4RuntimeClient = &fakeP4RuntimeClient{} func (c *fakeP4RuntimeClient) Write( ctx context.Context, - in *p4v1.WriteRequest, + in *p4.WriteRequest, opts ...grpc.CallOption, -) (*p4v1.WriteResponse, error) { +) (*p4.WriteResponse, error) { if c.writeFn == nil { panic("No mock defined for Write RPC") } @@ -54,9 +54,9 @@ func (c *fakeP4RuntimeClient) Write( func (c *fakeP4RuntimeClient) Read( _ context.Context, - in *p4v1.ReadRequest, + in *p4.ReadRequest, _ ...grpc.CallOption, -) (p4v1.P4Runtime_ReadClient, error) { +) (p4.P4Runtime_ReadClient, error) { if c.readFn == nil { panic("No mock defined for Read RPC") } @@ -65,9 +65,9 @@ func (c *fakeP4RuntimeClient) Read( func (c *fakeP4RuntimeClient) SetForwardingPipelineConfig( ctx context.Context, - in *p4v1.SetForwardingPipelineConfigRequest, + in *p4.SetForwardingPipelineConfigRequest, opts ...grpc.CallOption, -) (*p4v1.SetForwardingPipelineConfigResponse, error) { +) (*p4.SetForwardingPipelineConfigResponse, error) { if c.setForwardingPipelineConfigFn == nil { panic("No mock defined for SetForwardingPipelineConfig RPC") } @@ -76,9 +76,9 @@ func (c *fakeP4RuntimeClient) SetForwardingPipelineConfig( func (c *fakeP4RuntimeClient) GetForwardingPipelineConfig( context.Context, - *p4v1.GetForwardingPipelineConfigRequest, + *p4.GetForwardingPipelineConfigRequest, ...grpc.CallOption, -) (*p4v1.GetForwardingPipelineConfigResponse, error) { +) (*p4.GetForwardingPipelineConfigResponse, error) { if c.getForwardingPipelineConfigFn == nil { panic("No mock defined for GetForwardingPipelineConfig RPC") } @@ -88,7 +88,7 @@ func (c *fakeP4RuntimeClient) GetForwardingPipelineConfig( func (c *fakeP4RuntimeClient) StreamChannel( ctx context.Context, opts ...grpc.CallOption, -) (p4v1.P4Runtime_StreamChannelClient, error) { +) (p4.P4Runtime_StreamChannelClient, error) { if c.streamChannelFn == nil { panic("No mock defined for StreamChannel") } @@ -97,9 +97,9 @@ func (c *fakeP4RuntimeClient) StreamChannel( func (c *fakeP4RuntimeClient) Capabilities( ctx context.Context, - in *p4v1.CapabilitiesRequest, + in *p4.CapabilitiesRequest, opts ...grpc.CallOption, -) (*p4v1.CapabilitiesResponse, error) { +) (*p4.CapabilitiesResponse, error) { if c.capabilitiesFn == nil { panic("No mock defined for Capabilities RPC") } @@ -108,13 +108,13 @@ func (c *fakeP4RuntimeClient) Capabilities( type fakeP4RuntimeReadClient struct { grpc.ClientStream - recvFn func() (*p4v1.ReadResponse, error) + recvFn func() (*p4.ReadResponse, error) } -// fakeP4RuntimeReadClient implements the p4v1.P4Runtime_ReadClient interface -var _ p4v1.P4Runtime_ReadClient = &fakeP4RuntimeReadClient{} +// fakeP4RuntimeReadClient implements the v1.P4Runtime_ReadClient interface +var _ p4.P4Runtime_ReadClient = &fakeP4RuntimeReadClient{} -func (c *fakeP4RuntimeReadClient) Recv() (*p4v1.ReadResponse, error) { +func (c *fakeP4RuntimeReadClient) Recv() (*p4.ReadResponse, error) { if c.recvFn == nil { panic("No mock provided for Recv function") } diff --git a/plugins/inputs/p4runtime/p4runtime_test.go b/plugins/inputs/p4runtime/p4runtime_test.go index f2daea060..67a52c7a8 100644 --- a/plugins/inputs/p4runtime/p4runtime_test.go +++ b/plugins/inputs/p4runtime/p4runtime_test.go @@ -7,8 +7,8 @@ import ( "testing" "time" - p4ConfigV1 "github.com/p4lang/p4runtime/go/p4/config/v1" - p4v1 "github.com/p4lang/p4runtime/go/p4/v1" + p4_config "github.com/p4lang/p4runtime/go/p4/config/v1" + p4 "github.com/p4lang/p4runtime/go/p4/v1" "github.com/stretchr/testify/require" "google.golang.org/grpc" 
"google.golang.org/grpc/credentials/insecure" @@ -21,23 +21,23 @@ import ( func createCounter( name string, id uint32, - unit p4ConfigV1.CounterSpec_Unit, -) *p4ConfigV1.Counter { - return &p4ConfigV1.Counter{ - Preamble: &p4ConfigV1.Preamble{Name: name, Id: id}, - Spec: &p4ConfigV1.CounterSpec{Unit: unit}, + unit p4_config.CounterSpec_Unit, +) *p4_config.Counter { + return &p4_config.Counter{ + Preamble: &p4_config.Preamble{Name: name, Id: id}, + Spec: &p4_config.CounterSpec{Unit: unit}, } } func createEntityCounterEntry( counterID uint32, index int64, - data *p4v1.CounterData, -) *p4v1.Entity_CounterEntry { - return &p4v1.Entity_CounterEntry{ - CounterEntry: &p4v1.CounterEntry{ + data *p4.CounterData, +) *p4.Entity_CounterEntry { + return &p4.Entity_CounterEntry{ + CounterEntry: &p4.CounterEntry{ CounterId: counterID, - Index: &p4v1.Index{Index: index}, + Index: &p4.Index{Index: index}, Data: data, }, } @@ -73,20 +73,20 @@ func TestInitDefault(t *testing.T) { func TestErrorGetP4Info(t *testing.T) { responses := []struct { - getForwardingPipelineConfigResponse *p4v1.GetForwardingPipelineConfigResponse + getForwardingPipelineConfigResponse *p4.GetForwardingPipelineConfigResponse getForwardingPipelineConfigResponseError error }{ { getForwardingPipelineConfigResponse: nil, getForwardingPipelineConfigResponseError: errors.New("error when retrieving forwarding pipeline config"), }, { - getForwardingPipelineConfigResponse: &p4v1.GetForwardingPipelineConfigResponse{ + getForwardingPipelineConfigResponse: &p4.GetForwardingPipelineConfigResponse{ Config: nil, }, getForwardingPipelineConfigResponseError: nil, }, { - getForwardingPipelineConfigResponse: &p4v1.GetForwardingPipelineConfigResponse{ - Config: &p4v1.ForwardingPipelineConfig{P4Info: nil}, + getForwardingPipelineConfigResponse: &p4.GetForwardingPipelineConfigResponse{ + Config: &p4.ForwardingPipelineConfig{P4Info: nil}, }, getForwardingPipelineConfigResponseError: nil, }, @@ -94,7 +94,7 @@ func TestErrorGetP4Info(t *testing.T) { for _, response := range responses { p4RtClient := &fakeP4RuntimeClient{ - getForwardingPipelineConfigFn: func() (*p4v1.GetForwardingPipelineConfigResponse, error) { + getForwardingPipelineConfigFn: func() (*p4.GetForwardingPipelineConfigResponse, error) { return response.getForwardingPipelineConfigResponse, response.getForwardingPipelineConfigResponseError }, } @@ -111,23 +111,23 @@ func TestErrorGetP4Info(t *testing.T) { func TestOneCounterRead(t *testing.T) { tests := []struct { - forwardingPipelineConfig *p4v1.ForwardingPipelineConfig - EntityCounterEntry *p4v1.Entity_CounterEntry + forwardingPipelineConfig *p4.ForwardingPipelineConfig + EntityCounterEntry *p4.Entity_CounterEntry expected []telegraf.Metric }{ { - forwardingPipelineConfig: &p4v1.ForwardingPipelineConfig{ - P4Info: &p4ConfigV1.P4Info{ - Counters: []*p4ConfigV1.Counter{ - createCounter("foo", 1111, p4ConfigV1.CounterSpec_BOTH), + forwardingPipelineConfig: &p4.ForwardingPipelineConfig{ + P4Info: &p4_config.P4Info{ + Counters: []*p4_config.Counter{ + createCounter("foo", 1111, p4_config.CounterSpec_BOTH), }, - PkgInfo: &p4ConfigV1.PkgInfo{Name: "P4Program"}, + PkgInfo: &p4_config.PkgInfo{Name: "P4Program"}, }, }, EntityCounterEntry: createEntityCounterEntry( 1111, 5, - &p4v1.CounterData{ByteCount: 5, PacketCount: 1}, + &p4.CounterData{ByteCount: 5, PacketCount: 1}, ), expected: []telegraf.Metric{testutil.MustMetric( "p4_runtime", @@ -143,22 +143,22 @@ func TestOneCounterRead(t *testing.T) { time.Unix(0, 0)), }, }, { - forwardingPipelineConfig: 
&p4v1.ForwardingPipelineConfig{ - P4Info: &p4ConfigV1.P4Info{ - Counters: []*p4ConfigV1.Counter{ + forwardingPipelineConfig: &p4.ForwardingPipelineConfig{ + P4Info: &p4_config.P4Info{ + Counters: []*p4_config.Counter{ createCounter( "foo", 2222, - p4ConfigV1.CounterSpec_BYTES, + p4_config.CounterSpec_BYTES, ), }, - PkgInfo: &p4ConfigV1.PkgInfo{Name: "P4Program"}, + PkgInfo: &p4_config.PkgInfo{Name: "P4Program"}, }, }, EntityCounterEntry: createEntityCounterEntry( 2222, 5, - &p4v1.CounterData{ByteCount: 5}, + &p4.CounterData{ByteCount: 5}, ), expected: []telegraf.Metric{testutil.MustMetric( "p4_runtime", @@ -174,22 +174,22 @@ func TestOneCounterRead(t *testing.T) { time.Unix(0, 0)), }, }, { - forwardingPipelineConfig: &p4v1.ForwardingPipelineConfig{ - P4Info: &p4ConfigV1.P4Info{ - Counters: []*p4ConfigV1.Counter{ + forwardingPipelineConfig: &p4.ForwardingPipelineConfig{ + P4Info: &p4_config.P4Info{ + Counters: []*p4_config.Counter{ createCounter( "foo", 3333, - p4ConfigV1.CounterSpec_PACKETS, + p4_config.CounterSpec_PACKETS, ), }, - PkgInfo: &p4ConfigV1.PkgInfo{Name: "P4Program"}, + PkgInfo: &p4_config.PkgInfo{Name: "P4Program"}, }, }, EntityCounterEntry: createEntityCounterEntry( 3333, 5, - &p4v1.CounterData{PacketCount: 1}, + &p4.CounterData{PacketCount: 1}, ), expected: []telegraf.Metric{testutil.MustMetric( "p4_runtime", @@ -205,18 +205,18 @@ func TestOneCounterRead(t *testing.T) { time.Unix(0, 0)), }, }, { - forwardingPipelineConfig: &p4v1.ForwardingPipelineConfig{ - P4Info: &p4ConfigV1.P4Info{ - Counters: []*p4ConfigV1.Counter{ - createCounter("foo", 4444, p4ConfigV1.CounterSpec_BOTH), + forwardingPipelineConfig: &p4.ForwardingPipelineConfig{ + P4Info: &p4_config.P4Info{ + Counters: []*p4_config.Counter{ + createCounter("foo", 4444, p4_config.CounterSpec_BOTH), }, - PkgInfo: &p4ConfigV1.PkgInfo{Name: "P4Program"}, + PkgInfo: &p4_config.PkgInfo{Name: "P4Program"}, }, }, EntityCounterEntry: createEntityCounterEntry( 4444, 5, - &p4v1.CounterData{}, + &p4.CounterData{}, ), expected: nil, }, @@ -224,19 +224,19 @@ func TestOneCounterRead(t *testing.T) { for _, tt := range tests { p4RtReadClient := &fakeP4RuntimeReadClient{ - recvFn: func() (*p4v1.ReadResponse, error) { - return &p4v1.ReadResponse{ - Entities: []*p4v1.Entity{{Entity: tt.EntityCounterEntry}}, + recvFn: func() (*p4.ReadResponse, error) { + return &p4.ReadResponse{ + Entities: []*p4.Entity{{Entity: tt.EntityCounterEntry}}, }, nil }, } p4RtClient := &fakeP4RuntimeClient{ - readFn: func(*p4v1.ReadRequest) (p4v1.P4Runtime_ReadClient, error) { + readFn: func(*p4.ReadRequest) (p4.P4Runtime_ReadClient, error) { return p4RtReadClient, nil }, - getForwardingPipelineConfigFn: func() (*p4v1.GetForwardingPipelineConfigResponse, error) { - return &p4v1.GetForwardingPipelineConfigResponse{ + getForwardingPipelineConfigFn: func() (*p4.GetForwardingPipelineConfigResponse, error) { + return &p4.GetForwardingPipelineConfigResponse{ Config: tt.forwardingPipelineConfig, }, nil }, @@ -270,19 +270,19 @@ func TestMultipleEntitiesSingleCounterRead(t *testing.T) { totalNumOfEntries, "totalNumOfCounters", ) - entities := make([]*p4v1.Entity, 0, totalNumOfEntries) - p4InfoCounters := make([]*p4ConfigV1.Counter, 0, totalNumOfEntries) + entities := make([]*p4.Entity, 0, totalNumOfEntries) + p4InfoCounters := make([]*p4_config.Counter, 0, totalNumOfEntries) p4InfoCounters = append( p4InfoCounters, - createCounter("foo", 0, p4ConfigV1.CounterSpec_BOTH), + createCounter("foo", 0, p4_config.CounterSpec_BOTH), ) for i := 0; i < totalNumOfEntries; i++ { - 
counterEntry := &p4v1.Entity{ + counterEntry := &p4.Entity{ Entity: createEntityCounterEntry( 0, int64(i), - &p4v1.CounterData{ + &p4.CounterData{ ByteCount: int64(10), PacketCount: int64(10), }, @@ -306,25 +306,25 @@ func TestMultipleEntitiesSingleCounterRead(t *testing.T) { )) } - forwardingPipelineConfig := &p4v1.ForwardingPipelineConfig{ - P4Info: &p4ConfigV1.P4Info{ + forwardingPipelineConfig := &p4.ForwardingPipelineConfig{ + P4Info: &p4_config.P4Info{ Counters: p4InfoCounters, - PkgInfo: &p4ConfigV1.PkgInfo{Name: "P4Program"}, + PkgInfo: &p4_config.PkgInfo{Name: "P4Program"}, }, } p4RtReadClient := &fakeP4RuntimeReadClient{ - recvFn: func() (*p4v1.ReadResponse, error) { - return &p4v1.ReadResponse{Entities: entities}, nil + recvFn: func() (*p4.ReadResponse, error) { + return &p4.ReadResponse{Entities: entities}, nil }, } p4RtClient := &fakeP4RuntimeClient{ - readFn: func(*p4v1.ReadRequest) (p4v1.P4Runtime_ReadClient, error) { + readFn: func(*p4.ReadRequest) (p4.P4Runtime_ReadClient, error) { return p4RtReadClient, nil }, - getForwardingPipelineConfigFn: func() (*p4v1.GetForwardingPipelineConfigResponse, error) { - return &p4v1.GetForwardingPipelineConfigResponse{ + getForwardingPipelineConfigFn: func() (*p4.GetForwardingPipelineConfigResponse, error) { + return &p4.GetForwardingPipelineConfigResponse{ Config: forwardingPipelineConfig, }, nil }, @@ -359,7 +359,7 @@ func TestSingleEntitiesMultipleCounterRead(t *testing.T) { totalNumOfCounters, "totalNumOfCounters", ) - p4InfoCounters := make([]*p4ConfigV1.Counter, 0, totalNumOfCounters) + p4InfoCounters := make([]*p4_config.Counter, 0, totalNumOfCounters) for i := 1; i <= totalNumOfCounters; i++ { counterName := fmt.Sprintf("foo%v", i) @@ -368,7 +368,7 @@ func TestSingleEntitiesMultipleCounterRead(t *testing.T) { createCounter( counterName, uint32(i), - p4ConfigV1.CounterSpec_BOTH, + p4_config.CounterSpec_BOTH, ), ) @@ -388,24 +388,24 @@ func TestSingleEntitiesMultipleCounterRead(t *testing.T) { )) } - forwardingPipelineConfig := &p4v1.ForwardingPipelineConfig{ - P4Info: &p4ConfigV1.P4Info{ + forwardingPipelineConfig := &p4.ForwardingPipelineConfig{ + P4Info: &p4_config.P4Info{ Counters: p4InfoCounters, - PkgInfo: &p4ConfigV1.PkgInfo{Name: "P4Program"}, + PkgInfo: &p4_config.PkgInfo{Name: "P4Program"}, }, } p4RtClient := &fakeP4RuntimeClient{ - readFn: func(in *p4v1.ReadRequest) (p4v1.P4Runtime_ReadClient, error) { + readFn: func(in *p4.ReadRequest) (p4.P4Runtime_ReadClient, error) { counterID := in.Entities[0].GetCounterEntry().CounterId return &fakeP4RuntimeReadClient{ - recvFn: func() (*p4v1.ReadResponse, error) { - return &p4v1.ReadResponse{ - Entities: []*p4v1.Entity{{ + recvFn: func() (*p4.ReadResponse, error) { + return &p4.ReadResponse{ + Entities: []*p4.Entity{{ Entity: createEntityCounterEntry( counterID, 1, - &p4v1.CounterData{ + &p4.CounterData{ ByteCount: 10, PacketCount: 10, }, @@ -415,8 +415,8 @@ func TestSingleEntitiesMultipleCounterRead(t *testing.T) { }, }, nil }, - getForwardingPipelineConfigFn: func() (*p4v1.GetForwardingPipelineConfigResponse, error) { - return &p4v1.GetForwardingPipelineConfigResponse{ + getForwardingPipelineConfigFn: func() (*p4.GetForwardingPipelineConfigResponse, error) { + return &p4.GetForwardingPipelineConfigResponse{ Config: forwardingPipelineConfig, }, nil }, @@ -442,13 +442,13 @@ func TestSingleEntitiesMultipleCounterRead(t *testing.T) { } func TestNoCountersAvailable(t *testing.T) { - forwardingPipelineConfig := &p4v1.ForwardingPipelineConfig{ - P4Info: &p4ConfigV1.P4Info{Counters: 
[]*p4ConfigV1.Counter{}}, + forwardingPipelineConfig := &p4.ForwardingPipelineConfig{ + P4Info: &p4_config.P4Info{Counters: []*p4_config.Counter{}}, } p4RtClient := &fakeP4RuntimeClient{ - getForwardingPipelineConfigFn: func() (*p4v1.GetForwardingPipelineConfigResponse, error) { - return &p4v1.GetForwardingPipelineConfigResponse{ + getForwardingPipelineConfigFn: func() (*p4.GetForwardingPipelineConfigResponse, error) { + return &p4.GetForwardingPipelineConfigResponse{ Config: forwardingPipelineConfig, }, nil }, @@ -464,18 +464,18 @@ func TestNoCountersAvailable(t *testing.T) { } func TestFilterCounters(t *testing.T) { - forwardingPipelineConfig := &p4v1.ForwardingPipelineConfig{ - P4Info: &p4ConfigV1.P4Info{ - Counters: []*p4ConfigV1.Counter{ - createCounter("foo", 1, p4ConfigV1.CounterSpec_BOTH), + forwardingPipelineConfig := &p4.ForwardingPipelineConfig{ + P4Info: &p4_config.P4Info{ + Counters: []*p4_config.Counter{ + createCounter("foo", 1, p4_config.CounterSpec_BOTH), }, - PkgInfo: &p4ConfigV1.PkgInfo{Name: "P4Program"}, + PkgInfo: &p4_config.PkgInfo{Name: "P4Program"}, }, } p4RtClient := &fakeP4RuntimeClient{ - getForwardingPipelineConfigFn: func() (*p4v1.GetForwardingPipelineConfigResponse, error) { - return &p4v1.GetForwardingPipelineConfigResponse{ + getForwardingPipelineConfigFn: func() (*p4.GetForwardingPipelineConfigResponse, error) { + return &p4.GetForwardingPipelineConfigResponse{ Config: forwardingPipelineConfig, }, nil }, @@ -500,31 +500,31 @@ func TestFilterCounters(t *testing.T) { func TestFailReadCounterEntryFromEntry(t *testing.T) { p4RtReadClient := &fakeP4RuntimeReadClient{ - recvFn: func() (*p4v1.ReadResponse, error) { - return &p4v1.ReadResponse{ - Entities: []*p4v1.Entity{{ - Entity: &p4v1.Entity_TableEntry{ - TableEntry: &p4v1.TableEntry{}, + recvFn: func() (*p4.ReadResponse, error) { + return &p4.ReadResponse{ + Entities: []*p4.Entity{{ + Entity: &p4.Entity_TableEntry{ + TableEntry: &p4.TableEntry{}, }}}}, nil }, } p4RtClient := &fakeP4RuntimeClient{ - readFn: func(*p4v1.ReadRequest) (p4v1.P4Runtime_ReadClient, error) { + readFn: func(*p4.ReadRequest) (p4.P4Runtime_ReadClient, error) { return p4RtReadClient, nil }, - getForwardingPipelineConfigFn: func() (*p4v1.GetForwardingPipelineConfigResponse, error) { - return &p4v1.GetForwardingPipelineConfigResponse{ - Config: &p4v1.ForwardingPipelineConfig{ - P4Info: &p4ConfigV1.P4Info{ - Counters: []*p4ConfigV1.Counter{ + getForwardingPipelineConfigFn: func() (*p4.GetForwardingPipelineConfigResponse, error) { + return &p4.GetForwardingPipelineConfigResponse{ + Config: &p4.ForwardingPipelineConfig{ + P4Info: &p4_config.P4Info{ + Counters: []*p4_config.Counter{ createCounter( "foo", 1111, - p4ConfigV1.CounterSpec_BOTH, + p4_config.CounterSpec_BOTH, ), }, - PkgInfo: &p4ConfigV1.PkgInfo{Name: "P4Program"}, + PkgInfo: &p4_config.PkgInfo{Name: "P4Program"}, }, }, }, nil @@ -553,21 +553,21 @@ func TestFailReadCounterEntryFromEntry(t *testing.T) { func TestFailReadAllEntries(t *testing.T) { p4RtClient := &fakeP4RuntimeClient{ - readFn: func(*p4v1.ReadRequest) (p4v1.P4Runtime_ReadClient, error) { + readFn: func(*p4.ReadRequest) (p4.P4Runtime_ReadClient, error) { return nil, errors.New("connection error") }, - getForwardingPipelineConfigFn: func() (*p4v1.GetForwardingPipelineConfigResponse, error) { - return &p4v1.GetForwardingPipelineConfigResponse{ - Config: &p4v1.ForwardingPipelineConfig{ - P4Info: &p4ConfigV1.P4Info{ - Counters: []*p4ConfigV1.Counter{ + getForwardingPipelineConfigFn: func() 
(*p4.GetForwardingPipelineConfigResponse, error) { + return &p4.GetForwardingPipelineConfigResponse{ + Config: &p4.ForwardingPipelineConfig{ + P4Info: &p4_config.P4Info{ + Counters: []*p4_config.Counter{ createCounter( "foo", 1111, - p4ConfigV1.CounterSpec_BOTH, + p4_config.CounterSpec_BOTH, ), }, - PkgInfo: &p4ConfigV1.PkgInfo{Name: "P4Program"}, + PkgInfo: &p4_config.PkgInfo{Name: "P4Program"}, }, }, }, nil @@ -595,11 +595,11 @@ func TestFailReadAllEntries(t *testing.T) { } func TestFilterCounterNamesInclude(t *testing.T) { - counters := []*p4ConfigV1.Counter{ - createCounter("foo", 1, p4ConfigV1.CounterSpec_BOTH), - createCounter("bar", 2, p4ConfigV1.CounterSpec_BOTH), + counters := []*p4_config.Counter{ + createCounter("foo", 1, p4_config.CounterSpec_BOTH), + createCounter("bar", 2, p4_config.CounterSpec_BOTH), nil, - createCounter("", 3, p4ConfigV1.CounterSpec_BOTH), + createCounter("", 3, p4_config.CounterSpec_BOTH), } counterNamesInclude := []string{"bar"} @@ -607,8 +607,8 @@ func TestFilterCounterNamesInclude(t *testing.T) { filteredCounters := filterCounters(counters, counterNamesInclude) require.Equal( t, - []*p4ConfigV1.Counter{ - createCounter("bar", 2, p4ConfigV1.CounterSpec_BOTH), + []*p4_config.Counter{ + createCounter("bar", 2, p4_config.CounterSpec_BOTH), }, filteredCounters, ) } diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 157a7966b..29b26c443 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -25,10 +25,10 @@ import ( "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/models" - httpconfig "github.com/influxdata/telegraf/plugins/common/http" + common_http "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers/openmetrics" - parser "github.com/influxdata/telegraf/plugins/parsers/prometheus" + parsers_prometheus "github.com/influxdata/telegraf/plugins/parsers/prometheus" ) //go:embed sample.conf @@ -88,7 +88,7 @@ type Prometheus struct { ConsulConfig ConsulConfig `toml:"consul"` Log telegraf.Logger `toml:"-"` - httpconfig.HTTPClientConfig + common_http.HTTPClientConfig client *http.Client headers map[string]string @@ -525,7 +525,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) (map[s Log: p.Log, } } else { - metricParser = &parser.Parser{ + metricParser = &parsers_prometheus.Parser{ Header: resp.Header, MetricVersion: p.MetricVersion, IgnoreTimestamp: p.IgnoreTimestamp, diff --git a/plugins/inputs/riemann_listener/riemann_listener.go b/plugins/inputs/riemann_listener/riemann_listener.go index 26703e57d..370637ac4 100644 --- a/plugins/inputs/riemann_listener/riemann_listener.go +++ b/plugins/inputs/riemann_listener/riemann_listener.go @@ -16,14 +16,14 @@ import ( "sync" "time" - riemanngo "github.com/riemann/riemann-go-client" - riemangoProto "github.com/riemann/riemann-go-client/proto" + riemann "github.com/riemann/riemann-go-client" + rieman_proto "github.com/riemann/riemann-go-client/proto" "google.golang.org/protobuf/proto" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" - tlsint "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -37,7 +37,7 @@ type RiemannSocketListener struct { ReadTimeout 
*config.Duration `toml:"read_timeout"` KeepAlivePeriod *config.Duration `toml:"keep_alive_period"` SocketMode string `toml:"socket_mode"` - tlsint.ServerConfig + common_tls.ServerConfig wg sync.WaitGroup @@ -178,7 +178,7 @@ func (rsl *riemannListener) read(conn net.Conn) { } } - messagePb := &riemangoProto.Msg{} + messagePb := &rieman_proto.Msg{} var header uint32 // First obtain the size of the riemann event from client and acknowledge if err = binary.Read(conn, binary.BigEndian, &header); err != nil { @@ -201,7 +201,7 @@ func (rsl *riemannListener) read(conn net.Conn) { rsl.riemannReturnErrorResponse(conn, "Failed to unmarshal") return } - riemannEvents := riemanngo.ProtocolBuffersToEvents(messagePb.Events) + riemannEvents := riemann.ProtocolBuffersToEvents(messagePb.Events) for _, m := range riemannEvents { if m.Service == "" { @@ -227,7 +227,7 @@ func (rsl *riemannListener) read(conn net.Conn) { func (rsl *riemannListener) riemannReturnResponse(conn net.Conn) { t := true - message := new(riemangoProto.Msg) + message := new(rieman_proto.Msg) message.Ok = &t returnData, err := proto.Marshal(message) if err != nil { @@ -249,7 +249,7 @@ func (rsl *riemannListener) riemannReturnResponse(conn net.Conn) { func (rsl *riemannListener) riemannReturnErrorResponse(conn net.Conn, errorMessage string) { t := false - message := new(riemangoProto.Msg) + message := new(rieman_proto.Msg) message.Ok = &t message.Error = &errorMessage returnData, err := proto.Marshal(message) diff --git a/plugins/inputs/syslog/syslog_test.go b/plugins/inputs/syslog/syslog_test.go index 20c33fadd..22a781f56 100644 --- a/plugins/inputs/syslog/syslog_test.go +++ b/plugins/inputs/syslog/syslog_test.go @@ -18,7 +18,7 @@ import ( "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/common/socket" "github.com/influxdata/telegraf/plugins/inputs" - influx "github.com/influxdata/telegraf/plugins/parsers/influx/influx_upstream" + parsers_influx_upstream "github.com/influxdata/telegraf/plugins/parsers/influx/influx_upstream" "github.com/influxdata/telegraf/testutil" "github.com/leodido/go-syslog/v4/nontransparent" ) @@ -179,7 +179,7 @@ func TestCases(t *testing.T) { expectedErrorFilename := filepath.Join(testcasePath, "expected.err") // Prepare the influx parser for expectations - parser := &influx.Parser{} + parser := &parsers_influx_upstream.Parser{} require.NoError(t, parser.Init()) // Read the input data diff --git a/plugins/inputs/vault/vault.go b/plugins/inputs/vault/vault.go index f80b3d6b4..c80e1fd66 100644 --- a/plugins/inputs/vault/vault.go +++ b/plugins/inputs/vault/vault.go @@ -15,7 +15,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" - httpcommon "github.com/influxdata/telegraf/plugins/common/http" + common_http "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -28,7 +28,7 @@ type Vault struct { TokenFile string `toml:"token_file"` Token string `toml:"token"` Log telegraf.Logger `toml:"-"` - httpcommon.HTTPClientConfig + common_http.HTTPClientConfig client *http.Client } @@ -192,7 +192,7 @@ func buildVaultMetrics(acc telegraf.Accumulator, sysMetrics *SysMetrics) error { func init() { inputs.Add("vault", func() telegraf.Input { return &Vault{ - HTTPClientConfig: httpcommon.HTTPClientConfig{ + HTTPClientConfig: common_http.HTTPClientConfig{ ResponseHeaderTimeout: config.Duration(5 * time.Second), }, } diff --git a/plugins/inputs/vault/vault_test.go 
b/plugins/inputs/vault/vault_test.go index 26f64461b..987b70bec 100644 --- a/plugins/inputs/vault/vault_test.go +++ b/plugins/inputs/vault/vault_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - dockercontainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/container" "github.com/docker/go-connections/nat" "github.com/influxdata/telegraf" @@ -183,13 +183,13 @@ func TestIntegration(t *testing.T) { } // Start the docker container - container := testutil.Container{ + cntnr := testutil.Container{ Image: "vault:1.13.3", ExposedPorts: []string{"8200"}, Env: map[string]string{ "VAULT_DEV_ROOT_TOKEN_ID": "root", }, - HostConfigModifier: func(hc *dockercontainer.HostConfig) { + HostConfigModifier: func(hc *container.HostConfig) { hc.CapAdd = []string{"IPC_LOCK"} }, WaitingFor: wait.ForAll( @@ -197,13 +197,13 @@ func TestIntegration(t *testing.T) { wait.ForListeningPort(nat.Port("8200")), ), } - require.NoError(t, container.Start(), "failed to start container") - defer container.Terminate() + require.NoError(t, cntnr.Start(), "failed to start container") + defer cntnr.Terminate() // Setup the plugin - port := container.Ports["8200"] + port := cntnr.Ports["8200"] plugin := &Vault{ - URL: "http://" + container.Address + ":" + port, + URL: "http://" + cntnr.Address + ":" + port, Token: "root", } require.NoError(t, plugin.Init()) diff --git a/plugins/inputs/vsphere/vsphere_test.go b/plugins/inputs/vsphere/vsphere_test.go index e721fb9b5..a4f090200 100644 --- a/plugins/inputs/vsphere/vsphere_test.go +++ b/plugins/inputs/vsphere/vsphere_test.go @@ -17,7 +17,7 @@ import ( "github.com/vmware/govmomi/vim25/types" "github.com/influxdata/telegraf/config" - itls "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" ) @@ -130,7 +130,7 @@ func defaultVSphere() *VSphere { DatacenterMetricInclude: nil, DatacenterMetricExclude: nil, DatacenterInclude: []string{"/**"}, - ClientConfig: itls.ClientConfig{InsecureSkipVerify: true}, + ClientConfig: common_tls.ClientConfig{InsecureSkipVerify: true}, MaxQueryObjects: 256, MaxQueryMetrics: 256, diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index dfd8b28b5..97097ded3 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -28,7 +28,7 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/globpath" "github.com/influxdata/telegraf/plugins/common/proxy" - commontls "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -45,7 +45,7 @@ type X509Cert struct { ServerName string `toml:"server_name"` ExcludeRootCerts bool `toml:"exclude_root_certs"` Log telegraf.Logger `toml:"-"` - commontls.ClientConfig + common_tls.ClientConfig proxy.TCPProxy tlsCfg *tls.Config diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go index d0e2c0ade..fbe35dfc9 100644 --- a/plugins/inputs/x509_cert/x509_cert_test.go +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -27,7 +27,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" - _tls "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" ) @@ -457,7 
+457,7 @@ func TestServerName(t *testing.T) { sc := &X509Cert{ Sources: []string{test.url}, ServerName: test.fromCfg, - ClientConfig: _tls.ClientConfig{ServerName: test.fromTLS}, + ClientConfig: common_tls.ClientConfig{ServerName: test.fromTLS}, Log: testutil.Logger{}, } err := sc.Init() @@ -569,7 +569,7 @@ func TestClassification(t *testing.T) { certURI := "file://" + filepath.Join(tmpDir, "cert.pem") plugin := &X509Cert{ Sources: []string{certURI}, - ClientConfig: _tls.ClientConfig{ + ClientConfig: common_tls.ClientConfig{ TLSCA: filepath.Join(tmpDir, "ca.pem"), }, Log: testutil.Logger{}, diff --git a/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore-consts.go b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore-consts.go index 7c5b5825a..0dadbce64 100644 --- a/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore-consts.go +++ b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore-consts.go @@ -6,8 +6,9 @@ import ( "bytes" "context" "fmt" - "github.com/apache/thrift/lib/go/thrift" "time" + + "github.com/apache/thrift/lib/go/thrift" ) // (needed to ensure safety because of naive import list construction.) diff --git a/plugins/inputs/zookeeper/zookeeper.go b/plugins/inputs/zookeeper/zookeeper.go index 2ab54ec7a..6f48a012a 100644 --- a/plugins/inputs/zookeeper/zookeeper.go +++ b/plugins/inputs/zookeeper/zookeeper.go @@ -15,7 +15,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - tlsint "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -32,7 +32,7 @@ type Zookeeper struct { EnableTLS bool `toml:"enable_tls"` EnableSSL bool `toml:"enable_ssl" deprecated:"1.7.0;1.35.0;use 'enable_tls' instead"` - tlsint.ClientConfig + common_tls.ClientConfig initialized bool tlsConfig *tls.Config diff --git a/plugins/outputs/application_insights/mocks/diagnostics_message_listener.go b/plugins/outputs/application_insights/mocks/diagnostics_message_listener.go index 65747c10c..2a9bd344e 100644 --- a/plugins/outputs/application_insights/mocks/diagnostics_message_listener.go +++ b/plugins/outputs/application_insights/mocks/diagnostics_message_listener.go @@ -1,6 +1,6 @@ package mocks -import mock "github.com/stretchr/testify/mock" +import "github.com/stretchr/testify/mock" // DiagnosticsMessageSubscriber is an autogenerated mock type for the DiagnosticsMessageSubscriber type type DiagnosticsMessageListener struct { diff --git a/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go b/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go index a39f09b8c..6d0d544e0 100644 --- a/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go +++ b/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go @@ -17,7 +17,7 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" - telegrafJson "github.com/influxdata/telegraf/plugins/serializers/json" + serializers_json "github.com/influxdata/telegraf/plugins/serializers/json" "github.com/influxdata/telegraf/testutil" ) @@ -105,7 +105,7 @@ func TestWrite(t *testing.T) { for _, tC := range testCases { t.Run(tC.name, func(t *testing.T) { - serializer := &telegrafJson.Serializer{} + serializer := &serializers_json.Serializer{} require.NoError(t, serializer.Init()) ingestionType := "queued" @@ -156,7 +156,7 @@ func TestWrite(t *testing.T) { } func TestCreateAzureDataExplorerTable(t *testing.T) { - serializer := 
&telegrafJson.Serializer{} + serializer := &serializers_json.Serializer{} require.NoError(t, serializer.Init()) plugin := AzureDataExplorer{ Endpoint: "someendpoint", @@ -251,7 +251,7 @@ func TestWriteWithType(t *testing.T) { } for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { - serializer := &telegrafJson.Serializer{} + serializer := &serializers_json.Serializer{} require.NoError(t, serializer.Init()) for tableName, jsonValue := range testCase.tableNameToExpectedResult { ingestionType := "queued" diff --git a/plugins/outputs/cloud_pubsub/topic_gcp.go b/plugins/outputs/cloud_pubsub/topic_gcp.go index 72ef50efc..acad7eb83 100644 --- a/plugins/outputs/cloud_pubsub/topic_gcp.go +++ b/plugins/outputs/cloud_pubsub/topic_gcp.go @@ -1,8 +1,9 @@ package cloud_pubsub import ( - "cloud.google.com/go/pubsub" "context" + + "cloud.google.com/go/pubsub" ) type ( diff --git a/plugins/outputs/cloud_pubsub/topic_stubbed.go b/plugins/outputs/cloud_pubsub/topic_stubbed.go index 05429d509..eba470c93 100644 --- a/plugins/outputs/cloud_pubsub/topic_stubbed.go +++ b/plugins/outputs/cloud_pubsub/topic_stubbed.go @@ -17,7 +17,7 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/parsers/influx" - serializer "github.com/influxdata/telegraf/plugins/serializers/influx" + serializers_influx "github.com/influxdata/telegraf/plugins/serializers/influx" ) const ( @@ -64,7 +64,7 @@ type ( func getTestResources(tT *testing.T, settings pubsub.PublishSettings, testM []testMetric) (*PubSub, *stubTopic, []telegraf.Metric) { // Instantiate a Influx line-protocol serializer - s := &serializer.Serializer{} + s := &serializers_influx.Serializer{} require.NoError(tT, s.Init()) metrics := make([]telegraf.Metric, 0, len(testM)) diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index 28b0703dd..6108d4b05 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -15,8 +15,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" "github.com/influxdata/telegraf" - internalaws "github.com/influxdata/telegraf/plugins/common/aws" - httpconfig "github.com/influxdata/telegraf/plugins/common/http" + common_aws "github.com/influxdata/telegraf/plugins/common/aws" + common_http "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/outputs" ) @@ -29,8 +29,8 @@ type CloudWatch struct { svc *cloudwatch.Client WriteStatistics bool `toml:"write_statistics"` Log telegraf.Logger `toml:"-"` - internalaws.CredentialConfig - httpconfig.HTTPClientConfig + common_aws.CredentialConfig + common_http.HTTPClientConfig client *http.Client } diff --git a/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go b/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go index a7de9274d..d27fb009d 100644 --- a/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go +++ b/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go @@ -15,7 +15,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types" "github.com/influxdata/telegraf" - internalaws "github.com/influxdata/telegraf/plugins/common/aws" + common_aws "github.com/influxdata/telegraf/plugins/common/aws" "github.com/influxdata/telegraf/plugins/outputs" ) @@ -73,7 +73,7 @@ type CloudWatchLogs struct { Log telegraf.Logger `toml:"-"` - internalaws.CredentialConfig + common_aws.CredentialConfig } const ( diff --git a/plugins/outputs/cloudwatch_logs/cloudwatch_logs_test.go 
b/plugins/outputs/cloudwatch_logs/cloudwatch_logs_test.go index ad2ae39ee..911252604 100644 --- a/plugins/outputs/cloudwatch_logs/cloudwatch_logs_test.go +++ b/plugins/outputs/cloudwatch_logs/cloudwatch_logs_test.go @@ -9,12 +9,12 @@ import ( "testing" "time" - cloudwatchlogsV2 "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs" + "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs" "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" - internalaws "github.com/influxdata/telegraf/plugins/common/aws" + "github.com/influxdata/telegraf/plugins/common/aws" "github.com/influxdata/telegraf/testutil" ) @@ -30,21 +30,21 @@ func (c *mockCloudWatchLogs) Init(lsName string) { func (c *mockCloudWatchLogs) DescribeLogGroups( context.Context, - *cloudwatchlogsV2.DescribeLogGroupsInput, - ...func(options *cloudwatchlogsV2.Options), -) (*cloudwatchlogsV2.DescribeLogGroupsOutput, error) { + *cloudwatchlogs.DescribeLogGroupsInput, + ...func(options *cloudwatchlogs.Options), +) (*cloudwatchlogs.DescribeLogGroupsOutput, error) { return nil, nil } func (c *mockCloudWatchLogs) DescribeLogStreams( context.Context, - *cloudwatchlogsV2.DescribeLogStreamsInput, - ...func(options *cloudwatchlogsV2.Options), -) (*cloudwatchlogsV2.DescribeLogStreamsOutput, error) { + *cloudwatchlogs.DescribeLogStreamsInput, + ...func(options *cloudwatchlogs.Options), +) (*cloudwatchlogs.DescribeLogStreamsOutput, error) { arn := "arn" creationTime := time.Now().Unix() sequenceToken := "arbitraryToken" - output := &cloudwatchlogsV2.DescribeLogStreamsOutput{ + output := &cloudwatchlogs.DescribeLogStreamsOutput{ LogStreams: []types.LogStream{ { Arn: &arn, @@ -62,19 +62,19 @@ func (c *mockCloudWatchLogs) DescribeLogStreams( func (c *mockCloudWatchLogs) CreateLogStream( context.Context, - *cloudwatchlogsV2.CreateLogStreamInput, - ...func(options *cloudwatchlogsV2.Options), -) (*cloudwatchlogsV2.CreateLogStreamOutput, error) { + *cloudwatchlogs.CreateLogStreamInput, + ...func(options *cloudwatchlogs.Options), +) (*cloudwatchlogs.CreateLogStreamOutput, error) { return nil, nil } func (c *mockCloudWatchLogs) PutLogEvents( _ context.Context, - input *cloudwatchlogsV2.PutLogEventsInput, - _ ...func(options *cloudwatchlogsV2.Options), -) (*cloudwatchlogsV2.PutLogEventsOutput, error) { + input *cloudwatchlogs.PutLogEventsInput, + _ ...func(options *cloudwatchlogs.Options), +) (*cloudwatchlogs.PutLogEventsOutput, error) { sequenceToken := "arbitraryToken" - output := &cloudwatchlogsV2.PutLogEventsOutput{NextSequenceToken: &sequenceToken} + output := &cloudwatchlogs.PutLogEventsOutput{NextSequenceToken: &sequenceToken} // Saving messages c.pushedLogEvents = append(c.pushedLogEvents, input.LogEvents...) 
@@ -102,7 +102,7 @@ func TestInit(t *testing.T) { name: "log group is not set", expectedErrorString: "log group is not set", plugin: &CloudWatchLogs{ - CredentialConfig: internalaws.CredentialConfig{ + CredentialConfig: aws.CredentialConfig{ Region: "eu-central-1", AccessKey: "dummy", SecretKey: "dummy", @@ -120,7 +120,7 @@ func TestInit(t *testing.T) { name: "log stream is not set", expectedErrorString: "log stream is not set", plugin: &CloudWatchLogs{ - CredentialConfig: internalaws.CredentialConfig{ + CredentialConfig: aws.CredentialConfig{ Region: "eu-central-1", AccessKey: "dummy", SecretKey: "dummy", @@ -138,7 +138,7 @@ func TestInit(t *testing.T) { name: "log data metrics name is not set", expectedErrorString: "log data metrics name is not set", plugin: &CloudWatchLogs{ - CredentialConfig: internalaws.CredentialConfig{ + CredentialConfig: aws.CredentialConfig{ Region: "eu-central-1", AccessKey: "dummy", SecretKey: "dummy", @@ -156,7 +156,7 @@ func TestInit(t *testing.T) { name: "log data source is not set", expectedErrorString: "log data source is not set", plugin: &CloudWatchLogs{ - CredentialConfig: internalaws.CredentialConfig{ + CredentialConfig: aws.CredentialConfig{ Region: "eu-central-1", AccessKey: "dummy", SecretKey: "dummy", @@ -175,7 +175,7 @@ func TestInit(t *testing.T) { expectedErrorString: "log data source is not properly formatted, ':' is missed.\n" + "Should be 'tag:' or 'field:'", plugin: &CloudWatchLogs{ - CredentialConfig: internalaws.CredentialConfig{ + CredentialConfig: aws.CredentialConfig{ Region: "eu-central-1", AccessKey: "dummy", SecretKey: "dummy", @@ -194,7 +194,7 @@ func TestInit(t *testing.T) { expectedErrorString: "log data source is not properly formatted.\n" + "Should be 'tag:' or 'field:'", plugin: &CloudWatchLogs{ - CredentialConfig: internalaws.CredentialConfig{ + CredentialConfig: aws.CredentialConfig{ Region: "eu-central-1", AccessKey: "dummy", SecretKey: "dummy", @@ -211,7 +211,7 @@ func TestInit(t *testing.T) { { name: "valid config", plugin: &CloudWatchLogs{ - CredentialConfig: internalaws.CredentialConfig{ + CredentialConfig: aws.CredentialConfig{ Region: "eu-central-1", AccessKey: "dummy", SecretKey: "dummy", @@ -228,7 +228,7 @@ func TestInit(t *testing.T) { { name: "valid config with EndpointURL", plugin: &CloudWatchLogs{ - CredentialConfig: internalaws.CredentialConfig{ + CredentialConfig: aws.CredentialConfig{ Region: "eu-central-1", AccessKey: "dummy", SecretKey: "dummy", @@ -277,7 +277,7 @@ func TestConnect(t *testing.T) { defer ts.Close() plugin := &CloudWatchLogs{ - CredentialConfig: internalaws.CredentialConfig{ + CredentialConfig: aws.CredentialConfig{ Region: "eu-central-1", AccessKey: "dummy", SecretKey: "dummy", @@ -317,7 +317,7 @@ func TestWrite(t *testing.T) { defer ts.Close() plugin := &CloudWatchLogs{ - CredentialConfig: internalaws.CredentialConfig{ + CredentialConfig: aws.CredentialConfig{ Region: "eu-central-1", AccessKey: "dummy", SecretKey: "dummy", diff --git a/plugins/outputs/dynatrace/dynatrace.go b/plugins/outputs/dynatrace/dynatrace.go index 357e9299a..d23734213 100644 --- a/plugins/outputs/dynatrace/dynatrace.go +++ b/plugins/outputs/dynatrace/dynatrace.go @@ -12,7 +12,7 @@ import ( "strings" "time" - dtMetric "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric" + dynatrace_metric "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric" "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric/apiconstants" "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric/dimensions" @@ -101,17 +101,17 @@ 
func (d *Dynatrace) Write(metrics []telegraf.Metric) error { } name := tm.Name() + "." + field.Key - dm, err := dtMetric.NewMetric( + dm, err := dynatrace_metric.NewMetric( name, - dtMetric.WithPrefix(d.Prefix), - dtMetric.WithDimensions( + dynatrace_metric.WithPrefix(d.Prefix), + dynatrace_metric.WithDimensions( dimensions.MergeLists( d.normalizedDefaultDimensions, dimensions.NewNormalizedDimensionList(dims...), d.normalizedStaticDimensions, ), ), - dtMetric.WithTimestamp(tm.Time()), + dynatrace_metric.WithTimestamp(tm.Time()), typeOpt, ) @@ -230,33 +230,33 @@ func init() { }) } -func (d *Dynatrace) getTypeOption(metric telegraf.Metric, field *telegraf.Field) dtMetric.MetricOption { +func (d *Dynatrace) getTypeOption(metric telegraf.Metric, field *telegraf.Field) dynatrace_metric.MetricOption { metricName := metric.Name() + "." + field.Key if d.isCounterMetricsMatch(d.AddCounterMetrics, metricName) || d.isCounterMetricsPatternsMatch(d.AddCounterMetricsPatterns, metricName) { switch v := field.Value.(type) { case float64: - return dtMetric.WithFloatCounterValueDelta(v) + return dynatrace_metric.WithFloatCounterValueDelta(v) case uint64: - return dtMetric.WithIntCounterValueDelta(int64(v)) + return dynatrace_metric.WithIntCounterValueDelta(int64(v)) case int64: - return dtMetric.WithIntCounterValueDelta(v) + return dynatrace_metric.WithIntCounterValueDelta(v) default: return nil } } switch v := field.Value.(type) { case float64: - return dtMetric.WithFloatGaugeValue(v) + return dynatrace_metric.WithFloatGaugeValue(v) case uint64: - return dtMetric.WithIntGaugeValue(int64(v)) + return dynatrace_metric.WithIntGaugeValue(int64(v)) case int64: - return dtMetric.WithIntGaugeValue(v) + return dynatrace_metric.WithIntGaugeValue(v) case bool: if v { - return dtMetric.WithIntGaugeValue(1) + return dynatrace_metric.WithIntGaugeValue(1) } - return dtMetric.WithIntGaugeValue(0) + return dynatrace_metric.WithIntGaugeValue(0) } return nil diff --git a/plugins/outputs/exec/exec_test.go b/plugins/outputs/exec/exec_test.go index 7a8c4e40a..058822c31 100644 --- a/plugins/outputs/exec/exec_test.go +++ b/plugins/outputs/exec/exec_test.go @@ -13,7 +13,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" - influxParser "github.com/influxdata/telegraf/plugins/parsers/influx" + parsers_influx "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/influxdata/telegraf/testutil" ) @@ -26,13 +26,13 @@ type MockRunner struct { // Run runs the command. 
func (c *MockRunner) Run(_ time.Duration, _, _ []string, buffer io.Reader) error { - parser := influxParser.NewStreamParser(buffer) + parser := parsers_influx.NewStreamParser(buffer) numMetrics := 0 for { _, err := parser.Next() if err != nil { - if errors.Is(err, influxParser.EOF) { + if errors.Is(err, parsers_influx.EOF) { break // stream ended } continue diff --git a/plugins/outputs/execd/execd_test.go b/plugins/outputs/execd/execd_test.go index eae28d19c..1c908fe09 100644 --- a/plugins/outputs/execd/execd_test.go +++ b/plugins/outputs/execd/execd_test.go @@ -19,14 +19,14 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/parsers/influx" - influxSerializer "github.com/influxdata/telegraf/plugins/serializers/influx" + serializers_influx "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/influxdata/telegraf/testutil" ) var now = time.Date(2020, 6, 30, 16, 16, 0, 0, time.UTC) func TestExternalOutputWorks(t *testing.T) { - serializer := &influxSerializer.Serializer{} + serializer := &serializers_influx.Serializer{} require.NoError(t, serializer.Init()) exe, err := os.Executable() @@ -73,7 +73,7 @@ func TestExternalOutputWorks(t *testing.T) { } func TestBatchOutputWorks(t *testing.T) { - serializer := &influxSerializer.Serializer{} + serializer := &serializers_influx.Serializer{} require.NoError(t, serializer.Init()) exe, err := os.Executable() @@ -128,7 +128,7 @@ func TestBatchOutputWorks(t *testing.T) { } func TestPartiallyUnserializableThrowError(t *testing.T) { - serializer := &influxSerializer.Serializer{} + serializer := &serializers_influx.Serializer{} require.NoError(t, serializer.Init()) exe, err := os.Executable() @@ -165,7 +165,7 @@ func TestPartiallyUnserializableThrowError(t *testing.T) { } func TestPartiallyUnserializableCanBeSkipped(t *testing.T) { - serializer := &influxSerializer.Serializer{} + serializer := &serializers_influx.Serializer{} require.NoError(t, serializer.Init()) exe, err := os.Executable() diff --git a/plugins/outputs/graphite/graphite.go b/plugins/outputs/graphite/graphite.go index 05b92785f..c0d83e180 100644 --- a/plugins/outputs/graphite/graphite.go +++ b/plugins/outputs/graphite/graphite.go @@ -15,7 +15,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - tlsint "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers/graphite" ) @@ -44,7 +44,7 @@ type Graphite struct { Templates []string `toml:"templates"` Timeout config.Duration `toml:"timeout"` Log telegraf.Logger `toml:"-"` - tlsint.ClientConfig + common_tls.ClientConfig connections []connection serializer *graphite.GraphiteSerializer diff --git a/plugins/outputs/graylog/graylog.go b/plugins/outputs/graylog/graylog.go index d546ac6bf..6185167ec 100644 --- a/plugins/outputs/graylog/graylog.go +++ b/plugins/outputs/graylog/graylog.go @@ -20,7 +20,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - tlsint "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" ) @@ -321,7 +321,7 @@ type Graylog struct { Reconnection bool `toml:"connection_retry"` ReconnectionTime config.Duration `toml:"connection_retry_wait_time"` Log telegraf.Logger `toml:"-"` - tlsint.ClientConfig 
+ common_tls.ClientConfig writer io.Writer closers []io.WriteCloser diff --git a/plugins/outputs/graylog/graylog_test_linux.go b/plugins/outputs/graylog/graylog_test_linux.go index 0023aab78..fd5549102 100644 --- a/plugins/outputs/graylog/graylog_test_linux.go +++ b/plugins/outputs/graylog/graylog_test_linux.go @@ -19,7 +19,7 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/config" - tlsint "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" ) @@ -72,14 +72,14 @@ func TestWriteTCP(t *testing.T) { tests := []struct { name string - tlsClientCfg tlsint.ClientConfig + tlsClientCfg common_tls.ClientConfig }{ { name: "TCP", }, { name: "TLS", - tlsClientCfg: tlsint.ClientConfig{ + tlsClientCfg: common_tls.ClientConfig{ ServerName: "localhost", TLSCA: tlsClientConfig.TLSCA, TLSKey: tlsClientConfig.TLSKey, @@ -88,7 +88,7 @@ func TestWriteTCP(t *testing.T) { }, { name: "TLS no validation", - tlsClientCfg: tlsint.ClientConfig{ + tlsClientCfg: common_tls.ClientConfig{ InsecureSkipVerify: true, ServerName: "localhost", TLSKey: tlsClientConfig.TLSKey, @@ -104,7 +104,7 @@ func TestWriteTCP(t *testing.T) { address := TCPServer(t, &wg, tlsServerConfig, errs) plugin := Graylog{ - ClientConfig: tlsint.ClientConfig{ + ClientConfig: common_tls.ClientConfig{ InsecureSkipVerify: true, ServerName: "localhost", TLSKey: tlsClientConfig.TLSKey, diff --git a/plugins/outputs/health/health.go b/plugins/outputs/health/health.go index 419044d4c..e99da4044 100644 --- a/plugins/outputs/health/health.go +++ b/plugins/outputs/health/health.go @@ -15,7 +15,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" - tlsint "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" ) @@ -39,7 +39,7 @@ type Health struct { WriteTimeout config.Duration `toml:"write_timeout"` BasicUsername string `toml:"basic_username"` BasicPassword string `toml:"basic_password"` - tlsint.ServerConfig + common_tls.ServerConfig Compares []*Compares `toml:"compares"` Contains []*Contains `toml:"contains"` diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index 8e15020d1..306353513 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -14,16 +14,16 @@ import ( "strings" "time" - awsV2 "github.com/aws/aws-sdk-go-v2/aws" - v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/aws" + aws_signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4" "golang.org/x/oauth2" "google.golang.org/api/idtoken" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" - internalaws "github.com/influxdata/telegraf/plugins/common/aws" - httpconfig "github.com/influxdata/telegraf/plugins/common/http" + common_aws "github.com/influxdata/telegraf/plugins/common/aws" + common_http "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" ) @@ -52,14 +52,14 @@ type HTTP struct { UseBatchFormat bool `toml:"use_batch_format"` AwsService string `toml:"aws_service"` NonRetryableStatusCodes []int `toml:"non_retryable_statuscodes"` - httpconfig.HTTPClientConfig + common_http.HTTPClientConfig Log telegraf.Logger `toml:"-"` client *http.Client 
serializer serializers.Serializer - awsCfg *awsV2.Config - internalaws.CredentialConfig + awsCfg *aws.Config + common_aws.CredentialConfig // Google API Auth CredentialsFile string `toml:"google_application_credentials"` @@ -165,7 +165,7 @@ func (h *HTTP) writeMetric(reqBody []byte) error { } if h.awsCfg != nil { - signer := v4.NewSigner() + signer := aws_signer.NewSigner() ctx := context.Background() credentials, err := h.awsCfg.Credentials.Retrieve(ctx) diff --git a/plugins/outputs/http/http_test.go b/plugins/outputs/http/http_test.go index 2293ee545..b2a81cc27 100644 --- a/plugins/outputs/http/http_test.go +++ b/plugins/outputs/http/http_test.go @@ -17,8 +17,8 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/metric" - internalaws "github.com/influxdata/telegraf/plugins/common/aws" - httpconfig "github.com/influxdata/telegraf/plugins/common/http" + common_aws "github.com/influxdata/telegraf/plugins/common/aws" + common_http "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/common/oauth" "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/plugins/serializers/influx" @@ -147,7 +147,7 @@ func TestHTTPClientConfig(t *testing.T) { plugin: &HTTP{ URL: u.String(), Method: defaultMethod, - HTTPClientConfig: httpconfig.HTTPClientConfig{ + HTTPClientConfig: common_http.HTTPClientConfig{ IdleConnTimeout: config.Duration(5 * time.Second), }, }, @@ -159,7 +159,7 @@ func TestHTTPClientConfig(t *testing.T) { plugin: &HTTP{ URL: u.String(), Method: defaultMethod, - HTTPClientConfig: httpconfig.HTTPClientConfig{ + HTTPClientConfig: common_http.HTTPClientConfig{ MaxIdleConns: 100, MaxIdleConnsPerHost: 100, IdleConnTimeout: config.Duration(5 * time.Second), @@ -477,7 +477,7 @@ func TestOAuthClientCredentialsGrant(t *testing.T) { name: "success", plugin: &HTTP{ URL: u.String() + "/write", - HTTPClientConfig: httpconfig.HTTPClientConfig{ + HTTPClientConfig: common_http.HTTPClientConfig{ OAuth2Config: oauth.OAuth2Config{ ClientID: "howdy", ClientSecret: "secret", @@ -504,7 +504,7 @@ func TestOAuthClientCredentialsGrant(t *testing.T) { name: "audience", plugin: &HTTP{ URL: u.String() + "/write", - HTTPClientConfig: httpconfig.HTTPClientConfig{ + HTTPClientConfig: common_http.HTTPClientConfig{ OAuth2Config: oauth.OAuth2Config{ ClientID: "howdy", ClientSecret: "secret", @@ -749,7 +749,7 @@ func TestAwsCredentials(t *testing.T) { plugin: &HTTP{ URL: u.String(), AwsService: "aps", - CredentialConfig: internalaws.CredentialConfig{ + CredentialConfig: common_aws.CredentialConfig{ Region: "us-east-1", AccessKey: "dummy", SecretKey: "dummy", diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index 24b362204..816da77d5 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -12,7 +12,7 @@ import ( "github.com/gofrs/uuid/v5" "github.com/influxdata/telegraf" - internalaws "github.com/influxdata/telegraf/plugins/common/aws" + common_aws "github.com/influxdata/telegraf/plugins/common/aws" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" ) @@ -35,7 +35,7 @@ type ( serializer serializers.Serializer svc kinesisClient - internalaws.CredentialConfig + common_aws.CredentialConfig } Partition struct { diff --git a/plugins/outputs/mqtt/mqtt_test.go b/plugins/outputs/mqtt/mqtt_test.go index 74ebd6eb9..ca37dae9d 100644 --- a/plugins/outputs/mqtt/mqtt_test.go +++ 
b/plugins/outputs/mqtt/mqtt_test.go @@ -16,7 +16,7 @@ import ( "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/common/mqtt" "github.com/influxdata/telegraf/plugins/parsers/influx" - influxSerializer "github.com/influxdata/telegraf/plugins/serializers/influx" + serializers_influx "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/influxdata/telegraf/testutil" ) @@ -48,7 +48,7 @@ func TestConnectAndWriteIntegration(t *testing.T) { container := launchTestContainer(t) defer container.Terminate() var url = fmt.Sprintf("%s:%s", container.Address, container.Ports[servicePort]) - s := &influxSerializer.Serializer{} + s := &serializers_influx.Serializer{} require.NoError(t, s.Init()) m := &MQTT{ MqttConfig: mqtt.MqttConfig{ @@ -79,7 +79,7 @@ func TestConnectAndWriteIntegrationMQTTv3(t *testing.T) { defer container.Terminate() var url = fmt.Sprintf("%s:%s", container.Address, container.Ports[servicePort]) - s := &influxSerializer.Serializer{} + s := &serializers_influx.Serializer{} require.NoError(t, s.Init()) m := &MQTT{ @@ -112,7 +112,7 @@ func TestConnectAndWriteIntegrationMQTTv5(t *testing.T) { defer container.Terminate() url := fmt.Sprintf("%s:%s", container.Address, container.Ports[servicePort]) - s := &influxSerializer.Serializer{} + s := &serializers_influx.Serializer{} require.NoError(t, s.Init()) m := &MQTT{ @@ -156,7 +156,7 @@ func TestIntegrationMQTTv3(t *testing.T) { // Setup the parser / serializer pair parser := &influx.Parser{} require.NoError(t, parser.Init()) - serializer := &influxSerializer.Serializer{} + serializer := &serializers_influx.Serializer{} require.NoError(t, serializer.Init()) // Setup the plugin @@ -271,7 +271,7 @@ func TestMQTTv5Properties(t *testing.T) { } // Setup the metric serializer - serializer := &influxSerializer.Serializer{} + serializer := &serializers_influx.Serializer{} require.NoError(t, serializer.Init()) plugin.SetSerializer(serializer) @@ -308,7 +308,7 @@ func TestIntegrationMQTTLayoutNonBatch(t *testing.T) { // Setup the parser / serializer pair parser := &influx.Parser{} require.NoError(t, parser.Init()) - serializer := &influxSerializer.Serializer{} + serializer := &serializers_influx.Serializer{} require.NoError(t, serializer.Init()) // Setup the plugin @@ -395,7 +395,7 @@ func TestIntegrationMQTTLayoutBatch(t *testing.T) { // Setup the parser / serializer pair parser := &influx.Parser{} require.NoError(t, parser.Init()) - serializer := &influxSerializer.Serializer{} + serializer := &serializers_influx.Serializer{} require.NoError(t, serializer.Init()) // Setup the plugin @@ -861,7 +861,7 @@ func TestMQTTTopicGenerationTemplateIsValid(t *testing.T) { } func TestGenerateTopicName(t *testing.T) { - s := &influxSerializer.Serializer{} + s := &serializers_influx.Serializer{} require.NoError(t, s.Init()) m := &MQTT{ diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index 152b23465..3704a00a8 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -22,11 +22,11 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" - tlsint "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" - v1 "github.com/influxdata/telegraf/plugins/outputs/prometheus_client/v1" - v2 
"github.com/influxdata/telegraf/plugins/outputs/prometheus_client/v2" - serializer "github.com/influxdata/telegraf/plugins/serializers/prometheus" + "github.com/influxdata/telegraf/plugins/outputs/prometheus_client/v1" + "github.com/influxdata/telegraf/plugins/outputs/prometheus_client/v2" + serializers_prometheus "github.com/influxdata/telegraf/plugins/serializers/prometheus" ) //go:embed sample.conf @@ -47,22 +47,22 @@ type Collector interface { } type PrometheusClient struct { - Listen string `toml:"listen"` - ReadTimeout config.Duration `toml:"read_timeout"` - WriteTimeout config.Duration `toml:"write_timeout"` - MetricVersion int `toml:"metric_version"` - BasicUsername string `toml:"basic_username"` - BasicPassword config.Secret `toml:"basic_password"` - IPRange []string `toml:"ip_range"` - ExpirationInterval config.Duration `toml:"expiration_interval"` - Path string `toml:"path"` - CollectorsExclude []string `toml:"collectors_exclude"` - StringAsLabel bool `toml:"string_as_label"` - ExportTimestamp bool `toml:"export_timestamp"` - TypeMappings serializer.MetricTypes `toml:"metric_types"` - Log telegraf.Logger `toml:"-"` + Listen string `toml:"listen"` + ReadTimeout config.Duration `toml:"read_timeout"` + WriteTimeout config.Duration `toml:"write_timeout"` + MetricVersion int `toml:"metric_version"` + BasicUsername string `toml:"basic_username"` + BasicPassword config.Secret `toml:"basic_password"` + IPRange []string `toml:"ip_range"` + ExpirationInterval config.Duration `toml:"expiration_interval"` + Path string `toml:"path"` + CollectorsExclude []string `toml:"collectors_exclude"` + StringAsLabel bool `toml:"string_as_label"` + ExportTimestamp bool `toml:"export_timestamp"` + TypeMappings serializers_prometheus.MetricTypes `toml:"metric_types"` + Log telegraf.Logger `toml:"-"` - tlsint.ServerConfig + common_tls.ServerConfig server *http.Server url *url.URL diff --git a/plugins/outputs/prometheus_client/v1/collector.go b/plugins/outputs/prometheus_client/v1/collector.go index d015ddc05..9578db5e7 100644 --- a/plugins/outputs/prometheus_client/v1/collector.go +++ b/plugins/outputs/prometheus_client/v1/collector.go @@ -10,7 +10,7 @@ import ( "time" "github.com/influxdata/telegraf" - serializer "github.com/influxdata/telegraf/plugins/serializers/prometheus" + serializers_prometheus "github.com/influxdata/telegraf/plugins/serializers/prometheus" "github.com/prometheus/client_golang/prometheus" ) @@ -54,7 +54,7 @@ type Collector struct { ExpirationInterval time.Duration StringAsLabel bool ExportTimestamp bool - TypeMapping serializer.MetricTypes + TypeMapping serializers_prometheus.MetricTypes Log telegraf.Logger sync.Mutex @@ -62,13 +62,13 @@ type Collector struct { expireTicker *time.Ticker } -func NewCollector(expire time.Duration, stringsAsLabel, exportTimestamp bool, typeMapping serializer.MetricTypes, logger telegraf.Logger) *Collector { +func NewCollector(expire time.Duration, stringsAsLabel, exportTimestamp bool, typeMapping serializers_prometheus.MetricTypes, log telegraf.Logger) *Collector { c := &Collector{ ExpirationInterval: expire, StringAsLabel: stringsAsLabel, ExportTimestamp: exportTimestamp, TypeMapping: typeMapping, - Log: logger, + Log: log, fam: make(map[string]*MetricFamily), } @@ -234,7 +234,7 @@ func (c *Collector) addMetrics(metrics []telegraf.Metric) { labels := make(map[string]string) for k, v := range tags { - name, ok := serializer.SanitizeLabelName(k) + name, ok := serializers_prometheus.SanitizeLabelName(k) if !ok { continue } @@ -250,7 +250,7 @@ func (c 
*Collector) addMetrics(metrics []telegraf.Metric) { continue } - name, ok := serializer.SanitizeLabelName(fn) + name, ok := serializers_prometheus.SanitizeLabelName(fn) if !ok { continue } diff --git a/plugins/outputs/prometheus_client/v2/collector.go b/plugins/outputs/prometheus_client/v2/collector.go index 632685c16..3901d3250 100644 --- a/plugins/outputs/prometheus_client/v2/collector.go +++ b/plugins/outputs/prometheus_client/v2/collector.go @@ -5,7 +5,7 @@ import ( "time" "github.com/influxdata/telegraf" - serializer "github.com/influxdata/telegraf/plugins/serializers/prometheus" + serializers_prometheus "github.com/influxdata/telegraf/plugins/serializers/prometheus" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" ) @@ -40,11 +40,11 @@ func (m *Metric) Write(out *dto.Metric) error { type Collector struct { sync.Mutex expireDuration time.Duration - coll *serializer.Collection + coll *serializers_prometheus.Collection } -func NewCollector(expire time.Duration, stringsAsLabel, exportTimestamp bool, typeMapping serializer.MetricTypes) *Collector { - cfg := serializer.FormatConfig{ +func NewCollector(expire time.Duration, stringsAsLabel, exportTimestamp bool, typeMapping serializers_prometheus.MetricTypes) *Collector { + cfg := serializers_prometheus.FormatConfig{ StringAsLabel: stringsAsLabel, ExportTimestamp: exportTimestamp, TypeMappings: typeMapping, @@ -52,7 +52,7 @@ func NewCollector(expire time.Duration, stringsAsLabel, exportTimestamp bool, ty return &Collector{ expireDuration: expire, - coll: serializer.NewCollection(cfg), + coll: serializers_prometheus.NewCollection(cfg), } } diff --git a/plugins/outputs/socket_writer/socket_writer.go b/plugins/outputs/socket_writer/socket_writer.go index 9da9f22d4..a12e92659 100644 --- a/plugins/outputs/socket_writer/socket_writer.go +++ b/plugins/outputs/socket_writer/socket_writer.go @@ -17,7 +17,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" - tlsint "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" ) @@ -29,7 +29,7 @@ type SocketWriter struct { ContentEncoding string `toml:"content_encoding"` Address string KeepAlivePeriod *config.Duration - tlsint.ClientConfig + common_tls.ClientConfig Log telegraf.Logger `toml:"-"` serializers.Serializer diff --git a/plugins/outputs/stomp/stomp.go b/plugins/outputs/stomp/stomp.go index 5ca8e9538..7e12762d0 100644 --- a/plugins/outputs/stomp/stomp.go +++ b/plugins/outputs/stomp/stomp.go @@ -12,7 +12,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - commontls "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" ) @@ -30,7 +30,7 @@ type STOMP struct { HeartBeatSend config.Duration `toml:"heartbeat_timeout_send"` HeartBeatRec config.Duration `toml:"heartbeat_timeout_receive"` - commontls.ClientConfig + common_tls.ClientConfig conn net.Conn stomp *stomp.Conn diff --git a/plugins/outputs/syslog/syslog.go b/plugins/outputs/syslog/syslog.go index 681d771f3..fc599fce8 100644 --- a/plugins/outputs/syslog/syslog.go +++ b/plugins/outputs/syslog/syslog.go @@ -17,7 +17,7 @@ import ( "github.com/influxdata/telegraf" 
"github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" - tlsint "github.com/influxdata/telegraf/plugins/common/tls" + common_tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" ) @@ -37,7 +37,7 @@ type Syslog struct { Trailer nontransparent.TrailerType Log telegraf.Logger `toml:"-"` net.Conn - tlsint.ClientConfig + common_tls.ClientConfig mapper *SyslogMapper } diff --git a/plugins/outputs/timestream/timestream.go b/plugins/outputs/timestream/timestream.go index 7fd27cbab..04d8322e9 100644 --- a/plugins/outputs/timestream/timestream.go +++ b/plugins/outputs/timestream/timestream.go @@ -19,7 +19,7 @@ import ( "github.com/aws/smithy-go" "github.com/influxdata/telegraf" - internalaws "github.com/influxdata/telegraf/plugins/common/aws" + common_aws "github.com/influxdata/telegraf/plugins/common/aws" "github.com/influxdata/telegraf/plugins/outputs" ) @@ -47,7 +47,7 @@ type ( Log telegraf.Logger svc WriteClient - internalaws.CredentialConfig + common_aws.CredentialConfig } WriteClient interface { @@ -76,7 +76,7 @@ const MaxRecordsPerCall = 100 const MaxWriteRoutinesDefault = 1 // WriteFactory function provides a way to mock the client instantiation for testing purposes. -var WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (WriteClient, error) { +var WriteFactory = func(credentialConfig *common_aws.CredentialConfig) (WriteClient, error) { awsCreds, awsErr := credentialConfig.Credentials() if awsErr != nil { panic("Unable to load credentials config " + awsErr.Error()) diff --git a/plugins/outputs/timestream/timestream_test.go b/plugins/outputs/timestream/timestream_test.go index 391903e10..c68422316 100644 --- a/plugins/outputs/timestream/timestream_test.go +++ b/plugins/outputs/timestream/timestream_test.go @@ -18,7 +18,7 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" - internalaws "github.com/influxdata/telegraf/plugins/common/aws" + common_aws "github.com/influxdata/telegraf/plugins/common/aws" "github.com/influxdata/telegraf/testutil" ) @@ -69,7 +69,7 @@ func (m *mockTimestreamClient) DescribeDatabase( } func TestConnectValidatesConfigParameters(t *testing.T) { - WriteFactory = func(*internalaws.CredentialConfig) (WriteClient, error) { + WriteFactory = func(*common_aws.CredentialConfig) (WriteClient, error) { return &mockTimestreamClient{}, nil } // checking base arguments @@ -227,7 +227,7 @@ func TestWriteMultiMeasuresSingleTableMode(t *testing.T) { const recordCount = 100 mockClient := &mockTimestreamClient{0} - WriteFactory = func(*internalaws.CredentialConfig) (WriteClient, error) { + WriteFactory = func(*common_aws.CredentialConfig) (WriteClient, error) { return mockClient, nil } @@ -285,7 +285,7 @@ func TestWriteMultiMeasuresMultiTableMode(t *testing.T) { const recordCount = 100 mockClient := &mockTimestreamClient{0} - WriteFactory = func(*internalaws.CredentialConfig) (WriteClient, error) { + WriteFactory = func(*common_aws.CredentialConfig) (WriteClient, error) { return mockClient, nil } @@ -555,7 +555,7 @@ func (m *mockTimestreamErrorClient) DescribeDatabase( } func TestThrottlingErrorIsReturnedToTelegraf(t *testing.T) { - WriteFactory = func(*internalaws.CredentialConfig) (WriteClient, error) { + WriteFactory = func(*common_aws.CredentialConfig) (WriteClient, error) { return &mockTimestreamErrorClient{ ErrorToReturnOnWriteRecords: &types.ThrottlingException{Message: aws.String("Throttling Test")}, }, nil @@ -581,7 +581,7 @@ func 
TestThrottlingErrorIsReturnedToTelegraf(t *testing.T) { } func TestRejectedRecordsErrorResultsInMetricsBeingSkipped(t *testing.T) { - WriteFactory = func(*internalaws.CredentialConfig) (WriteClient, error) { + WriteFactory = func(*common_aws.CredentialConfig) (WriteClient, error) { return &mockTimestreamErrorClient{ ErrorToReturnOnWriteRecords: &types.RejectedRecordsException{Message: aws.String("RejectedRecords Test")}, }, nil @@ -612,7 +612,7 @@ func TestWriteWhenRequestsGreaterThanMaxWriteGoRoutinesCount(t *testing.T) { const totalRecords = maxWriteRecordsCalls * maxRecordsInWriteRecordsCall mockClient := &mockTimestreamClient{0} - WriteFactory = func(*internalaws.CredentialConfig) (WriteClient, error) { + WriteFactory = func(*common_aws.CredentialConfig) (WriteClient, error) { return mockClient, nil } @@ -651,7 +651,7 @@ func TestWriteWhenRequestsLesserThanMaxWriteGoRoutinesCount(t *testing.T) { const totalRecords = maxWriteRecordsCalls * maxRecordsInWriteRecordsCall mockClient := &mockTimestreamClient{0} - WriteFactory = func(*internalaws.CredentialConfig) (WriteClient, error) { + WriteFactory = func(*common_aws.CredentialConfig) (WriteClient, error) { return mockClient, nil } @@ -1221,7 +1221,7 @@ func TestCustomEndpoint(t *testing.T) { MappingMode: MappingModeMultiTable, DatabaseName: tsDbName, Log: testutil.Logger{}, - CredentialConfig: internalaws.CredentialConfig{EndpointURL: customEndpoint}, + CredentialConfig: common_aws.CredentialConfig{EndpointURL: customEndpoint}, } // validate config correctness diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index 17a658544..7a862df51 100644 --- a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -15,9 +15,9 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - httpconfig "github.com/influxdata/telegraf/plugins/common/http" + common_http "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/outputs" - serializer "github.com/influxdata/telegraf/plugins/serializers/wavefront" + serializers_wavefront "github.com/influxdata/telegraf/plugins/serializers/wavefront" ) //go:embed sample.conf @@ -53,7 +53,7 @@ type Wavefront struct { SourceOverride []string `toml:"source_override"` StringToNumber map[string][]map[string]float64 `toml:"string_to_number" deprecated:"1.9.0;1.35.0;use the enum processor instead"` - httpconfig.HTTPClientConfig + common_http.HTTPClientConfig sender wavefront.Sender Log telegraf.Logger `toml:"-"` @@ -168,8 +168,8 @@ func (w *Wavefront) Write(metrics []telegraf.Metric) error { return nil } -func (w *Wavefront) buildMetrics(m telegraf.Metric) []*serializer.MetricPoint { - ret := make([]*serializer.MetricPoint, 0) +func (w *Wavefront) buildMetrics(m telegraf.Metric) []*serializers_wavefront.MetricPoint { + ret := make([]*serializers_wavefront.MetricPoint, 0) for fieldName, value := range m.Fields() { var name string @@ -182,14 +182,14 @@ func (w *Wavefront) buildMetrics(m telegraf.Metric) []*serializer.MetricPoint { if w.UseRegex { name = sanitizedRegex.ReplaceAllLiteralString(name, "-") } else { - name = serializer.Sanitize(w.UseStrict, name) + name = serializers_wavefront.Sanitize(w.UseStrict, name) } if w.ConvertPaths { name = pathReplacer.Replace(name) } - metric := &serializer.MetricPoint{ + metric := &serializers_wavefront.MetricPoint{ Metric: name, Timestamp: m.Time().Unix(), } @@ -259,7 +259,7 @@ func (w *Wavefront) buildTags(mTags map[string]string) (string, 
map[string]strin if w.UseRegex { key = sanitizedRegex.ReplaceAllLiteralString(k, "-") } else { - key = serializer.Sanitize(w.UseStrict, k) + key = serializers_wavefront.Sanitize(w.UseStrict, k) } val := tagValueReplacer.Replace(v) if w.TruncateTags { @@ -382,7 +382,7 @@ func init() { ImmediateFlush: true, SendInternalMetrics: true, HTTPMaximumBatchSize: 10000, - HTTPClientConfig: httpconfig.HTTPClientConfig{Timeout: config.Duration(10 * time.Second)}, + HTTPClientConfig: common_http.HTTPClientConfig{Timeout: config.Duration(10 * time.Second)}, CSPBaseURL: "https://console.cloud.vmware.com", } }) diff --git a/plugins/outputs/wavefront/wavefront_test.go b/plugins/outputs/wavefront/wavefront_test.go index 38632046c..bca34fd4d 100644 --- a/plugins/outputs/wavefront/wavefront_test.go +++ b/plugins/outputs/wavefront/wavefront_test.go @@ -12,7 +12,7 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/outputs" - serializer "github.com/influxdata/telegraf/plugins/serializers/wavefront" + serializers_wavefront "github.com/influxdata/telegraf/plugins/serializers/wavefront" "github.com/influxdata/telegraf/testutil" ) @@ -48,25 +48,25 @@ func TestBuildMetrics(t *testing.T) { var metricTests = []struct { metric telegraf.Metric - metricPoints []serializer.MetricPoint + metricPoints []serializers_wavefront.MetricPoint }{ { testutil.TestMetric(float64(1), "testing_just*a%metric:float", "metric2"), - []serializer.MetricPoint{ + []serializers_wavefront.MetricPoint{ {Metric: w.Prefix + "testing.just-a-metric-float", Value: 1, Timestamp: timestamp, Tags: map[string]string{"tag1": "value1"}}, {Metric: w.Prefix + "testing.metric2", Value: 1, Timestamp: timestamp, Tags: map[string]string{"tag1": "value1"}}, }, }, { testutil.TestMetric(float64(1), "testing_just/another,metric:float", "metric2"), - []serializer.MetricPoint{ + []serializers_wavefront.MetricPoint{ {Metric: w.Prefix + "testing.just-another-metric-float", Value: 1, Timestamp: timestamp, Tags: map[string]string{"tag1": "value1"}}, {Metric: w.Prefix + "testing.metric2", Value: 1, Timestamp: timestamp, Tags: map[string]string{"tag1": "value1"}}, }, }, { testMetric1, - []serializer.MetricPoint{ + []serializers_wavefront.MetricPoint{ {Metric: w.Prefix + "test.simple.metric", Value: 123, Timestamp: timestamp, Source: "testHost", Tags: map[string]string{"tag1": "value1"}}, }, }, @@ -93,18 +93,18 @@ func TestBuildMetricsStrict(t *testing.T) { var metricTests = []struct { metric telegraf.Metric - metricPoints []serializer.MetricPoint + metricPoints []serializers_wavefront.MetricPoint }{ { testutil.TestMetric(float64(1), "testing_just*a%metric:float", "metric2"), - []serializer.MetricPoint{ + []serializers_wavefront.MetricPoint{ {Metric: w.Prefix + "testing.just-a-metric-float", Value: 1, Timestamp: timestamp, Tags: map[string]string{"tag1": "value1"}}, {Metric: w.Prefix + "testing.metric2", Value: 1, Timestamp: timestamp, Tags: map[string]string{"tag1": "value1"}}, }, }, { testutil.TestMetric(float64(1), "testing_just/another,metric:float", "metric2"), - []serializer.MetricPoint{ + []serializers_wavefront.MetricPoint{ { Metric: w.Prefix + "testing.just/another,metric-float", Value: 1, @@ -142,15 +142,15 @@ func TestBuildMetricsWithSimpleFields(t *testing.T) { var metricTests = []struct { metric telegraf.Metric - metricLines []serializer.MetricPoint + metricLines []serializers_wavefront.MetricPoint }{ { testutil.TestMetric(float64(1), "testing_just*a%metric:float"), - 
[]serializer.MetricPoint{{Metric: w.Prefix + "testing.just-a-metric-float.value", Value: 1}}, + []serializers_wavefront.MetricPoint{{Metric: w.Prefix + "testing.just-a-metric-float.value", Value: 1}}, }, { testMetric1, - []serializer.MetricPoint{{Metric: w.Prefix + "test.simple.metric.value", Value: 123}}, + []serializers_wavefront.MetricPoint{{Metric: w.Prefix + "test.simple.metric.value", Value: 123}}, }, } @@ -459,6 +459,6 @@ func BenchmarkReplaceAllLiteralString(b *testing.B) { func BenchmarkReplacer(b *testing.B) { for n := 0; n < b.N; n++ { - serializer.Sanitize(false, testString) + serializers_wavefront.Sanitize(false, testString) } } diff --git a/plugins/processors/dedup/dedup.go b/plugins/processors/dedup/dedup.go index 35dd1c467..e77b14b5d 100644 --- a/plugins/processors/dedup/dedup.go +++ b/plugins/processors/dedup/dedup.go @@ -10,7 +10,7 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/processors" - influxSerializer "github.com/influxdata/telegraf/plugins/serializers/influx" + serializers_influx "github.com/influxdata/telegraf/plugins/serializers/influx" ) //go:embed sample.conf @@ -122,7 +122,7 @@ func (d *Dedup) Apply(metrics ...telegraf.Metric) []telegraf.Metric { } func (d *Dedup) GetState() interface{} { - s := &influxSerializer.Serializer{} + s := &serializers_influx.Serializer{} v := make([]telegraf.Metric, 0, len(d.Cache)) for _, value := range d.Cache { v = append(v, value) diff --git a/plugins/processors/execd/README.md b/plugins/processors/execd/README.md index 0b0a0843f..fbec5c4ee 100644 --- a/plugins/processors/execd/README.md +++ b/plugins/processors/execd/README.md @@ -70,12 +70,12 @@ import ( "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/parsers/influx" - influxSerializer "github.com/influxdata/telegraf/plugins/serializers/influx" + serializers_influx "github.com/influxdata/telegraf/plugins/serializers/influx" ) func main() { parser := influx.NewStreamParser(os.Stdin) - serializer := influxSerializer.Serializer{} + serializer := serializers_influx.Serializer{} if err := serializer.Init(); err != nil { fmt.Fprintf(os.Stderr, "serializer init failed: %v\n", err) os.Exit(1) diff --git a/plugins/processors/execd/execd_test.go b/plugins/processors/execd/execd_test.go index fe57dacf7..88d2e63ad 100644 --- a/plugins/processors/execd/execd_test.go +++ b/plugins/processors/execd/execd_test.go @@ -19,7 +19,7 @@ import ( "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/processors" _ "github.com/influxdata/telegraf/plugins/serializers/all" - influxSerializer "github.com/influxdata/telegraf/plugins/serializers/influx" + serializers_influx "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/influxdata/telegraf/testutil" ) @@ -31,7 +31,7 @@ func TestExternalProcessorWorks(t *testing.T) { require.NoError(t, parser.Init()) e.SetParser(parser) - serializer := &influxSerializer.Serializer{} + serializer := &serializers_influx.Serializer{} require.NoError(t, serializer.Init()) e.SetSerializer(serializer) @@ -100,7 +100,7 @@ func TestParseLinesWithNewLines(t *testing.T) { require.NoError(t, parser.Init()) e.SetParser(parser) - serializer := &influxSerializer.Serializer{} + serializer := &serializers_influx.Serializer{} require.NoError(t, serializer.Init()) e.SetSerializer(serializer) @@ -166,7 +166,7 @@ func TestMain(m *testing.M) { func runCountMultiplierProgram() { fieldName := 
os.Getenv("FIELD_NAME") parser := influx.NewStreamParser(os.Stdin) - serializer := &influxSerializer.Serializer{} + serializer := &serializers_influx.Serializer{} //nolint:errcheck // this should always succeed serializer.Init() @@ -364,7 +364,7 @@ func TestTracking(t *testing.T) { require.NoError(t, parser.Init()) plugin.SetParser(parser) - serializer := &influxSerializer.Serializer{} + serializer := &serializers_influx.Serializer{} require.NoError(t, serializer.Init()) plugin.SetSerializer(serializer) diff --git a/plugins/secretstores/http/http.go b/plugins/secretstores/http/http.go index 47e49ea82..9dbe028b6 100644 --- a/plugins/secretstores/http/http.go +++ b/plugins/secretstores/http/http.go @@ -17,7 +17,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - chttp "github.com/influxdata/telegraf/plugins/common/http" + common_http "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/secretstores" ) @@ -35,7 +35,7 @@ type HTTP struct { SuccessStatusCodes []int `toml:"success_status_codes"` Transformation string `toml:"transformation"` Log telegraf.Logger `toml:"-"` - chttp.HTTPClientConfig + common_http.HTTPClientConfig DecryptionConfig client *http.Client diff --git a/testutil/container.go b/testutil/container.go index 7c51cbde1..eeecdd620 100644 --- a/testutil/container.go +++ b/testutil/container.go @@ -8,7 +8,7 @@ import ( "io" "strings" - dockercontainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/container" "github.com/docker/go-connections/nat" "github.com/testcontainers/testcontainers-go" "github.com/testcontainers/testcontainers-go/wait" @@ -26,7 +26,7 @@ type Container struct { Entrypoint []string Env map[string]string Files map[string]string - HostConfigModifier func(*dockercontainer.HostConfig) + HostConfigModifier func(*container.HostConfig) ExposedPorts []string Cmd []string Image string @@ -72,11 +72,11 @@ func (c *Container) Start() error { Started: true, } - container, err := testcontainers.GenericContainer(c.ctx, req) + cntnr, err := testcontainers.GenericContainer(c.ctx, req) if err != nil { return fmt.Errorf("container failed to start: %w", err) } - c.container = container + c.container = cntnr c.Logs = TestLogConsumer{ Msgs: []string{}, diff --git a/testutil/metric.go b/testutil/metric.go index bb3ad41fa..2002c3b9b 100644 --- a/testutil/metric.go +++ b/testutil/metric.go @@ -8,8 +8,9 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "github.com/influxdata/telegraf" - telegrafMetric "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/metric" ) type metricDiff struct { @@ -95,43 +96,43 @@ func lessFunc(lhs, rhs *metricDiff) bool { return false } -func newMetricDiff(metric telegraf.Metric) *metricDiff { - if metric == nil { +func newMetricDiff(telegrafMetric telegraf.Metric) *metricDiff { + if telegrafMetric == nil { return nil } m := &metricDiff{} - m.Measurement = metric.Name() + m.Measurement = telegrafMetric.Name() - m.Tags = append(m.Tags, metric.TagList()...) + m.Tags = append(m.Tags, telegrafMetric.TagList()...) sort.Slice(m.Tags, func(i, j int) bool { return m.Tags[i].Key < m.Tags[j].Key }) - m.Fields = append(m.Fields, metric.FieldList()...) + m.Fields = append(m.Fields, telegrafMetric.FieldList()...) 
sort.Slice(m.Fields, func(i, j int) bool { return m.Fields[i].Key < m.Fields[j].Key }) - m.Type = metric.Type() - m.Time = metric.Time() + m.Type = telegrafMetric.Type() + m.Time = telegrafMetric.Time() return m } -func newMetricStructureDiff(metric telegraf.Metric) *metricDiff { - if metric == nil { +func newMetricStructureDiff(telegrafMetric telegraf.Metric) *metricDiff { + if telegrafMetric == nil { return nil } m := &metricDiff{} - m.Measurement = metric.Name() + m.Measurement = telegrafMetric.Name() - m.Tags = append(m.Tags, metric.TagList()...) + m.Tags = append(m.Tags, telegrafMetric.TagList()...) sort.Slice(m.Tags, func(i, j int) bool { return m.Tags[i].Key < m.Tags[j].Key }) - for _, f := range metric.FieldList() { + for _, f := range telegrafMetric.FieldList() { sf := &telegraf.Field{ Key: f.Key, Value: reflect.Zero(reflect.TypeOf(f.Value)).Interface(), @@ -142,8 +143,8 @@ func newMetricStructureDiff(metric telegraf.Metric) *metricDiff { return m.Fields[i].Key < m.Fields[j].Key }) - m.Type = metric.Type() - m.Time = metric.Time() + m.Type = telegrafMetric.Type() + m.Time = telegrafMetric.Time() return m } @@ -364,12 +365,12 @@ func MustMetric( tm time.Time, tp ...telegraf.ValueType, ) telegraf.Metric { - m := telegrafMetric.New(name, tags, fields, tm, tp...) + m := metric.New(name, tags, fields, tm, tp...) return m } func FromTestMetric(met *Metric) telegraf.Metric { - m := telegrafMetric.New(met.Measurement, met.Tags, met.Fields, met.Time, met.Type) + m := metric.New(met.Measurement, met.Tags, met.Fields, met.Time, met.Type) return m }
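
Taken together, the hunks above apply the two conventions named in the subject: surviving aliases are rewritten to lower snake_case (common_tls, common_http, common_aws, serializers_influx, serializers_prometheus, ...), and aliases that merely restate the package name (mock, v1, v2, cloudwatchlogs, container) are removed. The following is a minimal, self-contained sketch of the resulting import style; it is not part of the patch, and the function shown is purely illustrative.

package example

import (
	"time"

	// camelCase alias dropped; the package name "metric" is used directly.
	"github.com/influxdata/telegraf/metric"
	// snake_case alias matching the import-alias-naming pattern.
	serializers_influx "github.com/influxdata/telegraf/plugins/serializers/influx"
)

// serializeOne builds a single metric and serializes it to influx line
// protocol, exercising both renamed imports.
func serializeOne() ([]byte, error) {
	s := &serializers_influx.Serializer{}
	if err := s.Init(); err != nil {
		return nil, err
	}
	m := metric.New("example", map[string]string{"tag1": "value1"}, map[string]interface{}{"value": 42}, time.Now())
	return s.Serialize(m)
}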