chore(linters): Enable `import-alias-naming` and `redundant-import-alias` rules for revive (#15836)
parent 7d82709caf
commit 9415d8e7e9
@@ -270,6 +270,9 @@ linters-settings:
- name: get-return
- name: identical-branches
- name: if-return
- name: import-alias-naming
  arguments:
    - "^[a-z][a-z0-9_]*[a-z0-9]+$"
- name: import-shadowing
- name: increment-decrement
- name: indent-error-flow
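The string added under `arguments` is the alias pattern that `import-alias-naming` enforces; it is a plain regular expression, so a candidate alias can be checked against it directly. A minimal sketch (not part of the commit) using aliases that appear in this diff:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The alias pattern configured for revive's import-alias-naming rule above.
	pattern := regexp.MustCompile(`^[a-z][a-z0-9_]*[a-z0-9]+$`)

	// crypto_rand and serializers_prometheus (the new aliases) match;
	// cryptoRand and awsV2 (the old camelCase ones) do not.
	for _, alias := range []string{"crypto_rand", "serializers_prometheus", "cryptoRand", "awsV2"} {
		fmt.Printf("%-24s allowed=%v\n", alias, pattern.MatchString(alias))
	}
}
```

Aliases that fail this check are renamed to snake_case or dropped entirely in the file changes below.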
@@ -285,6 +288,7 @@ linters-settings:
- name: range-val-in-closure
- name: receiver-naming
- name: redefines-builtin-id
- name: redundant-import-alias
- name: string-of-int
- name: struct-tag
- name: superfluous-else
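`redundant-import-alias` complements the naming rule by flagging an alias that merely restates the imported package's own name. A hedged sketch of an import block that trips both rules (illustrative lines, not taken verbatim from this commit):

```go
package example

import (
	// import-alias-naming: the alias must match ^[a-z][a-z0-9_]*[a-z0-9]+$, so a
	// camelCase alias such as cryptoRand is rejected; crypto_rand would pass.
	cryptoRand "crypto/rand"

	// redundant-import-alias: the alias repeats the package's own name ("config")
	// and adds nothing, so the rule asks for it to be removed.
	config "github.com/influxdata/telegraf/config"
)

// Referenced only so the imports above are considered used.
var (
	_ = cryptoRand.Reader
	_ config.Duration
)
```

The hunks that follow are the mechanical fallout of enabling the two rules: camelCase aliases become snake_case, and aliases that only restate the package name are dropped.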
@@ -18,7 +18,6 @@ import (

"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/influxdata/telegraf/plugins/parsers/json_v2"
"github.com/stretchr/testify/require"

"github.com/influxdata/telegraf"
@@ -33,10 +32,11 @@ import (
"github.com/influxdata/telegraf/plugins/parsers"
_ "github.com/influxdata/telegraf/plugins/parsers/all" // Blank import to have all parsers for testing
"github.com/influxdata/telegraf/plugins/parsers/json"
"github.com/influxdata/telegraf/plugins/parsers/json_v2"
"github.com/influxdata/telegraf/plugins/processors"
"github.com/influxdata/telegraf/plugins/serializers"
_ "github.com/influxdata/telegraf/plugins/serializers/all" // Blank import to have all serializers for testing
promserializer "github.com/influxdata/telegraf/plugins/serializers/prometheus"
serializers_prometheus "github.com/influxdata/telegraf/plugins/serializers/prometheus"
"github.com/influxdata/telegraf/testutil"
)
@@ -687,7 +687,7 @@ func TestConfig_SerializerInterfaceNewFormat(t *testing.T) {
// Ignore all unexported fields and fields not relevant for functionality
options := []cmp.Option{
cmpopts.IgnoreUnexported(stype),
cmpopts.IgnoreUnexported(reflect.Indirect(reflect.ValueOf(promserializer.MetricTypes{})).Interface()),
cmpopts.IgnoreUnexported(reflect.Indirect(reflect.ValueOf(serializers_prometheus.MetricTypes{})).Interface()),
cmpopts.IgnoreTypes(sync.Mutex{}, regexp.Regexp{}),
cmpopts.IgnoreInterfaces(struct{ telegraf.Logger }{}),
}
@@ -779,7 +779,7 @@ func TestConfig_SerializerInterfaceOldFormat(t *testing.T) {
// Ignore all unexported fields and fields not relevant for functionality
options := []cmp.Option{
cmpopts.IgnoreUnexported(stype),
cmpopts.IgnoreUnexported(reflect.Indirect(reflect.ValueOf(promserializer.MetricTypes{})).Interface()),
cmpopts.IgnoreUnexported(reflect.Indirect(reflect.ValueOf(serializers_prometheus.MetricTypes{})).Interface()),
cmpopts.IgnoreTypes(sync.Mutex{}, regexp.Regexp{}),
cmpopts.IgnoreInterfaces(struct{ telegraf.Logger }{}),
}
@@ -4,7 +4,7 @@ import (
"bufio"
"compress/gzip"
"context"
cryptoRand "crypto/rand"
crypto_rand "crypto/rand"
"errors"
"fmt"
"io"
@@ -94,7 +94,7 @@ func ReadLines(filename string) ([]string, error) {
// RandomString returns a random string of alphanumeric characters
func RandomString(n int) (string, error) {
var bytes = make([]byte, n)
_, err := cryptoRand.Read(bytes)
_, err := crypto_rand.Read(bytes)
if err != nil {
return "", err
}
@@ -8,7 +8,7 @@ import (
"time"

"github.com/influxdata/telegraf"
telegrafConfig "github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/plugins/aggregators"
)
@@ -29,18 +29,18 @@ const bucketNegInf = "-Inf"

// HistogramAggregator is aggregator with histogram configs and particular histograms for defined metrics
type HistogramAggregator struct {
Configs []config `toml:"config"`
ResetBuckets bool `toml:"reset"`
Cumulative bool `toml:"cumulative"`
ExpirationInterval telegrafConfig.Duration `toml:"expiration_interval"`
PushOnlyOnUpdate bool `toml:"push_only_on_update"`
Configs []bucketConfig `toml:"config"`
ResetBuckets bool `toml:"reset"`
Cumulative bool `toml:"cumulative"`
ExpirationInterval config.Duration `toml:"expiration_interval"`
PushOnlyOnUpdate bool `toml:"push_only_on_update"`

buckets bucketsByMetrics
cache map[uint64]metricHistogramCollection
}

// config is the config, which contains name, field of metric and histogram buckets.
type config struct {
// bucketConfig is the config, which contains name, field of metric and histogram buckets.
type bucketConfig struct {
Metric string `toml:"measurement_name"`
Fields []string `toml:"fields"`
Buckets buckets `toml:"buckets"`
@@ -239,9 +239,9 @@ func (h *HistogramAggregator) getBuckets(metric string, field string) []float64
return buckets
}

for _, config := range h.Configs {
if config.Metric == metric {
if !isBucketExists(field, config) {
for _, cfg := range h.Configs {
if cfg.Metric == metric {
if !isBucketExists(field, cfg) {
continue
}
@@ -249,7 +249,7 @@ func (h *HistogramAggregator) getBuckets(metric string, field string) []float64
h.buckets[metric] = make(bucketsByFields)
}

h.buckets[metric][field] = sortBuckets(config.Buckets)
h.buckets[metric][field] = sortBuckets(cfg.Buckets)
}
}
@@ -257,7 +257,7 @@ func (h *HistogramAggregator) getBuckets(metric string, field string) []float64
}

// isBucketExists checks if buckets exists for the passed field
func isBucketExists(field string, cfg config) bool {
func isBucketExists(field string, cfg bucketConfig) bool {
if len(cfg.Fields) == 0 {
return true
}
@@ -8,7 +8,7 @@ import (
"github.com/stretchr/testify/require"

"github.com/influxdata/telegraf"
telegrafConfig "github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
@@ -17,16 +17,16 @@ type fields map[string]interface{}
type tags map[string]string

// NewTestHistogram creates new test histogram aggregation with specified config
func NewTestHistogram(cfg []config, reset bool, cumulative bool, pushOnlyOnUpdate bool) telegraf.Aggregator {
func NewTestHistogram(cfg []bucketConfig, reset bool, cumulative bool, pushOnlyOnUpdate bool) telegraf.Aggregator {
return NewTestHistogramWithExpirationInterval(cfg, reset, cumulative, pushOnlyOnUpdate, 0)
}

func NewTestHistogramWithExpirationInterval(
cfg []config,
cfg []bucketConfig,
reset bool,
cumulative bool,
pushOnlyOnUpdate bool,
expirationInterval telegrafConfig.Duration,
expirationInterval config.Duration,
) telegraf.Aggregator {
htm := NewHistogramAggregator()
htm.Configs = cfg
@@ -85,8 +85,8 @@ func BenchmarkApply(b *testing.B) {

// TestHistogram tests metrics for one period and for one field
func TestHistogram(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
var cfg []bucketConfig
cfg = append(cfg, bucketConfig{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
histogram := NewTestHistogram(cfg, false, true, false)

acc := &testutil.Accumulator{}
@@ -107,8 +107,8 @@ func TestHistogram(t *testing.T) {

// TestHistogram tests metrics for one period, for one field and push only on histogram update
func TestHistogramPushOnUpdate(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
var cfg []bucketConfig
cfg = append(cfg, bucketConfig{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
histogram := NewTestHistogram(cfg, false, true, true)

acc := &testutil.Accumulator{}
@@ -143,8 +143,8 @@ func TestHistogramPushOnUpdate(t *testing.T) {

// TestHistogramNonCumulative tests metrics for one period and for one field
func TestHistogramNonCumulative(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
var cfg []bucketConfig
cfg = append(cfg, bucketConfig{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
histogram := NewTestHistogram(cfg, false, false, false)

acc := &testutil.Accumulator{}
@@ -165,8 +165,8 @@ func TestHistogramNonCumulative(t *testing.T) {

// TestHistogramWithReset tests metrics for one period and for one field, with reset between metrics adding
func TestHistogramWithReset(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
var cfg []bucketConfig
cfg = append(cfg, bucketConfig{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
histogram := NewTestHistogram(cfg, true, true, false)

acc := &testutil.Accumulator{}
@@ -187,7 +187,7 @@ func TestHistogramWithReset(t *testing.T) {

// TestHistogramWithAllFields tests two metrics for one period and for all fields
func TestHistogramWithAllFields(t *testing.T) {
cfg := []config{
cfg := []bucketConfig{
{Metric: "first_metric_name", Buckets: []float64{0.0, 15.5, 20.0, 30.0, 40.0}},
{Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}},
}
@@ -266,7 +266,7 @@ func TestHistogramWithAllFields(t *testing.T) {

// TestHistogramWithAllFieldsNonCumulative tests two metrics for one period and for all fields
func TestHistogramWithAllFieldsNonCumulative(t *testing.T) {
cfg := []config{
cfg := []bucketConfig{
{Metric: "first_metric_name", Buckets: []float64{0.0, 15.5, 20.0, 30.0, 40.0}},
{Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}},
}
@@ -370,8 +370,8 @@ func TestHistogramWithAllFieldsNonCumulative(t *testing.T) {
// TestHistogramWithTwoPeriodsAndAllFields tests two metrics getting added with a push/reset in between (simulates
// getting added in different periods) for all fields
func TestHistogramWithTwoPeriodsAndAllFields(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
var cfg []bucketConfig
cfg = append(cfg, bucketConfig{Metric: "first_metric_name", Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
histogram := NewTestHistogram(cfg, false, true, false)

acc := &testutil.Accumulator{}
@@ -415,8 +415,8 @@ func TestWrongBucketsOrder(t *testing.T) {
}
}()

var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 90.0, 20.0, 30.0, 40.0}})
var cfg []bucketConfig
cfg = append(cfg, bucketConfig{Metric: "first_metric_name", Buckets: []float64{0.0, 90.0, 20.0, 30.0, 40.0}})
histogram := NewTestHistogram(cfg, false, true, false)
histogram.Add(firstMetric2)
}
@@ -431,11 +431,11 @@ func TestHistogramMetricExpiration(t *testing.T) {
timeNow = time.Now
}()

cfg := []config{
cfg := []bucketConfig{
{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}},
{Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}},
}
histogram := NewTestHistogramWithExpirationInterval(cfg, false, true, false, telegrafConfig.Duration(30))
histogram := NewTestHistogramWithExpirationInterval(cfg, false, true, false, config.Duration(30))

acc := &testutil.Accumulator{}
@@ -1,9 +1,10 @@
package auth

import (
"github.com/stretchr/testify/require"
"net/http/httptest"
"testing"

"github.com/stretchr/testify/require"
)

func TestBasicAuth_VerifyWithCredentials(t *testing.T) {
@@ -3,10 +3,10 @@ package aws
import (
"context"

awsV2 "github.com/aws/aws-sdk-go-v2/aws"
configV2 "github.com/aws/aws-sdk-go-v2/config"
credentialsV2 "github.com/aws/aws-sdk-go-v2/credentials"
stscredsV2 "github.com/aws/aws-sdk-go-v2/credentials/stscreds"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
"github.com/aws/aws-sdk-go-v2/service/sts"
)
@@ -24,61 +24,61 @@ type CredentialConfig struct {
WebIdentityTokenFile string `toml:"web_identity_token_file"`
}

func (c *CredentialConfig) Credentials() (awsV2.Config, error) {
func (c *CredentialConfig) Credentials() (aws.Config, error) {
if c.RoleARN != "" {
return c.configWithAssumeCredentials()
}
return c.configWithRootCredentials()
}

func (c *CredentialConfig) configWithRootCredentials() (awsV2.Config, error) {
options := []func(*configV2.LoadOptions) error{
configV2.WithRegion(c.Region),
func (c *CredentialConfig) configWithRootCredentials() (aws.Config, error) {
options := []func(*config.LoadOptions) error{
config.WithRegion(c.Region),
}

if c.Profile != "" {
options = append(options, configV2.WithSharedConfigProfile(c.Profile))
options = append(options, config.WithSharedConfigProfile(c.Profile))
}
if c.Filename != "" {
options = append(options, configV2.WithSharedCredentialsFiles([]string{c.Filename}))
options = append(options, config.WithSharedCredentialsFiles([]string{c.Filename}))
}

if c.AccessKey != "" || c.SecretKey != "" {
provider := credentialsV2.NewStaticCredentialsProvider(c.AccessKey, c.SecretKey, c.Token)
options = append(options, configV2.WithCredentialsProvider(provider))
provider := credentials.NewStaticCredentialsProvider(c.AccessKey, c.SecretKey, c.Token)
options = append(options, config.WithCredentialsProvider(provider))
}

return configV2.LoadDefaultConfig(context.Background(), options...)
return config.LoadDefaultConfig(context.Background(), options...)
}

func (c *CredentialConfig) configWithAssumeCredentials() (awsV2.Config, error) {
func (c *CredentialConfig) configWithAssumeCredentials() (aws.Config, error) {
// To generate credentials using assumeRole, we need to create AWS STS client with the default AWS endpoint,
defaultConfig, err := c.configWithRootCredentials()
if err != nil {
return awsV2.Config{}, err
return aws.Config{}, err
}

var provider awsV2.CredentialsProvider
var provider aws.CredentialsProvider
stsService := sts.NewFromConfig(defaultConfig)
if c.WebIdentityTokenFile != "" {
provider = stscredsV2.NewWebIdentityRoleProvider(
provider = stscreds.NewWebIdentityRoleProvider(
stsService,
c.RoleARN,
stscredsV2.IdentityTokenFile(c.WebIdentityTokenFile),
func(opts *stscredsV2.WebIdentityRoleOptions) {
stscreds.IdentityTokenFile(c.WebIdentityTokenFile),
func(opts *stscreds.WebIdentityRoleOptions) {
if c.RoleSessionName != "" {
opts.RoleSessionName = c.RoleSessionName
}
},
)
} else {
provider = stscredsV2.NewAssumeRoleProvider(stsService, c.RoleARN, func(opts *stscredsV2.AssumeRoleOptions) {
provider = stscreds.NewAssumeRoleProvider(stsService, c.RoleARN, func(opts *stscreds.AssumeRoleOptions) {
if c.RoleSessionName != "" {
opts.RoleSessionName = c.RoleSessionName
}
})
}

defaultConfig.Credentials = awsV2.NewCredentialsCache(provider)
defaultConfig.Credentials = aws.NewCredentialsCache(provider)
return defaultConfig, nil
}
@@ -12,7 +12,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/plugins/common/cookie"
oauthConfig "github.com/influxdata/telegraf/plugins/common/oauth"
"github.com/influxdata/telegraf/plugins/common/oauth"
"github.com/influxdata/telegraf/plugins/common/proxy"
"github.com/influxdata/telegraf/plugins/common/tls"
)
@@ -27,7 +27,7 @@ type HTTPClientConfig struct {

proxy.HTTPProxy
tls.ClientConfig
oauthConfig.OAuth2Config
oauth.OAuth2Config
cookie.CookieAuthConfig
}
@@ -9,7 +9,7 @@ import (
"github.com/IBM/sarama"

"github.com/influxdata/telegraf"
tgConf "github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/plugins/common/tls"
)
@@ -19,10 +19,9 @@ type ReadConfig struct {
}

// SetConfig on the sarama.Config object from the ReadConfig struct.
func (k *ReadConfig) SetConfig(config *sarama.Config, log telegraf.Logger) error {
config.Consumer.Return.Errors = true

return k.Config.SetConfig(config, log)
func (k *ReadConfig) SetConfig(cfg *sarama.Config, log telegraf.Logger) error {
cfg.Consumer.Return.Errors = true
return k.Config.SetConfig(cfg, log)
}

// WriteConfig for kafka clients meaning to write to kafka
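One side effect shows up in this file: with the `tgConf` alias gone, the package is now referred to as plain `config`, so a parameter named `config` would shadow the import (and trip the already enabled `import-shadowing` rule). That is why the `SetConfig` parameters here are renamed to `cfg`. A small sketch of the collision, with a hypothetical helper name:

```go
package kafka

import (
	"time"

	"github.com/IBM/sarama"

	"github.com/influxdata/telegraf/config"
)

// applyTimeout is a hypothetical helper: naming its first parameter "config"
// would shadow the imported telegraf config package, so "cfg" is used instead.
func applyTimeout(cfg *sarama.Config, timeout config.Duration) {
	cfg.Net.DialTimeout = time.Duration(timeout)
}
```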
@ -36,18 +35,18 @@ type WriteConfig struct {
|
|||
}
|
||||
|
||||
// SetConfig on the sarama.Config object from the WriteConfig struct.
|
||||
func (k *WriteConfig) SetConfig(config *sarama.Config, log telegraf.Logger) error {
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Idempotent = k.IdempotentWrites
|
||||
config.Producer.Retry.Max = k.MaxRetry
|
||||
func (k *WriteConfig) SetConfig(cfg *sarama.Config, log telegraf.Logger) error {
|
||||
cfg.Producer.Return.Successes = true
|
||||
cfg.Producer.Idempotent = k.IdempotentWrites
|
||||
cfg.Producer.Retry.Max = k.MaxRetry
|
||||
if k.MaxMessageBytes > 0 {
|
||||
config.Producer.MaxMessageBytes = k.MaxMessageBytes
|
||||
cfg.Producer.MaxMessageBytes = k.MaxMessageBytes
|
||||
}
|
||||
config.Producer.RequiredAcks = sarama.RequiredAcks(k.RequiredAcks)
|
||||
if config.Producer.Idempotent {
|
||||
config.Net.MaxOpenRequests = 1
|
||||
cfg.Producer.RequiredAcks = sarama.RequiredAcks(k.RequiredAcks)
|
||||
if cfg.Producer.Idempotent {
|
||||
cfg.Net.MaxOpenRequests = 1
|
||||
}
|
||||
return k.Config.SetConfig(config, log)
|
||||
return k.Config.SetConfig(cfg, log)
|
||||
}
|
||||
|
||||
// Config common to all Kafka clients.
|
||||
|
|
@ -59,12 +58,12 @@ type Config struct {
|
|||
ClientID string `toml:"client_id"`
|
||||
CompressionCodec int `toml:"compression_codec"`
|
||||
EnableTLS *bool `toml:"enable_tls"`
|
||||
KeepAlivePeriod *tgConf.Duration `toml:"keep_alive_period"`
|
||||
KeepAlivePeriod *config.Duration `toml:"keep_alive_period"`
|
||||
|
||||
MetadataRetryMax int `toml:"metadata_retry_max"`
|
||||
MetadataRetryType string `toml:"metadata_retry_type"`
|
||||
MetadataRetryBackoff tgConf.Duration `toml:"metadata_retry_backoff"`
|
||||
MetadataRetryMaxDuration tgConf.Duration `toml:"metadata_retry_max_duration"`
|
||||
MetadataRetryBackoff config.Duration `toml:"metadata_retry_backoff"`
|
||||
MetadataRetryMaxDuration config.Duration `toml:"metadata_retry_max_duration"`
|
||||
|
||||
// Disable full metadata fetching
|
||||
MetadataFull *bool `toml:"metadata_full"`
|
||||
|
|
@ -83,26 +82,26 @@ func makeBackoffFunc(backoff, maxDuration time.Duration) BackoffFunc {
|
|||
}
|
||||
|
||||
// SetConfig on the sarama.Config object from the Config struct.
|
||||
func (k *Config) SetConfig(config *sarama.Config, log telegraf.Logger) error {
|
||||
func (k *Config) SetConfig(cfg *sarama.Config, log telegraf.Logger) error {
|
||||
if k.Version != "" {
|
||||
version, err := sarama.ParseKafkaVersion(k.Version)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
config.Version = version
|
||||
cfg.Version = version
|
||||
}
|
||||
|
||||
if k.ClientID != "" {
|
||||
config.ClientID = k.ClientID
|
||||
cfg.ClientID = k.ClientID
|
||||
} else {
|
||||
config.ClientID = "Telegraf"
|
||||
cfg.ClientID = "Telegraf"
|
||||
}
|
||||
|
||||
config.Producer.Compression = sarama.CompressionCodec(k.CompressionCodec)
|
||||
cfg.Producer.Compression = sarama.CompressionCodec(k.CompressionCodec)
|
||||
|
||||
if k.EnableTLS != nil && *k.EnableTLS {
|
||||
config.Net.TLS.Enable = true
|
||||
cfg.Net.TLS.Enable = true
|
||||
}
|
||||
|
||||
tlsConfig, err := k.ClientConfig.TLSConfig()
|
||||
|
|
@ -111,33 +110,33 @@ func (k *Config) SetConfig(config *sarama.Config, log telegraf.Logger) error {
|
|||
}
|
||||
|
||||
if tlsConfig != nil {
|
||||
config.Net.TLS.Config = tlsConfig
|
||||
cfg.Net.TLS.Config = tlsConfig
|
||||
|
||||
// To maintain backwards compatibility, if the enable_tls option is not
|
||||
// set TLS is enabled if a non-default TLS config is used.
|
||||
if k.EnableTLS == nil {
|
||||
config.Net.TLS.Enable = true
|
||||
cfg.Net.TLS.Enable = true
|
||||
}
|
||||
}
|
||||
|
||||
if k.KeepAlivePeriod != nil {
|
||||
// Defaults to OS setting (15s currently)
|
||||
config.Net.KeepAlive = time.Duration(*k.KeepAlivePeriod)
|
||||
cfg.Net.KeepAlive = time.Duration(*k.KeepAlivePeriod)
|
||||
}
|
||||
|
||||
if k.MetadataFull != nil {
|
||||
// Defaults to true in Sarama
|
||||
config.Metadata.Full = *k.MetadataFull
|
||||
cfg.Metadata.Full = *k.MetadataFull
|
||||
}
|
||||
|
||||
if k.MetadataRetryMax != 0 {
|
||||
config.Metadata.Retry.Max = k.MetadataRetryMax
|
||||
cfg.Metadata.Retry.Max = k.MetadataRetryMax
|
||||
}
|
||||
|
||||
if k.MetadataRetryBackoff != 0 {
|
||||
// If config.Metadata.Retry.BackoffFunc is set, sarama ignores
|
||||
// config.Metadata.Retry.Backoff
|
||||
config.Metadata.Retry.Backoff = time.Duration(k.MetadataRetryBackoff)
|
||||
// If cfg.Metadata.Retry.BackoffFunc is set, sarama ignores
|
||||
// cfg.Metadata.Retry.Backoff
|
||||
cfg.Metadata.Retry.Backoff = time.Duration(k.MetadataRetryBackoff)
|
||||
}
|
||||
|
||||
switch strings.ToLower(k.MetadataRetryType) {
|
||||
|
|
@ -145,15 +144,15 @@ func (k *Config) SetConfig(config *sarama.Config, log telegraf.Logger) error {
|
|||
return errors.New("invalid metadata retry type")
|
||||
case "exponential":
|
||||
if k.MetadataRetryBackoff == 0 {
|
||||
k.MetadataRetryBackoff = tgConf.Duration(250 * time.Millisecond)
|
||||
k.MetadataRetryBackoff = config.Duration(250 * time.Millisecond)
|
||||
log.Warnf("metadata_retry_backoff is 0, using %s", time.Duration(k.MetadataRetryBackoff))
|
||||
}
|
||||
config.Metadata.Retry.BackoffFunc = makeBackoffFunc(
|
||||
cfg.Metadata.Retry.BackoffFunc = makeBackoffFunc(
|
||||
time.Duration(k.MetadataRetryBackoff),
|
||||
time.Duration(k.MetadataRetryMaxDuration),
|
||||
)
|
||||
case "constant", "":
|
||||
}
|
||||
|
||||
return k.SetSASLConfig(config)
|
||||
return k.SetSASLConfig(cfg)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -8,12 +8,12 @@ import (
|
|||
"net/http"
|
||||
"net/url"
|
||||
|
||||
netProxy "golang.org/x/net/proxy"
|
||||
"golang.org/x/net/proxy"
|
||||
)
|
||||
|
||||
// httpConnectProxy proxies (only?) TCP over a HTTP tunnel using the CONNECT method
|
||||
type httpConnectProxy struct {
|
||||
forward netProxy.Dialer
|
||||
forward proxy.Dialer
|
||||
url *url.URL
|
||||
}
|
||||
|
||||
|
|
@ -25,7 +25,7 @@ func (c *httpConnectProxy) DialContext(ctx context.Context, network, addr string
|
|||
|
||||
var proxyConn net.Conn
|
||||
var err error
|
||||
if dialer, ok := c.forward.(netProxy.ContextDialer); ok {
|
||||
if dialer, ok := c.forward.(proxy.ContextDialer); ok {
|
||||
proxyConn, err = dialer.DialContext(ctx, "tcp", c.url.Host)
|
||||
} else {
|
||||
shim := contextDialerShim{c.forward}
|
||||
|
|
@ -93,14 +93,14 @@ func (c *httpConnectProxy) Dial(network, addr string) (net.Conn, error) {
|
|||
return c.DialContext(context.Background(), network, addr)
|
||||
}
|
||||
|
||||
func newHTTPConnectProxy(proxyURL *url.URL, forward netProxy.Dialer) (netProxy.Dialer, error) {
|
||||
func newHTTPConnectProxy(proxyURL *url.URL, forward proxy.Dialer) (proxy.Dialer, error) {
|
||||
return &httpConnectProxy{forward, proxyURL}, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Register new proxy types
|
||||
netProxy.RegisterDialerType("http", newHTTPConnectProxy)
|
||||
netProxy.RegisterDialerType("https", newHTTPConnectProxy)
|
||||
proxy.RegisterDialerType("http", newHTTPConnectProxy)
|
||||
proxy.RegisterDialerType("https", newHTTPConnectProxy)
|
||||
}
|
||||
|
||||
// contextDialerShim allows cancellation of the dial from a context even if the underlying
|
||||
|
|
@ -108,7 +108,7 @@ func init() {
|
|||
// unless a new proxy type is added that doesn't implement `proxy.ContextDialer`, as all the
|
||||
// standard library dialers implement `proxy.ContextDialer`.
|
||||
type contextDialerShim struct {
|
||||
dialer netProxy.Dialer
|
||||
dialer proxy.Dialer
|
||||
}
|
||||
|
||||
func (cd *contextDialerShim) Dial(network, addr string) (net.Conn, error) {
|
||||
|
|
|
|||
|
|
@ -5,11 +5,11 @@ import (
|
|||
"net"
|
||||
"time"
|
||||
|
||||
netProxy "golang.org/x/net/proxy"
|
||||
"golang.org/x/net/proxy"
|
||||
)
|
||||
|
||||
type ProxiedDialer struct {
|
||||
dialer netProxy.Dialer
|
||||
dialer proxy.Dialer
|
||||
}
|
||||
|
||||
func (pd *ProxiedDialer) Dial(network, addr string) (net.Conn, error) {
|
||||
|
|
@ -17,7 +17,7 @@ func (pd *ProxiedDialer) Dial(network, addr string) (net.Conn, error) {
|
|||
}
|
||||
|
||||
func (pd *ProxiedDialer) DialContext(ctx context.Context, network, addr string) (net.Conn, error) {
|
||||
if contextDialer, ok := pd.dialer.(netProxy.ContextDialer); ok {
|
||||
if contextDialer, ok := pd.dialer.(proxy.ContextDialer); ok {
|
||||
return contextDialer.DialContext(ctx, network, addr)
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ import (
|
|||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
tgConfig "github.com/influxdata/telegraf/config"
|
||||
cfg "github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdata/telegraf/plugins/processors"
|
||||
)
|
||||
|
|
@ -61,9 +61,9 @@ func TestLoadingProcessorWithConfig(t *testing.T) {
|
|||
}
|
||||
|
||||
type testDurationInput struct {
|
||||
Duration tgConfig.Duration `toml:"duration"`
|
||||
Size tgConfig.Size `toml:"size"`
|
||||
Hex int64 `toml:"hex"`
|
||||
Duration cfg.Duration `toml:"duration"`
|
||||
Size cfg.Size `toml:"size"`
|
||||
Hex int64 `toml:"hex"`
|
||||
}
|
||||
|
||||
func (i *testDurationInput) SampleConfig() string {
|
||||
|
|
|
|||
|
|
@ -13,7 +13,7 @@ import (
|
|||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/metric"
|
||||
"github.com/influxdata/telegraf/plugins/parsers/influx"
|
||||
influxSerializer "github.com/influxdata/telegraf/plugins/serializers/influx"
|
||||
serializers_influx "github.com/influxdata/telegraf/plugins/serializers/influx"
|
||||
)
|
||||
|
||||
func TestProcessorShim(t *testing.T) {
|
||||
|
|
@ -52,7 +52,7 @@ func testSendAndReceive(t *testing.T, fieldKey string, fieldValue string) {
|
|||
wg.Done()
|
||||
}()
|
||||
|
||||
serializer := &influxSerializer.Serializer{}
|
||||
serializer := &serializers_influx.Serializer{}
|
||||
require.NoError(t, serializer.Init())
|
||||
|
||||
parser := influx.Parser{}
|
||||
|
|
|
|||
|
|
@ -12,7 +12,7 @@ import (
|
|||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
tlsint "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
)
|
||||
|
||||
type CallbackData func(net.Addr, []byte)
|
||||
|
|
@ -34,7 +34,7 @@ type Config struct {
|
|||
SocketMode string `toml:"socket_mode"`
|
||||
ContentEncoding string `toml:"content_encoding"`
|
||||
MaxDecompressionSize config.Size `toml:"max_decompression_size"`
|
||||
tlsint.ServerConfig
|
||||
common_tls.ServerConfig
|
||||
}
|
||||
|
||||
type Socket struct {
|
||||
|
|
|
|||
|
|
@ -14,7 +14,7 @@ import (
|
|||
as "github.com/aerospike/aerospike-client-go/v5"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
tlsint "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
|
|
@ -30,7 +30,7 @@ type Aerospike struct {
|
|||
EnableTLS bool `toml:"enable_tls"`
|
||||
EnableSSL bool `toml:"enable_ssl" deprecated:"1.7.0;1.35.0;use 'enable_tls' instead"`
|
||||
TLSName string `toml:"tls_name"`
|
||||
tlsint.ClientConfig
|
||||
common_tls.ClientConfig
|
||||
|
||||
initialized bool
|
||||
tlsConfig *tls.Config
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
apcupsdClient "github.com/mdlayher/apcupsd"
|
||||
"github.com/mdlayher/apcupsd"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
|
|
@ -97,8 +97,8 @@ func (h *ApcUpsd) Gather(acc telegraf.Accumulator) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func fetchStatus(ctx context.Context, addr *url.URL) (*apcupsdClient.Status, error) {
|
||||
client, err := apcupsdClient.DialContext(ctx, addr.Scheme, addr.Host)
|
||||
func fetchStatus(ctx context.Context, addr *url.URL) (*apcupsd.Status, error) {
|
||||
client, err := apcupsd.DialContext(ctx, addr.Scheme, addr.Host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,10 +4,11 @@ package azure_monitor
|
|||
import (
|
||||
_ "embed"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
|
||||
"sync"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
|
|
|
|||
|
|
@ -5,11 +5,11 @@ import (
|
|||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/monitor/armmonitor"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
|
||||
"github.com/influxdata/toml"
|
||||
|
|
|
|||
|
|
@ -14,7 +14,7 @@ import (
|
|||
"github.com/influxdata/telegraf/internal/choice"
|
||||
"github.com/influxdata/telegraf/plugins/common/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
|
||||
parsers_json "github.com/influxdata/telegraf/plugins/parsers/json"
|
||||
)
|
||||
|
||||
//go:embed sample.conf
|
||||
|
|
@ -180,7 +180,7 @@ func (beat *Beat) Gather(accumulator telegraf.Accumulator) error {
|
|||
default:
|
||||
return fmt.Errorf("unknown stats-type %q", name)
|
||||
}
|
||||
flattener := jsonparser.JSONFlattener{}
|
||||
flattener := parsers_json.JSONFlattener{}
|
||||
err := flattener.FullFlattenJSON("", stats, true, true)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout"
|
||||
mdtdialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout"
|
||||
telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
|
|
@ -28,7 +28,7 @@ import (
|
|||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/metric"
|
||||
internaltls "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
|
|
@ -65,7 +65,7 @@ type CiscoTelemetryMDT struct {
|
|||
Log telegraf.Logger
|
||||
|
||||
// GRPC TLS settings
|
||||
internaltls.ServerConfig
|
||||
common_tls.ServerConfig
|
||||
|
||||
// Internal listener / client handle
|
||||
grpcServer *grpc.Server
|
||||
|
|
@ -83,7 +83,7 @@ type CiscoTelemetryMDT struct {
|
|||
wg sync.WaitGroup
|
||||
|
||||
// Though unused in the code, required by protoc-gen-go-grpc to maintain compatibility
|
||||
dialout.UnimplementedGRPCMdtDialoutServer
|
||||
mdtdialout.UnimplementedGRPCMdtDialoutServer
|
||||
}
|
||||
|
||||
type NxPayloadXfromStructure struct {
|
||||
|
|
@ -200,7 +200,7 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error {
|
|||
}
|
||||
|
||||
c.grpcServer = grpc.NewServer(opts...)
|
||||
dialout.RegisterGRPCMdtDialoutServer(c.grpcServer, c)
|
||||
mdtdialout.RegisterGRPCMdtDialoutServer(c.grpcServer, c)
|
||||
|
||||
c.wg.Add(1)
|
||||
go func() {
|
||||
|
|
@ -312,7 +312,7 @@ func (c *CiscoTelemetryMDT) handleTCPClient(conn net.Conn) error {
|
|||
}
|
||||
|
||||
// MdtDialout RPC server method for grpc-dialout transport
|
||||
func (c *CiscoTelemetryMDT) MdtDialout(stream dialout.GRPCMdtDialout_MdtDialoutServer) error {
|
||||
func (c *CiscoTelemetryMDT) MdtDialout(stream mdtdialout.GRPCMdtDialout_MdtDialoutServer) error {
|
||||
peerInCtx, peerOK := peer.FromContext(stream.Context())
|
||||
if peerOK {
|
||||
c.Log.Debugf("Accepted Cisco MDT GRPC dialout connection from %s", peerInCtx.Addr)
|
||||
|
|
|
|||
|
|
@ -10,8 +10,8 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout"
|
||||
telemetryBis "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis"
|
||||
mdtdialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout"
|
||||
telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
|
|
@ -36,55 +36,55 @@ func TestHandleTelemetryTwoSimple(t *testing.T) {
|
|||
// error is expected since we are passing in dummy transport
|
||||
require.Error(t, err)
|
||||
|
||||
telemetry := &telemetryBis.Telemetry{
|
||||
tel := &telemetry.Telemetry{
|
||||
MsgTimestamp: 1543236572000,
|
||||
EncodingPath: "type:model/some/path",
|
||||
NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"},
|
||||
Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"},
|
||||
DataGpbkv: []*telemetryBis.TelemetryField{
|
||||
NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"},
|
||||
Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"},
|
||||
DataGpbkv: []*telemetry.TelemetryField{
|
||||
{
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "keys",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "name",
|
||||
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "str"},
|
||||
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str"},
|
||||
},
|
||||
{
|
||||
Name: "uint64",
|
||||
ValueByType: &telemetryBis.TelemetryField_Uint64Value{Uint64Value: 1234},
|
||||
ValueByType: &telemetry.TelemetryField_Uint64Value{Uint64Value: 1234},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "content",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "bool",
|
||||
ValueByType: &telemetryBis.TelemetryField_BoolValue{BoolValue: true},
|
||||
ValueByType: &telemetry.TelemetryField_BoolValue{BoolValue: true},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "keys",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "name",
|
||||
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "str2"},
|
||||
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "content",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "bool",
|
||||
ValueByType: &telemetryBis.TelemetryField_BoolValue{BoolValue: false},
|
||||
ValueByType: &telemetry.TelemetryField_BoolValue{BoolValue: false},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -92,7 +92,7 @@ func TestHandleTelemetryTwoSimple(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
data, err := proto.Marshal(telemetry)
|
||||
data, err := proto.Marshal(tel)
|
||||
require.NoError(t, err)
|
||||
|
||||
c.handleTelemetry(data)
|
||||
|
|
@ -142,55 +142,55 @@ func TestIncludeDeleteField(t *testing.T) {
|
|||
stateKey := "state"
|
||||
|
||||
testCases := []struct {
|
||||
telemetry *telemetryBis.Telemetry
|
||||
telemetry *telemetry.Telemetry
|
||||
expected []telegraf.Metric
|
||||
}{{
|
||||
telemetry: &telemetryBis.Telemetry{
|
||||
telemetry: &telemetry.Telemetry{
|
||||
MsgTimestamp: 1543236572000,
|
||||
EncodingPath: encodingPath.stringValue,
|
||||
NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: source.stringValue},
|
||||
Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: subscription.stringValue},
|
||||
DataGpbkv: []*telemetryBis.TelemetryField{
|
||||
NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: source.stringValue},
|
||||
Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: subscription.stringValue},
|
||||
DataGpbkv: []*telemetry.TelemetryField{
|
||||
{
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "keys",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: name.name,
|
||||
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: name.stringValue},
|
||||
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: name.stringValue},
|
||||
},
|
||||
{
|
||||
Name: index.name,
|
||||
ValueByType: &telemetryBis.TelemetryField_Uint32Value{Uint32Value: index.uint32Value},
|
||||
ValueByType: &telemetry.TelemetryField_Uint32Value{Uint32Value: index.uint32Value},
|
||||
},
|
||||
{
|
||||
Name: ip.name,
|
||||
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: ip.stringValue},
|
||||
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: ip.stringValue},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "content",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: stateKey,
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: ip.name,
|
||||
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: ip.stringValue},
|
||||
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: ip.stringValue},
|
||||
},
|
||||
{
|
||||
Name: prefixLength.name,
|
||||
ValueByType: &telemetryBis.TelemetryField_Uint32Value{Uint32Value: prefixLength.uint32Value},
|
||||
ValueByType: &telemetry.TelemetryField_Uint32Value{Uint32Value: prefixLength.uint32Value},
|
||||
},
|
||||
{
|
||||
Name: origin.name,
|
||||
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: origin.stringValue},
|
||||
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: origin.stringValue},
|
||||
},
|
||||
{
|
||||
Name: status.name,
|
||||
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: status.stringValue},
|
||||
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: status.stringValue},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -222,29 +222,29 @@ func TestIncludeDeleteField(t *testing.T) {
|
|||
)},
|
||||
},
|
||||
{
|
||||
telemetry: &telemetryBis.Telemetry{
|
||||
telemetry: &telemetry.Telemetry{
|
||||
MsgTimestamp: 1543236572000,
|
||||
EncodingPath: encodingPath.stringValue,
|
||||
NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: source.stringValue},
|
||||
Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: subscription.stringValue},
|
||||
DataGpbkv: []*telemetryBis.TelemetryField{
|
||||
NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: source.stringValue},
|
||||
Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: subscription.stringValue},
|
||||
DataGpbkv: []*telemetry.TelemetryField{
|
||||
{
|
||||
Delete: true,
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "keys",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: name.name,
|
||||
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: name.stringValue},
|
||||
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: name.stringValue},
|
||||
},
|
||||
{
|
||||
Name: index.name,
|
||||
ValueByType: &telemetryBis.TelemetryField_Uint32Value{Uint32Value: index.uint32Value},
|
||||
ValueByType: &telemetry.TelemetryField_Uint32Value{Uint32Value: index.uint32Value},
|
||||
},
|
||||
{
|
||||
Name: ip.name,
|
||||
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: ip.stringValue},
|
||||
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: ip.stringValue},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -299,26 +299,26 @@ func TestHandleTelemetrySingleNested(t *testing.T) {
|
|||
// error is expected since we are passing in dummy transport
|
||||
require.Error(t, err)
|
||||
|
||||
telemetry := &telemetryBis.Telemetry{
|
||||
tel := &telemetry.Telemetry{
|
||||
MsgTimestamp: 1543236572000,
|
||||
EncodingPath: "type:model/nested/path",
|
||||
NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"},
|
||||
Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"},
|
||||
DataGpbkv: []*telemetryBis.TelemetryField{
|
||||
NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"},
|
||||
Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"},
|
||||
DataGpbkv: []*telemetry.TelemetryField{
|
||||
{
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "keys",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "nested",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "key",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "level",
|
||||
ValueByType: &telemetryBis.TelemetryField_DoubleValue{DoubleValue: 3},
|
||||
ValueByType: &telemetry.TelemetryField_DoubleValue{DoubleValue: 3},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -328,16 +328,16 @@ func TestHandleTelemetrySingleNested(t *testing.T) {
|
|||
},
|
||||
{
|
||||
Name: "content",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "nested",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "value",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "foo",
|
||||
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"},
|
||||
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -349,7 +349,7 @@ func TestHandleTelemetrySingleNested(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
data, err := proto.Marshal(telemetry)
|
||||
data, err := proto.Marshal(tel)
|
||||
require.NoError(t, err)
|
||||
|
||||
c.handleTelemetry(data)
|
||||
|
|
@ -376,49 +376,49 @@ func TestHandleEmbeddedTags(t *testing.T) {
|
|||
// error is expected since we are passing in dummy transport
|
||||
require.Error(t, err)
|
||||
|
||||
telemetry := &telemetryBis.Telemetry{
|
||||
tel := &telemetry.Telemetry{
|
||||
MsgTimestamp: 1543236572000,
|
||||
EncodingPath: "type:model/extra",
|
||||
NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"},
|
||||
Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"},
|
||||
DataGpbkv: []*telemetryBis.TelemetryField{
|
||||
NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"},
|
||||
Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"},
|
||||
DataGpbkv: []*telemetry.TelemetryField{
|
||||
{
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "keys",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "foo",
|
||||
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"},
|
||||
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "content",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "list",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "name",
|
||||
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "entry1"},
|
||||
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "entry1"},
|
||||
},
|
||||
{
|
||||
Name: "test",
|
||||
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "foo"},
|
||||
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "list",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "name",
|
||||
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "entry2"},
|
||||
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "entry2"},
|
||||
},
|
||||
{
|
||||
Name: "test",
|
||||
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"},
|
||||
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -428,7 +428,7 @@ func TestHandleEmbeddedTags(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
data, err := proto.Marshal(telemetry)
|
||||
data, err := proto.Marshal(tel)
|
||||
require.NoError(t, err)
|
||||
|
||||
c.handleTelemetry(data)
|
||||
|
|
@ -464,57 +464,57 @@ func TestHandleNXAPI(t *testing.T) {
|
|||
// error is expected since we are passing in dummy transport
|
||||
require.Error(t, err)
|
||||
|
||||
telemetry := &telemetryBis.Telemetry{
|
||||
tel := &telemetry.Telemetry{
|
||||
MsgTimestamp: 1543236572000,
|
||||
EncodingPath: "show nxapi",
|
||||
NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"},
|
||||
Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"},
|
||||
DataGpbkv: []*telemetryBis.TelemetryField{
|
||||
NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"},
|
||||
Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"},
|
||||
DataGpbkv: []*telemetry.TelemetryField{
|
||||
{
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "keys",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "foo",
|
||||
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"},
|
||||
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "content",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "TABLE_nxapi",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "ROW_nxapi",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "index",
|
||||
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "i1"},
|
||||
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "i1"},
|
||||
},
|
||||
{
|
||||
Name: "value",
|
||||
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "foo"},
|
||||
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "index",
|
||||
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "i2"},
|
||||
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "i2"},
|
||||
},
|
||||
{
|
||||
Name: "value",
|
||||
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"},
|
||||
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -532,7 +532,7 @@ func TestHandleNXAPI(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
data, err := proto.Marshal(telemetry)
|
||||
data, err := proto.Marshal(tel)
|
||||
require.NoError(t, err)
|
||||
|
||||
c.handleTelemetry(data)
|
||||
|
|
@ -571,45 +571,45 @@ func TestHandleNXAPIXformNXAPI(t *testing.T) {
|
|||
// error is expected since we are passing in dummy transport
|
||||
require.Error(t, err)
|
||||
|
||||
telemetry := &telemetryBis.Telemetry{
|
||||
tel := &telemetry.Telemetry{
|
||||
MsgTimestamp: 1543236572000,
|
||||
EncodingPath: "show processes cpu",
|
||||
NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"},
|
||||
Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"},
|
||||
DataGpbkv: []*telemetryBis.TelemetryField{
|
||||
NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"},
|
||||
Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"},
|
||||
DataGpbkv: []*telemetry.TelemetryField{
|
||||
{
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "keys",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "foo",
|
||||
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"},
|
||||
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "content",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "TABLE_process_cpu",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "ROW_process_cpu",
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Fields: []*telemetryBis.TelemetryField{
|
||||
Fields: []*telemetry.TelemetryField{
|
||||
{
|
||||
Name: "index",
|
||||
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "i1"},
|
||||
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "i1"},
|
||||
},
|
||||
{
|
||||
Name: "value",
|
||||
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "foo"},
|
||||
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -627,7 +627,7 @@ func TestHandleNXAPIXformNXAPI(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
data, err := proto.Marshal(telemetry)
|
||||
data, err := proto.Marshal(tel)
|
||||
require.NoError(t, err)
|
||||
|
||||
c.handleTelemetry(data)
|
||||
|
|
@ -655,57 +655,57 @@ func TestHandleNXXformMulti(t *testing.T) {
// error is expected since we are passing in dummy transport
require.Error(t, err)

telemetry := &telemetryBis.Telemetry{
tel := &telemetry.Telemetry{
MsgTimestamp: 1543236572000,
EncodingPath: "sys/lldp",
NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"},
Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"},
DataGpbkv: []*telemetryBis.TelemetryField{
NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"},
Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"},
DataGpbkv: []*telemetry.TelemetryField{
{
Fields: []*telemetryBis.TelemetryField{
Fields: []*telemetry.TelemetryField{
{
Name: "keys",
Fields: []*telemetryBis.TelemetryField{
Fields: []*telemetry.TelemetryField{
{
Name: "foo",
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"},
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"},
},
},
},
{
Name: "content",
Fields: []*telemetryBis.TelemetryField{
Fields: []*telemetry.TelemetryField{
{
Fields: []*telemetryBis.TelemetryField{
Fields: []*telemetry.TelemetryField{
{
Name: "fooEntity",
Fields: []*telemetryBis.TelemetryField{
Fields: []*telemetry.TelemetryField{
{
Fields: []*telemetryBis.TelemetryField{
Fields: []*telemetry.TelemetryField{
{
Name: "attributes",
Fields: []*telemetryBis.TelemetryField{
Fields: []*telemetry.TelemetryField{
{
Fields: []*telemetryBis.TelemetryField{
Fields: []*telemetry.TelemetryField{
{
Name: "rn",
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "some-rn"},
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "some-rn"},
},
{
Name: "portIdV",
ValueByType: &telemetryBis.TelemetryField_Uint32Value{Uint32Value: 12},
ValueByType: &telemetry.TelemetryField_Uint32Value{Uint32Value: 12},
},
{
Name: "portDesc",
ValueByType: &telemetryBis.TelemetryField_Uint64Value{Uint64Value: 100},
ValueByType: &telemetry.TelemetryField_Uint64Value{Uint64Value: 100},
},
{
Name: "test",
ValueByType: &telemetryBis.TelemetryField_Uint64Value{Uint64Value: 281474976710655},
ValueByType: &telemetry.TelemetryField_Uint64Value{Uint64Value: 281474976710655},
},
{
Name: "subscriptionId",
ValueByType: &telemetryBis.TelemetryField_Uint64Value{Uint64Value: 2814749767106551},
ValueByType: &telemetry.TelemetryField_Uint64Value{Uint64Value: 2814749767106551},
},
},
},

@ -723,7 +723,7 @@ func TestHandleNXXformMulti(t *testing.T) {
},
},
}
data, err := proto.Marshal(telemetry)
data, err := proto.Marshal(tel)
require.NoError(t, err)

c.handleTelemetry(data)
@ -748,45 +748,45 @@ func TestHandleNXDME(t *testing.T) {
// error is expected since we are passing in dummy transport
require.Error(t, err)

telemetry := &telemetryBis.Telemetry{
tel := &telemetry.Telemetry{
MsgTimestamp: 1543236572000,
EncodingPath: "sys/dme",
NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"},
Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"},
DataGpbkv: []*telemetryBis.TelemetryField{
NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"},
Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"},
DataGpbkv: []*telemetry.TelemetryField{
{
Fields: []*telemetryBis.TelemetryField{
Fields: []*telemetry.TelemetryField{
{
Name: "keys",
Fields: []*telemetryBis.TelemetryField{
Fields: []*telemetry.TelemetryField{
{
Name: "foo",
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"},
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"},
},
},
},
{
Name: "content",
Fields: []*telemetryBis.TelemetryField{
Fields: []*telemetry.TelemetryField{
{
Fields: []*telemetryBis.TelemetryField{
Fields: []*telemetry.TelemetryField{
{
Name: "fooEntity",
Fields: []*telemetryBis.TelemetryField{
Fields: []*telemetry.TelemetryField{
{
Fields: []*telemetryBis.TelemetryField{
Fields: []*telemetry.TelemetryField{
{
Name: "attributes",
Fields: []*telemetryBis.TelemetryField{
Fields: []*telemetry.TelemetryField{
{
Fields: []*telemetryBis.TelemetryField{
Fields: []*telemetry.TelemetryField{
{
Name: "rn",
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "some-rn"},
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "some-rn"},
},
{
Name: "value",
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "foo"},
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"},
},
},
},

@ -804,7 +804,7 @@ func TestHandleNXDME(t *testing.T) {
},
},
}
data, err := proto.Marshal(telemetry)
data, err := proto.Marshal(tel)
require.NoError(t, err)

c.handleTelemetry(data)
@ -852,13 +852,13 @@ func TestTCPDialoutOverflow(t *testing.T) {
require.Contains(t, acc.Errors, errors.New("dialout packet too long: 1000000000"))
}

func mockTelemetryMicroburstMessage() *telemetryBis.Telemetry {
func mockTelemetryMicroburstMessage() *telemetry.Telemetry {
data, err := os.ReadFile("./testdata/microburst")
if err != nil {
panic(err)
}

newMessage := &telemetryBis.Telemetry{}
newMessage := &telemetry.Telemetry{}
err = proto.Unmarshal(data, newMessage)
if err != nil {
panic(err)

@ -866,30 +866,30 @@ func mockTelemetryMicroburstMessage() *telemetryBis.Telemetry {
return newMessage
}

func mockTelemetryMessage() *telemetryBis.Telemetry {
return &telemetryBis.Telemetry{
func mockTelemetryMessage() *telemetry.Telemetry {
return &telemetry.Telemetry{
MsgTimestamp: 1543236572000,
EncodingPath: "type:model/some/path",
NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"},
Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"},
DataGpbkv: []*telemetryBis.TelemetryField{
NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"},
Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"},
DataGpbkv: []*telemetry.TelemetryField{
{
Fields: []*telemetryBis.TelemetryField{
Fields: []*telemetry.TelemetryField{
{
Name: "keys",
Fields: []*telemetryBis.TelemetryField{
Fields: []*telemetry.TelemetryField{
{
Name: "name",
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "str"},
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str"},
},
},
},
{
Name: "content",
Fields: []*telemetryBis.TelemetryField{
Fields: []*telemetry.TelemetryField{
{
Name: "value",
ValueByType: &telemetryBis.TelemetryField_Sint64Value{Sint64Value: -1},
ValueByType: &telemetry.TelemetryField_Sint64Value{Sint64Value: -1},
},
},
},
@ -914,8 +914,8 @@ func TestGRPCDialoutMicroburst(t *testing.T) {
err := c.Start(acc)
require.NoError(t, err)

telemetry := mockTelemetryMicroburstMessage()
data, err := proto.Marshal(telemetry)
tel := mockTelemetryMicroburstMessage()
data, err := proto.Marshal(tel)
require.NoError(t, err)

c.handleTelemetry(data)

@ -954,7 +954,7 @@ func TestTCPDialoutMultiple(t *testing.T) {
err := c.Start(acc)
require.NoError(t, err)

telemetry := mockTelemetryMessage()
tel := mockTelemetryMessage()

hdr := struct {
MsgType uint16

@ -968,7 +968,7 @@ func TestTCPDialoutMultiple(t *testing.T) {
conn, err := net.Dial(addr.Network(), addr.String())
require.NoError(t, err)

data, err := proto.Marshal(telemetry)
data, err := proto.Marshal(tel)
require.NoError(t, err)
hdr.MsgLen = uint32(len(data))
require.NoError(t, binary.Write(conn, binary.BigEndian, hdr))

@ -978,8 +978,8 @@ func TestTCPDialoutMultiple(t *testing.T) {
conn2, err := net.Dial(addr.Network(), addr.String())
require.NoError(t, err)

telemetry.EncodingPath = "type:model/parallel/path"
data, err = proto.Marshal(telemetry)
tel.EncodingPath = "type:model/parallel/path"
data, err = proto.Marshal(tel)
require.NoError(t, err)
hdr.MsgLen = uint32(len(data))
require.NoError(t, binary.Write(conn2, binary.BigEndian, hdr))
@ -991,8 +991,8 @@ func TestTCPDialoutMultiple(t *testing.T) {
require.True(t, err == nil || errors.Is(err, io.EOF))
require.NoError(t, conn2.Close())

telemetry.EncodingPath = "type:model/other/path"
data, err = proto.Marshal(telemetry)
tel.EncodingPath = "type:model/other/path"
data, err = proto.Marshal(tel)
require.NoError(t, err)
hdr.MsgLen = uint32(len(data))
require.NoError(t, binary.Write(conn, binary.BigEndian, hdr))

@ -1049,11 +1049,11 @@ func TestGRPCDialoutError(t *testing.T) {
addr := c.Address()
conn, err := grpc.NewClient(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials()))
require.NoError(t, err)
client := dialout.NewGRPCMdtDialoutClient(conn)
client := mdtdialout.NewGRPCMdtDialoutClient(conn)
stream, err := client.MdtDialout(context.Background())
require.NoError(t, err)

args := &dialout.MdtDialoutArgs{Errors: "foobar"}
args := &mdtdialout.MdtDialoutArgs{Errors: "foobar"}
require.NoError(t, stream.Send(args))

// Wait for the server to close
@ -1078,44 +1078,44 @@ func TestGRPCDialoutMultiple(t *testing.T) {
acc := &testutil.Accumulator{}
err := c.Start(acc)
require.NoError(t, err)
telemetry := mockTelemetryMessage()
tel := mockTelemetryMessage()

addr := c.Address()
conn, err := grpc.NewClient(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials()))
require.NoError(t, err)
require.True(t, conn.WaitForStateChange(context.Background(), connectivity.Connecting))
client := dialout.NewGRPCMdtDialoutClient(conn)
client := mdtdialout.NewGRPCMdtDialoutClient(conn)
stream, err := client.MdtDialout(context.TODO())
require.NoError(t, err)

data, err := proto.Marshal(telemetry)
data, err := proto.Marshal(tel)
require.NoError(t, err)
args := &dialout.MdtDialoutArgs{Data: data, ReqId: 456}
args := &mdtdialout.MdtDialoutArgs{Data: data, ReqId: 456}
require.NoError(t, stream.Send(args))

conn2, err := grpc.NewClient(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials()))
require.NoError(t, err)
require.True(t, conn.WaitForStateChange(context.Background(), connectivity.Connecting))
client2 := dialout.NewGRPCMdtDialoutClient(conn2)
client2 := mdtdialout.NewGRPCMdtDialoutClient(conn2)
stream2, err := client2.MdtDialout(context.TODO())
require.NoError(t, err)

telemetry.EncodingPath = "type:model/parallel/path"
data, err = proto.Marshal(telemetry)
tel.EncodingPath = "type:model/parallel/path"
data, err = proto.Marshal(tel)
require.NoError(t, err)
args = &dialout.MdtDialoutArgs{Data: data}
args = &mdtdialout.MdtDialoutArgs{Data: data}
require.NoError(t, stream2.Send(args))
require.NoError(t, stream2.Send(&dialout.MdtDialoutArgs{Errors: "testclose"}))
require.NoError(t, stream2.Send(&mdtdialout.MdtDialoutArgs{Errors: "testclose"}))
_, err = stream2.Recv()
require.True(t, err == nil || errors.Is(err, io.EOF))
require.NoError(t, conn2.Close())

telemetry.EncodingPath = "type:model/other/path"
data, err = proto.Marshal(telemetry)
tel.EncodingPath = "type:model/other/path"
data, err = proto.Marshal(tel)
require.NoError(t, err)
args = &dialout.MdtDialoutArgs{Data: data}
args = &mdtdialout.MdtDialoutArgs{Data: data}
require.NoError(t, stream.Send(args))
require.NoError(t, stream.Send(&dialout.MdtDialoutArgs{Errors: "testclose"}))
require.NoError(t, stream.Send(&mdtdialout.MdtDialoutArgs{Errors: "testclose"}))
_, err = stream.Recv()
require.True(t, err == nil || errors.Is(err, io.EOF))
@ -1169,14 +1169,14 @@ func TestGRPCDialoutKeepalive(t *testing.T) {
addr := c.Address()
conn, err := grpc.NewClient(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials()))
require.NoError(t, err)
client := dialout.NewGRPCMdtDialoutClient(conn)
client := mdtdialout.NewGRPCMdtDialoutClient(conn)
stream, err := client.MdtDialout(context.Background())
require.NoError(t, err)

telemetry := mockTelemetryMessage()
data, err := proto.Marshal(telemetry)
tel := mockTelemetryMessage()
data, err := proto.Marshal(tel)
require.NoError(t, err)
args := &dialout.MdtDialoutArgs{Data: data, ReqId: 456}
args := &mdtdialout.MdtDialoutArgs{Data: data, ReqId: 456}
require.NoError(t, stream.Send(args))

c.Stop()

@ -1195,29 +1195,29 @@ func TestSourceFieldRewrite(t *testing.T) {
// error is expected since we are passing in dummy transport
require.Error(t, err)

telemetry := &telemetryBis.Telemetry{
tel := &telemetry.Telemetry{
MsgTimestamp: 1543236572000,
EncodingPath: "type:model/some/path",
NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"},
Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"},
DataGpbkv: []*telemetryBis.TelemetryField{
NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"},
Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"},
DataGpbkv: []*telemetry.TelemetryField{
{
Fields: []*telemetryBis.TelemetryField{
Fields: []*telemetry.TelemetryField{
{
Name: "keys",
Fields: []*telemetryBis.TelemetryField{
Fields: []*telemetry.TelemetryField{
{
Name: "source",
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "str"},
ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str"},
},
},
},
{
Name: "content",
Fields: []*telemetryBis.TelemetryField{
Fields: []*telemetry.TelemetryField{
{
Name: "bool",
ValueByType: &telemetryBis.TelemetryField_BoolValue{BoolValue: false},
ValueByType: &telemetry.TelemetryField_BoolValue{BoolValue: false},
},
},
},

@ -1225,7 +1225,7 @@ func TestSourceFieldRewrite(t *testing.T) {
},
},
}
data, err := proto.Marshal(telemetry)
data, err := proto.Marshal(tel)
require.NoError(t, err)

c.handleTelemetry(data)
@ -1,9 +1,10 @@
package cloud_pubsub

import (
"cloud.google.com/go/pubsub"
"context"
"time"

"cloud.google.com/go/pubsub"
)

type (

@ -15,7 +15,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/internal"
tlsint "github.com/influxdata/telegraf/plugins/common/tls"
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)

@ -42,7 +42,7 @@ type PubSubPush struct {

MaxUndeliveredMessages int `toml:"max_undelivered_messages"`

tlsint.ServerConfig
common_tls.ServerConfig
telegraf.Parser

server *http.Server
@ -14,7 +14,7 @@ import (
"time"

"github.com/aws/aws-sdk-go-v2/aws"
cwClient "github.com/aws/aws-sdk-go-v2/service/cloudwatch"
"github.com/aws/aws-sdk-go-v2/service/cloudwatch"
"github.com/aws/aws-sdk-go-v2/service/cloudwatch/types"

"github.com/influxdata/telegraf"

@ -22,9 +22,9 @@ import (
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/limiter"
internalMetric "github.com/influxdata/telegraf/metric"
internalaws "github.com/influxdata/telegraf/plugins/common/aws"
internalProxy "github.com/influxdata/telegraf/plugins/common/proxy"
"github.com/influxdata/telegraf/metric"
common_aws "github.com/influxdata/telegraf/plugins/common/aws"
"github.com/influxdata/telegraf/plugins/common/proxy"
"github.com/influxdata/telegraf/plugins/inputs"
)

@ -37,7 +37,7 @@ type CloudWatch struct {
StatisticInclude []string `toml:"statistic_include"`
Timeout config.Duration `toml:"timeout"`

internalProxy.HTTPProxy
proxy.HTTPProxy

Period config.Duration `toml:"period"`
Delay config.Duration `toml:"delay"`

@ -59,7 +59,7 @@ type CloudWatch struct {
windowStart time.Time
windowEnd time.Time

internalaws.CredentialConfig
common_aws.CredentialConfig
}

// Metric defines a simplified Cloudwatch metric.

@ -86,8 +86,8 @@ type metricCache struct {
}

type cloudwatchClient interface {
ListMetrics(context.Context, *cwClient.ListMetricsInput, ...func(*cwClient.Options)) (*cwClient.ListMetricsOutput, error)
GetMetricData(context.Context, *cwClient.GetMetricDataInput, ...func(*cwClient.Options)) (*cwClient.GetMetricDataOutput, error)
ListMetrics(context.Context, *cloudwatch.ListMetricsInput, ...func(*cloudwatch.Options)) (*cloudwatch.ListMetricsOutput, error)
GetMetricData(context.Context, *cloudwatch.GetMetricDataInput, ...func(*cloudwatch.Options)) (*cloudwatch.GetMetricDataOutput, error)
}

func (*CloudWatch) SampleConfig() string {

@ -178,7 +178,7 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
}

func (c *CloudWatch) initializeCloudWatch() error {
proxy, err := c.HTTPProxy.Proxy()
proxyFunc, err := c.HTTPProxy.Proxy()
if err != nil {
return err
}

@ -188,7 +188,7 @@ func (c *CloudWatch) initializeCloudWatch() error {
return err
}

c.client = cwClient.NewFromConfig(awsCreds, func(options *cwClient.Options) {
c.client = cloudwatch.NewFromConfig(awsCreds, func(options *cloudwatch.Options) {
if c.CredentialConfig.EndpointURL != "" && c.CredentialConfig.Region != "" {
options.BaseEndpoint = &c.CredentialConfig.EndpointURL
}

@ -197,7 +197,7 @@ func (c *CloudWatch) initializeCloudWatch() error {
options.HTTPClient = &http.Client{
// use values from DefaultTransport
Transport: &http.Transport{
Proxy: proxy,
Proxy: proxyFunc,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,

@ -271,13 +271,13 @@ func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) {
allMetrics, allAccounts := c.fetchNamespaceMetrics()

for _, name := range m.MetricNames {
for i, metric := range allMetrics {
if isSelected(name, metric, m.Dimensions) {
for i, singleMetric := range allMetrics {
if isSelected(name, singleMetric, m.Dimensions) {
for _, namespace := range c.Namespaces {
metrics = append(metrics, types.Metric{
Namespace: aws.String(namespace),
MetricName: aws.String(name),
Dimensions: metric.Dimensions,
Dimensions: singleMetric.Dimensions,
})
}
if c.IncludeLinkedAccounts {

@ -327,7 +327,7 @@ func (c *CloudWatch) fetchNamespaceMetrics() ([]types.Metric, []string) {
metrics := []types.Metric{}
var accounts []string
for _, namespace := range c.Namespaces {
params := &cwClient.ListMetricsInput{
params := &cloudwatch.ListMetricsInput{
Dimensions: []types.DimensionFilter{},
Namespace: aws.String(namespace),
IncludeLinkedAccounts: &c.IncludeLinkedAccounts,

@ -379,9 +379,9 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) map[string

dataQueries := map[string][]types.MetricDataQuery{}
for i, filtered := range filteredMetrics {
for j, metric := range filtered.metrics {
for j, singleMetric := range filtered.metrics {
id := strconv.Itoa(j) + "_" + strconv.Itoa(i)
dimension := ctod(metric.Dimensions)
dimension := ctod(singleMetric.Dimensions)
var accountID *string
if c.IncludeLinkedAccounts && len(filtered.accounts) > j {
accountID = aws.String(filtered.accounts[j])

@ -402,10 +402,10 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) map[string
}
queryID := statisticType + "_" + id
c.queryDimensions[queryID] = dimension
dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], types.MetricDataQuery{
dataQueries[*singleMetric.Namespace] = append(dataQueries[*singleMetric.Namespace], types.MetricDataQuery{
Id: aws.String(queryID),
AccountId: accountID,
Label: aws.String(snakeCase(*metric.MetricName + "_" + statisticType)),
Label: aws.String(snakeCase(*singleMetric.MetricName + "_" + statisticType)),
MetricStat: &types.MetricStat{
Metric: &filtered.metrics[j],
Period: aws.Int32(int32(time.Duration(c.Period).Seconds())),

@ -436,7 +436,7 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) map[string

// gatherMetrics gets metric data from Cloudwatch.
func (c *CloudWatch) gatherMetrics(
params *cwClient.GetMetricDataInput,
params *cloudwatch.GetMetricDataInput,
) ([]types.MetricDataResult, error) {
results := []types.MetricDataResult{}

@ -457,7 +457,7 @@ func (c *CloudWatch) gatherMetrics(
}

func (c *CloudWatch) aggregateMetrics(acc telegraf.Accumulator, metricDataResults map[string][]types.MetricDataResult) {
grouper := internalMetric.NewSeriesGrouper()
grouper := metric.NewSeriesGrouper()
for namespace, results := range metricDataResults {
namespace = sanitizeMeasurement(namespace)

@ -489,8 +489,8 @@ func (c *CloudWatch) aggregateMetrics(acc telegraf.Accumulator, metricDataResult
}
}

for _, metric := range grouper.Metrics() {
acc.AddMetric(metric)
for _, singleMetric := range grouper.Metrics() {
acc.AddMetric(singleMetric)
}
}

@ -532,8 +532,8 @@ func ctod(cDimensions []types.Dimension) *map[string]string {
return &dimensions
}

func (c *CloudWatch) getDataInputs(dataQueries []types.MetricDataQuery) *cwClient.GetMetricDataInput {
return &cwClient.GetMetricDataInput{
func (c *CloudWatch) getDataInputs(dataQueries []types.MetricDataQuery) *cloudwatch.GetMetricDataInput {
return &cloudwatch.GetMetricDataInput{
StartTime: aws.Time(c.windowStart),
EndTime: aws.Time(c.windowEnd),
MetricDataQueries: dataQueries,

@ -554,16 +554,16 @@ func hasWildcard(dimensions []*Dimension) bool {
return false
}

func isSelected(name string, metric types.Metric, dimensions []*Dimension) bool {
if name != *metric.MetricName {
func isSelected(name string, cloudwatchMetric types.Metric, dimensions []*Dimension) bool {
if name != *cloudwatchMetric.MetricName {
return false
}
if len(metric.Dimensions) != len(dimensions) {
if len(cloudwatchMetric.Dimensions) != len(dimensions) {
return false
}
for _, d := range dimensions {
selected := false
for _, d2 := range metric.Dimensions {
for _, d2 := range cloudwatchMetric.Dimensions {
if d.Name == *d2.Name {
if d.Value == "" || d.valueMatcher.Match(*d2.Value) {
selected = true
@ -8,13 +8,13 @@ import (
"time"

"github.com/aws/aws-sdk-go-v2/aws"
cwClient "github.com/aws/aws-sdk-go-v2/service/cloudwatch"
"github.com/aws/aws-sdk-go-v2/service/cloudwatch"
"github.com/aws/aws-sdk-go-v2/service/cloudwatch/types"
"github.com/stretchr/testify/require"

"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/filter"
internalaws "github.com/influxdata/telegraf/plugins/common/aws"
common_aws "github.com/influxdata/telegraf/plugins/common/aws"
"github.com/influxdata/telegraf/plugins/common/proxy"
"github.com/influxdata/telegraf/testutil"
)

@ -23,10 +23,10 @@ type mockGatherCloudWatchClient struct{}

func (m *mockGatherCloudWatchClient) ListMetrics(
_ context.Context,
params *cwClient.ListMetricsInput,
_ ...func(*cwClient.Options),
) (*cwClient.ListMetricsOutput, error) {
response := &cwClient.ListMetricsOutput{
params *cloudwatch.ListMetricsInput,
_ ...func(*cloudwatch.Options),
) (*cloudwatch.ListMetricsOutput, error) {
response := &cloudwatch.ListMetricsOutput{
Metrics: []types.Metric{
{
Namespace: params.Namespace,

@ -58,10 +58,10 @@ func (m *mockGatherCloudWatchClient) ListMetrics(

func (m *mockGatherCloudWatchClient) GetMetricData(
_ context.Context,
params *cwClient.GetMetricDataInput,
_ ...func(*cwClient.Options),
) (*cwClient.GetMetricDataOutput, error) {
return &cwClient.GetMetricDataOutput{
params *cloudwatch.GetMetricDataInput,
_ ...func(*cloudwatch.Options),
) (*cloudwatch.GetMetricDataOutput, error) {
return &cloudwatch.GetMetricDataOutput{
MetricDataResults: []types.MetricDataResult{
{
Id: aws.String("minimum_0_0"),

@ -167,7 +167,7 @@ func TestGather(t *testing.T) {
require.NoError(t, err)
internalDuration := config.Duration(duration)
c := &CloudWatch{
CredentialConfig: internalaws.CredentialConfig{
CredentialConfig: common_aws.CredentialConfig{
Region: "us-east-1",
},
Namespace: "AWS/ELB",

@ -204,7 +204,7 @@ func TestGatherDenseMetric(t *testing.T) {
require.NoError(t, err)
internalDuration := config.Duration(duration)
c := &CloudWatch{
CredentialConfig: internalaws.CredentialConfig{
CredentialConfig: common_aws.CredentialConfig{
Region: "us-east-1",
},
Namespace: "AWS/ELB",

@ -243,7 +243,7 @@ func TestMultiAccountGather(t *testing.T) {
require.NoError(t, err)
internalDuration := config.Duration(duration)
c := &CloudWatch{
CredentialConfig: internalaws.CredentialConfig{
CredentialConfig: common_aws.CredentialConfig{
Region: "us-east-1",
},
Namespace: "AWS/ELB",

@ -309,9 +309,9 @@ type mockSelectMetricsCloudWatchClient struct{}

func (m *mockSelectMetricsCloudWatchClient) ListMetrics(
_ context.Context,
_ *cwClient.ListMetricsInput,
_ ...func(*cwClient.Options),
) (*cwClient.ListMetricsOutput, error) {
_ *cloudwatch.ListMetricsInput,
_ ...func(*cloudwatch.Options),
) (*cloudwatch.ListMetricsOutput, error) {
metrics := []types.Metric{}
// 4 metrics are available
metricNames := []string{"Latency", "RequestCount", "HealthyHostCount", "UnHealthyHostCount"}

@ -352,7 +352,7 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(
}
}

result := &cwClient.ListMetricsOutput{
result := &cloudwatch.ListMetricsOutput{
Metrics: metrics,
}
return result, nil

@ -360,9 +360,9 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(

func (m *mockSelectMetricsCloudWatchClient) GetMetricData(
_ context.Context,
_ *cwClient.GetMetricDataInput,
_ ...func(*cwClient.Options),
) (*cwClient.GetMetricDataOutput, error) {
_ *cloudwatch.GetMetricDataInput,
_ ...func(*cloudwatch.Options),
) (*cloudwatch.GetMetricDataOutput, error) {
return nil, nil
}

@ -371,7 +371,7 @@ func TestSelectMetrics(t *testing.T) {
require.NoError(t, err)
internalDuration := config.Duration(duration)
c := &CloudWatch{
CredentialConfig: internalaws.CredentialConfig{
CredentialConfig: common_aws.CredentialConfig{
Region: "us-east-1",
},
Namespace: "AWS/ELB",
@ -19,7 +19,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/internal/choice"
tlsint "github.com/influxdata/telegraf/plugins/common/tls"
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/selfstat"
)

@ -48,7 +48,7 @@ type CloudWatchMetricStreams struct {
ageMin selfstat.Stat

Log telegraf.Logger
tlsint.ServerConfig
common_tls.ServerConfig
wg sync.WaitGroup
close chan struct{}
listener net.Listener
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
couchbaseClient "github.com/couchbase/go-couchbase"
|
||||
"github.com/couchbase/go-couchbase"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/filter"
|
||||
|
|
@ -72,7 +72,7 @@ func (cb *Couchbase) Gather(acc telegraf.Accumulator) error {
|
|||
func (cb *Couchbase) gatherServer(acc telegraf.Accumulator, addr string) error {
|
||||
escapedAddr := regexpURI.ReplaceAllString(addr, "${1}")
|
||||
|
||||
client, err := couchbaseClient.Connect(addr)
|
||||
client, err := couchbase.Connect(addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -460,15 +460,15 @@ func (cb *Couchbase) Init() error {
|
|||
cb.client = &http.Client{
|
||||
Timeout: 10 * time.Second,
|
||||
Transport: &http.Transport{
|
||||
MaxIdleConnsPerHost: couchbaseClient.MaxIdleConnsPerHost,
|
||||
MaxIdleConnsPerHost: couchbase.MaxIdleConnsPerHost,
|
||||
TLSClientConfig: tlsConfig,
|
||||
},
|
||||
}
|
||||
|
||||
couchbaseClient.SetSkipVerify(cb.ClientConfig.InsecureSkipVerify)
|
||||
couchbaseClient.SetCertFile(cb.ClientConfig.TLSCert)
|
||||
couchbaseClient.SetKeyFile(cb.ClientConfig.TLSKey)
|
||||
couchbaseClient.SetRootFile(cb.ClientConfig.TLSCA)
|
||||
couchbase.SetSkipVerify(cb.ClientConfig.InsecureSkipVerify)
|
||||
couchbase.SetCertFile(cb.ClientConfig.TLSCert)
|
||||
couchbase.SetKeyFile(cb.ClientConfig.TLSKey)
|
||||
couchbase.SetRootFile(cb.ClientConfig.TLSCA)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ import (
"fmt"
"time"

cpuUtil "github.com/shirou/gopsutil/v3/cpu"
"github.com/shirou/gopsutil/v3/cpu"

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"

@ -19,8 +19,8 @@ var sampleConfig string

type CPUStats struct {
ps system.PS
lastStats map[string]cpuUtil.TimesStat
cpuInfo map[string]cpuUtil.InfoStat
lastStats map[string]cpu.TimesStat
cpuInfo map[string]cpu.InfoStat
coreID bool
physicalID bool

@ -119,7 +119,7 @@ func (c *CPUStats) Gather(acc telegraf.Accumulator) error {
acc.AddGauge("cpu", fieldsG, tags, now)
}

c.lastStats = make(map[string]cpuUtil.TimesStat)
c.lastStats = make(map[string]cpu.TimesStat)
for _, cts := range times {
c.lastStats[cts.CPU] = cts
}

@ -129,12 +129,12 @@ func (c *CPUStats) Gather(acc telegraf.Accumulator) error {

func (c *CPUStats) Init() error {
if c.CoreTags {
cpuInfo, err := cpuUtil.Info()
cpuInfo, err := cpu.Info()
if err == nil {
c.coreID = cpuInfo[0].CoreID != ""
c.physicalID = cpuInfo[0].PhysicalID != ""

c.cpuInfo = make(map[string]cpuUtil.InfoStat)
c.cpuInfo = make(map[string]cpu.InfoStat)
for _, ci := range cpuInfo {
c.cpuInfo[fmt.Sprintf("cpu%d", ci.CPU)] = ci
}

@ -146,12 +146,12 @@ func (c *CPUStats) Init() error {
return nil
}

func totalCPUTime(t cpuUtil.TimesStat) float64 {
func totalCPUTime(t cpu.TimesStat) float64 {
total := t.User + t.System + t.Nice + t.Iowait + t.Irq + t.Softirq + t.Steal + t.Idle
return total
}

func activeCPUTime(t cpuUtil.TimesStat) float64 {
func activeCPUTime(t cpu.TimesStat) float64 {
active := totalCPUTime(t) - t.Idle
return active
}
@ -4,7 +4,7 @@ import (
"fmt"
"testing"

cpuUtil "github.com/shirou/gopsutil/v3/cpu"
"github.com/shirou/gopsutil/v3/cpu"
"github.com/stretchr/testify/require"

"github.com/influxdata/telegraf/plugins/inputs/system"

@ -24,7 +24,7 @@ func TestCPUStats(t *testing.T) {
defer mps.AssertExpectations(t)
var acc testutil.Accumulator

cts := cpuUtil.TimesStat{
cts := cpu.TimesStat{
CPU: "cpu0",
User: 8.8,
System: 8.2,

@ -38,7 +38,7 @@ func TestCPUStats(t *testing.T) {
GuestNice: 0.324,
}

cts2 := cpuUtil.TimesStat{
cts2 := cpu.TimesStat{
CPU: "cpu0",
User: 24.9, // increased by 16.1
System: 10.9, // increased by 2.7

@ -52,7 +52,7 @@ func TestCPUStats(t *testing.T) {
GuestNice: 2.524, // increased by 2.2
}

mps.On("CPUTimes").Return([]cpuUtil.TimesStat{cts}, nil)
mps.On("CPUTimes").Return([]cpu.TimesStat{cts}, nil)

cs := NewCPUStats(&mps)

@ -74,7 +74,7 @@ func TestCPUStats(t *testing.T) {
assertContainsTaggedFloat(t, &acc, "time_guest_nice", 0.324, 0)

mps2 := system.MockPS{}
mps2.On("CPUTimes").Return([]cpuUtil.TimesStat{cts2}, nil)
mps2.On("CPUTimes").Return([]cpu.TimesStat{cts2}, nil)
cs.ps = &mps2

// Should have added cpu percentages too

@ -162,7 +162,7 @@ func TestCPUCountIncrease(t *testing.T) {
cs := NewCPUStats(&mps)

mps.On("CPUTimes").Return(
[]cpuUtil.TimesStat{
[]cpu.TimesStat{
{
CPU: "cpu0",
},

@ -172,7 +172,7 @@ func TestCPUCountIncrease(t *testing.T) {
require.NoError(t, err)

mps2.On("CPUTimes").Return(
[]cpuUtil.TimesStat{
[]cpu.TimesStat{
{
CPU: "cpu0",
},

@ -193,28 +193,28 @@ func TestCPUTimesDecrease(t *testing.T) {
defer mps.AssertExpectations(t)
var acc testutil.Accumulator

cts := cpuUtil.TimesStat{
cts := cpu.TimesStat{
CPU: "cpu0",
User: 18,
Idle: 80,
Iowait: 2,
}

cts2 := cpuUtil.TimesStat{
cts2 := cpu.TimesStat{
CPU: "cpu0",
User: 38, // increased by 20
Idle: 40, // decreased by 40
Iowait: 1, // decreased by 1
}

cts3 := cpuUtil.TimesStat{
cts3 := cpu.TimesStat{
CPU: "cpu0",
User: 56, // increased by 18
Idle: 120, // increased by 80
Iowait: 3, // increased by 2
}

mps.On("CPUTimes").Return([]cpuUtil.TimesStat{cts}, nil)
mps.On("CPUTimes").Return([]cpu.TimesStat{cts}, nil)

cs := NewCPUStats(&mps)

@ -228,7 +228,7 @@ func TestCPUTimesDecrease(t *testing.T) {
assertContainsTaggedFloat(t, &acc, "time_iowait", 2, 0)

mps2 := system.MockPS{}
mps2.On("CPUTimes").Return([]cpuUtil.TimesStat{cts2}, nil)
mps2.On("CPUTimes").Return([]cpu.TimesStat{cts2}, nil)
cs.ps = &mps2

// CPU times decreased. An error should be raised

@ -236,7 +236,7 @@ func TestCPUTimesDecrease(t *testing.T) {
require.Error(t, err)

mps3 := system.MockPS{}
mps3.On("CPUTimes").Return([]cpuUtil.TimesStat{cts3}, nil)
mps3.On("CPUTimes").Return([]cpu.TimesStat{cts3}, nil)
cs.ps = &mps3

err = cs.Gather(&acc)
@ -22,9 +22,9 @@ import (
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/internal/choice"
"github.com/influxdata/telegraf/metric"
httpconfig "github.com/influxdata/telegraf/plugins/common/http"
common_http "github.com/influxdata/telegraf/plugins/common/http"
"github.com/influxdata/telegraf/plugins/inputs"
jsonParser "github.com/influxdata/telegraf/plugins/parsers/json"
parsers_json "github.com/influxdata/telegraf/plugins/parsers/json"
)

// This plugin is based on the official ctrlX CORE API. Documentation can be found in OpenAPI format at:

@ -55,7 +55,7 @@ type CtrlXDataLayer struct {
acc telegraf.Accumulator
connection *http.Client
tokenManager token.TokenManager
httpconfig.HTTPClientConfig
common_http.HTTPClientConfig
}

// convertTimestamp2UnixTime converts the given Data Layer timestamp of the payload to UnixTime.

@ -197,7 +197,7 @@ func (c *CtrlXDataLayer) createMetric(em *sseEventData, sub *subscription) (tele

switch em.Type {
case "object":
flattener := jsonParser.JSONFlattener{}
flattener := parsers_json.JSONFlattener{}
err := flattener.FullFlattenJSON(fieldKey, em.Value, true, true)
if err != nil {
return nil, err
@ -11,7 +11,7 @@ import (
"github.com/stretchr/testify/require"

"github.com/influxdata/telegraf/config"
httpconfig "github.com/influxdata/telegraf/plugins/common/http"
common_http "github.com/influxdata/telegraf/plugins/common/http"
"github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/testutil"
)

@ -183,7 +183,7 @@ func initRunner(t *testing.T) (*CtrlXDataLayer, *httptest.Server) {
url: server.URL,
Username: config.NewSecret([]byte("user")),
Password: config.NewSecret([]byte("password")),
HTTPClientConfig: httpconfig.HTTPClientConfig{
HTTPClientConfig: common_http.HTTPClientConfig{
ClientConfig: tls.ClientConfig{
InsecureSkipVerify: true,
},
@ -9,7 +9,7 @@ import (
"testing"
"time"

diskUtil "github.com/shirou/gopsutil/v3/disk"
"github.com/shirou/gopsutil/v3/disk"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"

@ -30,7 +30,7 @@ func TestDiskUsage(t *testing.T) {
var acc testutil.Accumulator
var err error

psAll := []diskUtil.PartitionStat{
psAll := []disk.PartitionStat{
{
Device: "/dev/sda",
Mountpoint: "/",

@ -50,7 +50,7 @@ func TestDiskUsage(t *testing.T) {
Opts: []string{"ro", "noatime", "nodiratime", "bind"},
},
}
duAll := []diskUtil.UsageStat{
duAll := []disk.UsageStat{
{
Path: "/",
Fstype: "ext4",

@ -170,15 +170,15 @@ func TestDiskUsage(t *testing.T) {
func TestDiskUsageHostMountPrefix(t *testing.T) {
tests := []struct {
name string
partitionStats []diskUtil.PartitionStat
usageStats []*diskUtil.UsageStat
partitionStats []disk.PartitionStat
usageStats []*disk.UsageStat
hostMountPrefix string
expectedTags map[string]string
expectedFields map[string]interface{}
}{
{
name: "no host mount prefix",
partitionStats: []diskUtil.PartitionStat{
partitionStats: []disk.PartitionStat{
{
Device: "/dev/sda",
Mountpoint: "/",

@ -186,7 +186,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) {
Opts: []string{"ro"},
},
},
usageStats: []*diskUtil.UsageStat{
usageStats: []*disk.UsageStat{
{
Path: "/",
Total: 42,

@ -211,7 +211,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) {
},
{
name: "host mount prefix",
partitionStats: []diskUtil.PartitionStat{
partitionStats: []disk.PartitionStat{
{
Device: "/dev/sda",
Mountpoint: "/hostfs/var",

@ -219,7 +219,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) {
Opts: []string{"ro"},
},
},
usageStats: []*diskUtil.UsageStat{
usageStats: []*disk.UsageStat{
{
Path: "/hostfs/var",
Total: 42,

@ -245,7 +245,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) {
},
{
name: "host mount prefix exact match",
partitionStats: []diskUtil.PartitionStat{
partitionStats: []disk.PartitionStat{
{
Device: "/dev/sda",
Mountpoint: "/hostfs",

@ -253,7 +253,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) {
Opts: []string{"ro"},
},
},
usageStats: []*diskUtil.UsageStat{
usageStats: []*disk.UsageStat{
{
Path: "/hostfs",
Total: 42,

@ -310,7 +310,7 @@ func TestDiskStats(t *testing.T) {
var acc testutil.Accumulator
var err error

duAll := []*diskUtil.UsageStat{
duAll := []*disk.UsageStat{
{
Path: "/",
Fstype: "ext4",

@ -342,7 +342,7 @@ func TestDiskStats(t *testing.T) {
InodesUsed: 1000,
},
}
duMountFiltered := []*diskUtil.UsageStat{
duMountFiltered := []*disk.UsageStat{
{
Path: "/",
Fstype: "ext4",

@ -354,7 +354,7 @@ func TestDiskStats(t *testing.T) {
InodesUsed: 1000,
},
}
duOptFiltered := []*diskUtil.UsageStat{
duOptFiltered := []*disk.UsageStat{
{
Path: "/",
Fstype: "ext4",

@ -377,7 +377,7 @@ func TestDiskStats(t *testing.T) {
},
}

psAll := []*diskUtil.PartitionStat{
psAll := []*disk.PartitionStat{
{
Device: "/dev/sda",
Mountpoint: "/",

@ -398,7 +398,7 @@ func TestDiskStats(t *testing.T) {
},
}

psMountFiltered := []*diskUtil.PartitionStat{
psMountFiltered := []*disk.PartitionStat{
{
Device: "/dev/sda",
Mountpoint: "/",

@ -406,7 +406,7 @@ func TestDiskStats(t *testing.T) {
Opts: []string{"ro", "noatime", "nodiratime"},
},
}
psOptFiltered := []*diskUtil.PartitionStat{
psOptFiltered := []*disk.PartitionStat{
{
Device: "/dev/sda",
Mountpoint: "/",

@ -495,13 +495,13 @@ func TestDiskUsageIssues(t *testing.T) {
tests := []struct {
name string
prefix string
du diskUtil.UsageStat
du disk.UsageStat
expected []telegraf.Metric
}{
{
name: "success",
prefix: "",
du: diskUtil.UsageStat{
du: disk.UsageStat{
Total: 256,
Free: 46,
Used: 200,

@ -557,7 +557,7 @@ func TestDiskUsageIssues(t *testing.T) {
{
name: "issue 10297",
prefix: "/host",
du: diskUtil.UsageStat{
du: disk.UsageStat{
Total: 256,
Free: 46,
Used: 200,

@ -630,7 +630,7 @@ func TestDiskUsageIssues(t *testing.T) {
t.Setenv("HOST_PROC", hostProcPrefix)
t.Setenv("HOST_SYS", hostSysPrefix)

partitions, err := diskUtil.Partitions(true)
partitions, err := disk.Partitions(true)
require.NoError(t, err)

// Mock the disk usage
@ -9,7 +9,7 @@ import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/system"
dockerClient "github.com/docker/docker/client"
"github.com/docker/docker/client"
)

var (

@ -30,11 +30,11 @@ type Client interface {
}

func NewEnvClient() (Client, error) {
client, err := dockerClient.NewClientWithOpts(dockerClient.FromEnv)
dockerClient, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
return nil, err
}
return &SocketClient{client}, nil
return &SocketClient{dockerClient}, nil
}

func NewClient(host string, tlsConfig *tls.Config) (Client, error) {

@ -43,20 +43,20 @@ func NewClient(host string, tlsConfig *tls.Config) (Client, error) {
}
httpClient := &http.Client{Transport: transport}

client, err := dockerClient.NewClientWithOpts(
dockerClient.WithHTTPHeaders(defaultHeaders),
dockerClient.WithHTTPClient(httpClient),
dockerClient.WithAPIVersionNegotiation(),
dockerClient.WithHost(host))
dockerClient, err := client.NewClientWithOpts(
client.WithHTTPHeaders(defaultHeaders),
client.WithHTTPClient(httpClient),
client.WithAPIVersionNegotiation(),
client.WithHost(host))
if err != nil {
return nil, err
}

return &SocketClient{client}, nil
return &SocketClient{dockerClient}, nil
}

type SocketClient struct {
client *dockerClient.Client
client *client.Client
}

func (c *SocketClient) Info(ctx context.Context) (system.Info, error) {
@ -17,7 +17,7 @@ import (

"github.com/Masterminds/semver/v3"
"github.com/docker/docker/api/types"
typeContainer "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"

@ -25,8 +25,8 @@ import (
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal/choice"
dockerint "github.com/influxdata/telegraf/internal/docker"
tlsint "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/internal/docker"
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)

@ -61,7 +61,7 @@ type Docker struct {

Log telegraf.Logger

tlsint.ClientConfig
common_tls.ClientConfig

newEnvClient func() (Client, error)
newClient func(string, *tls.Config) (Client, error)

@ -218,7 +218,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
}

// List containers
opts := typeContainer.ListOptions{
opts := container.ListOptions{
Filters: filterArgs,
}
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(d.Timeout))

@ -235,13 +235,13 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
// Get container data
var wg sync.WaitGroup
wg.Add(len(containers))
for _, container := range containers {
for _, cntnr := range containers {
go func(c types.Container) {
defer wg.Done()
if err := d.gatherContainer(c, acc); err != nil {
acc.AddError(err)
}
}(container)
}(cntnr)
}
wg.Wait()

@ -468,12 +468,12 @@ func parseContainerName(containerNames []string) string {
}

func (d *Docker) gatherContainer(
container types.Container,
cntnr types.Container,
acc telegraf.Accumulator,
) error {
var v *typeContainer.StatsResponse
var v *container.StatsResponse

cname := parseContainerName(container.Names)
cname := parseContainerName(cntnr.Names)

if cname == "" {
return nil

@ -483,7 +483,7 @@ func (d *Docker) gatherContainer(
return nil
}

imageName, imageVersion := dockerint.ParseImage(container.Image)
imageName, imageVersion := docker.ParseImage(cntnr.Image)

tags := map[string]string{
"engine_host": d.engineHost,

@ -494,13 +494,13 @@ func (d *Docker) gatherContainer(
}

if d.IncludeSourceTag {
tags["source"] = hostnameFromID(container.ID)
tags["source"] = hostnameFromID(cntnr.ID)
}

ctx, cancel := context.WithTimeout(context.Background(), time.Duration(d.Timeout))
defer cancel()

r, err := d.client.ContainerStats(ctx, container.ID, false)
r, err := d.client.ContainerStats(ctx, cntnr.ID, false)
if errors.Is(err, context.DeadlineExceeded) {
return errStatsTimeout
}

@ -519,26 +519,26 @@ func (d *Docker) gatherContainer(
daemonOSType := r.OSType

// Add labels to tags
for k, label := range container.Labels {
for k, label := range cntnr.Labels {
if d.labelFilter.Match(k) {
tags[k] = label
}
}

return d.gatherContainerInspect(container, acc, tags, daemonOSType, v)
return d.gatherContainerInspect(cntnr, acc, tags, daemonOSType, v)
}

func (d *Docker) gatherContainerInspect(
container types.Container,
cntnr types.Container,
acc telegraf.Accumulator,
tags map[string]string,
daemonOSType string,
v *typeContainer.StatsResponse,
v *container.StatsResponse,
) error {
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(d.Timeout))
defer cancel()

info, err := d.client.ContainerInspect(ctx, container.ID)
info, err := d.client.ContainerInspect(ctx, cntnr.ID)
if errors.Is(err, context.DeadlineExceeded) {
return errInspectTimeout
}

@ -566,7 +566,7 @@ func (d *Docker) gatherContainerInspect(
"pid": info.State.Pid,
"exitcode": info.State.ExitCode,
"restart_count": info.RestartCount,
"container_id": container.ID,
"container_id": cntnr.ID,
}

finished, err := time.Parse(time.RFC3339, info.State.FinishedAt)

@ -599,13 +599,13 @@ func (d *Docker) gatherContainerInspect(
}
}

d.parseContainerStats(v, acc, tags, container.ID, daemonOSType)
d.parseContainerStats(v, acc, tags, cntnr.ID, daemonOSType)

return nil
}

func (d *Docker) parseContainerStats(
stat *typeContainer.StatsResponse,
stat *container.StatsResponse,
acc telegraf.Accumulator,
tags map[string]string,
id, daemonOSType string,

@ -781,7 +781,7 @@ func (d *Docker) parseContainerStats(
}

// Make a map of devices to their block io stats
func getDeviceStatMap(blkioStats typeContainer.BlkioStats) map[string]map[string]interface{} {
func getDeviceStatMap(blkioStats container.BlkioStats) map[string]map[string]interface{} {
deviceStatMap := make(map[string]map[string]interface{})

for _, metric := range blkioStats.IoServiceBytesRecursive {

@ -844,7 +844,7 @@ func getDeviceStatMap(blkioStats typeContainer.BlkioStats) map[string]map[string

func (d *Docker) gatherBlockIOMetrics(
acc telegraf.Accumulator,
stat *typeContainer.StatsResponse,
stat *container.StatsResponse,
tags map[string]string,
tm time.Time,
id string,

@ -921,24 +921,24 @@ func (d *Docker) gatherDiskUsage(acc telegraf.Accumulator, opts types.DiskUsageO
acc.AddFields(duName, fields, tags, now)

// Containers
for _, container := range du.Containers {
for _, cntnr := range du.Containers {
fields := map[string]interface{}{
"size_rw": container.SizeRw,
"size_root_fs": container.SizeRootFs,
"size_rw": cntnr.SizeRw,
"size_root_fs": cntnr.SizeRootFs,
}

imageName, imageVersion := dockerint.ParseImage(container.Image)
imageName, imageVersion := docker.ParseImage(cntnr.Image)

tags := map[string]string{
"engine_host": d.engineHost,
"server_version": d.serverVersion,
"container_name": parseContainerName(container.Names),
"container_name": parseContainerName(cntnr.Names),
"container_image": imageName,
"container_version": imageVersion,
}

if d.IncludeSourceTag {
tags["source"] = hostnameFromID(container.ID)
tags["source"] = hostnameFromID(cntnr.ID)
}

acc.AddFields(duName, fields, tags, now)

@ -958,7 +958,7 @@ func (d *Docker) gatherDiskUsage(acc telegraf.Accumulator, opts types.DiskUsageO
}

if len(image.RepoTags) > 0 {
imageName, imageVersion := dockerint.ParseImage(image.RepoTags[0])
imageName, imageVersion := docker.ParseImage(image.RepoTags[0])
tags["image_name"] = imageName
tags["image_version"] = imageVersion
}
@ -11,7 +11,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
typeContainer "github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/docker/api/types/system"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
|
@ -23,8 +23,8 @@ import (
|
|||
|
||||
type MockClient struct {
|
||||
InfoF func() (system.Info, error)
|
||||
ContainerListF func(options typeContainer.ListOptions) ([]types.Container, error)
|
||||
ContainerStatsF func(containerID string) (typeContainer.StatsResponseReader, error)
|
||||
ContainerListF func(options container.ListOptions) ([]types.Container, error)
|
||||
ContainerStatsF func(containerID string) (container.StatsResponseReader, error)
|
||||
ContainerInspectF func() (types.ContainerJSON, error)
|
||||
ServiceListF func() ([]swarm.Service, error)
|
||||
TaskListF func() ([]swarm.Task, error)
|
||||
|
|
@ -38,11 +38,11 @@ func (c *MockClient) Info(context.Context) (system.Info, error) {
|
|||
return c.InfoF()
|
||||
}
|
||||
|
||||
func (c *MockClient) ContainerList(_ context.Context, options typeContainer.ListOptions) ([]types.Container, error) {
|
||||
func (c *MockClient) ContainerList(_ context.Context, options container.ListOptions) ([]types.Container, error) {
return c.ContainerListF(options)
}

func (c *MockClient) ContainerStats(_ context.Context, containerID string, _ bool) (typeContainer.StatsResponseReader, error) {
func (c *MockClient) ContainerStats(_ context.Context, containerID string, _ bool) (container.StatsResponseReader, error) {
return c.ContainerStatsF(containerID)
}

@ -78,10 +78,10 @@ var baseClient = MockClient{
InfoF: func() (system.Info, error) {
return info, nil
},
ContainerListF: func(typeContainer.ListOptions) ([]types.Container, error) {
ContainerListF: func(container.ListOptions) ([]types.Container, error) {
return containerList, nil
},
ContainerStatsF: func(s string) (typeContainer.StatsResponseReader, error) {
ContainerStatsF: func(s string) (container.StatsResponseReader, error) {
return containerStats(s), nil
},
ContainerInspectF: func() (types.ContainerJSON, error) {

@ -426,10 +426,10 @@ func TestDocker_WindowsMemoryContainerStats(t *testing.T) {
InfoF: func() (system.Info, error) {
return info, nil
},
ContainerListF: func(typeContainer.ListOptions) ([]types.Container, error) {
ContainerListF: func(container.ListOptions) ([]types.Container, error) {
return containerList, nil
},
ContainerStatsF: func(string) (typeContainer.StatsResponseReader, error) {
ContainerStatsF: func(string) (container.StatsResponseReader, error) {
return containerStatsWindows(), nil
},
ContainerInspectF: func() (types.ContainerJSON, error) {

@ -561,7 +561,7 @@ func TestContainerLabels(t *testing.T) {
newClientFunc := func(string, *tls.Config) (Client, error) {
client := baseClient
client.ContainerListF = func(typeContainer.ListOptions) ([]types.Container, error) {
client.ContainerListF = func(container.ListOptions) ([]types.Container, error) {
return []types.Container{tt.container}, nil
}
return &client, nil

@ -681,10 +681,10 @@ func TestContainerNames(t *testing.T) {
newClientFunc := func(string, *tls.Config) (Client, error) {
client := baseClient
client.ContainerListF = func(typeContainer.ListOptions) ([]types.Container, error) {
client.ContainerListF = func(container.ListOptions) ([]types.Container, error) {
return containerList, nil
}
client.ContainerStatsF = func(s string) (typeContainer.StatsResponseReader, error) {
client.ContainerStatsF = func(s string) (container.StatsResponseReader, error) {
return containerStats(s), nil
}

@ -891,7 +891,7 @@ func TestContainerStatus(t *testing.T) {
acc testutil.Accumulator
newClientFunc = func(string, *tls.Config) (Client, error) {
client := baseClient
client.ContainerListF = func(typeContainer.ListOptions) ([]types.Container, error) {
client.ContainerListF = func(container.ListOptions) ([]types.Container, error) {
return containerList[:1], nil
}
client.ContainerInspectF = func() (types.ContainerJSON, error) {

@ -1176,7 +1176,7 @@ func TestContainerStateFilter(t *testing.T) {
newClientFunc := func(string, *tls.Config) (Client, error) {
client := baseClient
client.ContainerListF = func(options typeContainer.ListOptions) ([]types.Container, error) {
client.ContainerListF = func(options container.ListOptions) ([]types.Container, error) {
for k, v := range tt.expected {
actual := options.Filters.Get(k)
sort.Strings(actual)

@ -1212,15 +1212,15 @@ func TestContainerName(t *testing.T) {
name: "container stats name is preferred",
clientFunc: func(string, *tls.Config) (Client, error) {
client := baseClient
client.ContainerListF = func(typeContainer.ListOptions) ([]types.Container, error) {
client.ContainerListF = func(container.ListOptions) ([]types.Container, error) {
var containers []types.Container
containers = append(containers, types.Container{
Names: []string{"/logspout/foo"},
})
return containers, nil
}
client.ContainerStatsF = func(string) (typeContainer.StatsResponseReader, error) {
return typeContainer.StatsResponseReader{
client.ContainerStatsF = func(string) (container.StatsResponseReader, error) {
return container.StatsResponseReader{
Body: io.NopCloser(strings.NewReader(`{"name": "logspout"}`)),
}, nil
}

@ -1232,15 +1232,15 @@ func TestContainerName(t *testing.T) {
name: "container stats without name uses container list name",
clientFunc: func(string, *tls.Config) (Client, error) {
client := baseClient
client.ContainerListF = func(typeContainer.ListOptions) ([]types.Container, error) {
client.ContainerListF = func(container.ListOptions) ([]types.Container, error) {
var containers []types.Container
containers = append(containers, types.Container{
Names: []string{"/logspout"},
})
return containers, nil
}
client.ContainerStatsF = func(string) (typeContainer.StatsResponseReader, error) {
return typeContainer.StatsResponseReader{
client.ContainerStatsF = func(string) (container.StatsResponseReader, error) {
return container.StatsResponseReader{
Body: io.NopCloser(strings.NewReader(`{}`)),
}, nil
}

@ -1304,7 +1304,7 @@ func TestHostnameFromID(t *testing.T) {
func Test_parseContainerStatsPerDeviceAndTotal(t *testing.T) {
type args struct {
stat *typeContainer.StatsResponse
stat *container.StatsResponse
tags map[string]string
id string
perDeviceInclude []string
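The hunks above and below all apply the same mechanical change: an import alias that merely repeats the package name is dropped, call sites switch to the real package name, and local variables that used to be called `container` are renamed (for example to `cntnr`) so they no longer shadow the now-unaliased package. A minimal sketch of the resulting style, assuming a hypothetical package `example` and helper that are not part of the diff:

```go
package example

import "github.com/docker/docker/api/types/container"

// newListOptions refers to the package by its real name instead of the
// old camel-case alias (previously typeContainer.ListOptions).
func newListOptions() container.ListOptions {
	return container.ListOptions{All: true}
}
```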
@ -16,7 +16,7 @@ import (
"unicode"

"github.com/docker/docker/api/types"
typeContainer "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/pkg/stdcopy"

@ -24,7 +24,7 @@ import (
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal/docker"
tlsint "github.com/influxdata/telegraf/plugins/common/tls"
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)

@ -53,7 +53,7 @@ type DockerLogs struct {
ContainerStateExclude []string `toml:"container_state_exclude"`
IncludeSourceTag bool `toml:"source_tag"`

tlsint.ClientConfig
common_tls.ClientConfig

newEnvClient func() (Client, error)
newClient func(string, *tls.Config) (Client, error)

@ -62,7 +62,7 @@ type DockerLogs struct {
labelFilter filter.Filter
containerFilter filter.Filter
stateFilter filter.Filter
opts typeContainer.ListOptions
opts container.ListOptions
wg sync.WaitGroup
mu sync.Mutex
containerList map[string]context.CancelFunc

@ -117,7 +117,7 @@ func (d *DockerLogs) Init() error {
}

if filterArgs.Len() != 0 {
d.opts = typeContainer.ListOptions{
d.opts = container.ListOptions{
Filters: filterArgs,
}
}

@ -206,18 +206,18 @@ func (d *DockerLogs) Gather(acc telegraf.Accumulator) error {
return err
}

for _, container := range containers {
if d.containerInContainerList(container.ID) {
for _, cntnr := range containers {
if d.containerInContainerList(cntnr.ID) {
continue
}

containerName := d.matchedContainerName(container.Names)
containerName := d.matchedContainerName(cntnr.Names)
if containerName == "" {
continue
}

ctx, cancel := context.WithCancel(context.Background())
d.addToContainerList(container.ID, cancel)
d.addToContainerList(cntnr.ID, cancel)

// Start a new goroutine for every new container that has logs to collect
d.wg.Add(1)

@ -229,15 +229,15 @@ func (d *DockerLogs) Gather(acc telegraf.Accumulator) error {
if err != nil && !errors.Is(err, context.Canceled) {
acc.AddError(err)
}
}(container)
}(cntnr)
}
return nil
}

func (d *DockerLogs) hasTTY(ctx context.Context, container types.Container) (bool, error) {
func (d *DockerLogs) hasTTY(ctx context.Context, cntnr types.Container) (bool, error) {
ctx, cancel := context.WithTimeout(ctx, time.Duration(d.Timeout))
defer cancel()
c, err := d.client.ContainerInspect(ctx, container.ID)
c, err := d.client.ContainerInspect(ctx, cntnr.ID)
if err != nil {
return false, err
}

@ -247,10 +247,10 @@ func (d *DockerLogs) hasTTY(ctx context.Context, container types.Container) (boo
func (d *DockerLogs) tailContainerLogs(
ctx context.Context,
acc telegraf.Accumulator,
container types.Container,
cntnr types.Container,
containerName string,
) error {
imageName, imageVersion := docker.ParseImage(container.Image)
imageName, imageVersion := docker.ParseImage(cntnr.Image)
tags := map[string]string{
"container_name": containerName,
"container_image": imageName,

@ -258,17 +258,17 @@ func (d *DockerLogs) tailContainerLogs(
}

if d.IncludeSourceTag {
tags["source"] = hostnameFromID(container.ID)
tags["source"] = hostnameFromID(cntnr.ID)
}

// Add matching container labels as tags
for k, label := range container.Labels {
for k, label := range cntnr.Labels {
if d.labelFilter.Match(k) {
tags[k] = label
}
}

hasTTY, err := d.hasTTY(ctx, container)
hasTTY, err := d.hasTTY(ctx, cntnr)
if err != nil {
return err
}

@ -276,13 +276,13 @@ func (d *DockerLogs) tailContainerLogs(
since := time.Time{}.Format(time.RFC3339Nano)
if !d.FromBeginning {
d.lastRecordMtx.Lock()
if ts, ok := d.lastRecord[container.ID]; ok {
if ts, ok := d.lastRecord[cntnr.ID]; ok {
since = ts.Format(time.RFC3339Nano)
}
d.lastRecordMtx.Unlock()
}

logOptions := typeContainer.LogsOptions{
logOptions := container.LogsOptions{
ShowStdout: true,
ShowStderr: true,
Timestamps: true,

@ -291,7 +291,7 @@ func (d *DockerLogs) tailContainerLogs(
Since: since,
}

logReader, err := d.client.ContainerLogs(ctx, container.ID, logOptions)
logReader, err := d.client.ContainerLogs(ctx, cntnr.ID, logOptions)
if err != nil {
return err
}

@ -304,17 +304,17 @@ func (d *DockerLogs) tailContainerLogs(
// multiplexed.
var last time.Time
if hasTTY {
last, err = tailStream(acc, tags, container.ID, logReader, "tty")
last, err = tailStream(acc, tags, cntnr.ID, logReader, "tty")
} else {
last, err = tailMultiplexed(acc, tags, container.ID, logReader)
last, err = tailMultiplexed(acc, tags, cntnr.ID, logReader)
}
if err != nil {
return err
}

if ts, ok := d.lastRecord[container.ID]; !ok || ts.Before(last) {
if ts, ok := d.lastRecord[cntnr.ID]; !ok || ts.Before(last) {
d.lastRecordMtx.Lock()
d.lastRecord[container.ID] = last
d.lastRecord[cntnr.ID] = last
d.lastRecordMtx.Unlock()
}
@ -11,7 +11,7 @@ import (

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
parsers_json "github.com/influxdata/telegraf/plugins/parsers/json"
)

const (

@ -159,7 +159,7 @@ func (conn *dpdkConnector) processCommand(acc telegraf.Accumulator, log telegraf
return
}

jf := jsonparser.JSONFlattener{}
jf := parsers_json.JSONFlattener{}
err = jf.FullFlattenJSON("", value, true, true)
if err != nil {
acc.AddError(fmt.Errorf("failed to flatten response: %w", err))
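Where an alias is still genuinely needed, for instance because the standard library's encoding/json or net/http would otherwise clash, the diff keeps the alias but rewrites it from camelCase (`jsonparser`, `httpconfig`, `tlsint`) to lower snake_case (`parsers_json`, `common_http`, `common_tls`). A short sketch of that usage, assuming the flattener API visible in these hunks (`FullFlattenJSON` plus a `Fields` map) and a hypothetical package `example`:

```go
package example

import (
	"encoding/json"

	parsers_json "github.com/influxdata/telegraf/plugins/parsers/json"
)

// flatten keeps the alias because encoding/json is imported in the same file,
// but writes it in the lower snake_case form the linter accepts.
func flatten(raw []byte) (map[string]interface{}, error) {
	var value map[string]interface{}
	if err := json.Unmarshal(raw, &value); err != nil {
		return nil, err
	}
	jf := parsers_json.JSONFlattener{}
	if err := jf.FullFlattenJSON("", value, true, true); err != nil {
		return nil, err
	}
	return jf.Fields, nil
}
```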
@ -18,9 +18,9 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/filter"
httpconfig "github.com/influxdata/telegraf/plugins/common/http"
common_http "github.com/influxdata/telegraf/plugins/common/http"
"github.com/influxdata/telegraf/plugins/inputs"
jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
parsers_json "github.com/influxdata/telegraf/plugins/parsers/json"
)

//go:embed sample.conf

@ -132,7 +132,7 @@ type Elasticsearch struct {
Log telegraf.Logger `toml:"-"`

client *http.Client
httpconfig.HTTPClientConfig
common_http.HTTPClientConfig

serverInfo map[string]serverInfo
serverInfoMutex sync.Mutex

@ -152,7 +152,7 @@ func NewElasticsearch() *Elasticsearch {
return &Elasticsearch{
ClusterStatsOnlyFromMaster: true,
ClusterHealthLevel: "indices",
HTTPClientConfig: httpconfig.HTTPClientConfig{
HTTPClientConfig: common_http.HTTPClientConfig{
ResponseHeaderTimeout: config.Duration(5 * time.Second),
Timeout: config.Duration(5 * time.Second),
},

@ -401,7 +401,7 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) er
if s == nil {
continue
}
f := jsonparser.JSONFlattener{}
f := parsers_json.JSONFlattener{}
// parse Json, ignoring strings and bools
err := f.FlattenJSON("", s)
if err != nil {

@ -523,7 +523,7 @@ func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator)
}

for p, s := range stats {
f := jsonparser.JSONFlattener{}
f := parsers_json.JSONFlattener{}
// parse json, including bools and strings
err := f.FullFlattenJSON("", s, true, true)
if err != nil {

@ -557,7 +557,7 @@ func (e *Elasticsearch) gatherIndicesStats(url string, acc telegraf.Accumulator)
// All Stats
for m, s := range indicesStats.All {
// parse Json, ignoring strings and bools
jsonParser := jsonparser.JSONFlattener{}
jsonParser := parsers_json.JSONFlattener{}
err := jsonParser.FullFlattenJSON("_", s, true, true)
if err != nil {
return err

@ -639,7 +639,7 @@ func (e *Elasticsearch) gatherSingleIndexStats(name string, index indexStat, now
"total": index.Total,
}
for m, s := range stats {
f := jsonparser.JSONFlattener{}
f := parsers_json.JSONFlattener{}
// parse Json, getting strings and bools
err := f.FullFlattenJSON("", s, true, true)
if err != nil {

@ -652,7 +652,7 @@ func (e *Elasticsearch) gatherSingleIndexStats(name string, index indexStat, now
for shardNumber, shards := range index.Shards {
for _, shard := range shards {
// Get Shard Stats
flattened := jsonparser.JSONFlattener{}
flattened := parsers_json.JSONFlattener{}
err := flattened.FullFlattenJSON("", shard, true, true)
if err != nil {
return err
@ -16,7 +16,7 @@ import (

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
httpconfig "github.com/influxdata/telegraf/plugins/common/http"
common_http "github.com/influxdata/telegraf/plugins/common/http"
"github.com/influxdata/telegraf/plugins/inputs"
)

@ -35,7 +35,7 @@ type ElasticsearchQuery struct {
Log telegraf.Logger `toml:"-"`

httpclient *http.Client
httpconfig.HTTPClientConfig
common_http.HTTPClientConfig

esClient *elastic5.Client
}

@ -242,7 +242,7 @@ func init() {
inputs.Add("elasticsearch_query", func() telegraf.Input {
return &ElasticsearchQuery{
HealthCheckInterval: config.Duration(time.Second * 10),
HTTPClientConfig: httpconfig.HTTPClientConfig{
HTTPClientConfig: common_http.HTTPClientConfig{
ResponseHeaderTimeout: config.Duration(5 * time.Second),
Timeout: config.Duration(5 * time.Second),
},
@ -19,7 +19,7 @@ import (
|
|||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
httpconfig "github.com/influxdata/telegraf/plugins/common/http"
|
||||
common_http "github.com/influxdata/telegraf/plugins/common/http"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
)
|
||||
|
||||
|
|
@ -538,7 +538,7 @@ func setupIntegrationTest(t *testing.T) (*testutil.Container, error) {
|
|||
)
|
||||
e := &ElasticsearchQuery{
|
||||
URLs: []string{url},
|
||||
HTTPClientConfig: httpconfig.HTTPClientConfig{
|
||||
HTTPClientConfig: common_http.HTTPClientConfig{
|
||||
ResponseHeaderTimeout: config.Duration(30 * time.Second),
|
||||
Timeout: config.Duration(30 * time.Second),
|
||||
},
|
||||
|
|
@ -618,7 +618,7 @@ func TestElasticsearchQueryIntegration(t *testing.T) {
|
|||
URLs: []string{
|
||||
fmt.Sprintf("http://%s:%s", container.Address, container.Ports[servicePort]),
|
||||
},
|
||||
HTTPClientConfig: httpconfig.HTTPClientConfig{
|
||||
HTTPClientConfig: common_http.HTTPClientConfig{
|
||||
ResponseHeaderTimeout: config.Duration(30 * time.Second),
|
||||
Timeout: config.Duration(30 * time.Second),
|
||||
},
|
||||
|
|
@ -684,7 +684,7 @@ func TestElasticsearchQueryIntegration_getMetricFields(t *testing.T) {
|
|||
URLs: []string{
|
||||
fmt.Sprintf("http://%s:%s", container.Address, container.Ports[servicePort]),
|
||||
},
|
||||
HTTPClientConfig: httpconfig.HTTPClientConfig{
|
||||
HTTPClientConfig: common_http.HTTPClientConfig{
|
||||
ResponseHeaderTimeout: config.Duration(30 * time.Second),
|
||||
Timeout: config.Duration(30 * time.Second),
|
||||
},
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@ import (
|
|||
"net"
|
||||
"runtime"
|
||||
|
||||
ethtoolLib "github.com/safchain/ethtool"
|
||||
"github.com/safchain/ethtool"
|
||||
"github.com/vishvananda/netns"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
|
|
@ -24,7 +24,7 @@ type NamespacedResult struct {
|
|||
type NamespaceGoroutine struct {
|
||||
name string
|
||||
handle netns.NsHandle
|
||||
ethtoolClient *ethtoolLib.Ethtool
|
||||
ethtoolClient *ethtool.Ethtool
|
||||
c chan NamespacedAction
|
||||
Log telegraf.Logger
|
||||
}
|
||||
|
|
@ -71,7 +71,7 @@ func (n *NamespaceGoroutine) Stats(intf NamespacedInterface) (map[string]uint64,
|
|||
|
||||
func (n *NamespaceGoroutine) Get(intf NamespacedInterface) (map[string]uint64, error) {
|
||||
result, err := n.Do(func(n *NamespaceGoroutine) (interface{}, error) {
|
||||
ecmd := ethtoolLib.EthtoolCmd{}
|
||||
ecmd := ethtool.EthtoolCmd{}
|
||||
speed32, err := n.ethtoolClient.CmdGet(&ecmd, intf.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -134,7 +134,7 @@ func (n *NamespaceGoroutine) Start() error {
|
|||
}
|
||||
|
||||
// Every namespace needs its own connection to ethtool
|
||||
e, err := ethtoolLib.NewEthtool()
|
||||
e, err := ethtool.NewEthtool()
|
||||
if err != nil {
|
||||
n.Log.Errorf("Could not create ethtool client for namespace %q: %s", n.name, err.Error())
|
||||
started <- err
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
eventhubClient "github.com/Azure/azure-event-hubs-go/v3"
|
||||
eventhub "github.com/Azure/azure-event-hubs-go/v3"
|
||||
"github.com/Azure/azure-event-hubs-go/v3/persist"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
|
|
@ -62,7 +62,7 @@ type EventHub struct {
|
|||
Log telegraf.Logger `toml:"-"`
|
||||
|
||||
// Azure
|
||||
hub *eventhubClient.Hub
|
||||
hub *eventhub.Hub
|
||||
cancel context.CancelFunc
|
||||
wg sync.WaitGroup
|
||||
|
||||
|
|
@ -91,7 +91,7 @@ func (e *EventHub) Init() (err error) {
|
|||
}
|
||||
|
||||
// Set hub options
|
||||
hubOpts := []eventhubClient.HubOption{}
|
||||
hubOpts := []eventhub.HubOption{}
|
||||
|
||||
if e.PersistenceDir != "" {
|
||||
persister, err := persist.NewFilePersister(e.PersistenceDir)
|
||||
|
|
@ -99,20 +99,20 @@ func (e *EventHub) Init() (err error) {
|
|||
return err
|
||||
}
|
||||
|
||||
hubOpts = append(hubOpts, eventhubClient.HubWithOffsetPersistence(persister))
|
||||
hubOpts = append(hubOpts, eventhub.HubWithOffsetPersistence(persister))
|
||||
}
|
||||
|
||||
if e.UserAgent != "" {
|
||||
hubOpts = append(hubOpts, eventhubClient.HubWithUserAgent(e.UserAgent))
|
||||
hubOpts = append(hubOpts, eventhub.HubWithUserAgent(e.UserAgent))
|
||||
} else {
|
||||
hubOpts = append(hubOpts, eventhubClient.HubWithUserAgent(internal.ProductToken()))
|
||||
hubOpts = append(hubOpts, eventhub.HubWithUserAgent(internal.ProductToken()))
|
||||
}
|
||||
|
||||
// Create event hub connection
|
||||
if e.ConnectionString != "" {
|
||||
e.hub, err = eventhubClient.NewHubFromConnectionString(e.ConnectionString, hubOpts...)
|
||||
e.hub, err = eventhub.NewHubFromConnectionString(e.ConnectionString, hubOpts...)
|
||||
} else {
|
||||
e.hub, err = eventhubClient.NewHubFromEnvironment(hubOpts...)
|
||||
e.hub, err = eventhub.NewHubFromEnvironment(hubOpts...)
|
||||
}
|
||||
|
||||
return err
|
||||
|
|
@ -155,25 +155,25 @@ func (e *EventHub) Start(acc telegraf.Accumulator) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (e *EventHub) configureReceiver() []eventhubClient.ReceiveOption {
|
||||
receiveOpts := []eventhubClient.ReceiveOption{}
|
||||
func (e *EventHub) configureReceiver() []eventhub.ReceiveOption {
|
||||
receiveOpts := []eventhub.ReceiveOption{}
|
||||
|
||||
if e.ConsumerGroup != "" {
|
||||
receiveOpts = append(receiveOpts, eventhubClient.ReceiveWithConsumerGroup(e.ConsumerGroup))
|
||||
receiveOpts = append(receiveOpts, eventhub.ReceiveWithConsumerGroup(e.ConsumerGroup))
|
||||
}
|
||||
|
||||
if !e.FromTimestamp.IsZero() {
|
||||
receiveOpts = append(receiveOpts, eventhubClient.ReceiveFromTimestamp(e.FromTimestamp))
|
||||
receiveOpts = append(receiveOpts, eventhub.ReceiveFromTimestamp(e.FromTimestamp))
|
||||
} else if e.Latest {
|
||||
receiveOpts = append(receiveOpts, eventhubClient.ReceiveWithLatestOffset())
|
||||
receiveOpts = append(receiveOpts, eventhub.ReceiveWithLatestOffset())
|
||||
}
|
||||
|
||||
if e.PrefetchCount != 0 {
|
||||
receiveOpts = append(receiveOpts, eventhubClient.ReceiveWithPrefetchCount(e.PrefetchCount))
|
||||
receiveOpts = append(receiveOpts, eventhub.ReceiveWithPrefetchCount(e.PrefetchCount))
|
||||
}
|
||||
|
||||
if e.Epoch != 0 {
|
||||
receiveOpts = append(receiveOpts, eventhubClient.ReceiveWithEpoch(e.Epoch))
|
||||
receiveOpts = append(receiveOpts, eventhub.ReceiveWithEpoch(e.Epoch))
|
||||
}
|
||||
|
||||
return receiveOpts
|
||||
|
|
@ -182,7 +182,7 @@ func (e *EventHub) configureReceiver() []eventhubClient.ReceiveOption {
|
|||
// OnMessage handles an Event. When this function returns without error the
|
||||
// Event is immediately accepted and the offset is updated. If an error is
|
||||
// returned the Event is marked for redelivery.
|
||||
func (e *EventHub) onMessage(ctx context.Context, event *eventhubClient.Event) error {
|
||||
func (e *EventHub) onMessage(ctx context.Context, event *eventhub.Event) error {
|
||||
metrics, err := e.createMetrics(event)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -264,7 +264,7 @@ func deepCopyMetrics(in []telegraf.Metric) []telegraf.Metric {
|
|||
}
|
||||
|
||||
// CreateMetrics returns the Metrics from the Event.
|
||||
func (e *EventHub) createMetrics(event *eventhubClient.Event) ([]telegraf.Metric, error) {
|
||||
func (e *EventHub) createMetrics(event *eventhub.Event) ([]telegraf.Metric, error) {
|
||||
metrics, err := e.parser.Parse(event.Data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
|||
|
|
@ -6,12 +6,13 @@ import (
"bytes"
"fmt"
"os"
osExec "os/exec"
"os/exec"
"syscall"
"time"

"github.com/influxdata/telegraf/internal"
"github.com/kballard/go-shellquote"

"github.com/influxdata/telegraf/internal"
)

func (c CommandRunner) Run(

@ -24,7 +25,7 @@ func (c CommandRunner) Run(
return nil, nil, fmt.Errorf("exec: unable to parse command: %w", err)
}

cmd := osExec.Command(splitCmd[0], splitCmd[1:]...)
cmd := exec.Command(splitCmd[0], splitCmd[1:]...)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}

if len(environments) > 0 {
@ -6,12 +6,13 @@ import (
"bytes"
"fmt"
"os"
osExec "os/exec"
"os/exec"
"syscall"
"time"

"github.com/influxdata/telegraf/internal"
"github.com/kballard/go-shellquote"

"github.com/influxdata/telegraf/internal"
)

func (c CommandRunner) Run(

@ -24,7 +25,7 @@ func (c CommandRunner) Run(
return nil, nil, fmt.Errorf("exec: unable to parse command: %w", err)
}

cmd := osExec.Command(splitCmd[0], splitCmd[1:]...)
cmd := exec.Command(splitCmd[0], splitCmd[1:]...)
cmd.SysProcAttr = &syscall.SysProcAttr{
CreationFlags: syscall.CREATE_NEW_PROCESS_GROUP,
}
@ -19,7 +19,7 @@ import (
|
|||
"github.com/influxdata/telegraf/models"
|
||||
"github.com/influxdata/telegraf/plugins/parsers/influx"
|
||||
"github.com/influxdata/telegraf/plugins/parsers/prometheus"
|
||||
influxSerializer "github.com/influxdata/telegraf/plugins/serializers/influx"
|
||||
serializers_influx "github.com/influxdata/telegraf/plugins/serializers/influx"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
)
|
||||
|
||||
|
|
@ -413,7 +413,7 @@ func TestMain(m *testing.M) {
|
|||
|
||||
func runCounterProgram() error {
|
||||
envMetricName := os.Getenv("METRIC_NAME")
|
||||
serializer := &influxSerializer.Serializer{}
|
||||
serializer := &serializers_influx.Serializer{}
|
||||
if err := serializer.Init(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
githubLib "github.com/google/go-github/v32/github"
|
||||
"github.com/google/go-github/v32/github"
|
||||
"golang.org/x/oauth2"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
|
|
@ -30,7 +30,7 @@ type GitHub struct {
|
|||
AdditionalFields []string `toml:"additional_fields"`
|
||||
EnterpriseBaseURL string `toml:"enterprise_base_url"`
|
||||
HTTPTimeout config.Duration `toml:"http_timeout"`
|
||||
githubClient *githubLib.Client
|
||||
githubClient *github.Client
|
||||
|
||||
obfuscatedToken string
|
||||
|
||||
|
|
@ -40,7 +40,7 @@ type GitHub struct {
|
|||
}
|
||||
|
||||
// Create GitHub Client
|
||||
func (g *GitHub) createGitHubClient(ctx context.Context) (*githubLib.Client, error) {
|
||||
func (g *GitHub) createGitHubClient(ctx context.Context) (*github.Client, error) {
|
||||
httpClient := &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
|
|
@ -65,11 +65,11 @@ func (g *GitHub) createGitHubClient(ctx context.Context) (*githubLib.Client, err
|
|||
return g.newGithubClient(httpClient)
|
||||
}
|
||||
|
||||
func (g *GitHub) newGithubClient(httpClient *http.Client) (*githubLib.Client, error) {
|
||||
func (g *GitHub) newGithubClient(httpClient *http.Client) (*github.Client, error) {
|
||||
if g.EnterpriseBaseURL != "" {
|
||||
return githubLib.NewEnterpriseClient(g.EnterpriseBaseURL, "", httpClient)
|
||||
return github.NewEnterpriseClient(g.EnterpriseBaseURL, "", httpClient)
|
||||
}
|
||||
return githubLib.NewClient(httpClient), nil
|
||||
return github.NewClient(httpClient), nil
|
||||
}
|
||||
|
||||
func (*GitHub) SampleConfig() string {
|
||||
|
|
@ -148,8 +148,8 @@ func (g *GitHub) Gather(acc telegraf.Accumulator) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (g *GitHub) handleRateLimit(response *githubLib.Response, err error) {
|
||||
var rlErr *githubLib.RateLimitError
|
||||
func (g *GitHub) handleRateLimit(response *github.Response, err error) {
|
||||
var rlErr *github.RateLimitError
|
||||
if err == nil {
|
||||
g.RateLimit.Set(int64(response.Rate.Limit))
|
||||
g.RateRemaining.Set(int64(response.Rate.Remaining))
|
||||
|
|
@ -168,7 +168,7 @@ func splitRepositoryName(repositoryName string) (owner, repository string, err e
|
|||
return splits[0], splits[1], nil
|
||||
}
|
||||
|
||||
func getLicense(rI *githubLib.Repository) string {
|
||||
func getLicense(rI *github.Repository) string {
|
||||
if licenseName := rI.GetLicense().GetName(); licenseName != "" {
|
||||
return licenseName
|
||||
}
|
||||
|
|
@ -176,7 +176,7 @@ func getLicense(rI *githubLib.Repository) string {
|
|||
return "None"
|
||||
}
|
||||
|
||||
func getTags(repositoryInfo *githubLib.Repository) map[string]string {
|
||||
func getTags(repositoryInfo *github.Repository) map[string]string {
|
||||
return map[string]string{
|
||||
"owner": repositoryInfo.GetOwner().GetLogin(),
|
||||
"name": repositoryInfo.GetName(),
|
||||
|
|
@ -185,7 +185,7 @@ func getTags(repositoryInfo *githubLib.Repository) map[string]string {
|
|||
}
|
||||
}
|
||||
|
||||
func getFields(repositoryInfo *githubLib.Repository) map[string]interface{} {
|
||||
func getFields(repositoryInfo *github.Repository) map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"stars": repositoryInfo.GetStargazersCount(),
|
||||
"subscribers": repositoryInfo.GetSubscribersCount(),
|
||||
|
|
@ -198,9 +198,9 @@ func getFields(repositoryInfo *githubLib.Repository) map[string]interface{} {
|
|||
}
|
||||
|
||||
func (g *GitHub) getPullRequestFields(ctx context.Context, owner, repo string) (map[string]interface{}, error) {
|
||||
options := githubLib.SearchOptions{
|
||||
options := github.SearchOptions{
|
||||
TextMatch: false,
|
||||
ListOptions: githubLib.ListOptions{
|
||||
ListOptions: github.ListOptions{
|
||||
PerPage: 100,
|
||||
Page: 1,
|
||||
},
|
||||
|
|
|
|||
|
|
@ -12,14 +12,14 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/google/gnxi/utils/xpath"
|
||||
gnmiLib "github.com/openconfig/gnmi/proto/gnmi"
|
||||
"github.com/openconfig/gnmi/proto/gnmi"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/grpc/metadata"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/internal/choice"
|
||||
internaltls "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
"github.com/influxdata/telegraf/plugins/common/yangmodel"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
|
@ -65,7 +65,7 @@ type GNMI struct {
|
|||
KeepaliveTimeout config.Duration `toml:"keepalive_timeout"`
|
||||
YangModelPaths []string `toml:"yang_model_paths"`
|
||||
Log telegraf.Logger `toml:"-"`
|
||||
internaltls.ClientConfig
|
||||
common_tls.ClientConfig
|
||||
|
||||
// Internal state
|
||||
internalAliases map[*pathInfo]string
|
||||
|
|
@ -85,7 +85,7 @@ type Subscription struct {
|
|||
HeartbeatInterval config.Duration `toml:"heartbeat_interval"`
|
||||
TagOnly bool `toml:"tag_only" deprecated:"1.25.0;1.35.0;please use 'tag_subscription's instead"`
|
||||
|
||||
fullPath *gnmiLib.Path
|
||||
fullPath *gnmi.Path
|
||||
}
|
||||
|
||||
// Tag Subscription for a gNMI client
|
||||
|
|
@ -201,15 +201,15 @@ func (c *GNMI) Init() error {
|
|||
c.Log.Debugf("Internal alias mapping: %+v", c.internalAliases)
|
||||
|
||||
// Warn about configures insecure cipher suites
|
||||
insecure := internaltls.InsecureCiphers(c.ClientConfig.TLSCipherSuites)
|
||||
insecure := common_tls.InsecureCiphers(c.ClientConfig.TLSCipherSuites)
|
||||
if len(insecure) > 0 {
|
||||
c.Log.Warnf("Configured insecure cipher suites: %s", strings.Join(insecure, ","))
|
||||
}
|
||||
|
||||
// Check the TLS configuration
|
||||
if _, err := c.ClientConfig.TLSConfig(); err != nil {
|
||||
if errors.Is(err, internaltls.ErrCipherUnsupported) {
|
||||
secure, insecure := internaltls.Ciphers()
|
||||
if errors.Is(err, common_tls.ErrCipherUnsupported) {
|
||||
secure, insecure := common_tls.Ciphers()
|
||||
c.Log.Info("Supported secure ciphers:")
|
||||
for _, name := range secure {
|
||||
c.Log.Infof(" %s", name)
|
||||
|
|
@ -310,18 +310,18 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (s *Subscription) buildSubscription() (*gnmiLib.Subscription, error) {
|
||||
func (s *Subscription) buildSubscription() (*gnmi.Subscription, error) {
|
||||
gnmiPath, err := parsePath(s.Origin, s.Path, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mode, ok := gnmiLib.SubscriptionMode_value[strings.ToUpper(s.SubscriptionMode)]
|
||||
mode, ok := gnmi.SubscriptionMode_value[strings.ToUpper(s.SubscriptionMode)]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid subscription mode %s", s.SubscriptionMode)
|
||||
}
|
||||
return &gnmiLib.Subscription{
|
||||
return &gnmi.Subscription{
|
||||
Path: gnmiPath,
|
||||
Mode: gnmiLib.SubscriptionMode(mode),
|
||||
Mode: gnmi.SubscriptionMode(mode),
|
||||
HeartbeatInterval: uint64(time.Duration(s.HeartbeatInterval).Nanoseconds()),
|
||||
SampleInterval: uint64(time.Duration(s.SampleInterval).Nanoseconds()),
|
||||
SuppressRedundant: s.SuppressRedundant,
|
||||
|
|
@ -329,9 +329,9 @@ func (s *Subscription) buildSubscription() (*gnmiLib.Subscription, error) {
|
|||
}
|
||||
|
||||
// Create a new gNMI SubscribeRequest
|
||||
func (c *GNMI) newSubscribeRequest() (*gnmiLib.SubscribeRequest, error) {
|
||||
func (c *GNMI) newSubscribeRequest() (*gnmi.SubscribeRequest, error) {
|
||||
// Create subscription objects
|
||||
subscriptions := make([]*gnmiLib.Subscription, 0, len(c.Subscriptions)+len(c.TagSubscriptions))
|
||||
subscriptions := make([]*gnmi.Subscription, 0, len(c.Subscriptions)+len(c.TagSubscriptions))
|
||||
for _, subscription := range c.TagSubscriptions {
|
||||
sub, err := subscription.buildSubscription()
|
||||
if err != nil {
|
||||
|
|
@ -363,12 +363,12 @@ func (c *GNMI) newSubscribeRequest() (*gnmiLib.SubscribeRequest, error) {
|
|||
return nil, fmt.Errorf("unsupported encoding %s", c.Encoding)
|
||||
}
|
||||
|
||||
return &gnmiLib.SubscribeRequest{
|
||||
Request: &gnmiLib.SubscribeRequest_Subscribe{
|
||||
Subscribe: &gnmiLib.SubscriptionList{
|
||||
return &gnmi.SubscribeRequest{
|
||||
Request: &gnmi.SubscribeRequest_Subscribe{
|
||||
Subscribe: &gnmi.SubscriptionList{
|
||||
Prefix: gnmiPath,
|
||||
Mode: gnmiLib.SubscriptionList_STREAM,
|
||||
Encoding: gnmiLib.Encoding(gnmiLib.Encoding_value[strings.ToUpper(c.Encoding)]),
|
||||
Mode: gnmi.SubscriptionList_STREAM,
|
||||
Encoding: gnmi.Encoding(gnmi.Encoding_value[strings.ToUpper(c.Encoding)]),
|
||||
Subscription: subscriptions,
|
||||
UpdatesOnly: c.UpdatesOnly,
|
||||
},
|
||||
|
|
@ -377,7 +377,7 @@ func (c *GNMI) newSubscribeRequest() (*gnmiLib.SubscribeRequest, error) {
|
|||
}
|
||||
|
||||
// ParsePath from XPath-like string to gNMI path structure
|
||||
func parsePath(origin, pathToParse, target string) (*gnmiLib.Path, error) {
|
||||
func parsePath(origin, pathToParse, target string) (*gnmi.Path, error) {
|
||||
gnmiPath, err := xpath.ToGNMIPath(pathToParse)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
|||
|
|
@ -11,8 +11,8 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
gnmiLib "github.com/openconfig/gnmi/proto/gnmi"
|
||||
gnmiExt "github.com/openconfig/gnmi/proto/gnmi_ext"
|
||||
"github.com/openconfig/gnmi/proto/gnmi"
|
||||
"github.com/openconfig/gnmi/proto/gnmi_ext"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/metadata"
|
||||
|
|
@ -22,7 +22,7 @@ import (
|
|||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
jnprHeader "github.com/influxdata/telegraf/plugins/inputs/gnmi/extensions/jnpr_gnmi_extention"
|
||||
"github.com/influxdata/telegraf/plugins/inputs/gnmi/extensions/jnpr_gnmi_extention"
|
||||
"github.com/influxdata/telegraf/plugins/parsers/influx"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
)
|
||||
|
|
@ -34,12 +34,12 @@ func TestParsePath(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
require.Equal(t, "theorigin", parsed.Origin)
|
||||
require.Equal(t, "thetarget", parsed.Target)
|
||||
require.Equal(t, []*gnmiLib.PathElem{{Name: "foo"}, {Name: "bar"},
|
||||
require.Equal(t, []*gnmi.PathElem{{Name: "foo"}, {Name: "bar"},
|
||||
{Name: "bla", Key: map[string]string{"shoo": "woo", "shoop": "/woop/"}}, {Name: "z"}}, parsed.Elem)
|
||||
|
||||
parsed, err = parsePath("", "", "")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, &gnmiLib.Path{}, parsed)
|
||||
require.Equal(t, &gnmi.Path{}, parsed)
|
||||
|
||||
parsed, err = parsePath("", "/foo[[", "")
|
||||
require.Nil(t, parsed)
|
||||
|
|
@ -47,23 +47,23 @@ func TestParsePath(t *testing.T) {
|
|||
}
|
||||
|
||||
type MockServer struct {
|
||||
SubscribeF func(gnmiLib.GNMI_SubscribeServer) error
|
||||
SubscribeF func(gnmi.GNMI_SubscribeServer) error
|
||||
GRPCServer *grpc.Server
|
||||
}
|
||||
|
||||
func (s *MockServer) Capabilities(context.Context, *gnmiLib.CapabilityRequest) (*gnmiLib.CapabilityResponse, error) {
|
||||
func (s *MockServer) Capabilities(context.Context, *gnmi.CapabilityRequest) (*gnmi.CapabilityResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *MockServer) Get(context.Context, *gnmiLib.GetRequest) (*gnmiLib.GetResponse, error) {
|
||||
func (s *MockServer) Get(context.Context, *gnmi.GetRequest) (*gnmi.GetResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *MockServer) Set(context.Context, *gnmiLib.SetRequest) (*gnmiLib.SetResponse, error) {
|
||||
func (s *MockServer) Set(context.Context, *gnmi.SetRequest) (*gnmi.SetResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *MockServer) Subscribe(server gnmiLib.GNMI_SubscribeServer) error {
|
||||
func (s *MockServer) Subscribe(server gnmi.GNMI_SubscribeServer) error {
|
||||
return s.SubscribeF(server)
|
||||
}
|
||||
|
||||
|
|
@ -73,12 +73,12 @@ func TestWaitError(t *testing.T) {
|
|||
|
||||
grpcServer := grpc.NewServer()
|
||||
gnmiServer := &MockServer{
|
||||
SubscribeF: func(gnmiLib.GNMI_SubscribeServer) error {
|
||||
SubscribeF: func(gnmi.GNMI_SubscribeServer) error {
|
||||
return errors.New("testerror")
|
||||
},
|
||||
GRPCServer: grpcServer,
|
||||
}
|
||||
gnmiLib.RegisterGNMIServer(grpcServer, gnmiServer)
|
||||
gnmi.RegisterGNMIServer(grpcServer, gnmiServer)
|
||||
|
||||
plugin := &GNMI{
|
||||
Log: testutil.Logger{},
|
||||
|
|
@ -115,7 +115,7 @@ func TestUsernamePassword(t *testing.T) {
|
|||
|
||||
grpcServer := grpc.NewServer()
|
||||
gnmiServer := &MockServer{
|
||||
SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error {
|
||||
SubscribeF: func(server gnmi.GNMI_SubscribeServer) error {
|
||||
metadata, ok := metadata.FromIncomingContext(server.Context())
|
||||
if !ok {
|
||||
return errors.New("failed to get metadata")
|
||||
|
|
@ -135,7 +135,7 @@ func TestUsernamePassword(t *testing.T) {
|
|||
},
|
||||
GRPCServer: grpcServer,
|
||||
}
|
||||
gnmiLib.RegisterGNMIServer(grpcServer, gnmiServer)
|
||||
gnmi.RegisterGNMIServer(grpcServer, gnmiServer)
|
||||
|
||||
plugin := &GNMI{
|
||||
Log: testutil.Logger{},
|
||||
|
|
@ -168,12 +168,12 @@ func TestUsernamePassword(t *testing.T) {
|
|||
require.ErrorContains(t, acc.Errors[0], "aborted gNMI subscription: rpc error: code = Unknown desc = success")
|
||||
}
|
||||
|
||||
func mockGNMINotification() *gnmiLib.Notification {
|
||||
return &gnmiLib.Notification{
|
||||
func mockGNMINotification() *gnmi.Notification {
|
||||
return &gnmi.Notification{
|
||||
Timestamp: 1543236572000000000,
|
||||
Prefix: &gnmiLib.Path{
|
||||
Prefix: &gnmi.Path{
|
||||
Origin: "type",
|
||||
Elem: []*gnmiLib.PathElem{
|
||||
Elem: []*gnmi.PathElem{
|
||||
{
|
||||
Name: "model",
|
||||
Key: map[string]string{"foo": "bar"},
|
||||
|
|
@ -181,35 +181,35 @@ func mockGNMINotification() *gnmiLib.Notification {
|
|||
},
|
||||
Target: "subscription",
|
||||
},
|
||||
Update: []*gnmiLib.Update{
|
||||
Update: []*gnmi.Update{
|
||||
{
|
||||
Path: &gnmiLib.Path{
|
||||
Elem: []*gnmiLib.PathElem{
|
||||
Path: &gnmi.Path{
|
||||
Elem: []*gnmi.PathElem{
|
||||
{Name: "some"},
|
||||
{
|
||||
Name: "path",
|
||||
Key: map[string]string{"name": "str", "uint64": "1234"}},
|
||||
},
|
||||
},
|
||||
Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_IntVal{IntVal: 5678}},
|
||||
Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_IntVal{IntVal: 5678}},
|
||||
},
|
||||
{
|
||||
Path: &gnmiLib.Path{
|
||||
Elem: []*gnmiLib.PathElem{
|
||||
Path: &gnmi.Path{
|
||||
Elem: []*gnmi.PathElem{
|
||||
{Name: "other"},
|
||||
{Name: "path"},
|
||||
},
|
||||
},
|
||||
Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_StringVal{StringVal: "foobar"}},
|
||||
Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_StringVal{StringVal: "foobar"}},
|
||||
},
|
||||
{
|
||||
Path: &gnmiLib.Path{
|
||||
Elem: []*gnmiLib.PathElem{
|
||||
Path: &gnmi.Path{
|
||||
Elem: []*gnmi.PathElem{
|
||||
{Name: "other"},
|
||||
{Name: "this"},
|
||||
},
|
||||
},
|
||||
Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_StringVal{StringVal: "that"}},
|
||||
Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_StringVal{StringVal: "that"}},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
@ -238,20 +238,20 @@ func TestNotification(t *testing.T) {
|
|||
},
|
||||
},
|
||||
server: &MockServer{
|
||||
SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error {
|
||||
SubscribeF: func(server gnmi.GNMI_SubscribeServer) error {
|
||||
notification := mockGNMINotification()
|
||||
err := server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_Update{Update: notification}})
|
||||
err := server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_SyncResponse{SyncResponse: true}})
|
||||
err = server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
notification.Prefix.Elem[0].Key["foo"] = "bar2"
|
||||
notification.Update[0].Path.Elem[1].Key["name"] = "str2"
|
||||
notification.Update[0].Val = &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_JsonVal{JsonVal: []byte{'"', '1', '2', '3', '"'}}}
|
||||
return server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_Update{Update: notification}})
|
||||
notification.Update[0].Val = &gnmi.TypedValue{Value: &gnmi.TypedValue_JsonVal{JsonVal: []byte{'"', '1', '2', '3', '"'}}}
|
||||
return server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}})
|
||||
},
|
||||
},
|
||||
expected: []telegraf.Metric{
|
||||
|
|
@ -327,14 +327,14 @@ func TestNotification(t *testing.T) {
|
|||
},
|
||||
},
|
||||
server: &MockServer{
|
||||
SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error {
|
||||
response := &gnmiLib.SubscribeResponse{
|
||||
Response: &gnmiLib.SubscribeResponse_Update{
|
||||
Update: &gnmiLib.Notification{
|
||||
SubscribeF: func(server gnmi.GNMI_SubscribeServer) error {
|
||||
response := &gnmi.SubscribeResponse{
|
||||
Response: &gnmi.SubscribeResponse_Update{
|
||||
Update: &gnmi.Notification{
|
||||
Timestamp: 1543236572000000000,
|
||||
Prefix: &gnmiLib.Path{
|
||||
Prefix: &gnmi.Path{
|
||||
Origin: "type",
|
||||
Elem: []*gnmiLib.PathElem{
|
||||
Elem: []*gnmi.PathElem{
|
||||
{
|
||||
Name: "state",
|
||||
},
|
||||
|
|
@ -351,11 +351,11 @@ func TestNotification(t *testing.T) {
|
|||
},
|
||||
Target: "subscription",
|
||||
},
|
||||
Update: []*gnmiLib.Update{
|
||||
Update: []*gnmi.Update{
|
||||
{
|
||||
Path: &gnmiLib.Path{},
|
||||
Val: &gnmiLib.TypedValue{
|
||||
Value: &gnmiLib.TypedValue_IntVal{IntVal: 42},
|
||||
Path: &gnmi.Path{},
|
||||
Val: &gnmi.TypedValue{
|
||||
Value: &gnmi.TypedValue_IntVal{IntVal: 42},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -403,17 +403,17 @@ func TestNotification(t *testing.T) {
|
|||
},
|
||||
},
|
||||
server: &MockServer{
|
||||
SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error {
|
||||
tagResponse := &gnmiLib.SubscribeResponse{
|
||||
Response: &gnmiLib.SubscribeResponse_Update{
|
||||
Update: &gnmiLib.Notification{
|
||||
SubscribeF: func(server gnmi.GNMI_SubscribeServer) error {
|
||||
tagResponse := &gnmi.SubscribeResponse{
|
||||
Response: &gnmi.SubscribeResponse_Update{
|
||||
Update: &gnmi.Notification{
|
||||
Timestamp: 1543236571000000000,
|
||||
Prefix: &gnmiLib.Path{},
|
||||
Update: []*gnmiLib.Update{
|
||||
Prefix: &gnmi.Path{},
|
||||
Update: []*gnmi.Update{
|
||||
{
|
||||
Path: &gnmiLib.Path{
|
||||
Path: &gnmi.Path{
|
||||
Origin: "",
|
||||
Elem: []*gnmiLib.PathElem{
|
||||
Elem: []*gnmi.PathElem{
|
||||
{
|
||||
Name: "interfaces",
|
||||
},
|
||||
|
|
@ -430,8 +430,8 @@ func TestNotification(t *testing.T) {
|
|||
},
|
||||
Target: "",
|
||||
},
|
||||
Val: &gnmiLib.TypedValue{
|
||||
Value: &gnmiLib.TypedValue_StringVal{StringVal: "foo"},
|
||||
Val: &gnmi.TypedValue{
|
||||
Value: &gnmi.TypedValue_StringVal{StringVal: "foo"},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -441,19 +441,19 @@ func TestNotification(t *testing.T) {
|
|||
if err := server.Send(tagResponse); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_SyncResponse{SyncResponse: true}}); err != nil {
|
||||
if err := server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}}); err != nil {
|
||||
return err
|
||||
}
|
||||
taggedResponse := &gnmiLib.SubscribeResponse{
|
||||
Response: &gnmiLib.SubscribeResponse_Update{
|
||||
Update: &gnmiLib.Notification{
|
||||
taggedResponse := &gnmi.SubscribeResponse{
|
||||
Response: &gnmi.SubscribeResponse_Update{
|
||||
Update: &gnmi.Notification{
|
||||
Timestamp: 1543236572000000000,
|
||||
Prefix: &gnmiLib.Path{},
|
||||
Update: []*gnmiLib.Update{
|
||||
Prefix: &gnmi.Path{},
|
||||
Update: []*gnmi.Update{
|
||||
{
|
||||
Path: &gnmiLib.Path{
|
||||
Path: &gnmi.Path{
|
||||
Origin: "",
|
||||
Elem: []*gnmiLib.PathElem{
|
||||
Elem: []*gnmi.PathElem{
|
||||
{
|
||||
Name: "interfaces",
|
||||
},
|
||||
|
|
@ -473,8 +473,8 @@ func TestNotification(t *testing.T) {
|
|||
},
|
||||
Target: "",
|
||||
},
|
||||
Val: &gnmiLib.TypedValue{
|
||||
Value: &gnmiLib.TypedValue_IntVal{IntVal: 42},
|
||||
Val: &gnmi.TypedValue{
|
||||
Value: &gnmi.TypedValue_IntVal{IntVal: 42},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -526,17 +526,17 @@ func TestNotification(t *testing.T) {
|
|||
},
|
||||
},
|
||||
server: &MockServer{
|
||||
SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error {
|
||||
tagResponse := &gnmiLib.SubscribeResponse{
|
||||
Response: &gnmiLib.SubscribeResponse_Update{
|
||||
Update: &gnmiLib.Notification{
|
||||
SubscribeF: func(server gnmi.GNMI_SubscribeServer) error {
|
||||
tagResponse := &gnmi.SubscribeResponse{
|
||||
Response: &gnmi.SubscribeResponse_Update{
|
||||
Update: &gnmi.Notification{
|
||||
Timestamp: 1543236571000000000,
|
||||
Prefix: &gnmiLib.Path{},
|
||||
Update: []*gnmiLib.Update{
|
||||
Prefix: &gnmi.Path{},
|
||||
Update: []*gnmi.Update{
|
||||
{
|
||||
Path: &gnmiLib.Path{
|
||||
Path: &gnmi.Path{
|
||||
Origin: "",
|
||||
Elem: []*gnmiLib.PathElem{
|
||||
Elem: []*gnmi.PathElem{
|
||||
{
|
||||
Name: "network-instances",
|
||||
},
|
||||
|
|
@ -570,8 +570,8 @@ func TestNotification(t *testing.T) {
|
|||
},
|
||||
Target: "",
|
||||
},
|
||||
Val: &gnmiLib.TypedValue{
|
||||
Value: &gnmiLib.TypedValue_StringVal{StringVal: "EXAMPLE-PEER"},
|
||||
Val: &gnmi.TypedValue{
|
||||
Value: &gnmi.TypedValue_StringVal{StringVal: "EXAMPLE-PEER"},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -581,19 +581,19 @@ func TestNotification(t *testing.T) {
|
|||
if err := server.Send(tagResponse); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_SyncResponse{SyncResponse: true}}); err != nil {
|
||||
if err := server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}}); err != nil {
|
||||
return err
|
||||
}
|
||||
taggedResponse := &gnmiLib.SubscribeResponse{
|
||||
Response: &gnmiLib.SubscribeResponse_Update{
|
||||
Update: &gnmiLib.Notification{
|
||||
taggedResponse := &gnmi.SubscribeResponse{
|
||||
Response: &gnmi.SubscribeResponse_Update{
|
||||
Update: &gnmi.Notification{
|
||||
Timestamp: 1543236572000000000,
|
||||
Prefix: &gnmiLib.Path{},
|
||||
Update: []*gnmiLib.Update{
|
||||
Prefix: &gnmi.Path{},
|
||||
Update: []*gnmi.Update{
|
||||
{
|
||||
Path: &gnmiLib.Path{
|
||||
Path: &gnmi.Path{
|
||||
Origin: "",
|
||||
Elem: []*gnmiLib.PathElem{
|
||||
Elem: []*gnmi.PathElem{
|
||||
{
|
||||
Name: "network-instances",
|
||||
},
|
||||
|
|
@ -627,8 +627,8 @@ func TestNotification(t *testing.T) {
|
|||
},
|
||||
Target: "",
|
||||
},
|
||||
Val: &gnmiLib.TypedValue{
|
||||
Value: &gnmiLib.TypedValue_StringVal{StringVal: "ESTABLISHED"},
|
||||
Val: &gnmi.TypedValue{
|
||||
Value: &gnmi.TypedValue_StringVal{StringVal: "ESTABLISHED"},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -674,17 +674,17 @@ func TestNotification(t *testing.T) {
|
|||
},
|
||||
},
|
||||
server: &MockServer{
|
||||
SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error {
|
||||
if err := server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_SyncResponse{SyncResponse: true}}); err != nil {
|
||||
SubscribeF: func(server gnmi.GNMI_SubscribeServer) error {
|
||||
if err := server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}}); err != nil {
|
||||
return err
|
||||
}
|
||||
response := &gnmiLib.SubscribeResponse{
|
||||
Response: &gnmiLib.SubscribeResponse_Update{
|
||||
Update: &gnmiLib.Notification{
|
||||
response := &gnmi.SubscribeResponse{
|
||||
Response: &gnmi.SubscribeResponse_Update{
|
||||
Update: &gnmi.Notification{
|
||||
Timestamp: 1668762813698611837,
|
||||
Prefix: &gnmiLib.Path{
|
||||
Prefix: &gnmi.Path{
|
||||
Origin: "openconfig",
|
||||
Elem: []*gnmiLib.PathElem{
|
||||
Elem: []*gnmi.PathElem{
|
||||
{Name: "interfaces"},
|
||||
{Name: "interface", Key: map[string]string{"name": "Ethernet1"}},
|
||||
{Name: "state"},
|
||||
|
|
@ -692,54 +692,54 @@ func TestNotification(t *testing.T) {
|
|||
},
|
||||
Target: "OC-YANG",
|
||||
},
|
||||
Update: []*gnmiLib.Update{
|
||||
Update: []*gnmi.Update{
|
||||
{
|
||||
Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{{Name: "in-broadcast-pkts"}}},
|
||||
Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_UintVal{UintVal: 0}},
|
||||
Path: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "in-broadcast-pkts"}}},
|
||||
Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 0}},
|
||||
},
|
||||
{
|
||||
Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{{Name: "in-discards"}}},
|
||||
Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_UintVal{UintVal: 0}},
|
||||
Path: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "in-discards"}}},
|
||||
Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 0}},
|
||||
},
|
||||
{
|
||||
Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{{Name: "in-errors"}}},
|
||||
Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_UintVal{UintVal: 0}},
|
||||
Path: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "in-errors"}}},
|
||||
Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 0}},
|
||||
},
|
||||
{
|
||||
Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{{Name: "in-fcs-errors"}}},
|
||||
Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_UintVal{UintVal: 0}},
|
||||
Path: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "in-fcs-errors"}}},
|
||||
Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 0}},
|
||||
},
|
||||
{
|
||||
Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{{Name: "in-unicast-pkts"}}},
|
||||
Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_UintVal{UintVal: 0}},
|
||||
Path: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "in-unicast-pkts"}}},
|
||||
Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 0}},
|
||||
},
|
||||
{
|
||||
Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{{Name: "out-broadcast-pkts"}}},
|
||||
Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_UintVal{UintVal: 0}},
|
||||
Path: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "out-broadcast-pkts"}}},
|
||||
Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 0}},
|
||||
},
|
||||
{
|
||||
Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{{Name: "out-discards"}}},
|
||||
Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_UintVal{UintVal: 0}},
|
||||
Path: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "out-discards"}}},
|
||||
Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 0}},
|
||||
},
|
||||
{
|
||||
Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{{Name: "out-errors"}}},
|
||||
Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_UintVal{UintVal: 0}},
|
||||
Path: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "out-errors"}}},
|
||||
Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 0}},
|
||||
},
|
||||
{
|
||||
Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{{Name: "out-multicast-pkts"}}},
|
||||
Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_UintVal{UintVal: 0}},
|
||||
Path: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "out-multicast-pkts"}}},
|
||||
Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 0}},
|
||||
},
|
||||
{
|
||||
Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{{Name: "out-octets"}}},
|
||||
Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_UintVal{UintVal: 0}},
|
||||
Path: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "out-octets"}}},
|
||||
Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 0}},
|
||||
},
|
||||
{
|
||||
Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{{Name: "out-pkts"}}},
|
||||
Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_UintVal{UintVal: 0}},
|
||||
Path: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "out-pkts"}}},
|
||||
Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 0}},
|
||||
},
|
||||
{
|
||||
Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{{Name: "out-unicast-pkts"}}},
|
||||
Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_UintVal{UintVal: 0}},
|
||||
Path: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "out-unicast-pkts"}}},
|
||||
Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 0}},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -791,90 +791,90 @@ func TestNotification(t *testing.T) {
|
|||
},
|
||||
},
|
||||
server: &MockServer{
|
||||
SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error {
|
||||
if err := server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_SyncResponse{SyncResponse: true}}); err != nil {
|
||||
SubscribeF: func(server gnmi.GNMI_SubscribeServer) error {
|
||||
if err := server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}}); err != nil {
|
||||
return err
|
||||
}
|
||||
response := &gnmiLib.SubscribeResponse{
|
||||
Response: &gnmiLib.SubscribeResponse_Update{
|
||||
Update: &gnmiLib.Notification{
|
||||
response := &gnmi.SubscribeResponse{
|
||||
Response: &gnmi.SubscribeResponse_Update{
|
||||
Update: &gnmi.Notification{
|
||||
Timestamp: 1668771585733542546,
|
||||
Prefix: &gnmiLib.Path{
|
||||
Elem: []*gnmiLib.PathElem{
|
||||
Prefix: &gnmi.Path{
|
||||
Elem: []*gnmi.PathElem{
|
||||
{Name: "openconfig-platform:components"},
|
||||
{Name: "component", Key: map[string]string{"name": "TEMP 1"}},
|
||||
{Name: "state"},
|
||||
},
|
||||
Target: "OC-YANG",
|
||||
},
|
||||
Update: []*gnmiLib.Update{
|
||||
Update: []*gnmi.Update{
|
||||
{
|
||||
Path: &gnmiLib.Path{
|
||||
Elem: []*gnmiLib.PathElem{
|
||||
Path: &gnmi.Path{
|
||||
Elem: []*gnmi.PathElem{
|
||||
{Name: "temperature"},
|
||||
{Name: "low-threshold"},
|
||||
}},
|
||||
Val: &gnmiLib.TypedValue{
|
||||
Value: &gnmiLib.TypedValue_FloatVal{FloatVal: 0},
|
||||
Val: &gnmi.TypedValue{
|
||||
Value: &gnmi.TypedValue_FloatVal{FloatVal: 0},
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: &gnmiLib.Path{
|
||||
Elem: []*gnmiLib.PathElem{
|
||||
Path: &gnmi.Path{
|
||||
Elem: []*gnmi.PathElem{
|
||||
{Name: "temperature"},
|
||||
{Name: "timestamp"},
|
||||
}},
|
||||
Val: &gnmiLib.TypedValue{
|
||||
Value: &gnmiLib.TypedValue_StringVal{StringVal: "2022-11-18T11:39:26Z"},
|
||||
Val: &gnmi.TypedValue{
|
||||
Value: &gnmi.TypedValue_StringVal{StringVal: "2022-11-18T11:39:26Z"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: &gnmiLib.Path{
|
||||
Elem: []*gnmiLib.PathElem{
|
||||
Path: &gnmi.Path{
|
||||
Elem: []*gnmi.PathElem{
|
||||
{Name: "temperature"},
|
||||
{Name: "warning-status"},
|
||||
}},
|
||||
Val: &gnmiLib.TypedValue{
|
||||
Value: &gnmiLib.TypedValue_BoolVal{BoolVal: false},
|
||||
Val: &gnmi.TypedValue{
|
||||
Value: &gnmi.TypedValue_BoolVal{BoolVal: false},
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: &gnmiLib.Path{
|
||||
Elem: []*gnmiLib.PathElem{
|
||||
Path: &gnmi.Path{
|
||||
Elem: []*gnmi.PathElem{
|
||||
{Name: "name"},
|
||||
}},
|
||||
Val: &gnmiLib.TypedValue{
|
||||
Value: &gnmiLib.TypedValue_StringVal{StringVal: "CPU On-board"},
|
||||
Val: &gnmi.TypedValue{
|
||||
Value: &gnmi.TypedValue_StringVal{StringVal: "CPU On-board"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: &gnmiLib.Path{
|
||||
Elem: []*gnmiLib.PathElem{
|
||||
Path: &gnmi.Path{
|
||||
Elem: []*gnmi.PathElem{
|
||||
{Name: "temperature"},
|
||||
{Name: "critical-high-threshold"},
|
||||
}},
|
||||
Val: &gnmiLib.TypedValue{
|
||||
Value: &gnmiLib.TypedValue_FloatVal{FloatVal: 94},
|
||||
Val: &gnmi.TypedValue{
|
||||
Value: &gnmi.TypedValue_FloatVal{FloatVal: 94},
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: &gnmiLib.Path{
|
||||
Elem: []*gnmiLib.PathElem{
|
||||
Path: &gnmi.Path{
|
||||
Elem: []*gnmi.PathElem{
|
||||
{Name: "temperature"},
|
||||
{Name: "current"},
|
||||
}},
|
||||
Val: &gnmiLib.TypedValue{
|
||||
Value: &gnmiLib.TypedValue_FloatVal{FloatVal: 29},
|
||||
Val: &gnmi.TypedValue{
|
||||
Value: &gnmi.TypedValue_FloatVal{FloatVal: 29},
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: &gnmiLib.Path{
|
||||
Elem: []*gnmiLib.PathElem{
|
||||
Path: &gnmi.Path{
|
||||
Elem: []*gnmi.PathElem{
|
||||
{Name: "temperature"},
|
||||
{Name: "high-threshold"},
|
||||
}},
|
||||
Val: &gnmiLib.TypedValue{
|
||||
Value: &gnmiLib.TypedValue_FloatVal{FloatVal: 90},
|
||||
Val: &gnmi.TypedValue{
|
||||
Value: &gnmi.TypedValue_FloatVal{FloatVal: 90},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -923,48 +923,48 @@ func TestNotification(t *testing.T) {
|
|||
},
|
||||
},
|
||||
server: &MockServer{
|
||||
SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error {
|
||||
if err := server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_SyncResponse{SyncResponse: true}}); err != nil {
|
||||
SubscribeF: func(server gnmi.GNMI_SubscribeServer) error {
|
||||
if err := server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}}); err != nil {
|
||||
return err
|
||||
}
|
||||
response := &gnmiLib.SubscribeResponse{
|
||||
Response: &gnmiLib.SubscribeResponse_Update{
|
||||
Update: &gnmiLib.Notification{
|
||||
response := &gnmi.SubscribeResponse{
|
||||
Response: &gnmi.SubscribeResponse_Update{
|
||||
Update: &gnmi.Notification{
|
||||
Timestamp: 1668771585733542546,
|
||||
Prefix: &gnmiLib.Path{
|
||||
Elem: []*gnmiLib.PathElem{
|
||||
Prefix: &gnmi.Path{
|
||||
Elem: []*gnmi.PathElem{
|
||||
{Name: "openconfig-platform:components"},
|
||||
{Name: "component", Key: map[string]string{"name": "CHASSIS0:FPC0"}},
|
||||
{Name: "state"},
|
||||
},
|
||||
Target: "OC-YANG",
|
||||
},
|
||||
Update: []*gnmiLib.Update{
|
||||
Update: []*gnmi.Update{
|
||||
{
|
||||
Path: &gnmiLib.Path{
|
||||
Elem: []*gnmiLib.PathElem{
|
||||
Path: &gnmi.Path{
|
||||
Elem: []*gnmi.PathElem{
|
||||
{Name: "type"},
|
||||
}},
|
||||
Val: &gnmiLib.TypedValue{
|
||||
Value: &gnmiLib.TypedValue_StringVal{StringVal: "LINECARD"},
|
||||
Val: &gnmi.TypedValue{
|
||||
Value: &gnmi.TypedValue_StringVal{StringVal: "LINECARD"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Extension: []*gnmiExt.Extension{{
|
||||
Ext: &gnmiExt.Extension_RegisteredExt{
|
||||
RegisteredExt: &gnmiExt.RegisteredExtension{
|
||||
Extension: []*gnmi_ext.Extension{{
|
||||
Ext: &gnmi_ext.Extension_RegisteredExt{
|
||||
RegisteredExt: &gnmi_ext.RegisteredExtension{
|
||||
// Juniper Header Extension
|
||||
// EID_JUNIPER_TELEMETRY_HEADER = 1;
|
||||
Id: 1,
|
||||
Msg: func(jnprExt *jnprHeader.GnmiJuniperTelemetryHeaderExtension) []byte {
|
||||
Msg: func(jnprExt *jnpr_gnmi_extention.GnmiJuniperTelemetryHeaderExtension) []byte {
|
||||
b, err := proto.Marshal(jnprExt)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return b
|
||||
}(&jnprHeader.GnmiJuniperTelemetryHeaderExtension{ComponentId: 15, SubComponentId: 1, Component: "PICD"}),
|
||||
}(&jnpr_gnmi_extention.GnmiJuniperTelemetryHeaderExtension{ComponentId: 15, SubComponentId: 1, Component: "PICD"}),
|
||||
},
|
||||
},
|
||||
}},
|
||||
|
|
@ -1001,7 +1001,7 @@ func TestNotification(t *testing.T) {
|
|||
|
||||
grpcServer := grpc.NewServer()
|
||||
tt.server.GRPCServer = grpcServer
|
||||
gnmiLib.RegisterGNMIServer(grpcServer, tt.server)
|
||||
gnmi.RegisterGNMIServer(grpcServer, tt.server)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
require.NoError(t, tt.plugin.Init())
|
||||
|
|
@ -1051,13 +1051,13 @@ func TestRedial(t *testing.T) {
|
|||
|
||||
grpcServer := grpc.NewServer()
|
||||
gnmiServer := &MockServer{
|
||||
SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error {
|
||||
SubscribeF: func(server gnmi.GNMI_SubscribeServer) error {
|
||||
notification := mockGNMINotification()
|
||||
return server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_Update{Update: notification}})
|
||||
return server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}})
|
||||
},
|
||||
GRPCServer: grpcServer,
|
||||
}
|
||||
gnmiLib.RegisterGNMIServer(grpcServer, gnmiServer)
|
||||
gnmi.RegisterGNMIServer(grpcServer, gnmiServer)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
|
@ -1081,16 +1081,16 @@ func TestRedial(t *testing.T) {
|
|||
|
||||
grpcServer = grpc.NewServer()
|
||||
gnmiServer = &MockServer{
|
||||
SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error {
|
||||
SubscribeF: func(server gnmi.GNMI_SubscribeServer) error {
|
||||
notification := mockGNMINotification()
|
||||
notification.Prefix.Elem[0].Key["foo"] = "bar2"
|
||||
notification.Update[0].Path.Elem[1].Key["name"] = "str2"
|
||||
notification.Update[0].Val = &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_BoolVal{BoolVal: false}}
|
||||
return server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_Update{Update: notification}})
|
||||
notification.Update[0].Val = &gnmi.TypedValue{Value: &gnmi.TypedValue_BoolVal{BoolVal: false}}
|
||||
return server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}})
|
||||
},
|
||||
GRPCServer: grpcServer,
|
||||
}
|
||||
gnmiLib.RegisterGNMIServer(grpcServer, gnmiServer)
|
||||
gnmi.RegisterGNMIServer(grpcServer, gnmiServer)
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
|
|
@ -1131,7 +1131,7 @@ func TestCases(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
var entries []json.RawMessage
|
||||
require.NoError(t, json.Unmarshal(buf, &entries))
|
||||
responses := make([]gnmiLib.SubscribeResponse, len(entries))
|
||||
responses := make([]gnmi.SubscribeResponse, len(entries))
|
||||
for i, entry := range entries {
|
||||
require.NoError(t, protojson.Unmarshal(entry, &responses[i]))
|
||||
}
|
||||
|
|
@ -1163,9 +1163,9 @@ func TestCases(t *testing.T) {
|
|||
require.Len(t, cfg.Inputs, 1)
|
||||
|
||||
// Prepare the server response
|
||||
responseFunction := func(server gnmiLib.GNMI_SubscribeServer) error {
|
||||
sync := &gnmiLib.SubscribeResponse{
|
||||
Response: &gnmiLib.SubscribeResponse_SyncResponse{
|
||||
responseFunction := func(server gnmi.GNMI_SubscribeServer) error {
|
||||
sync := &gnmi.SubscribeResponse{
|
||||
Response: &gnmi.SubscribeResponse_SyncResponse{
|
||||
SyncResponse: true,
|
||||
},
|
||||
}
|
||||
|
|
@ -1187,7 +1187,7 @@ func TestCases(t *testing.T) {
|
|||
SubscribeF: responseFunction,
|
||||
GRPCServer: grpcServer,
|
||||
}
|
||||
gnmiLib.RegisterGNMIServer(grpcServer, gnmiServer)
|
||||
gnmi.RegisterGNMIServer(grpcServer, gnmiServer)
|
||||
|
||||
// Setup the plugin
|
||||
plugin := cfg.Inputs[0].Input.(*GNMI)
|
||||
@ -13,8 +13,8 @@ import (
"strings"
"time"

gnmiLib "github.com/openconfig/gnmi/proto/gnmi"
gnmiExt "github.com/openconfig/gnmi/proto/gnmi_ext"
"github.com/openconfig/gnmi/proto/gnmi"
"github.com/openconfig/gnmi/proto/gnmi_ext"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
@ -26,7 +26,7 @@ import (
"github.com/influxdata/telegraf/internal/choice"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/common/yangmodel"
jnprHeader "github.com/influxdata/telegraf/plugins/inputs/gnmi/extensions/jnpr_gnmi_extention"
"github.com/influxdata/telegraf/plugins/inputs/gnmi/extensions/jnpr_gnmi_extention"
"github.com/influxdata/telegraf/selfstat"
)
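Illustrative sketch, not part of the diff: the import rewrite above applies two simple rules, drop an alias that only repeats the package name, and keep a lower-case snake_case alias only where one is still needed to tell two same-named packages apart. The plugin package below is invented; the import paths are ones touched elsewhere in this commit, where the influxSerializer alias is likewise renamed to snake_case.

package example

import (
	// Before: gnmiLib "github.com/openconfig/gnmi/proto/gnmi". The alias was
	// mixed case and the package is already called gnmi, so it is dropped.
	"github.com/openconfig/gnmi/proto/gnmi"

	// A second package named influx exists elsewhere in the tree, so this one
	// keeps an alias, in snake_case.
	serializers_influx "github.com/influxdata/telegraf/plugins/serializers/influx"
)

func example() {
	_ = gnmi.Path{}
	_ = serializers_influx.Serializer{}
}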
@ -51,7 +51,7 @@ type handler struct {
|
|||
}
|
||||
|
||||
// SubscribeGNMI and extract telemetry data
|
||||
func (h *handler) subscribeGNMI(ctx context.Context, acc telegraf.Accumulator, tlscfg *tls.Config, request *gnmiLib.SubscribeRequest) error {
|
||||
func (h *handler) subscribeGNMI(ctx context.Context, acc telegraf.Accumulator, tlscfg *tls.Config, request *gnmi.SubscribeRequest) error {
|
||||
var creds credentials.TransportCredentials
|
||||
if tlscfg != nil {
|
||||
creds = credentials.NewTLS(tlscfg)
|
||||
|
|
@ -78,7 +78,7 @@ func (h *handler) subscribeGNMI(ctx context.Context, acc telegraf.Accumulator, t
|
|||
}
|
||||
defer client.Close()
|
||||
|
||||
subscribeClient, err := gnmiLib.NewGNMIClient(client).Subscribe(ctx)
|
||||
subscribeClient, err := gnmi.NewGNMIClient(client).Subscribe(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to setup subscription: %w", err)
|
||||
}
|
||||
|
|
@ -99,7 +99,7 @@ func (h *handler) subscribeGNMI(ctx context.Context, acc telegraf.Accumulator, t
|
|||
|
||||
defer h.log.Debugf("Connection to gNMI device %s closed", h.address)
|
||||
for ctx.Err() == nil {
|
||||
var reply *gnmiLib.SubscribeResponse
|
||||
var reply *gnmi.SubscribeResponse
|
||||
if reply, err = subscribeClient.Recv(); err != nil {
|
||||
if !errors.Is(err, io.EOF) && ctx.Err() == nil {
|
||||
connectStat.Set(0)
|
||||
|
|
@ -117,7 +117,7 @@ func (h *handler) subscribeGNMI(ctx context.Context, acc telegraf.Accumulator, t
|
|||
h.log.Debugf("Got update_%v: %s", t, string(buf))
|
||||
}
|
||||
}
|
||||
if response, ok := reply.Response.(*gnmiLib.SubscribeResponse_Update); ok {
|
||||
if response, ok := reply.Response.(*gnmi.SubscribeResponse_Update); ok {
|
||||
h.handleSubscribeResponseUpdate(acc, response, reply.GetExtension())
|
||||
}
|
||||
}
|
||||
|
|
@ -127,7 +127,7 @@ func (h *handler) subscribeGNMI(ctx context.Context, acc telegraf.Accumulator, t
|
|||
}
|
||||
|
||||
// Handle SubscribeResponse_Update message from gNMI and parse contained telemetry data
|
||||
func (h *handler) handleSubscribeResponseUpdate(acc telegraf.Accumulator, response *gnmiLib.SubscribeResponse_Update, extension []*gnmiExt.Extension) {
|
||||
func (h *handler) handleSubscribeResponseUpdate(acc telegraf.Accumulator, response *gnmi.SubscribeResponse_Update, extension []*gnmi_ext.Extension) {
|
||||
grouper := metric.NewSeriesGrouper()
|
||||
timestamp := time.Unix(0, response.Update.Timestamp)
|
||||
|
||||
|
|
@ -144,7 +144,7 @@ func (h *handler) handleSubscribeResponseUpdate(acc telegraf.Accumulator, respon
|
|||
// Juniper Header extension
|
||||
// Decode it only if user requested it
|
||||
if choice.Contains("juniper_header", h.vendorExt) {
|
||||
juniperHeader := &jnprHeader.GnmiJuniperTelemetryHeaderExtension{}
|
||||
juniperHeader := &jnpr_gnmi_extention.GnmiJuniperTelemetryHeaderExtension{}
|
||||
if err := proto.Unmarshal(currentExt, juniperHeader); err != nil {
|
||||
h.log.Errorf("unmarshal gnmi Juniper Header extension failed: %v", err)
|
||||
} else {
|
||||
@ -3,7 +3,7 @@ package gnmi
import (
"strings"

gnmiLib "github.com/openconfig/gnmi/proto/gnmi"
"github.com/openconfig/gnmi/proto/gnmi"
)

type keySegment struct {
|
|
@ -41,7 +41,7 @@ func newInfoFromString(path string) *pathInfo {
|
|||
return info
|
||||
}
|
||||
|
||||
func newInfoFromPathWithoutKeys(path *gnmiLib.Path) *pathInfo {
|
||||
func newInfoFromPathWithoutKeys(path *gnmi.Path) *pathInfo {
|
||||
info := &pathInfo{
|
||||
origin: path.Origin,
|
||||
segments: make([]segment, 0, len(path.Elem)),
|
||||
|
|
@ -57,7 +57,7 @@ func newInfoFromPathWithoutKeys(path *gnmiLib.Path) *pathInfo {
|
|||
return info
|
||||
}
|
||||
|
||||
func newInfoFromPath(paths ...*gnmiLib.Path) *pathInfo {
|
||||
func newInfoFromPath(paths ...*gnmi.Path) *pathInfo {
|
||||
if len(paths) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
|
@ -101,7 +101,7 @@ func (pi *pathInfo) empty() bool {
|
|||
return len(pi.segments) == 0
|
||||
}
|
||||
|
||||
func (pi *pathInfo) append(paths ...*gnmiLib.Path) *pathInfo {
|
||||
func (pi *pathInfo) append(paths ...*gnmi.Path) *pathInfo {
|
||||
// Copy the existing info
|
||||
path := &pathInfo{
|
||||
origin: pi.origin,
|
||||
|
|
@ -209,7 +209,7 @@ func (pi *pathInfo) normalize() {
|
|||
pi.segments = segments
|
||||
}
|
||||
|
||||
func (pi *pathInfo) equalsPathNoKeys(path *gnmiLib.Path) bool {
|
||||
func (pi *pathInfo) equalsPathNoKeys(path *gnmi.Path) bool {
|
||||
if len(pi.segments) != len(path.Elem) {
|
||||
return false
|
||||
}
|
||||
|
|
|
|||
|
|
@ -6,8 +6,8 @@ import (
"strconv"
"strings"

gnmiLib "github.com/openconfig/gnmi/proto/gnmi"
gnmiValue "github.com/openconfig/gnmi/value"
"github.com/openconfig/gnmi/proto/gnmi"
"github.com/openconfig/gnmi/value"
)

type keyValuePair struct {
|
|
@ -20,27 +20,27 @@ type updateField struct {
|
|||
value interface{}
|
||||
}
|
||||
|
||||
func (h *handler) newFieldsFromUpdate(path *pathInfo, update *gnmiLib.Update) ([]updateField, error) {
|
||||
func (h *handler) newFieldsFromUpdate(path *pathInfo, update *gnmi.Update) ([]updateField, error) {
|
||||
if update.Val == nil || update.Val.Value == nil {
|
||||
return []updateField{{path: path}}, nil
|
||||
}
|
||||
|
||||
// Apply some special handling for special types
|
||||
switch v := update.Val.Value.(type) {
|
||||
case *gnmiLib.TypedValue_AsciiVal: // not handled in ToScalar
|
||||
case *gnmi.TypedValue_AsciiVal: // not handled in ToScalar
|
||||
return []updateField{{path, v.AsciiVal}}, nil
|
||||
case *gnmiLib.TypedValue_JsonVal: // requires special path handling
|
||||
case *gnmi.TypedValue_JsonVal: // requires special path handling
|
||||
return processJSON(path, v.JsonVal)
|
||||
case *gnmiLib.TypedValue_JsonIetfVal: // requires special path handling
|
||||
case *gnmi.TypedValue_JsonIetfVal: // requires special path handling
|
||||
return h.processJSONIETF(path, v.JsonIetfVal)
|
||||
}
|
||||
|
||||
// Convert the protobuf "oneof" data to a Golang type.
|
||||
value, err := gnmiValue.ToScalar(update.Val)
|
||||
nativeType, err := value.ToScalar(update.Val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return []updateField{{path, value}}, nil
|
||||
return []updateField{{path, nativeType}}, nil
|
||||
}
|
||||
|
||||
func processJSON(path *pathInfo, data []byte) ([]updateField, error) {
|
||||
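Aside (an assumption, not stated in the diff): once the gnmiValue alias is gone the package is referred to as value, so a local variable called value would shadow the package for the rest of the function; that is presumably why the result variable is renamed to nativeType in the hunk above. A minimal sketch:

package example

import (
	"github.com/openconfig/gnmi/proto/gnmi"
	"github.com/openconfig/gnmi/value"
)

func toScalar(tv *gnmi.TypedValue) (interface{}, error) {
	// Naming this variable "value" would hide the value package from this
	// point on, so a different name is used.
	nativeType, err := value.ToScalar(tv)
	if err != nil {
		return nil, err
	}
	return nativeType, nil
}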
|
|
|
|||
|
|
@ -14,7 +14,7 @@ import (
"github.com/stretchr/testify/require"

"github.com/influxdata/telegraf"
jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
parsers_json "github.com/influxdata/telegraf/plugins/parsers/json"
"github.com/influxdata/telegraf/testutil"
)
|
||||
|
|
@ -180,7 +180,7 @@ func TestRunGatherIterationWithPages(t *testing.T) {
|
|||
}
|
||||
|
||||
func createParser() telegraf.Parser {
|
||||
p := &jsonparser.Parser{
|
||||
p := &parsers_json.Parser{
|
||||
MetricName: "cpu",
|
||||
Query: "metrics",
|
||||
TagKeys: []string{"tags_datacenter", "tags_host"},
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/internal"
httpconfig "github.com/influxdata/telegraf/plugins/common/http"
common_http "github.com/influxdata/telegraf/plugins/common/http"
"github.com/influxdata/telegraf/plugins/inputs"
)
|
||||
|
|
@ -44,7 +44,7 @@ type HTTP struct {
|
|||
SuccessStatusCodes []int `toml:"success_status_codes"`
|
||||
Log telegraf.Logger `toml:"-"`
|
||||
|
||||
httpconfig.HTTPClientConfig
|
||||
common_http.HTTPClientConfig
|
||||
|
||||
client *http.Client
|
||||
parserFunc telegraf.ParserFunc
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ import (
|
|||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
httpconfig "github.com/influxdata/telegraf/plugins/common/http"
|
||||
common_http "github.com/influxdata/telegraf/plugins/common/http"
|
||||
"github.com/influxdata/telegraf/plugins/common/oauth"
|
||||
httpplugin "github.com/influxdata/telegraf/plugins/inputs/http"
|
||||
"github.com/influxdata/telegraf/plugins/parsers/csv"
|
||||
|
|
@ -349,7 +349,7 @@ func TestOAuthClientCredentialsGrant(t *testing.T) {
|
|||
name: "success",
|
||||
plugin: &httpplugin.HTTP{
|
||||
URLs: []string{u.String() + "/write"},
|
||||
HTTPClientConfig: httpconfig.HTTPClientConfig{
|
||||
HTTPClientConfig: common_http.HTTPClientConfig{
|
||||
OAuth2Config: oauth.OAuth2Config{
|
||||
ClientID: "howdy",
|
||||
ClientSecret: "secret",
|
||||
|
|
|
|||
|
|
@ -27,7 +27,7 @@ import (
|
|||
"github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/internal/choice"
|
||||
tlsint "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
|
|
@ -69,7 +69,7 @@ type HTTPListenerV2 struct {
|
|||
BasicPassword string `toml:"basic_password"`
|
||||
HTTPHeaderTags map[string]string `toml:"http_header_tags"`
|
||||
|
||||
tlsint.ServerConfig
|
||||
common_tls.ServerConfig
|
||||
tlsConf *tls.Config
|
||||
|
||||
TimeFunc
|
||||
|
|
|
|||
|
|
@ -3,9 +3,11 @@
package infiniband

import (
"github.com/Mellanox/rdmamap"
"github.com/influxdata/telegraf/testutil"
"testing"

"github.com/Mellanox/rdmamap"

"github.com/influxdata/telegraf/testutil"
)

func TestInfiniband(t *testing.T) {
|
|
|
|||
|
|
@ -16,7 +16,7 @@ import (
|
|||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
tlsint "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdata/telegraf/plugins/parsers/influx"
|
||||
"github.com/influxdata/telegraf/plugins/parsers/influx/influx_upstream"
|
||||
|
|
@ -35,7 +35,7 @@ const (
|
|||
type InfluxDBListener struct {
|
||||
ServiceAddress string `toml:"service_address"`
|
||||
port int
|
||||
tlsint.ServerConfig
|
||||
common_tls.ServerConfig
|
||||
|
||||
ReadTimeout config.Duration `toml:"read_timeout"`
|
||||
WriteTimeout config.Duration `toml:"write_timeout"`
|
||||
|
|
|
|||
|
|
@ -20,7 +20,7 @@ import (
|
|||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
tlsint "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdata/telegraf/plugins/parsers/influx"
|
||||
"github.com/influxdata/telegraf/plugins/parsers/influx/influx_upstream"
|
||||
|
|
@ -52,7 +52,7 @@ const (
|
|||
type InfluxDBV2Listener struct {
|
||||
ServiceAddress string `toml:"service_address"`
|
||||
port int
|
||||
tlsint.ServerConfig
|
||||
common_tls.ServerConfig
|
||||
|
||||
MaxUndeliveredMetrics int `toml:"max_undelivered_metrics"`
|
||||
ReadTimeout config.Duration `toml:"read_timeout"`
|
||||
|
|
|
|||
|
|
@ -10,7 +10,7 @@ import (
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"

"github.com/influxdata/telegraf/plugins/inputs/intel_baseband/mock"
mocks "github.com/influxdata/telegraf/plugins/inputs/intel_baseband/mock"
)

func TestWriteCommandToSocket(t *testing.T) {
|
|
|
|||
|
|
@ -13,8 +13,8 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
ptel "github.com/intel/powertelemetry"
|
||||
cpuUtil "github.com/shirou/gopsutil/v3/cpu"
|
||||
"github.com/intel/powertelemetry"
|
||||
"github.com/shirou/gopsutil/v3/cpu"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
|
|
@ -90,8 +90,8 @@ func (p *PowerStat) Start(_ telegraf.Accumulator) error {
|
|||
})
|
||||
|
||||
var err error
|
||||
var initErr *ptel.MultiError
|
||||
p.fetcher, err = ptel.New(opts...)
|
||||
var initErr *powertelemetry.MultiError
|
||||
p.fetcher, err = powertelemetry.New(opts...)
|
||||
if err != nil {
|
||||
if !errors.As(err, &initErr) {
|
||||
// Error caused by failing to get information about the CPU, or CPU is not supported.
|
||||
|
|
@ -441,7 +441,7 @@ func (p *PowerStat) addPerCPUMsrMetrics(acc telegraf.Accumulator, cpuID, coreID,
|
|||
}
|
||||
|
||||
// Read several time-related MSR offsets.
|
||||
var moduleErr *ptel.ModuleNotInitializedError
|
||||
var moduleErr *powertelemetry.ModuleNotInitializedError
|
||||
err := p.fetcher.UpdatePerCPUMetrics(cpuID)
|
||||
if err == nil {
|
||||
// Add time-related MSR offset metrics to the accumulator
|
||||
|
|
@ -490,7 +490,7 @@ func (p *PowerStat) addCPUTimeRelatedMsrMetrics(acc telegraf.Accumulator, cpuID,
|
|||
|
||||
// addCPUPerfMetrics takes an accumulator, and adds to it enabled metrics which rely on perf.
|
||||
func (p *PowerStat) addCPUPerfMetrics(acc telegraf.Accumulator) {
|
||||
var moduleErr *ptel.ModuleNotInitializedError
|
||||
var moduleErr *powertelemetry.ModuleNotInitializedError
|
||||
|
||||
// Read events related to perf-related metrics.
|
||||
err := p.fetcher.ReadPerfEvents()
|
||||
|
|
@ -921,7 +921,7 @@ func (p *PowerStat) addUncoreFrequencyInitialLimits(acc telegraf.Accumulator, pa
|
|||
}
|
||||
|
||||
// Always add to the accumulator errors not related to module not initialized.
|
||||
var moduleErr *ptel.ModuleNotInitializedError
|
||||
var moduleErr *powertelemetry.ModuleNotInitializedError
|
||||
if !errors.As(err, &moduleErr) {
|
||||
acc.AddError(fmt.Errorf("failed to get initial uncore frequency limits for package ID %v and die ID %v: %w", packageID, dieID, err))
|
||||
return
|
||||
|
|
@ -961,7 +961,7 @@ func (p *PowerStat) addUncoreFrequencyCurrentValues(acc telegraf.Accumulator, pa
|
|||
}
|
||||
|
||||
// Always add to the accumulator errors not related to module not initialized.
|
||||
var moduleErr *ptel.ModuleNotInitializedError
|
||||
var moduleErr *powertelemetry.ModuleNotInitializedError
|
||||
if !errors.As(err, &moduleErr) {
|
||||
acc.AddError(fmt.Errorf("failed to get current uncore frequency values for package ID %v and die ID %v: %w", packageID, dieID, err))
|
||||
return
|
||||
|
|
@ -1024,7 +1024,7 @@ func getUncoreFreqCurrentValues(fetcher metricFetcher, packageID, dieID int) (un
|
|||
|
||||
// addMaxTurboFreqLimits fetches the max turbo frequency limits metric for a given package ID, and adds it to the accumulator.
|
||||
func (p *PowerStat) addMaxTurboFreqLimits(acc telegraf.Accumulator, packageID int) {
|
||||
var moduleErr *ptel.ModuleNotInitializedError
|
||||
var moduleErr *powertelemetry.ModuleNotInitializedError
|
||||
|
||||
turboFreqList, err := p.fetcher.GetMaxTurboFreqList(packageID)
|
||||
if err != nil {
|
||||
|
|
@ -1076,7 +1076,7 @@ func (p *PowerStat) addMaxTurboFreqLimits(acc telegraf.Accumulator, packageID in
|
|||
|
||||
// isHybridCPU is a helper function that takes a slice of MaxTurboFreq structs and returns true if the CPU where these values belong to,
|
||||
// is a hybrid CPU. Otherwise, returns false.
|
||||
func isHybridCPU(turboFreqList []ptel.MaxTurboFreq) bool {
|
||||
func isHybridCPU(turboFreqList []powertelemetry.MaxTurboFreq) bool {
|
||||
for _, v := range turboFreqList {
|
||||
if v.Secondary {
|
||||
return true
|
||||
|
|
@ -1089,7 +1089,7 @@ func isHybridCPU(turboFreqList []ptel.MaxTurboFreq) bool {
|
|||
// In case it is not, disableUnsupportedMetrics will disable the option to gather those metrics.
|
||||
// Error is returned if there is an issue with retrieving processor information.
|
||||
func (p *PowerStat) disableUnsupportedMetrics() error {
|
||||
cpus, err := cpuUtil.Info()
|
||||
cpus, err := cpu.Info()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error occurred while parsing CPU information: %w", err)
|
||||
}
|
||||
|
|
@ -1104,27 +1104,27 @@ func (p *PowerStat) disableUnsupportedMetrics() error {
|
|||
return fmt.Errorf("error occurred while parsing CPU model: %w", err)
|
||||
}
|
||||
|
||||
if err := ptel.CheckIfCPUC1StateResidencySupported(cpuModel); err != nil {
|
||||
if err := powertelemetry.CheckIfCPUC1StateResidencySupported(cpuModel); err != nil {
|
||||
p.disableCPUMetric(cpuC1StateResidency)
|
||||
}
|
||||
|
||||
if err := ptel.CheckIfCPUC3StateResidencySupported(cpuModel); err != nil {
|
||||
if err := powertelemetry.CheckIfCPUC3StateResidencySupported(cpuModel); err != nil {
|
||||
p.disableCPUMetric(cpuC3StateResidency)
|
||||
}
|
||||
|
||||
if err := ptel.CheckIfCPUC6StateResidencySupported(cpuModel); err != nil {
|
||||
if err := powertelemetry.CheckIfCPUC6StateResidencySupported(cpuModel); err != nil {
|
||||
p.disableCPUMetric(cpuC6StateResidency)
|
||||
}
|
||||
|
||||
if err := ptel.CheckIfCPUC7StateResidencySupported(cpuModel); err != nil {
|
||||
if err := powertelemetry.CheckIfCPUC7StateResidencySupported(cpuModel); err != nil {
|
||||
p.disableCPUMetric(cpuC7StateResidency)
|
||||
}
|
||||
|
||||
if err := ptel.CheckIfCPUTemperatureSupported(cpuModel); err != nil {
|
||||
if err := powertelemetry.CheckIfCPUTemperatureSupported(cpuModel); err != nil {
|
||||
p.disableCPUMetric(cpuTemperature)
|
||||
}
|
||||
|
||||
if err := ptel.CheckIfCPUBaseFrequencySupported(cpuModel); err != nil {
|
||||
if err := powertelemetry.CheckIfCPUBaseFrequencySupported(cpuModel); err != nil {
|
||||
p.disablePackageMetric(packageCPUBaseFrequency)
|
||||
}
|
||||
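For context, the repeated `var moduleErr *powertelemetry.ModuleNotInitializedError` / `errors.As` pairs in this file use the standard typed-error matching pattern; a small sketch, assuming only a boolean check is needed:

package example

import (
	"errors"

	"github.com/intel/powertelemetry"
)

func isModuleNotInitialized(err error) bool {
	// errors.As walks the wrapped error chain and fills moduleErr on a match.
	var moduleErr *powertelemetry.ModuleNotInitializedError
	return errors.As(err, &moduleErr)
}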
|
||||
|
|
|
|||
|
|
@ -22,7 +22,7 @@ import (
|
|||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
internaltls "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
authentication "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/auth"
|
||||
telemetry "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/oc"
|
||||
|
|
@ -43,7 +43,7 @@ type OpenConfigTelemetry struct {
|
|||
RetryDelay config.Duration `toml:"retry_delay"`
|
||||
EnableTLS bool `toml:"enable_tls"`
|
||||
KeepAlivePeriod config.Duration `toml:"keep_alive_period"`
|
||||
internaltls.ClientConfig
|
||||
common_tls.ClientConfig
|
||||
|
||||
Log telegraf.Logger
|
||||
|
||||
|
|
|
|||
|
|
@ -18,10 +18,10 @@ import (
|
|||
"github.com/influxdata/telegraf/plugins/common/kafka"
|
||||
"github.com/influxdata/telegraf/plugins/common/tls"
|
||||
"github.com/influxdata/telegraf/plugins/outputs"
|
||||
kafkaOutput "github.com/influxdata/telegraf/plugins/outputs/kafka"
|
||||
outputs_kafka "github.com/influxdata/telegraf/plugins/outputs/kafka"
|
||||
"github.com/influxdata/telegraf/plugins/parsers/influx"
|
||||
"github.com/influxdata/telegraf/plugins/parsers/value"
|
||||
influxSerializer "github.com/influxdata/telegraf/plugins/serializers/influx"
|
||||
serializers_influx "github.com/influxdata/telegraf/plugins/serializers/influx"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
)
|
||||
|
||||
|
|
@ -500,10 +500,10 @@ func TestKafkaRoundTripIntegration(t *testing.T) {
|
|||
// Make kafka output
|
||||
t.Logf("rt: starting output plugin")
|
||||
creator := outputs.Outputs["kafka"]
|
||||
output, ok := creator().(*kafkaOutput.Kafka)
|
||||
output, ok := creator().(*outputs_kafka.Kafka)
|
||||
require.True(t, ok)
|
||||
|
||||
s := &influxSerializer.Serializer{}
|
||||
s := &serializers_influx.Serializer{}
|
||||
require.NoError(t, s.Init())
|
||||
output.SetSerializer(s)
|
||||
output.Brokers = brokers
|
||||
|
|
@ -576,10 +576,10 @@ func TestKafkaTimestampSourceIntegration(t *testing.T) {
|
|||
|
||||
// Make kafka output
|
||||
creator := outputs.Outputs["kafka"]
|
||||
output, ok := creator().(*kafkaOutput.Kafka)
|
||||
output, ok := creator().(*outputs_kafka.Kafka)
|
||||
require.True(t, ok)
|
||||
|
||||
s := &influxSerializer.Serializer{}
|
||||
s := &serializers_influx.Serializer{}
|
||||
require.NoError(t, s.Init())
|
||||
output.SetSerializer(s)
|
||||
output.Brokers = brokers
|
||||
|
|
|
|||
|
|
@ -15,7 +15,7 @@ import (
|
|||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
httpconfig "github.com/influxdata/telegraf/plugins/common/http"
|
||||
common_http "github.com/influxdata/telegraf/plugins/common/http"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
|
|
@ -95,12 +95,12 @@ type Kibana struct {
|
|||
Log telegraf.Logger `toml:"-"`
|
||||
|
||||
client *http.Client
|
||||
httpconfig.HTTPClientConfig
|
||||
common_http.HTTPClientConfig
|
||||
}
|
||||
|
||||
func NewKibana() *Kibana {
|
||||
return &Kibana{
|
||||
HTTPClientConfig: httpconfig.HTTPClientConfig{
|
||||
HTTPClientConfig: common_http.HTTPClientConfig{
|
||||
Timeout: config.Duration(5 * time.Second),
|
||||
},
|
||||
}
|
||||
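The Kibana hunk shows the pattern used across these plugins: the shared HTTP client options are embedded by value and seeded with defaults in the constructor. A hedged sketch with an invented plugin name:

package example

import (
	"time"

	"github.com/influxdata/telegraf/config"
	common_http "github.com/influxdata/telegraf/plugins/common/http"
)

type ExamplePlugin struct {
	Servers []string `toml:"servers"`

	// Embedding exposes the common HTTP client options directly on the plugin.
	common_http.HTTPClientConfig
}

func newExamplePlugin() *ExamplePlugin {
	return &ExamplePlugin{
		HTTPClientConfig: common_http.HTTPClientConfig{
			Timeout: config.Duration(5 * time.Second),
		},
	}
}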
|
|
|
|||
|
|
@ -24,7 +24,7 @@ import (
|
|||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
internalaws "github.com/influxdata/telegraf/plugins/common/aws"
|
||||
common_aws "github.com/influxdata/telegraf/plugins/common/aws"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
|
|
@ -65,7 +65,7 @@ type (
|
|||
|
||||
lastSeqNum *big.Int
|
||||
|
||||
internalaws.CredentialConfig
|
||||
common_aws.CredentialConfig
|
||||
}
|
||||
|
||||
checkpoint struct {
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@ package kube_inventory
|
|||
import (
|
||||
"context"
|
||||
|
||||
v1 "k8s.io/api/apps/v1"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
)
|
||||
|
|
@ -19,7 +19,7 @@ func collectDaemonSets(ctx context.Context, acc telegraf.Accumulator, ki *Kubern
|
|||
}
|
||||
}
|
||||
|
||||
func (ki *KubernetesInventory) gatherDaemonSet(d *v1.DaemonSet, acc telegraf.Accumulator) {
|
||||
func (ki *KubernetesInventory) gatherDaemonSet(d *apps.DaemonSet, acc telegraf.Accumulator) {
|
||||
fields := map[string]interface{}{
|
||||
"generation": d.Generation,
|
||||
"current_number_scheduled": d.Status.CurrentNumberScheduled,
|
||||
|
|
|
|||
|
|
@ -6,8 +6,8 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
v1 "k8s.io/api/apps/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
|
|
@ -29,7 +29,7 @@ func TestDaemonSet(t *testing.T) {
|
|||
name: "no daemon set",
|
||||
handler: &mockHandler{
|
||||
responseMap: map[string]interface{}{
|
||||
"/daemonsets/": &v1.DaemonSetList{},
|
||||
"/daemonsets/": &apps.DaemonSetList{},
|
||||
},
|
||||
},
|
||||
hasError: false,
|
||||
|
|
@ -38,10 +38,10 @@ func TestDaemonSet(t *testing.T) {
|
|||
name: "collect daemonsets",
|
||||
handler: &mockHandler{
|
||||
responseMap: map[string]interface{}{
|
||||
"/daemonsets/": &v1.DaemonSetList{
|
||||
Items: []v1.DaemonSet{
|
||||
"/daemonsets/": &apps.DaemonSetList{
|
||||
Items: []apps.DaemonSet{
|
||||
{
|
||||
Status: v1.DaemonSetStatus{
|
||||
Status: apps.DaemonSetStatus{
|
||||
CurrentNumberScheduled: 3,
|
||||
DesiredNumberScheduled: 5,
|
||||
NumberAvailable: 2,
|
||||
|
|
@ -50,7 +50,7 @@ func TestDaemonSet(t *testing.T) {
|
|||
NumberUnavailable: 1,
|
||||
UpdatedNumberScheduled: 2,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
ObjectMeta: meta.ObjectMeta{
|
||||
Generation: 11221,
|
||||
Namespace: "ns1",
|
||||
Name: "daemon1",
|
||||
|
|
@ -58,10 +58,10 @@ func TestDaemonSet(t *testing.T) {
|
|||
"lab1": "v1",
|
||||
"lab2": "v2",
|
||||
},
|
||||
CreationTimestamp: metav1.Time{Time: now},
|
||||
CreationTimestamp: meta.Time{Time: now},
|
||||
},
|
||||
Spec: v1.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &meta.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"select1": "s1",
|
||||
"select2": "s2",
|
||||
|
|
@ -108,7 +108,7 @@ func TestDaemonSet(t *testing.T) {
|
|||
}
|
||||
require.NoError(t, ks.createSelectorFilters())
|
||||
acc := new(testutil.Accumulator)
|
||||
items := ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items
|
||||
items := ((v.handler.responseMap["/daemonsets/"]).(*apps.DaemonSetList)).Items
|
||||
for i := range items {
|
||||
ks.gatherDaemonSet(&items[i], acc)
|
||||
}
|
||||
|
|
@ -131,10 +131,10 @@ func TestDaemonSetSelectorFilter(t *testing.T) {
|
|||
cli := &client{}
|
||||
|
||||
responseMap := map[string]interface{}{
|
||||
"/daemonsets/": &v1.DaemonSetList{
|
||||
Items: []v1.DaemonSet{
|
||||
"/daemonsets/": &apps.DaemonSetList{
|
||||
Items: []apps.DaemonSet{
|
||||
{
|
||||
Status: v1.DaemonSetStatus{
|
||||
Status: apps.DaemonSetStatus{
|
||||
CurrentNumberScheduled: 3,
|
||||
DesiredNumberScheduled: 5,
|
||||
NumberAvailable: 2,
|
||||
|
|
@ -143,7 +143,7 @@ func TestDaemonSetSelectorFilter(t *testing.T) {
|
|||
NumberUnavailable: 1,
|
||||
UpdatedNumberScheduled: 2,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
ObjectMeta: meta.ObjectMeta{
|
||||
Generation: 11221,
|
||||
Namespace: "ns1",
|
||||
Name: "daemon1",
|
||||
|
|
@ -151,10 +151,10 @@ func TestDaemonSetSelectorFilter(t *testing.T) {
|
|||
"lab1": "v1",
|
||||
"lab2": "v2",
|
||||
},
|
||||
CreationTimestamp: metav1.Time{Time: time.Now()},
|
||||
CreationTimestamp: meta.Time{Time: time.Now()},
|
||||
},
|
||||
Spec: v1.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &meta.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"select1": "s1",
|
||||
"select2": "s2",
|
||||
|
|
@ -269,7 +269,7 @@ func TestDaemonSetSelectorFilter(t *testing.T) {
|
|||
ks.SelectorExclude = v.exclude
|
||||
require.NoError(t, ks.createSelectorFilters())
|
||||
acc := new(testutil.Accumulator)
|
||||
items := ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items
|
||||
items := ((v.handler.responseMap["/daemonsets/"]).(*apps.DaemonSetList)).Items
|
||||
for i := range items {
|
||||
ks.gatherDaemonSet(&items[i], acc)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@ package kube_inventory
|
|||
import (
|
||||
"context"
|
||||
|
||||
v1 "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/apps/v1"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
)
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
v1 "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/apps/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@ package kube_inventory
|
|||
import (
|
||||
"context"
|
||||
|
||||
v1 "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/apps/v1"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
)
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
v1 "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/apps/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
|
|
|
|||
|
|
@ -12,7 +12,7 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
|
|
|
|||
|
|
@ -13,7 +13,7 @@ import (
|
|||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
commontls "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
|
|
@ -26,7 +26,7 @@ type LDAP struct {
|
|||
BindDn string `toml:"bind_dn"`
|
||||
BindPassword config.Secret `toml:"bind_password"`
|
||||
ReverseFieldNames bool `toml:"reverse_field_names"`
|
||||
commontls.ClientConfig
|
||||
common_tls.ClientConfig
|
||||
|
||||
tlsCfg *tls.Config
|
||||
requests []request
|
||||
|
|
|
|||
|
|
@ -12,7 +12,7 @@ import (
|
|||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/metric"
|
||||
commontls "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
)
|
||||
|
||||
|
|
@ -312,7 +312,7 @@ func TestOpenLDAPStartTLSIntegration(t *testing.T) {
|
|||
Server: "starttls://" + container.Address + ":" + port,
|
||||
BindDn: "CN=manager,DC=example,DC=org",
|
||||
BindPassword: config.NewSecret([]byte("secret")),
|
||||
ClientConfig: commontls.ClientConfig{
|
||||
ClientConfig: common_tls.ClientConfig{
|
||||
TLSCA: pkiPaths.ClientCert,
|
||||
InsecureSkipVerify: true,
|
||||
},
|
||||
|
|
@ -420,7 +420,7 @@ func TestOpenLDAPLDAPSIntegration(t *testing.T) {
|
|||
Server: "ldaps://" + container.Address + ":" + port,
|
||||
BindDn: "CN=manager,DC=example,DC=org",
|
||||
BindPassword: config.NewSecret([]byte("secret")),
|
||||
ClientConfig: commontls.ClientConfig{
|
||||
ClientConfig: common_tls.ClientConfig{
|
||||
InsecureSkipVerify: true,
|
||||
},
|
||||
}
|
||||
|
|
|
|||
|
|
@ -15,9 +15,9 @@ import (
|
|||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
"github.com/influxdata/telegraf/internal/choice"
|
||||
httpconfig "github.com/influxdata/telegraf/plugins/common/http"
|
||||
common_http "github.com/influxdata/telegraf/plugins/common/http"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
jsonParser "github.com/influxdata/telegraf/plugins/parsers/json"
|
||||
parsers_json "github.com/influxdata/telegraf/plugins/parsers/json"
|
||||
)
|
||||
|
||||
//go:embed sample.conf
|
||||
|
|
@ -43,7 +43,7 @@ type Logstash struct {
|
|||
Log telegraf.Logger `toml:"-"`
|
||||
|
||||
client *http.Client
|
||||
httpconfig.HTTPClientConfig
|
||||
common_http.HTTPClientConfig
|
||||
}
|
||||
|
||||
type processStats struct {
|
||||
|
|
@ -183,7 +183,7 @@ func (logstash *Logstash) gatherJVMStats(address string, accumulator telegraf.Ac
|
|||
"source": jvmStats.Host,
|
||||
}
|
||||
|
||||
flattener := jsonParser.JSONFlattener{}
|
||||
flattener := parsers_json.JSONFlattener{}
|
||||
err = flattener.FlattenJSON("", jvmStats.JVM)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -209,7 +209,7 @@ func (logstash *Logstash) gatherProcessStats(address string, accumulator telegra
|
|||
"source": processStats.Host,
|
||||
}
|
||||
|
||||
flattener := jsonParser.JSONFlattener{}
|
||||
flattener := parsers_json.JSONFlattener{}
|
||||
err = flattener.FlattenJSON("", processStats.Process)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -235,7 +235,7 @@ func (logstash *Logstash) gatherPluginsStats(
|
|||
for tag, value := range tags {
|
||||
pluginTags[tag] = value
|
||||
}
|
||||
flattener := jsonParser.JSONFlattener{}
|
||||
flattener := parsers_json.JSONFlattener{}
|
||||
err := flattener.FlattenJSON("", plugin.Events)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -264,7 +264,7 @@ func (logstash *Logstash) gatherPluginsStats(
|
|||
"with_errors": 9089
|
||||
},
|
||||
*/
|
||||
flattener := jsonParser.JSONFlattener{}
|
||||
flattener := parsers_json.JSONFlattener{}
|
||||
err := flattener.FlattenJSON("", plugin.BulkRequests)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -287,7 +287,7 @@ func (logstash *Logstash) gatherPluginsStats(
|
|||
"retryable_failures": 13733
|
||||
}
|
||||
*/
|
||||
flattener = jsonParser.JSONFlattener{}
|
||||
flattener = parsers_json.JSONFlattener{}
|
||||
err = flattener.FlattenJSON("", plugin.Documents)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -325,7 +325,7 @@ func (logstash *Logstash) gatherQueueStats(queue pipelineQueue, tags map[string]
|
|||
}
|
||||
|
||||
if queue.Type != "memory" {
|
||||
flattener := jsonParser.JSONFlattener{}
|
||||
flattener := parsers_json.JSONFlattener{}
|
||||
err := flattener.FlattenJSON("", queue.Capacity)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -368,7 +368,7 @@ func (logstash *Logstash) gatherPipelineStats(address string, accumulator telegr
|
|||
"source": pipelineStats.Host,
|
||||
}
|
||||
|
||||
flattener := jsonParser.JSONFlattener{}
|
||||
flattener := parsers_json.JSONFlattener{}
|
||||
err = flattener.FlattenJSON("", pipelineStats.Pipeline.Events)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -414,7 +414,7 @@ func (logstash *Logstash) gatherPipelinesStats(address string, accumulator teleg
|
|||
"source": pipelinesStats.Host,
|
||||
}
|
||||
|
||||
flattener := jsonParser.JSONFlattener{}
|
||||
flattener := parsers_json.JSONFlattener{}
|
||||
err := flattener.FlattenJSON("", pipeline.Events)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -520,7 +520,7 @@ func newLogstash() *Logstash {
|
|||
URL: "http://127.0.0.1:9600",
|
||||
Collect: []string{"pipelines", "process", "jvm"},
|
||||
Headers: make(map[string]string),
|
||||
HTTPClientConfig: httpconfig.HTTPClientConfig{
|
||||
HTTPClientConfig: common_http.HTTPClientConfig{
|
||||
Timeout: config.Duration(5 * time.Second),
|
||||
},
|
||||
}
|
||||
|
|
|
|||
|
|
@ -15,7 +15,7 @@ import (
"golang.org/x/net/proxy"

"github.com/influxdata/telegraf"
tlsint "github.com/influxdata/telegraf/plugins/common/tls"
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
|
||||
|
|
@ -27,7 +27,7 @@ type Memcached struct {
|
|||
Servers []string `toml:"servers"`
|
||||
UnixSockets []string `toml:"unix_sockets"`
|
||||
EnableTLS bool `toml:"enable_tls"`
|
||||
tlsint.ClientConfig
|
||||
common_tls.ClientConfig
|
||||
}
|
||||
|
||||
var defaultTimeout = 5 * time.Second
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
parsers_json "github.com/influxdata/telegraf/plugins/parsers/json"
)

//go:embed sample.conf
|
|
@ -525,7 +525,7 @@ func (m *Mesos) gatherMainMetrics(u *url.URL, role Role, acc telegraf.Accumulato
|
|||
|
||||
m.filterMetrics(role, &jsonOut)
|
||||
|
||||
jf := jsonparser.JSONFlattener{}
|
||||
jf := parsers_json.JSONFlattener{}
|
||||
|
||||
err = jf.FlattenJSON("", jsonOut)
|
||||
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ import (
|
|||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal/choice"
|
||||
tlsint "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
|
|
@ -37,7 +37,7 @@ type MongoDB struct {
|
|||
GatherTopStat bool
|
||||
DisconnectedServersBehavior string
|
||||
ColStatsDbs []string
|
||||
tlsint.ClientConfig
|
||||
common_tls.ClientConfig
|
||||
|
||||
Log telegraf.Logger `toml:"-"`
|
||||
|
||||
|
|
|
|||
|
|
@ -20,8 +20,8 @@ import (
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
v1 "github.com/influxdata/telegraf/plugins/inputs/mysql/v1"
v2 "github.com/influxdata/telegraf/plugins/inputs/mysql/v2"
"github.com/influxdata/telegraf/plugins/inputs/mysql/v1"
"github.com/influxdata/telegraf/plugins/inputs/mysql/v2"
)

//go:embed sample.conf
|
|
|
|||
|
|
@ -18,7 +18,7 @@ import (
|
|||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
influxtls "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
|
|
@ -37,7 +37,7 @@ type OpensearchQuery struct {
|
|||
|
||||
Log telegraf.Logger `toml:"-"`
|
||||
|
||||
influxtls.ClientConfig
|
||||
common_tls.ClientConfig
|
||||
osClient *opensearch.Client
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -44,7 +44,7 @@ import (
|
|||
"github.com/gophercloud/gophercloud/v2/openstack/orchestration/v1/stacks"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
httpconfig "github.com/influxdata/telegraf/plugins/common/http"
|
||||
common_http "github.com/influxdata/telegraf/plugins/common/http"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
|
|
@ -74,7 +74,7 @@ type OpenStack struct {
|
|||
MeasureRequest bool `toml:"measure_openstack_requests"`
|
||||
AllTenants bool `toml:"query_all_tenants"`
|
||||
Log telegraf.Logger `toml:"-"`
|
||||
httpconfig.HTTPClientConfig
|
||||
common_http.HTTPClientConfig
|
||||
|
||||
client *http.Client
|
||||
|
||||
|
|
|
|||
|
|
@ -27,7 +27,6 @@ import (
|
|||
"github.com/influxdata/influxdb-observability/otel2influx"
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/config"
|
||||
tmetric "github.com/influxdata/telegraf/metric"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdata/telegraf/plugins/parsers/influx"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
|
|
@ -86,7 +85,7 @@ func TestOpenTelemetry(t *testing.T) {
|
|||
exesuffix = ".exe"
|
||||
}
|
||||
expected := []telegraf.Metric{
|
||||
tmetric.New(
|
||||
testutil.MustMetric(
|
||||
"measurement-counter",
|
||||
map[string]string{
|
||||
"otel.library.name": "library-name",
|
||||
|
|
|
|||
|
|
@ -11,14 +11,14 @@ import (
|
|||
"slices"
|
||||
"sync"
|
||||
|
||||
p4ConfigV1 "github.com/p4lang/p4runtime/go/p4/config/v1"
|
||||
p4v1 "github.com/p4lang/p4runtime/go/p4/v1"
|
||||
p4_config "github.com/p4lang/p4runtime/go/p4/config/v1"
|
||||
p4 "github.com/p4lang/p4runtime/go/p4/v1"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
internaltls "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
|
|
@ -36,10 +36,10 @@ type P4runtime struct {
|
|||
CounterNamesInclude []string `toml:"counter_names_include"`
|
||||
Log telegraf.Logger `toml:"-"`
|
||||
EnableTLS bool `toml:"enable_tls"`
|
||||
internaltls.ClientConfig
|
||||
common_tls.ClientConfig
|
||||
|
||||
conn *grpc.ClientConn
|
||||
client p4v1.P4RuntimeClient
|
||||
client p4.P4RuntimeClient
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
|
|
@ -75,7 +75,7 @@ func (p *P4runtime) Gather(acc telegraf.Accumulator) error {
|
|||
|
||||
for _, counter := range filteredCounters {
|
||||
p.wg.Add(1)
|
||||
go func(counter *p4ConfigV1.Counter) {
|
||||
go func(counter *p4_config.Counter) {
|
||||
defer p.wg.Done()
|
||||
entries, err := p.readAllEntries(counter.Preamble.Id)
|
||||
if err != nil {
|
||||
|
|
@ -136,10 +136,10 @@ func initConnection(endpoint string, tlscfg *tls.Config) (*grpc.ClientConn, erro
|
|||
return grpc.NewClient(endpoint, grpc.WithTransportCredentials(creds))
|
||||
}
|
||||
|
||||
func (p *P4runtime) getP4Info() (*p4ConfigV1.P4Info, error) {
|
||||
req := &p4v1.GetForwardingPipelineConfigRequest{
|
||||
func (p *P4runtime) getP4Info() (*p4_config.P4Info, error) {
|
||||
req := &p4.GetForwardingPipelineConfigRequest{
|
||||
DeviceId: p.DeviceID,
|
||||
ResponseType: p4v1.GetForwardingPipelineConfigRequest_ALL,
|
||||
ResponseType: p4.GetForwardingPipelineConfigRequest_ALL,
|
||||
}
|
||||
resp, err := p.client.GetForwardingPipelineConfig(context.Background(), req)
|
||||
if err != nil {
|
||||
|
|
@ -165,12 +165,12 @@ func (p *P4runtime) getP4Info() (*p4ConfigV1.P4Info, error) {
|
|||
return p4info, nil
|
||||
}
|
||||
|
||||
func filterCounters(counters []*p4ConfigV1.Counter, counterNamesInclude []string) []*p4ConfigV1.Counter {
|
||||
func filterCounters(counters []*p4_config.Counter, counterNamesInclude []string) []*p4_config.Counter {
|
||||
if len(counterNamesInclude) == 0 {
|
||||
return counters
|
||||
}
|
||||
|
||||
var filteredCounters []*p4ConfigV1.Counter
|
||||
var filteredCounters []*p4_config.Counter
|
||||
for _, counter := range counters {
|
||||
if counter == nil {
|
||||
continue
|
||||
|
|
@ -197,16 +197,16 @@ func (p *P4runtime) newP4RuntimeClient() error {
|
|||
return fmt.Errorf("cannot connect to the server: %w", err)
|
||||
}
|
||||
p.conn = conn
|
||||
p.client = p4v1.NewP4RuntimeClient(conn)
|
||||
p.client = p4.NewP4RuntimeClient(conn)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *P4runtime) readAllEntries(counterID uint32) ([]*p4v1.Entity, error) {
|
||||
readRequest := &p4v1.ReadRequest{
|
||||
func (p *P4runtime) readAllEntries(counterID uint32) ([]*p4.Entity, error) {
|
||||
readRequest := &p4.ReadRequest{
|
||||
DeviceId: p.DeviceID,
|
||||
Entities: []*p4v1.Entity{{
|
||||
Entity: &p4v1.Entity_CounterEntry{
|
||||
CounterEntry: &p4v1.CounterEntry{
|
||||
Entities: []*p4.Entity{{
|
||||
Entity: &p4.Entity_CounterEntry{
|
||||
CounterEntry: &p4.CounterEntry{
|
||||
CounterId: counterID}}}}}
|
||||
|
||||
stream, err := p.client.Read(context.Background(), readRequest)
|
||||
|
|
|
|||
|
|
@ -3,49 +3,49 @@ package p4runtime
|
|||
import (
|
||||
"context"
|
||||
|
||||
p4v1 "github.com/p4lang/p4runtime/go/p4/v1"
|
||||
p4 "github.com/p4lang/p4runtime/go/p4/v1"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
type fakeP4RuntimeClient struct {
|
||||
writeFn func(
|
||||
ctx context.Context,
|
||||
in *p4v1.WriteRequest,
|
||||
in *p4.WriteRequest,
|
||||
opts ...grpc.CallOption,
|
||||
) (*p4v1.WriteResponse, error)
|
||||
) (*p4.WriteResponse, error)
|
||||
|
||||
readFn func(
|
||||
in *p4v1.ReadRequest,
|
||||
) (p4v1.P4Runtime_ReadClient, error)
|
||||
in *p4.ReadRequest,
|
||||
) (p4.P4Runtime_ReadClient, error)
|
||||
|
||||
setForwardingPipelineConfigFn func(
|
||||
ctx context.Context,
|
||||
in *p4v1.SetForwardingPipelineConfigRequest,
|
||||
in *p4.SetForwardingPipelineConfigRequest,
|
||||
opts ...grpc.CallOption,
|
||||
) (*p4v1.SetForwardingPipelineConfigResponse, error)
|
||||
) (*p4.SetForwardingPipelineConfigResponse, error)
|
||||
|
||||
getForwardingPipelineConfigFn func() (*p4v1.GetForwardingPipelineConfigResponse, error)
|
||||
getForwardingPipelineConfigFn func() (*p4.GetForwardingPipelineConfigResponse, error)
|
||||
|
||||
streamChannelFn func(
|
||||
ctx context.Context,
|
||||
opts ...grpc.CallOption,
|
||||
) (p4v1.P4Runtime_StreamChannelClient, error)
|
||||
) (p4.P4Runtime_StreamChannelClient, error)
|
||||
|
||||
capabilitiesFn func(
|
||||
ctx context.Context,
|
||||
in *p4v1.CapabilitiesRequest,
|
||||
in *p4.CapabilitiesRequest,
|
||||
opts ...grpc.CallOption,
|
||||
) (*p4v1.CapabilitiesResponse, error)
|
||||
) (*p4.CapabilitiesResponse, error)
|
||||
}
|
||||
|
||||
// fakeP4RuntimeClient implements the p4v1.P4RuntimeClient interface
|
||||
var _ p4v1.P4RuntimeClient = &fakeP4RuntimeClient{}
|
||||
// fakeP4RuntimeClient implements the v1.P4RuntimeClient interface
|
||||
var _ p4.P4RuntimeClient = &fakeP4RuntimeClient{}
|
||||
|
||||
func (c *fakeP4RuntimeClient) Write(
|
||||
ctx context.Context,
|
||||
in *p4v1.WriteRequest,
|
||||
in *p4.WriteRequest,
|
||||
opts ...grpc.CallOption,
|
||||
) (*p4v1.WriteResponse, error) {
|
||||
) (*p4.WriteResponse, error) {
|
||||
if c.writeFn == nil {
|
||||
panic("No mock defined for Write RPC")
|
||||
}
|
||||
|
|
@ -54,9 +54,9 @@ func (c *fakeP4RuntimeClient) Write(
|
|||
|
||||
func (c *fakeP4RuntimeClient) Read(
|
||||
_ context.Context,
|
||||
in *p4v1.ReadRequest,
|
||||
in *p4.ReadRequest,
|
||||
_ ...grpc.CallOption,
|
||||
) (p4v1.P4Runtime_ReadClient, error) {
|
||||
) (p4.P4Runtime_ReadClient, error) {
|
||||
if c.readFn == nil {
|
||||
panic("No mock defined for Read RPC")
|
||||
}
|
||||
|
|
@ -65,9 +65,9 @@ func (c *fakeP4RuntimeClient) Read(
|
|||
|
||||
func (c *fakeP4RuntimeClient) SetForwardingPipelineConfig(
|
||||
ctx context.Context,
|
||||
in *p4v1.SetForwardingPipelineConfigRequest,
|
||||
in *p4.SetForwardingPipelineConfigRequest,
|
||||
opts ...grpc.CallOption,
|
||||
) (*p4v1.SetForwardingPipelineConfigResponse, error) {
|
||||
) (*p4.SetForwardingPipelineConfigResponse, error) {
|
||||
if c.setForwardingPipelineConfigFn == nil {
|
||||
panic("No mock defined for SetForwardingPipelineConfig RPC")
|
||||
}
|
||||
|
|
@ -76,9 +76,9 @@ func (c *fakeP4RuntimeClient) SetForwardingPipelineConfig(
|
|||
|
||||
func (c *fakeP4RuntimeClient) GetForwardingPipelineConfig(
|
||||
context.Context,
|
||||
*p4v1.GetForwardingPipelineConfigRequest,
|
||||
*p4.GetForwardingPipelineConfigRequest,
|
||||
...grpc.CallOption,
|
||||
) (*p4v1.GetForwardingPipelineConfigResponse, error) {
|
||||
) (*p4.GetForwardingPipelineConfigResponse, error) {
|
||||
if c.getForwardingPipelineConfigFn == nil {
|
||||
panic("No mock defined for GetForwardingPipelineConfig RPC")
|
||||
}
|
||||
|
|
@ -88,7 +88,7 @@ func (c *fakeP4RuntimeClient) GetForwardingPipelineConfig(
|
|||
func (c *fakeP4RuntimeClient) StreamChannel(
|
||||
ctx context.Context,
|
||||
opts ...grpc.CallOption,
|
||||
) (p4v1.P4Runtime_StreamChannelClient, error) {
|
||||
) (p4.P4Runtime_StreamChannelClient, error) {
|
||||
if c.streamChannelFn == nil {
|
||||
panic("No mock defined for StreamChannel")
|
||||
}
|
||||
|
|
@ -97,9 +97,9 @@ func (c *fakeP4RuntimeClient) StreamChannel(
|
|||
|
||||
func (c *fakeP4RuntimeClient) Capabilities(
|
||||
ctx context.Context,
|
||||
in *p4v1.CapabilitiesRequest,
|
||||
in *p4.CapabilitiesRequest,
|
||||
opts ...grpc.CallOption,
|
||||
) (*p4v1.CapabilitiesResponse, error) {
|
||||
) (*p4.CapabilitiesResponse, error) {
|
||||
if c.capabilitiesFn == nil {
|
||||
panic("No mock defined for Capabilities RPC")
|
||||
}
|
||||
|
|
@ -108,13 +108,13 @@ func (c *fakeP4RuntimeClient) Capabilities(
|
|||
|
||||
type fakeP4RuntimeReadClient struct {
|
||||
grpc.ClientStream
|
||||
recvFn func() (*p4v1.ReadResponse, error)
|
||||
recvFn func() (*p4.ReadResponse, error)
|
||||
}
|
||||
|
||||
// fakeP4RuntimeReadClient implements the p4v1.P4Runtime_ReadClient interface
|
||||
var _ p4v1.P4Runtime_ReadClient = &fakeP4RuntimeReadClient{}
|
||||
// fakeP4RuntimeReadClient implements the v1.P4Runtime_ReadClient interface
|
||||
var _ p4.P4Runtime_ReadClient = &fakeP4RuntimeReadClient{}
|
||||
|
||||
func (c *fakeP4RuntimeReadClient) Recv() (*p4v1.ReadResponse, error) {
|
||||
func (c *fakeP4RuntimeReadClient) Recv() (*p4.ReadResponse, error) {
|
||||
if c.recvFn == nil {
|
||||
panic("No mock provided for Recv function")
|
||||
}
|
||||
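The `var _ ... = &fake...{}` lines above are the usual compile-time conformance checks; a minimal, self-contained sketch of the idiom (names invented):

package example

type reader interface {
	Read(p []byte) (int, error)
}

type fakeReader struct{}

func (fakeReader) Read([]byte) (int, error) { return 0, nil }

// Compilation fails here as soon as fakeReader stops satisfying reader.
var _ reader = fakeReader{}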
|
|
|
|||
|
|
@@ -7,8 +7,8 @@ import (
"testing"
"time"

p4ConfigV1 "github.com/p4lang/p4runtime/go/p4/config/v1"
p4v1 "github.com/p4lang/p4runtime/go/p4/v1"
p4_config "github.com/p4lang/p4runtime/go/p4/config/v1"
p4 "github.com/p4lang/p4runtime/go/p4/v1"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
@@ -21,23 +21,23 @@ import (
func createCounter(
name string,
id uint32,
unit p4ConfigV1.CounterSpec_Unit,
) *p4ConfigV1.Counter {
return &p4ConfigV1.Counter{
Preamble: &p4ConfigV1.Preamble{Name: name, Id: id},
Spec: &p4ConfigV1.CounterSpec{Unit: unit},
unit p4_config.CounterSpec_Unit,
) *p4_config.Counter {
return &p4_config.Counter{
Preamble: &p4_config.Preamble{Name: name, Id: id},
Spec: &p4_config.CounterSpec{Unit: unit},
}
}

func createEntityCounterEntry(
counterID uint32,
index int64,
data *p4v1.CounterData,
) *p4v1.Entity_CounterEntry {
return &p4v1.Entity_CounterEntry{
CounterEntry: &p4v1.CounterEntry{
data *p4.CounterData,
) *p4.Entity_CounterEntry {
return &p4.Entity_CounterEntry{
CounterEntry: &p4.CounterEntry{
CounterId: counterID,
Index: &p4v1.Index{Index: index},
Index: &p4.Index{Index: index},
Data: data,
},
}
@@ -73,20 +73,20 @@ func TestInitDefault(t *testing.T) {

func TestErrorGetP4Info(t *testing.T) {
responses := []struct {
getForwardingPipelineConfigResponse *p4v1.GetForwardingPipelineConfigResponse
getForwardingPipelineConfigResponse *p4.GetForwardingPipelineConfigResponse
getForwardingPipelineConfigResponseError error
}{
{
getForwardingPipelineConfigResponse: nil,
getForwardingPipelineConfigResponseError: errors.New("error when retrieving forwarding pipeline config"),
}, {
getForwardingPipelineConfigResponse: &p4v1.GetForwardingPipelineConfigResponse{
getForwardingPipelineConfigResponse: &p4.GetForwardingPipelineConfigResponse{
Config: nil,
},
getForwardingPipelineConfigResponseError: nil,
}, {
getForwardingPipelineConfigResponse: &p4v1.GetForwardingPipelineConfigResponse{
Config: &p4v1.ForwardingPipelineConfig{P4Info: nil},
getForwardingPipelineConfigResponse: &p4.GetForwardingPipelineConfigResponse{
Config: &p4.ForwardingPipelineConfig{P4Info: nil},
},
getForwardingPipelineConfigResponseError: nil,
},
@@ -94,7 +94,7 @@ func TestErrorGetP4Info(t *testing.T) {

for _, response := range responses {
p4RtClient := &fakeP4RuntimeClient{
getForwardingPipelineConfigFn: func() (*p4v1.GetForwardingPipelineConfigResponse, error) {
getForwardingPipelineConfigFn: func() (*p4.GetForwardingPipelineConfigResponse, error) {
return response.getForwardingPipelineConfigResponse, response.getForwardingPipelineConfigResponseError
},
}
@@ -111,23 +111,23 @@ func TestErrorGetP4Info(t *testing.T) {

func TestOneCounterRead(t *testing.T) {
tests := []struct {
forwardingPipelineConfig *p4v1.ForwardingPipelineConfig
EntityCounterEntry *p4v1.Entity_CounterEntry
forwardingPipelineConfig *p4.ForwardingPipelineConfig
EntityCounterEntry *p4.Entity_CounterEntry
expected []telegraf.Metric
}{
{
forwardingPipelineConfig: &p4v1.ForwardingPipelineConfig{
P4Info: &p4ConfigV1.P4Info{
Counters: []*p4ConfigV1.Counter{
createCounter("foo", 1111, p4ConfigV1.CounterSpec_BOTH),
forwardingPipelineConfig: &p4.ForwardingPipelineConfig{
P4Info: &p4_config.P4Info{
Counters: []*p4_config.Counter{
createCounter("foo", 1111, p4_config.CounterSpec_BOTH),
},
PkgInfo: &p4ConfigV1.PkgInfo{Name: "P4Program"},
PkgInfo: &p4_config.PkgInfo{Name: "P4Program"},
},
},
EntityCounterEntry: createEntityCounterEntry(
1111,
5,
&p4v1.CounterData{ByteCount: 5, PacketCount: 1},
&p4.CounterData{ByteCount: 5, PacketCount: 1},
),
expected: []telegraf.Metric{testutil.MustMetric(
"p4_runtime",
@@ -143,22 +143,22 @@ func TestOneCounterRead(t *testing.T) {
time.Unix(0, 0)),
},
}, {
forwardingPipelineConfig: &p4v1.ForwardingPipelineConfig{
P4Info: &p4ConfigV1.P4Info{
Counters: []*p4ConfigV1.Counter{
forwardingPipelineConfig: &p4.ForwardingPipelineConfig{
P4Info: &p4_config.P4Info{
Counters: []*p4_config.Counter{
createCounter(
"foo",
2222,
p4ConfigV1.CounterSpec_BYTES,
p4_config.CounterSpec_BYTES,
),
},
PkgInfo: &p4ConfigV1.PkgInfo{Name: "P4Program"},
PkgInfo: &p4_config.PkgInfo{Name: "P4Program"},
},
},
EntityCounterEntry: createEntityCounterEntry(
2222,
5,
&p4v1.CounterData{ByteCount: 5},
&p4.CounterData{ByteCount: 5},
),
expected: []telegraf.Metric{testutil.MustMetric(
"p4_runtime",
@@ -174,22 +174,22 @@ func TestOneCounterRead(t *testing.T) {
time.Unix(0, 0)),
},
}, {
forwardingPipelineConfig: &p4v1.ForwardingPipelineConfig{
P4Info: &p4ConfigV1.P4Info{
Counters: []*p4ConfigV1.Counter{
forwardingPipelineConfig: &p4.ForwardingPipelineConfig{
P4Info: &p4_config.P4Info{
Counters: []*p4_config.Counter{
createCounter(
"foo",
3333,
p4ConfigV1.CounterSpec_PACKETS,
p4_config.CounterSpec_PACKETS,
),
},
PkgInfo: &p4ConfigV1.PkgInfo{Name: "P4Program"},
PkgInfo: &p4_config.PkgInfo{Name: "P4Program"},
},
},
EntityCounterEntry: createEntityCounterEntry(
3333,
5,
&p4v1.CounterData{PacketCount: 1},
&p4.CounterData{PacketCount: 1},
),
expected: []telegraf.Metric{testutil.MustMetric(
"p4_runtime",
@@ -205,18 +205,18 @@ func TestOneCounterRead(t *testing.T) {
time.Unix(0, 0)),
},
}, {
forwardingPipelineConfig: &p4v1.ForwardingPipelineConfig{
P4Info: &p4ConfigV1.P4Info{
Counters: []*p4ConfigV1.Counter{
createCounter("foo", 4444, p4ConfigV1.CounterSpec_BOTH),
forwardingPipelineConfig: &p4.ForwardingPipelineConfig{
P4Info: &p4_config.P4Info{
Counters: []*p4_config.Counter{
createCounter("foo", 4444, p4_config.CounterSpec_BOTH),
},
PkgInfo: &p4ConfigV1.PkgInfo{Name: "P4Program"},
PkgInfo: &p4_config.PkgInfo{Name: "P4Program"},
},
},
EntityCounterEntry: createEntityCounterEntry(
4444,
5,
&p4v1.CounterData{},
&p4.CounterData{},
),
expected: nil,
},
@@ -224,19 +224,19 @@ func TestOneCounterRead(t *testing.T) {

for _, tt := range tests {
p4RtReadClient := &fakeP4RuntimeReadClient{
recvFn: func() (*p4v1.ReadResponse, error) {
return &p4v1.ReadResponse{
Entities: []*p4v1.Entity{{Entity: tt.EntityCounterEntry}},
recvFn: func() (*p4.ReadResponse, error) {
return &p4.ReadResponse{
Entities: []*p4.Entity{{Entity: tt.EntityCounterEntry}},
}, nil
},
}

p4RtClient := &fakeP4RuntimeClient{
readFn: func(*p4v1.ReadRequest) (p4v1.P4Runtime_ReadClient, error) {
readFn: func(*p4.ReadRequest) (p4.P4Runtime_ReadClient, error) {
return p4RtReadClient, nil
},
getForwardingPipelineConfigFn: func() (*p4v1.GetForwardingPipelineConfigResponse, error) {
return &p4v1.GetForwardingPipelineConfigResponse{
getForwardingPipelineConfigFn: func() (*p4.GetForwardingPipelineConfigResponse, error) {
return &p4.GetForwardingPipelineConfigResponse{
Config: tt.forwardingPipelineConfig,
}, nil
},
@@ -270,19 +270,19 @@ func TestMultipleEntitiesSingleCounterRead(t *testing.T) {
totalNumOfEntries,
"totalNumOfCounters",
)
entities := make([]*p4v1.Entity, 0, totalNumOfEntries)
p4InfoCounters := make([]*p4ConfigV1.Counter, 0, totalNumOfEntries)
entities := make([]*p4.Entity, 0, totalNumOfEntries)
p4InfoCounters := make([]*p4_config.Counter, 0, totalNumOfEntries)
p4InfoCounters = append(
p4InfoCounters,
createCounter("foo", 0, p4ConfigV1.CounterSpec_BOTH),
createCounter("foo", 0, p4_config.CounterSpec_BOTH),
)

for i := 0; i < totalNumOfEntries; i++ {
counterEntry := &p4v1.Entity{
counterEntry := &p4.Entity{
Entity: createEntityCounterEntry(
0,
int64(i),
&p4v1.CounterData{
&p4.CounterData{
ByteCount: int64(10),
PacketCount: int64(10),
},
@@ -306,25 +306,25 @@ func TestMultipleEntitiesSingleCounterRead(t *testing.T) {
))
}

forwardingPipelineConfig := &p4v1.ForwardingPipelineConfig{
P4Info: &p4ConfigV1.P4Info{
forwardingPipelineConfig := &p4.ForwardingPipelineConfig{
P4Info: &p4_config.P4Info{
Counters: p4InfoCounters,
PkgInfo: &p4ConfigV1.PkgInfo{Name: "P4Program"},
PkgInfo: &p4_config.PkgInfo{Name: "P4Program"},
},
}

p4RtReadClient := &fakeP4RuntimeReadClient{
recvFn: func() (*p4v1.ReadResponse, error) {
return &p4v1.ReadResponse{Entities: entities}, nil
recvFn: func() (*p4.ReadResponse, error) {
return &p4.ReadResponse{Entities: entities}, nil
},
}

p4RtClient := &fakeP4RuntimeClient{
readFn: func(*p4v1.ReadRequest) (p4v1.P4Runtime_ReadClient, error) {
readFn: func(*p4.ReadRequest) (p4.P4Runtime_ReadClient, error) {
return p4RtReadClient, nil
},
getForwardingPipelineConfigFn: func() (*p4v1.GetForwardingPipelineConfigResponse, error) {
return &p4v1.GetForwardingPipelineConfigResponse{
getForwardingPipelineConfigFn: func() (*p4.GetForwardingPipelineConfigResponse, error) {
return &p4.GetForwardingPipelineConfigResponse{
Config: forwardingPipelineConfig,
}, nil
},
@@ -359,7 +359,7 @@ func TestSingleEntitiesMultipleCounterRead(t *testing.T) {
totalNumOfCounters,
"totalNumOfCounters",
)
p4InfoCounters := make([]*p4ConfigV1.Counter, 0, totalNumOfCounters)
p4InfoCounters := make([]*p4_config.Counter, 0, totalNumOfCounters)

for i := 1; i <= totalNumOfCounters; i++ {
counterName := fmt.Sprintf("foo%v", i)
@@ -368,7 +368,7 @@ func TestSingleEntitiesMultipleCounterRead(t *testing.T) {
createCounter(
counterName,
uint32(i),
p4ConfigV1.CounterSpec_BOTH,
p4_config.CounterSpec_BOTH,
),
)

@@ -388,24 +388,24 @@ func TestSingleEntitiesMultipleCounterRead(t *testing.T) {
))
}

forwardingPipelineConfig := &p4v1.ForwardingPipelineConfig{
P4Info: &p4ConfigV1.P4Info{
forwardingPipelineConfig := &p4.ForwardingPipelineConfig{
P4Info: &p4_config.P4Info{
Counters: p4InfoCounters,
PkgInfo: &p4ConfigV1.PkgInfo{Name: "P4Program"},
PkgInfo: &p4_config.PkgInfo{Name: "P4Program"},
},
}

p4RtClient := &fakeP4RuntimeClient{
readFn: func(in *p4v1.ReadRequest) (p4v1.P4Runtime_ReadClient, error) {
readFn: func(in *p4.ReadRequest) (p4.P4Runtime_ReadClient, error) {
counterID := in.Entities[0].GetCounterEntry().CounterId
return &fakeP4RuntimeReadClient{
recvFn: func() (*p4v1.ReadResponse, error) {
return &p4v1.ReadResponse{
Entities: []*p4v1.Entity{{
recvFn: func() (*p4.ReadResponse, error) {
return &p4.ReadResponse{
Entities: []*p4.Entity{{
Entity: createEntityCounterEntry(
counterID,
1,
&p4v1.CounterData{
&p4.CounterData{
ByteCount: 10,
PacketCount: 10,
},
@@ -415,8 +415,8 @@ func TestSingleEntitiesMultipleCounterRead(t *testing.T) {
},
}, nil
},
getForwardingPipelineConfigFn: func() (*p4v1.GetForwardingPipelineConfigResponse, error) {
return &p4v1.GetForwardingPipelineConfigResponse{
getForwardingPipelineConfigFn: func() (*p4.GetForwardingPipelineConfigResponse, error) {
return &p4.GetForwardingPipelineConfigResponse{
Config: forwardingPipelineConfig,
}, nil
},
@@ -442,13 +442,13 @@ func TestSingleEntitiesMultipleCounterRead(t *testing.T) {
}

func TestNoCountersAvailable(t *testing.T) {
forwardingPipelineConfig := &p4v1.ForwardingPipelineConfig{
P4Info: &p4ConfigV1.P4Info{Counters: []*p4ConfigV1.Counter{}},
forwardingPipelineConfig := &p4.ForwardingPipelineConfig{
P4Info: &p4_config.P4Info{Counters: []*p4_config.Counter{}},
}

p4RtClient := &fakeP4RuntimeClient{
getForwardingPipelineConfigFn: func() (*p4v1.GetForwardingPipelineConfigResponse, error) {
return &p4v1.GetForwardingPipelineConfigResponse{
getForwardingPipelineConfigFn: func() (*p4.GetForwardingPipelineConfigResponse, error) {
return &p4.GetForwardingPipelineConfigResponse{
Config: forwardingPipelineConfig,
}, nil
},
@@ -464,18 +464,18 @@ func TestNoCountersAvailable(t *testing.T) {
}

func TestFilterCounters(t *testing.T) {
forwardingPipelineConfig := &p4v1.ForwardingPipelineConfig{
P4Info: &p4ConfigV1.P4Info{
Counters: []*p4ConfigV1.Counter{
createCounter("foo", 1, p4ConfigV1.CounterSpec_BOTH),
forwardingPipelineConfig := &p4.ForwardingPipelineConfig{
P4Info: &p4_config.P4Info{
Counters: []*p4_config.Counter{
createCounter("foo", 1, p4_config.CounterSpec_BOTH),
},
PkgInfo: &p4ConfigV1.PkgInfo{Name: "P4Program"},
PkgInfo: &p4_config.PkgInfo{Name: "P4Program"},
},
}

p4RtClient := &fakeP4RuntimeClient{
getForwardingPipelineConfigFn: func() (*p4v1.GetForwardingPipelineConfigResponse, error) {
return &p4v1.GetForwardingPipelineConfigResponse{
getForwardingPipelineConfigFn: func() (*p4.GetForwardingPipelineConfigResponse, error) {
return &p4.GetForwardingPipelineConfigResponse{
Config: forwardingPipelineConfig,
}, nil
},
@@ -500,31 +500,31 @@ func TestFilterCounters(t *testing.T) {

func TestFailReadCounterEntryFromEntry(t *testing.T) {
p4RtReadClient := &fakeP4RuntimeReadClient{
recvFn: func() (*p4v1.ReadResponse, error) {
return &p4v1.ReadResponse{
Entities: []*p4v1.Entity{{
Entity: &p4v1.Entity_TableEntry{
TableEntry: &p4v1.TableEntry{},
recvFn: func() (*p4.ReadResponse, error) {
return &p4.ReadResponse{
Entities: []*p4.Entity{{
Entity: &p4.Entity_TableEntry{
TableEntry: &p4.TableEntry{},
}}}}, nil
},
}

p4RtClient := &fakeP4RuntimeClient{
readFn: func(*p4v1.ReadRequest) (p4v1.P4Runtime_ReadClient, error) {
readFn: func(*p4.ReadRequest) (p4.P4Runtime_ReadClient, error) {
return p4RtReadClient, nil
},
getForwardingPipelineConfigFn: func() (*p4v1.GetForwardingPipelineConfigResponse, error) {
return &p4v1.GetForwardingPipelineConfigResponse{
Config: &p4v1.ForwardingPipelineConfig{
P4Info: &p4ConfigV1.P4Info{
Counters: []*p4ConfigV1.Counter{
getForwardingPipelineConfigFn: func() (*p4.GetForwardingPipelineConfigResponse, error) {
return &p4.GetForwardingPipelineConfigResponse{
Config: &p4.ForwardingPipelineConfig{
P4Info: &p4_config.P4Info{
Counters: []*p4_config.Counter{
createCounter(
"foo",
1111,
p4ConfigV1.CounterSpec_BOTH,
p4_config.CounterSpec_BOTH,
),
},
PkgInfo: &p4ConfigV1.PkgInfo{Name: "P4Program"},
PkgInfo: &p4_config.PkgInfo{Name: "P4Program"},
},
},
}, nil
@@ -553,21 +553,21 @@ func TestFailReadCounterEntryFromEntry(t *testing.T) {

func TestFailReadAllEntries(t *testing.T) {
p4RtClient := &fakeP4RuntimeClient{
readFn: func(*p4v1.ReadRequest) (p4v1.P4Runtime_ReadClient, error) {
readFn: func(*p4.ReadRequest) (p4.P4Runtime_ReadClient, error) {
return nil, errors.New("connection error")
},
getForwardingPipelineConfigFn: func() (*p4v1.GetForwardingPipelineConfigResponse, error) {
return &p4v1.GetForwardingPipelineConfigResponse{
Config: &p4v1.ForwardingPipelineConfig{
P4Info: &p4ConfigV1.P4Info{
Counters: []*p4ConfigV1.Counter{
getForwardingPipelineConfigFn: func() (*p4.GetForwardingPipelineConfigResponse, error) {
return &p4.GetForwardingPipelineConfigResponse{
Config: &p4.ForwardingPipelineConfig{
P4Info: &p4_config.P4Info{
Counters: []*p4_config.Counter{
createCounter(
"foo",
1111,
p4ConfigV1.CounterSpec_BOTH,
p4_config.CounterSpec_BOTH,
),
},
PkgInfo: &p4ConfigV1.PkgInfo{Name: "P4Program"},
PkgInfo: &p4_config.PkgInfo{Name: "P4Program"},
},
},
}, nil
@@ -595,11 +595,11 @@ func TestFailReadAllEntries(t *testing.T) {
}

func TestFilterCounterNamesInclude(t *testing.T) {
counters := []*p4ConfigV1.Counter{
createCounter("foo", 1, p4ConfigV1.CounterSpec_BOTH),
createCounter("bar", 2, p4ConfigV1.CounterSpec_BOTH),
counters := []*p4_config.Counter{
createCounter("foo", 1, p4_config.CounterSpec_BOTH),
createCounter("bar", 2, p4_config.CounterSpec_BOTH),
nil,
createCounter("", 3, p4ConfigV1.CounterSpec_BOTH),
createCounter("", 3, p4_config.CounterSpec_BOTH),
}

counterNamesInclude := []string{"bar"}
@@ -607,8 +607,8 @@ func TestFilterCounterNamesInclude(t *testing.T) {
filteredCounters := filterCounters(counters, counterNamesInclude)
require.Equal(
t,
[]*p4ConfigV1.Counter{
createCounter("bar", 2, p4ConfigV1.CounterSpec_BOTH),
[]*p4_config.Counter{
createCounter("bar", 2, p4_config.CounterSpec_BOTH),
}, filteredCounters,
)
}

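Taken together, the hunks above apply one mechanical change: custom import aliases move from camelCase (`p4ConfigV1`, `p4v1`) to lowercase snake_case (`p4_config`, `p4`), and every reference is updated to match. Below is a minimal, self-contained sketch of the resulting style; the `package main` scaffolding and the printed output are illustrative only, while the aliases, types, and field names are the ones visible in the hunks.

```go
package main

import (
	"fmt"

	p4_config "github.com/p4lang/p4runtime/go/p4/config/v1" // previously aliased as p4ConfigV1
	p4 "github.com/p4lang/p4runtime/go/p4/v1"               // previously aliased as p4v1
)

func main() {
	// The snake_case aliases are used exactly like the old camelCase ones.
	counter := &p4_config.Counter{
		Preamble: &p4_config.Preamble{Name: "foo", Id: 1111},
		Spec:     &p4_config.CounterSpec{Unit: p4_config.CounterSpec_BOTH},
	}
	data := &p4.CounterData{ByteCount: 5, PacketCount: 1}
	fmt.Println(counter.GetPreamble().GetName(), data.GetByteCount(), data.GetPacketCount())
}
```
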
@@ -25,10 +25,10 @@ import (
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/models"
httpconfig "github.com/influxdata/telegraf/plugins/common/http"
common_http "github.com/influxdata/telegraf/plugins/common/http"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers/openmetrics"
parser "github.com/influxdata/telegraf/plugins/parsers/prometheus"
parsers_prometheus "github.com/influxdata/telegraf/plugins/parsers/prometheus"
)

//go:embed sample.conf
@@ -88,7 +88,7 @@ type Prometheus struct {
ConsulConfig ConsulConfig `toml:"consul"`

Log telegraf.Logger `toml:"-"`
httpconfig.HTTPClientConfig
common_http.HTTPClientConfig

client *http.Client
headers map[string]string
@@ -525,7 +525,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) (map[s
Log: p.Log,
}
} else {
metricParser = &parser.Parser{
metricParser = &parsers_prometheus.Parser{
Header: resp.Header,
MetricVersion: p.MetricVersion,
IgnoreTimestamp: p.IgnoreTimestamp,

@@ -16,14 +16,14 @@ import (
"sync"
"time"

riemanngo "github.com/riemann/riemann-go-client"
riemangoProto "github.com/riemann/riemann-go-client/proto"
riemann "github.com/riemann/riemann-go-client"
rieman_proto "github.com/riemann/riemann-go-client/proto"
"google.golang.org/protobuf/proto"

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/metric"
tlsint "github.com/influxdata/telegraf/plugins/common/tls"
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)

@@ -37,7 +37,7 @@ type RiemannSocketListener struct {
ReadTimeout *config.Duration `toml:"read_timeout"`
KeepAlivePeriod *config.Duration `toml:"keep_alive_period"`
SocketMode string `toml:"socket_mode"`
tlsint.ServerConfig
common_tls.ServerConfig

wg sync.WaitGroup

@@ -178,7 +178,7 @@ func (rsl *riemannListener) read(conn net.Conn) {
}
}

messagePb := &riemangoProto.Msg{}
messagePb := &rieman_proto.Msg{}
var header uint32
// First obtain the size of the riemann event from client and acknowledge
if err = binary.Read(conn, binary.BigEndian, &header); err != nil {
@@ -201,7 +201,7 @@ func (rsl *riemannListener) read(conn net.Conn) {
rsl.riemannReturnErrorResponse(conn, "Failed to unmarshal")
return
}
riemannEvents := riemanngo.ProtocolBuffersToEvents(messagePb.Events)
riemannEvents := riemann.ProtocolBuffersToEvents(messagePb.Events)

for _, m := range riemannEvents {
if m.Service == "" {
@@ -227,7 +227,7 @@ func (rsl *riemannListener) read(conn net.Conn) {

func (rsl *riemannListener) riemannReturnResponse(conn net.Conn) {
t := true
message := new(riemangoProto.Msg)
message := new(rieman_proto.Msg)
message.Ok = &t
returnData, err := proto.Marshal(message)
if err != nil {
@@ -249,7 +249,7 @@ func (rsl *riemannListener) riemannReturnResponse(conn net.Conn) {

func (rsl *riemannListener) riemannReturnErrorResponse(conn net.Conn, errorMessage string) {
t := false
message := new(riemangoProto.Msg)
message := new(rieman_proto.Msg)
message.Ok = &t
message.Error = &errorMessage
returnData, err := proto.Marshal(message)

@@ -18,7 +18,7 @@ import (
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/common/socket"
"github.com/influxdata/telegraf/plugins/inputs"
influx "github.com/influxdata/telegraf/plugins/parsers/influx/influx_upstream"
parsers_influx_upstream "github.com/influxdata/telegraf/plugins/parsers/influx/influx_upstream"
"github.com/influxdata/telegraf/testutil"
"github.com/leodido/go-syslog/v4/nontransparent"
)
@@ -179,7 +179,7 @@ func TestCases(t *testing.T) {
expectedErrorFilename := filepath.Join(testcasePath, "expected.err")

// Prepare the influx parser for expectations
parser := &influx.Parser{}
parser := &parsers_influx_upstream.Parser{}
require.NoError(t, parser.Init())

// Read the input data

@@ -15,7 +15,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/internal"
httpcommon "github.com/influxdata/telegraf/plugins/common/http"
common_http "github.com/influxdata/telegraf/plugins/common/http"
"github.com/influxdata/telegraf/plugins/inputs"
)

@@ -28,7 +28,7 @@ type Vault struct {
TokenFile string `toml:"token_file"`
Token string `toml:"token"`
Log telegraf.Logger `toml:"-"`
httpcommon.HTTPClientConfig
common_http.HTTPClientConfig

client *http.Client
}
@@ -192,7 +192,7 @@ func buildVaultMetrics(acc telegraf.Accumulator, sysMetrics *SysMetrics) error {
func init() {
inputs.Add("vault", func() telegraf.Input {
return &Vault{
HTTPClientConfig: httpcommon.HTTPClientConfig{
HTTPClientConfig: common_http.HTTPClientConfig{
ResponseHeaderTimeout: config.Duration(5 * time.Second),
},
}

@@ -8,7 +8,7 @@ import (
"testing"
"time"

dockercontainer "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/container"
"github.com/docker/go-connections/nat"

"github.com/influxdata/telegraf"
@@ -183,13 +183,13 @@ func TestIntegration(t *testing.T) {
}

// Start the docker container
container := testutil.Container{
cntnr := testutil.Container{
Image: "vault:1.13.3",
ExposedPorts: []string{"8200"},
Env: map[string]string{
"VAULT_DEV_ROOT_TOKEN_ID": "root",
},
HostConfigModifier: func(hc *dockercontainer.HostConfig) {
HostConfigModifier: func(hc *container.HostConfig) {
hc.CapAdd = []string{"IPC_LOCK"}
},
WaitingFor: wait.ForAll(
@@ -197,13 +197,13 @@ func TestIntegration(t *testing.T) {
wait.ForListeningPort(nat.Port("8200")),
),
}
require.NoError(t, container.Start(), "failed to start container")
defer container.Terminate()
require.NoError(t, cntnr.Start(), "failed to start container")
defer cntnr.Terminate()

// Setup the plugin
port := container.Ports["8200"]
port := cntnr.Ports["8200"]
plugin := &Vault{
URL: "http://" + container.Address + ":" + port,
URL: "http://" + cntnr.Address + ":" + port,
Token: "root",
}
require.NoError(t, plugin.Init())

@@ -17,7 +17,7 @@ import (
"github.com/vmware/govmomi/vim25/types"

"github.com/influxdata/telegraf/config"
itls "github.com/influxdata/telegraf/plugins/common/tls"
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/testutil"
)

@@ -130,7 +130,7 @@ func defaultVSphere() *VSphere {
DatacenterMetricInclude: nil,
DatacenterMetricExclude: nil,
DatacenterInclude: []string{"/**"},
ClientConfig: itls.ClientConfig{InsecureSkipVerify: true},
ClientConfig: common_tls.ClientConfig{InsecureSkipVerify: true},

MaxQueryObjects: 256,
MaxQueryMetrics: 256,

@@ -28,7 +28,7 @@ import (
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/internal/globpath"
"github.com/influxdata/telegraf/plugins/common/proxy"
commontls "github.com/influxdata/telegraf/plugins/common/tls"
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)

@@ -45,7 +45,7 @@ type X509Cert struct {
ServerName string `toml:"server_name"`
ExcludeRootCerts bool `toml:"exclude_root_certs"`
Log telegraf.Logger `toml:"-"`
commontls.ClientConfig
common_tls.ClientConfig
proxy.TCPProxy

tlsCfg *tls.Config

@@ -27,7 +27,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/metric"
_tls "github.com/influxdata/telegraf/plugins/common/tls"
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/testutil"
)

@@ -457,7 +457,7 @@ func TestServerName(t *testing.T) {
sc := &X509Cert{
Sources: []string{test.url},
ServerName: test.fromCfg,
ClientConfig: _tls.ClientConfig{ServerName: test.fromTLS},
ClientConfig: common_tls.ClientConfig{ServerName: test.fromTLS},
Log: testutil.Logger{},
}
err := sc.Init()
@@ -569,7 +569,7 @@ func TestClassification(t *testing.T) {
certURI := "file://" + filepath.Join(tmpDir, "cert.pem")
plugin := &X509Cert{
Sources: []string{certURI},
ClientConfig: _tls.ClientConfig{
ClientConfig: common_tls.ClientConfig{
TLSCA: filepath.Join(tmpDir, "ca.pem"),
},
Log: testutil.Logger{},

@@ -6,8 +6,9 @@ import (
"bytes"
"context"
"fmt"
"github.com/apache/thrift/lib/go/thrift"
"time"

"github.com/apache/thrift/lib/go/thrift"
)

// (needed to ensure safety because of naive import list construction.)

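Besides the alias renames elsewhere in this commit, the hunk above moves the third-party thrift import out of the standard-library group so the block follows the usual Go grouping: standard library first, then a blank line, then external modules. A small sketch of that grouping follows, assuming nothing beyond the imports shown in the hunk; the `main` body only references the packages so the example compiles.

```go
package main

import (
	// Standard-library imports come first...
	"bytes"
	"context"
	"fmt"
	"time"

	// ...and third-party modules follow after a blank line.
	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	var buf bytes.Buffer
	_ = context.Background()
	_ = time.Now()
	var _ thrift.TProtocol // referenced only so the thrift import is used
	fmt.Println("buffered bytes:", buf.Len())
}
```
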
@@ -15,7 +15,7 @@ import (

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
tlsint "github.com/influxdata/telegraf/plugins/common/tls"
common_tls "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)

@@ -32,7 +32,7 @@ type Zookeeper struct {

EnableTLS bool `toml:"enable_tls"`
EnableSSL bool `toml:"enable_ssl" deprecated:"1.7.0;1.35.0;use 'enable_tls' instead"`
tlsint.ClientConfig
common_tls.ClientConfig

initialized bool
tlsConfig *tls.Config

@@ -1,6 +1,6 @@
package mocks

import mock "github.com/stretchr/testify/mock"
import "github.com/stretchr/testify/mock"

// DiagnosticsMessageSubscriber is an autogenerated mock type for the DiagnosticsMessageSubscriber type
type DiagnosticsMessageListener struct {

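The one-line change above drops an import alias that merely repeated the package's own name (`mock` for `testify/mock`), so references in the file stay the same. A minimal illustrative sketch of the cleaned-up form is below; the `main` wrapper and the printed type are scaffolding added for a self-contained example.

```go
package main

import (
	"fmt"

	// Before: the package was imported as `mock "github.com/stretchr/testify/mock"`,
	// an alias identical to the package name and therefore redundant.
	"github.com/stretchr/testify/mock"
)

func main() {
	// References are unchanged: the package is still addressed as "mock".
	var m mock.Mock
	fmt.Printf("%T\n", &m)
}
```
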
@@ -17,7 +17,7 @@ import (
"github.com/stretchr/testify/require"

"github.com/influxdata/telegraf"
telegrafJson "github.com/influxdata/telegraf/plugins/serializers/json"
serializers_json "github.com/influxdata/telegraf/plugins/serializers/json"
"github.com/influxdata/telegraf/testutil"
)

@@ -105,7 +105,7 @@ func TestWrite(t *testing.T) {

for _, tC := range testCases {
t.Run(tC.name, func(t *testing.T) {
serializer := &telegrafJson.Serializer{}
serializer := &serializers_json.Serializer{}
require.NoError(t, serializer.Init())

ingestionType := "queued"
@@ -156,7 +156,7 @@ func TestWrite(t *testing.T) {
}

func TestCreateAzureDataExplorerTable(t *testing.T) {
serializer := &telegrafJson.Serializer{}
serializer := &serializers_json.Serializer{}
require.NoError(t, serializer.Init())
plugin := AzureDataExplorer{
Endpoint: "someendpoint",
@@ -251,7 +251,7 @@ func TestWriteWithType(t *testing.T) {
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
serializer := &telegrafJson.Serializer{}
serializer := &serializers_json.Serializer{}
require.NoError(t, serializer.Init())
for tableName, jsonValue := range testCase.tableNameToExpectedResult {
ingestionType := "queued"

@@ -1,8 +1,9 @@
package cloud_pubsub

import (
"cloud.google.com/go/pubsub"
"context"

"cloud.google.com/go/pubsub"
)

type (

@@ -17,7 +17,7 @@ import (
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/parsers/influx"
serializer "github.com/influxdata/telegraf/plugins/serializers/influx"
serializers_influx "github.com/influxdata/telegraf/plugins/serializers/influx"
)

const (
@@ -64,7 +64,7 @@ type (

func getTestResources(tT *testing.T, settings pubsub.PublishSettings, testM []testMetric) (*PubSub, *stubTopic, []telegraf.Metric) {
// Instantiate a Influx line-protocol serializer
s := &serializer.Serializer{}
s := &serializers_influx.Serializer{}
require.NoError(tT, s.Init())

metrics := make([]telegraf.Metric, 0, len(testM))

@@ -15,8 +15,8 @@ import (
"github.com/aws/aws-sdk-go-v2/service/cloudwatch/types"

"github.com/influxdata/telegraf"
internalaws "github.com/influxdata/telegraf/plugins/common/aws"
httpconfig "github.com/influxdata/telegraf/plugins/common/http"
common_aws "github.com/influxdata/telegraf/plugins/common/aws"
common_http "github.com/influxdata/telegraf/plugins/common/http"
"github.com/influxdata/telegraf/plugins/outputs"
)

@@ -29,8 +29,8 @@ type CloudWatch struct {
svc *cloudwatch.Client
WriteStatistics bool `toml:"write_statistics"`
Log telegraf.Logger `toml:"-"`
internalaws.CredentialConfig
httpconfig.HTTPClientConfig
common_aws.CredentialConfig
common_http.HTTPClientConfig
client *http.Client
}