diff --git a/.drone.yml b/.drone.yml new file mode 100644 index 000000000..0d16cb9ec --- /dev/null +++ b/.drone.yml @@ -0,0 +1,12 @@ +kind: pipeline +type: docker +name: default + +steps: +- name: build + image: golang:latest + environment: + GO111MODULE: on + GOPROXY: https://goproxy.cn,direct + commands: + - go build -tags "custom,inputs.cl_kafka_consumer,outputs.influxdb_v2,parsers.phasor_binary" ./cmd/telegraf \ No newline at end of file diff --git a/config/config.go b/config/config.go index 3789e94a2..585b27d00 100644 --- a/config/config.go +++ b/config/config.go @@ -663,7 +663,7 @@ func (c *Config) LoadConfigData(data []byte, path string) error { if len(c.UnusedFields) > 0 { return fmt.Errorf( "line %d: configuration specified the fields %q, but they were not used. "+ - "This is either a typo or this config option does not exist in this version.", + "This is either a typo or this config option does not exist in this version", tbl.Line, keys(c.UnusedFields)) } @@ -701,7 +701,7 @@ func (c *Config) LoadConfigData(data []byte, path string) error { if len(c.UnusedFields) > 0 { return fmt.Errorf( "plugin %s.%s: line %d: configuration specified the fields %q, but they were not used. "+ - "This is either a typo or this config option does not exist in this version.", + "This is either a typo or this config option does not exist in this version", name, pluginName, subTable.Line, keys(c.UnusedFields)) } } @@ -726,7 +726,7 @@ func (c *Config) LoadConfigData(data []byte, path string) error { if len(c.UnusedFields) > 0 { return fmt.Errorf( "plugin %s.%s: line %d: configuration specified the fields %q, but they were not used. "+ - "This is either a typo or this config option does not exist in this version.", + "This is either a typo or this config option does not exist in this version", name, pluginName, subTable.Line, keys(c.UnusedFields)) } } @@ -746,7 +746,7 @@ func (c *Config) LoadConfigData(data []byte, path string) error { if len(c.UnusedFields) > 0 { return fmt.Errorf( "plugin %s.%s: line %d: configuration specified the fields %q, but they were not used. "+ - "This is either a typo or this config option does not exist in this version.", + "This is either a typo or this config option does not exist in this version", name, pluginName, subTable.Line, @@ -770,7 +770,7 @@ func (c *Config) LoadConfigData(data []byte, path string) error { if len(c.UnusedFields) > 0 { return fmt.Errorf( "plugin %s.%s: line %d: configuration specified the fields %q, but they were not used. "+ - "This is either a typo or this config option does not exist in this version.", + "This is either a typo or this config option does not exist in this version", name, pluginName, subTable.Line, keys(c.UnusedFields)) } } @@ -788,7 +788,7 @@ func (c *Config) LoadConfigData(data []byte, path string) error { } if len(c.UnusedFields) > 0 { msg := "plugin %s.%s: line %d: configuration specified the fields %q, but they were not used. " + - "This is either a typo or this config option does not exist in this version." 
+ "This is either a typo or this config option does not exist in this version" return fmt.Errorf(msg, name, pluginName, subTable.Line, keys(c.UnusedFields)) } } @@ -1066,14 +1066,16 @@ func (c *Config) addParser(parentcategory, parentname string, table *ast.Table) } conf.DataFormat = c.getFieldString(table, "data_format") - if conf.DataFormat == "" { + switch conf.DataFormat { + case "": conf.DataFormat = setDefaultParser(parentcategory, parentname) - } else if conf.DataFormat == "influx" { + case "influx": influxParserType := c.getFieldString(table, "influx_parser_type") if influxParserType == "upstream" { conf.DataFormat = "influx_upstream" } } + conf.LogLevel = c.getFieldString(table, "log_level") creator, ok := parsers.Parsers[conf.DataFormat] diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index 3947ffab9..8d23e7d31 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -26,6 +26,7 @@ Protocol, JSON format, or Apache Avro format. - [Value](/plugins/parsers/value), ie: 45 or "booyah" - [Wavefront](/plugins/parsers/wavefront) - [XPath](/plugins/parsers/xpath) (supports XML, JSON, MessagePack, Protocol Buffers) +- [PhasorBinary](/plugins/parsers/phasor_binary) (supports special binary from CL) Any input plugin containing the `data_format` option can use it to select the desired parser: diff --git a/models/running_parsers.go b/models/running_parsers.go index 7595b1171..59d23380f 100644 --- a/models/running_parsers.go +++ b/models/running_parsers.go @@ -73,9 +73,9 @@ func (r *RunningParser) Init() error { return nil } -func (r *RunningParser) Parse(buf []byte) ([]telegraf.Metric, error) { +func (r *RunningParser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { start := time.Now() - m, err := r.Parser.Parse(buf) + m, err := r.Parser.Parse(buf, "") elapsed := time.Since(start) r.ParseTime.Incr(elapsed.Nanoseconds()) r.MetricsParsed.Incr(int64(len(m))) diff --git a/parser.go b/parser.go index 6111886df..6b05ac2e2 100644 --- a/parser.go +++ b/parser.go @@ -7,7 +7,7 @@ type Parser interface { // and parses it into telegraf metrics // // Must be thread-safe. 
- Parse(buf []byte) ([]Metric, error) + Parse(buf []byte, extra string) ([]Metric, error) // ParseLine takes a single string metric // ie, "cpu.usage.idle 90" diff --git a/plugins/common/socket/socket_test.go b/plugins/common/socket/socket_test.go index e979f8a36..0ecd07983 100644 --- a/plugins/common/socket/socket_test.go +++ b/plugins/common/socket/socket_test.go @@ -154,7 +154,7 @@ func TestListenData(t *testing.T) { var acc testutil.Accumulator onData := func(remote net.Addr, data []byte, _ time.Time) { - m, err := parser.Parse(data) + m, err := parser.Parse(data, "") require.NoError(t, err) addr, _, err := net.SplitHostPort(remote.String()) if err != nil { @@ -358,7 +358,7 @@ func TestListenConnection(t *testing.T) { onConnection := func(remote net.Addr, reader io.ReadCloser) { data, err := io.ReadAll(reader) require.NoError(t, err) - m, err := parser.Parse(data) + m, err := parser.Parse(data, "") require.NoError(t, err) addr, _, err := net.SplitHostPort(remote.String()) if err != nil { @@ -451,7 +451,7 @@ func TestClosingConnections(t *testing.T) { var acc testutil.Accumulator onData := func(_ net.Addr, data []byte, _ time.Time) { - m, err := parser.Parse(data) + m, err := parser.Parse(data, "") require.NoError(t, err) acc.AddMetrics(m) } @@ -667,7 +667,7 @@ func TestNoSplitter(t *testing.T) { onConnection := func(remote net.Addr, reader io.ReadCloser) { data, err := io.ReadAll(reader) require.NoError(t, err) - m, err := parser.Parse(data) + m, err := parser.Parse(data, "") require.NoError(t, err) addr, _, err := net.SplitHostPort(remote.String()) if err != nil { diff --git a/plugins/inputs/all/cl_kafka_consumer.go b/plugins/inputs/all/cl_kafka_consumer.go new file mode 100644 index 000000000..6988149f1 --- /dev/null +++ b/plugins/inputs/all/cl_kafka_consumer.go @@ -0,0 +1,5 @@ +//go:build !custom || inputs || inputs.cl_kafka_consumer + +package all + +import _ "github.com/influxdata/telegraf/plugins/inputs/cl_kafka_consumer" // register plugin diff --git a/plugins/inputs/amqp_consumer/amqp_consumer.go b/plugins/inputs/amqp_consumer/amqp_consumer.go index e5a32eab5..53edf9946 100644 --- a/plugins/inputs/amqp_consumer/amqp_consumer.go +++ b/plugins/inputs/amqp_consumer/amqp_consumer.go @@ -454,7 +454,7 @@ func (a *AMQPConsumer) onMessage(acc telegraf.TrackingAccumulator, d amqp.Delive return err } - metrics, err := a.parser.Parse(body) + metrics, err := a.parser.Parse(body, "") if err != nil { onError() return err diff --git a/plugins/inputs/amqp_consumer/amqp_consumer_test.go b/plugins/inputs/amqp_consumer/amqp_consumer_test.go index 5dfaf37f1..9a56493c9 100644 --- a/plugins/inputs/amqp_consumer/amqp_consumer_test.go +++ b/plugins/inputs/amqp_consumer/amqp_consumer_test.go @@ -124,7 +124,7 @@ func TestIntegration(t *testing.T) { } expected := make([]telegraf.Metric, 0, len(metrics)) for _, x := range metrics { - m, err := parser.Parse([]byte(x)) + m, err := parser.Parse([]byte(x), "") require.NoError(t, err) expected = append(expected, m...) } @@ -343,7 +343,7 @@ func TestStartupErrorBehaviorRetry(t *testing.T) { } expected := make([]telegraf.Metric, 0, len(metrics)) for _, x := range metrics { - m, err := parser.Parse([]byte(x)) + m, err := parser.Parse([]byte(x), "") require.NoError(t, err) expected = append(expected, m...) 
} diff --git a/plugins/inputs/cl_kafka_consumer/README.md b/plugins/inputs/cl_kafka_consumer/README.md new file mode 100644 index 000000000..8ccdb6e6c --- /dev/null +++ b/plugins/inputs/cl_kafka_consumer/README.md @@ -0,0 +1,245 @@ +# CL Kafka Consumer Input Plugin + +This service plugin consumes messages from [Kafka brokers][kafka] in one of the +supported [data formats][data_formats]. The plugin uses +[consumer groups][consumer_groups] when talking to the Kafka cluster so multiple +instances of Telegraf can consume messages from the same topic in parallel. + +⭐ Telegraf v0.2.3 +🏷️ messaging +💻 all + +[kafka]: https://kafka.apache.org +[consumer_groups]: http://godoc.org/github.com/wvanbergen/kafka/consumergroup +[data_formats]: /docs/DATA_FORMATS_INPUT.md + +## Service Input + +This plugin is a service input. Normal plugins gather metrics determined by the +interval setting. Service plugins start a service to listen and wait for +metrics or events to occur. Service plugins have two key differences from +normal plugins: + +1. The global or plugin specific `interval` setting may not apply +2. The CLI options of `--test`, `--test-wait`, and `--once` may not produce + output for this plugin + +## Global configuration options + +In addition to the plugin-specific configuration settings, plugins support +additional global and plugin configuration settings. These settings are used to +modify metrics, tags, and fields or create aliases and configure ordering, etc. +See the [CONFIGURATION.md][CONFIGURATION.md] for more details. + +[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins + +## Startup error behavior options + +In addition to the plugin-specific and global configuration settings the plugin +supports options for specifying the behavior when experiencing startup errors +using the `startup_error_behavior` setting. Available values are: + +- `error`: Telegraf will stop and exit in case of startup errors. This is the + default behavior. +- `ignore`: Telegraf will ignore startup errors for this plugin and disable it + but continue processing for all other plugins. +- `retry`: Telegraf will try to start the plugin on every gather or write + cycle in case of startup errors. The plugin is disabled until + the startup succeeds. + +## Secret-store support + +This plugin supports secrets from secret-stores for the `sasl_username`, +`sasl_password` and `sasl_access_token` options. +See the [secret-store documentation][SECRETSTORE] for more details on how +to use them. + +[SECRETSTORE]: ../../../docs/CONFIGURATION.md#secret-store-secrets + +## Configuration + +```toml @sample.conf +# Read metrics from Kafka topics +[[inputs.cl_kafka_consumer]] + ## Kafka brokers. + brokers = ["localhost:9092"] + + ## Set the minimal supported Kafka version. Should be a dot-separated + ## version string with four components for 0.x versions and three + ## components for versions from 1.0.0 onwards. This setting enables the + ## use of new Kafka features and APIs. Must be 0.10.2.0 (the default) + ## or greater. Please check the list of supported versions at + ## https://pkg.go.dev/github.com/Shopify/sarama#SupportedVersions + ## ex: kafka_version = "2.6.0" + ## ex: kafka_version = "0.10.2.0" + # kafka_version = "0.10.2.0" + + ## Topics to consume. + topics = ["telegraf"] + + ## Topic regular expressions to consume. Matches will be added to topics.
+ ## Example: topic_regexps = [ ".*test", "metric[0-9A-z]*" ] + # topic_regexps = [ ] + + ## When set, this tag will be added to all metrics with the topic as the value. + # topic_tag = "" + + ## The list of Kafka message headers that should be passed as metric tags. + ## Works only for Kafka version 0.11+; on lower versions the message headers + ## are not available. + # msg_headers_as_tags = [] + + ## The name of the Kafka message header whose value should override the metric name. + ## If the same header is specified both in this option and in the msg_headers_as_tags + ## option, it will be excluded from the msg_headers_as_tags list. + # msg_header_as_metric_name = "" + + ## Set metric(s) timestamp using the given source. + ## Available options are: + ## metric -- do not modify the metric timestamp + ## inner -- use the inner message timestamp (Kafka v0.10+) + ## outer -- use the outer (compressed) block timestamp (Kafka v0.10+) + # timestamp_source = "metric" + + ## Optional Client id + # client_id = "Telegraf" + + ## Optional TLS Config + # enable_tls = false + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Period between keep alive probes. + ## Defaults to the OS configuration if not specified or zero. + # keep_alive_period = "15s" + + ## SASL authentication credentials. These settings should typically be used + ## with TLS encryption enabled + # sasl_username = "" + # sasl_password = "" + + ## Optional SASL, one of: + ## OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI, AWS-MSK-IAM + # sasl_mechanism = "" + + ## used if sasl_mechanism is GSSAPI + # sasl_gssapi_service_name = "" + # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH + # sasl_gssapi_auth_type = "KRB5_USER_AUTH" + # sasl_gssapi_kerberos_config_path = "/" + # sasl_gssapi_realm = "realm" + # sasl_gssapi_key_tab_path = "" + # sasl_gssapi_disable_pafxfast = false + + ## used if sasl_mechanism is OAUTHBEARER + # sasl_access_token = "" + + ## used if sasl_mechanism is AWS-MSK-IAM + # sasl_aws_msk_iam_region = "" + ## for profile based auth + ## sasl_aws_msk_iam_profile = "" + ## for role based auth + ## sasl_aws_msk_iam_role = "" + ## sasl_aws_msk_iam_session = "" + + ## Arbitrary key value string pairs to pass as a TOML table. For example: + ## {logicalCluster = "cluster-042", poolId = "pool-027"} + # sasl_extensions = {} + + ## SASL protocol version. When connecting to Azure EventHub set to 0. + # sasl_version = 1 + + # Disable Kafka metadata full fetch + # metadata_full = false + + ## Name of the consumer group. + # consumer_group = "telegraf_metrics_consumers" + + ## Compression codec represents the various compression codecs recognized by + ## Kafka in messages. + ## 0 : None + ## 1 : Gzip + ## 2 : Snappy + ## 3 : LZ4 + ## 4 : ZSTD + # compression_codec = 0 + ## Initial offset position; one of "oldest" or "newest". + # offset = "oldest" + + ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky". + # balance_strategy = "range" + + ## Maximum number of retries for metadata operations including + ## connecting. Sets Sarama library's Metadata.Retry.Max config value. If 0 or + ## unset, use the Sarama default of 3. + # metadata_retry_max = 0 + + ## Type of retry backoff. Valid options: "constant", "exponential" + # metadata_retry_type = "constant" + + ## Amount of time to wait before retrying.
When metadata_retry_type is + ## "constant", each retry is delayed this amount. When "exponential", the + ## first retry is delayed this amount, and subsequent delays are doubled. If 0 + ## or unset, use the Sarama default of 250 ms + # metadata_retry_backoff = 0 + + ## Maximum amount of time to wait before retrying when metadata_retry_type is + ## "exponential". Ignored for other retry types. If 0, there is no backoff + ## limit. + # metadata_retry_max_duration = 0 + + ## When set to true, this turns each bootstrap broker address into a set of + ## IPs, then does a reverse lookup on each one to get its canonical hostname. + ## This list of hostnames then replaces the original address list. + # resolve_canonical_bootstrap_servers_only = false + + ## Maximum length of a message to consume, in bytes (default 0/unlimited); + ## larger messages are dropped + max_message_len = 1000000 + + ## Max undelivered messages + ## This plugin uses tracking metrics, which ensure messages are delivered to + ## outputs before acknowledging them to the original broker so that data + ## is not lost. This option sets the maximum number of messages to read from + ## the broker that have not been written by an output. + ## + ## This value needs to be picked with awareness of the agent's + ## metric_batch_size value as well. Setting max undelivered messages too high + ## can result in a constant stream of data batches to the output, while + ## setting it too low may mean the broker's messages are never flushed. + # max_undelivered_messages = 1000 + + ## Maximum amount of time the consumer should take to process messages. If + ## the debug log prints messages from sarama about 'abandoning subscription + ## to [topic] because consuming was taking too long', increase this value to + ## longer than the time taken by the output plugin(s). + ## + ## Note that the effective timeout could be between 'max_processing_time' and + ## '2 * max_processing_time'. + # max_processing_time = "100ms" + + ## The default number of message bytes to fetch from the broker in each + ## request (default 1MB). This should be larger than the majority of + ## your messages, or else the consumer will spend a lot of time + ## negotiating sizes and not actually consuming. Similar to the JVM's + ## `fetch.message.max.bytes`. + # consumer_fetch_default = "1MB" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + # data_format = "influx" +``` + +## Metrics + +The plugin accepts arbitrary input and parses it according to the `data_format` +setting. There is no predefined metric format. + +## Example Output + +There is no predefined metric format, so output depends on plugin input.
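The `.drone.yml` added above builds Telegraf with the tags `inputs.cl_kafka_consumer`, `outputs.influxdb_v2`, and `parsers.phasor_binary`, so a typical deployment wires these three plugins together. A minimal sketch of such a `telegraf.conf` follows; the broker address, topic, and InfluxDB names are placeholders, and it assumes the new parser registers under the `phasor_binary` data format name, as its directory and build tag suggest:

```toml
# Hypothetical end-to-end configuration for the custom build
[[inputs.cl_kafka_consumer]]
  brokers = ["localhost:9092"]
  topics = ["pmu-frames"]            # placeholder topic name
  topic_tag = "topic"
  ## The consumer forwards the Kafka topic to the parser through the new
  ## `extra` argument of Parse(); see kafka_consumer.go below.
  data_format = "phasor_binary"

[[outputs.influxdb_v2]]
  urls = ["http://localhost:8086"]   # placeholder URL
  token = "$INFLUX_TOKEN"
  organization = "example-org"       # placeholder organization
  bucket = "phasors"                 # placeholder bucket
```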
diff --git a/plugins/inputs/cl_kafka_consumer/kafka_consumer.go b/plugins/inputs/cl_kafka_consumer/kafka_consumer.go new file mode 100644 index 000000000..68269b74e --- /dev/null +++ b/plugins/inputs/cl_kafka_consumer/kafka_consumer.go @@ -0,0 +1,591 @@ +//go:generate ../../../tools/readme_config_includer/generator +package kafka_consumer + +import ( + "context" + _ "embed" + "errors" + "fmt" + "regexp" + "sort" + "strings" + "sync" + "time" + + "github.com/IBM/sarama" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/common/kafka" + "github.com/influxdata/telegraf/plugins/inputs" +) + +//go:embed sample.conf +var sampleConfig string + +var once sync.Once + +const ( + defaultMaxUndeliveredMessages = 1000 + defaultMaxProcessingTime = config.Duration(100 * time.Millisecond) + defaultConsumerGroup = "telegraf_metrics_consumers" + reconnectDelay = 5 * time.Second +) + +type KafkaConsumer struct { + Brokers []string `toml:"brokers"` + Version string `toml:"kafka_version"` + ConsumerGroup string `toml:"consumer_group"` + MaxMessageLen int `toml:"max_message_len"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + MaxProcessingTime config.Duration `toml:"max_processing_time"` + Offset string `toml:"offset"` + BalanceStrategy string `toml:"balance_strategy"` + Topics []string `toml:"topics"` + TopicRegexps []string `toml:"topic_regexps"` + TopicTag string `toml:"topic_tag"` + MsgHeadersAsTags []string `toml:"msg_headers_as_tags"` + MsgHeaderAsMetricName string `toml:"msg_header_as_metric_name"` + TimestampSource string `toml:"timestamp_source"` + ConsumerFetchDefault config.Size `toml:"consumer_fetch_default"` + ConnectionStrategy string `toml:"connection_strategy" deprecated:"1.33.0;1.40.0;use 'startup_error_behavior' instead"` + ResolveCanonicalBootstrapServersOnly bool `toml:"resolve_canonical_bootstrap_servers_only"` + Log telegraf.Logger `toml:"-"` + kafka.ReadConfig + + consumerCreator consumerGroupCreator + consumer consumerGroup + config *sarama.Config + + topicClient sarama.Client + regexps []regexp.Regexp + allWantedTopics []string + fingerprint string + + parser telegraf.Parser + topicLock sync.Mutex + wg sync.WaitGroup + cancel context.CancelFunc +} + +// consumerGroupHandler is a sarama.ConsumerGroupHandler implementation. +type consumerGroupHandler struct { + maxMessageLen int + topicTag string + msgHeadersToTags map[string]bool + msgHeaderToMetricName string + timestampSource string + + acc telegraf.TrackingAccumulator + sem semaphore + parser telegraf.Parser + wg sync.WaitGroup + cancel context.CancelFunc + + mu sync.Mutex + undelivered map[telegraf.TrackingID]message + + log telegraf.Logger +} + +// message is an aggregate type binding the Kafka message and the session so that offsets can be updated. 
+type message struct { + message *sarama.ConsumerMessage + session sarama.ConsumerGroupSession +} + +type ( + empty struct{} + semaphore chan empty +) + +type consumerGroup interface { + Consume(ctx context.Context, topics []string, handler sarama.ConsumerGroupHandler) error + Errors() <-chan error + Close() error +} + +type consumerGroupCreator interface { + create(brokers []string, group string, cfg *sarama.Config) (consumerGroup, error) +} + +type saramaCreator struct{} + +func (*saramaCreator) create(brokers []string, group string, cfg *sarama.Config) (consumerGroup, error) { + return sarama.NewConsumerGroup(brokers, group, cfg) +} + +func (*KafkaConsumer) SampleConfig() string { + return sampleConfig +} + +func (k *KafkaConsumer) Init() error { + kafka.SetLogger(k.Log.Level()) + + if k.MaxUndeliveredMessages == 0 { + k.MaxUndeliveredMessages = defaultMaxUndeliveredMessages + } + if time.Duration(k.MaxProcessingTime) == 0 { + k.MaxProcessingTime = defaultMaxProcessingTime + } + if k.ConsumerGroup == "" { + k.ConsumerGroup = defaultConsumerGroup + } + + switch k.TimestampSource { + case "": + k.TimestampSource = "metric" + case "metric", "inner", "outer": + default: + return fmt.Errorf("invalid timestamp source %q", k.TimestampSource) + } + + cfg := sarama.NewConfig() + + // Kafka version 0.10.2.0 is required for consumer groups. + // Try to parse the version from the config; if we cannot, use the default. + cfg.Version = sarama.V0_10_2_0 + if k.Version != "" { + version, err := sarama.ParseKafkaVersion(k.Version) + if err != nil { + return fmt.Errorf("invalid version: %w", err) + } + cfg.Version = version + } + + if err := k.SetConfig(cfg, k.Log); err != nil { + return fmt.Errorf("setting config failed: %w", err) + } + + switch strings.ToLower(k.Offset) { + case "oldest", "": + cfg.Consumer.Offsets.Initial = sarama.OffsetOldest + case "newest": + cfg.Consumer.Offsets.Initial = sarama.OffsetNewest + default: + return fmt.Errorf("invalid offset %q", k.Offset) + } + + switch strings.ToLower(k.BalanceStrategy) { + case "range", "": + cfg.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategyRange()} + case "roundrobin": + cfg.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategyRoundRobin()} + case "sticky": + cfg.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategySticky()} + default: + return fmt.Errorf("invalid balance strategy %q", k.BalanceStrategy) + } + + if k.consumerCreator == nil { + k.consumerCreator = &saramaCreator{} + } + + cfg.Net.ResolveCanonicalBootstrapServers = k.ResolveCanonicalBootstrapServersOnly + + cfg.Consumer.MaxProcessingTime = time.Duration(k.MaxProcessingTime) + + if k.ConsumerFetchDefault != 0 { + cfg.Consumer.Fetch.Default = int32(k.ConsumerFetchDefault) + } + + switch strings.ToLower(k.ConnectionStrategy) { + default: + return fmt.Errorf("invalid connection strategy %q", k.ConnectionStrategy) + case "defer", "startup", "": + } + + k.config = cfg + + if len(k.TopicRegexps) == 0 { + k.allWantedTopics = k.Topics + } else { + if err := k.compileTopicRegexps(); err != nil { + return err + } + // We have regexps, so we're going to need a client to ask + // the broker for topics + client, err := sarama.NewClient(k.Brokers, k.config) + if err != nil { + return err + } + k.topicClient = client + } + + return nil +} + +func (k *KafkaConsumer) SetParser(parser telegraf.Parser) { + k.parser = parser +} + +func (k *KafkaConsumer) Start(acc telegraf.Accumulator) error
{ + var err error + + // If TopicRegexps is set, add matches to Topics + if len(k.TopicRegexps) > 0 { + if err := k.refreshTopics(); err != nil { + return err + } + } + + ctx, cancel := context.WithCancel(context.Background()) + k.cancel = cancel + + if k.ConnectionStrategy != "defer" { + err = k.create() + if err != nil { + return &internal.StartupError{ + Err: fmt.Errorf("create consumer: %w", err), + Retry: errors.Is(err, sarama.ErrOutOfBrokers), + } + } + k.startErrorAdder(acc) + } + + // Start consumer goroutine + k.wg.Add(1) + go func() { + var err error + defer k.wg.Done() + + if k.consumer == nil { + err = k.create() + if err != nil { + acc.AddError(fmt.Errorf("create consumer async: %w", err)) + return + } + } + + k.startErrorAdder(acc) + + for ctx.Err() == nil { + handler := newConsumerGroupHandler(acc, k.MaxUndeliveredMessages, k.parser, k.Log) + handler.maxMessageLen = k.MaxMessageLen + handler.topicTag = k.TopicTag + handler.msgHeaderToMetricName = k.MsgHeaderAsMetricName + // if a message headers list is specified, pass it to the handler as a map + msgHeadersMap := make(map[string]bool, len(k.MsgHeadersAsTags)) + if len(k.MsgHeadersAsTags) > 0 { + for _, header := range k.MsgHeadersAsTags { + if k.MsgHeaderAsMetricName != header { + msgHeadersMap[header] = true + } + } + } + handler.msgHeadersToTags = msgHeadersMap + handler.timestampSource = k.TimestampSource + + // We need to copy allWantedTopics; the Consume() is + // long-running and we can easily deadlock if our + // topic-update-checker fires. + topics := make([]string, len(k.allWantedTopics)) + k.topicLock.Lock() + copy(topics, k.allWantedTopics) + k.topicLock.Unlock() + err := k.consumer.Consume(ctx, topics, handler) + if err != nil { + acc.AddError(fmt.Errorf("consume: %w", err)) + internal.SleepContext(ctx, reconnectDelay) //nolint:errcheck // ignore returned error as we cannot do anything about it anyway + } + } + err = k.consumer.Close() + if err != nil { + acc.AddError(fmt.Errorf("close: %w", err)) + } + }() + + return nil +} + +func (*KafkaConsumer) Gather(telegraf.Accumulator) error { + return nil +} + +func (k *KafkaConsumer) Stop() { + // Lock so that a topic refresh cannot start while we are stopping. + k.topicLock.Lock() + if k.topicClient != nil { + k.topicClient.Close() + } + k.topicLock.Unlock() + + k.cancel() + k.wg.Wait() +} + +func (k *KafkaConsumer) compileTopicRegexps() error { + // While we can add new topics matching extant regexps, we can't + // update that list on the fly. We compile them once at startup. + // Changing them is a configuration change and requires a restart. + + k.regexps = make([]regexp.Regexp, 0, len(k.TopicRegexps)) + for _, r := range k.TopicRegexps { + re, err := regexp.Compile(r) + if err != nil { + return fmt.Errorf("regular expression %q did not compile: %w", r, err) + } + k.regexps = append(k.regexps, *re) + } + return nil +} + +func (k *KafkaConsumer) refreshTopics() error { + // We have instantiated a new generic Kafka client, so we can ask + // it for all the topics it knows about. Then we build + // regexps from our strings, loop over those, loop over the + // topics, and if we find a match, add that topic to + // our topic set, which then we turn back into a list at the end.
+ + if len(k.regexps) == 0 { + return nil + } + + allDiscoveredTopics, err := k.topicClient.Topics() + if err != nil { + return err + } + k.Log.Debugf("discovered topics: %v", allDiscoveredTopics) + + extantTopicSet := make(map[string]bool, len(allDiscoveredTopics)) + for _, t := range allDiscoveredTopics { + extantTopicSet[t] = true + } + // Even if a topic specified by a literal string (that is, k.Topics) + // does not appear in the topic list, we want to keep it around, in + // case it pops back up--it is not guaranteed to be matched by any + // of our regular expressions. Therefore, we pretend that it's in + // extantTopicSet, even if it isn't. + // + // Assuming that literally-specified topics are usually in the topics + // present on the broker, this should not need a resizing (although if + // you have many topics that you don't care about, it will be too big) + wantedTopicSet := make(map[string]bool, len(allDiscoveredTopics)) + for _, t := range k.Topics { + // Get our pre-specified topics + k.Log.Debugf("adding literally-specified topic %s", t) + wantedTopicSet[t] = true + } + for _, t := range allDiscoveredTopics { + // Add topics that match regexps + for _, r := range k.regexps { + if r.MatchString(t) { + wantedTopicSet[t] = true + k.Log.Debugf("adding regexp-matched topic %q", t) + break + } + } + } + topicList := make([]string, 0, len(wantedTopicSet)) + for t := range wantedTopicSet { + topicList = append(topicList, t) + } + sort.Strings(topicList) + fingerprint := strings.Join(topicList, ";") + if fingerprint != k.fingerprint { + k.Log.Infof("updating topics: replacing %q with %q", k.allWantedTopics, topicList) + } + k.topicLock.Lock() + k.fingerprint = fingerprint + k.allWantedTopics = topicList + k.topicLock.Unlock() + return nil +} + +func (k *KafkaConsumer) create() error { + var err error + k.consumer, err = k.consumerCreator.create( + k.Brokers, + k.ConsumerGroup, + k.config, + ) + + return err +} + +func (k *KafkaConsumer) startErrorAdder(acc telegraf.Accumulator) { + k.wg.Add(1) + go func() { + defer k.wg.Done() + for err := range k.consumer.Errors() { + acc.AddError(fmt.Errorf("channel: %w", err)) + } + }() +} + +func newConsumerGroupHandler(acc telegraf.Accumulator, maxUndelivered int, parser telegraf.Parser, log telegraf.Logger) *consumerGroupHandler { + handler := &consumerGroupHandler{ + acc: acc.WithTracking(maxUndelivered), + sem: make(chan empty, maxUndelivered), + undelivered: make(map[telegraf.TrackingID]message, maxUndelivered), + parser: parser, + log: log, + } + return handler +} + +// Setup is called once when a new session is opened. It sets up the handler and begins processing delivered messages. +func (h *consumerGroupHandler) Setup(sarama.ConsumerGroupSession) error { + h.undelivered = make(map[telegraf.TrackingID]message) + + ctx, cancel := context.WithCancel(context.Background()) + h.cancel = cancel + + h.wg.Add(1) + go func() { + defer h.wg.Done() + h.run(ctx) + }() + return nil +} + +// ConsumeClaim is called once for each claim in a goroutine and must be thread-safe. Should run until the claim is closed.
+func (h *consumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { + ctx := session.Context() + + for { + err := h.reserve(ctx) + if err != nil { + return err + } + + select { + case <-ctx.Done(): + return nil + case msg, ok := <-claim.Messages(): + if !ok { + return nil + } + err := h.handle(session, msg) + if err != nil { + h.acc.AddError(err) + } + } + } +} + +// Cleanup stops the internal goroutine and is called after all ConsumeClaim functions have completed. +func (h *consumerGroupHandler) Cleanup(sarama.ConsumerGroupSession) error { + h.cancel() + h.wg.Wait() + return nil +} + +// run processes any delivered metrics during the lifetime of the session. +func (h *consumerGroupHandler) run(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case track := <-h.acc.Delivered(): + h.onDelivery(track) + } + } +} + +func (h *consumerGroupHandler) onDelivery(track telegraf.DeliveryInfo) { + h.mu.Lock() + defer h.mu.Unlock() + + msg, ok := h.undelivered[track.ID()] + if !ok { + h.log.Errorf("Could not mark message delivered: %d", track.ID()) + return + } + + if track.Delivered() { + msg.session.MarkMessage(msg.message, "") + } + + delete(h.undelivered, track.ID()) + <-h.sem +} + +// reserve blocks until there is an available slot for a new message. +func (h *consumerGroupHandler) reserve(ctx context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + case h.sem <- empty{}: + return nil + } +} + +func (h *consumerGroupHandler) release() { + <-h.sem +} + +// handle processes a message and if successful saves it to be acknowledged after delivery. +func (h *consumerGroupHandler) handle(session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) error { + if h.maxMessageLen != 0 && len(msg.Value) > h.maxMessageLen { + session.MarkMessage(msg, "") + h.release() + return fmt.Errorf("message exceeds max_message_len (actual %d, max %d)", + len(msg.Value), h.maxMessageLen) + } + + metrics, err := h.parser.Parse(msg.Value, msg.Topic) + if err != nil { + session.MarkMessage(msg, "") + h.release() + return err + } + + if len(metrics) == 0 { + once.Do(func() { + h.log.Debug(internal.NoMetricsCreatedMsg) + }) + } + + // Check if any message header should override the metric name or should be passed as a tag + if len(h.msgHeadersToTags) > 0 || h.msgHeaderToMetricName != "" { + for _, header := range msg.Headers { + // convert to a string as the header and value are byte arrays.
+ headerKey := string(header.Key) + if _, exists := h.msgHeadersToTags[headerKey]; exists { + // If the message header should be passed as a tag, add it to the metrics + for _, metric := range metrics { + metric.AddTag(headerKey, string(header.Value)) + } + } else { + if h.msgHeaderToMetricName == headerKey { + for _, metric := range metrics { + metric.SetName(string(header.Value)) + } + } + } + } + } + + // Add the topic name as a tag with the topicTag name specified in the config + if len(h.topicTag) > 0 { + for _, metric := range metrics { + metric.AddTag(h.topicTag, msg.Topic) + } + } + + // Override the metric timestamp if required + switch h.timestampSource { + case "inner": + for _, metric := range metrics { + metric.SetTime(msg.Timestamp) + } + case "outer": + for _, metric := range metrics { + metric.SetTime(msg.BlockTimestamp) + } + } + + h.mu.Lock() + id := h.acc.AddTrackingMetricGroup(metrics) + h.undelivered[id] = message{session: session, message: msg} + h.mu.Unlock() + return nil +} + +func init() { + inputs.Add("cl_kafka_consumer", func() telegraf.Input { + return &KafkaConsumer{} + }) +} diff --git a/plugins/inputs/cl_kafka_consumer/sample.conf b/plugins/inputs/cl_kafka_consumer/sample.conf new file mode 100644 index 000000000..7a4abcda4 --- /dev/null +++ b/plugins/inputs/cl_kafka_consumer/sample.conf @@ -0,0 +1,174 @@ +# Read metrics from Kafka topics +[[inputs.cl_kafka_consumer]] + ## Kafka brokers. + brokers = ["localhost:9092"] + + ## Set the minimal supported Kafka version. Should be a dot-separated + ## version string with four components for 0.x versions and three + ## components for versions from 1.0.0 onwards. This setting enables the + ## use of new Kafka features and APIs. Must be 0.10.2.0 (the default) + ## or greater. Please check the list of supported versions at + ## https://pkg.go.dev/github.com/Shopify/sarama#SupportedVersions + ## ex: kafka_version = "2.6.0" + ## ex: kafka_version = "0.10.2.0" + # kafka_version = "0.10.2.0" + + ## Topics to consume. + topics = ["telegraf"] + + ## Topic regular expressions to consume. Matches will be added to topics. + ## Example: topic_regexps = [ ".*test", "metric[0-9A-z]*" ] + # topic_regexps = [ ] + + ## When set, this tag will be added to all metrics with the topic as the value. + # topic_tag = "" + + ## The list of Kafka message headers that should be passed as metric tags. + ## Works only for Kafka version 0.11+; on lower versions the message headers + ## are not available. + # msg_headers_as_tags = [] + + ## The name of the Kafka message header whose value should override the metric name. + ## If the same header is specified both in this option and in the msg_headers_as_tags + ## option, it will be excluded from the msg_headers_as_tags list. + # msg_header_as_metric_name = "" + + ## Set metric(s) timestamp using the given source. + ## Available options are: + ## metric -- do not modify the metric timestamp + ## inner -- use the inner message timestamp (Kafka v0.10+) + ## outer -- use the outer (compressed) block timestamp (Kafka v0.10+) + # timestamp_source = "metric" + + ## Optional Client id + # client_id = "Telegraf" + + ## Optional TLS Config + # enable_tls = false + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Period between keep alive probes. + ## Defaults to the OS configuration if not specified or zero. + # keep_alive_period = "15s" + + ## SASL authentication credentials.
These settings should typically be used + ## with TLS encryption enabled + # sasl_username = "" + # sasl_password = "" + + ## Optional SASL, one of: + ## OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI, AWS-MSK-IAM + # sasl_mechanism = "" + + ## used if sasl_mechanism is GSSAPI + # sasl_gssapi_service_name = "" + # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH + # sasl_gssapi_auth_type = "KRB5_USER_AUTH" + # sasl_gssapi_kerberos_config_path = "/" + # sasl_gssapi_realm = "realm" + # sasl_gssapi_key_tab_path = "" + # sasl_gssapi_disable_pafxfast = false + + ## used if sasl_mechanism is OAUTHBEARER + # sasl_access_token = "" + + ## used if sasl_mechanism is AWS-MSK-IAM + # sasl_aws_msk_iam_region = "" + ## for profile based auth + ## sasl_aws_msk_iam_profile = "" + ## for role based auth + ## sasl_aws_msk_iam_role = "" + ## sasl_aws_msk_iam_session = "" + + ## Arbitrary key value string pairs to pass as a TOML table. For example: + ## {logicalCluster = "cluster-042", poolId = "pool-027"} + # sasl_extensions = {} + + ## SASL protocol version. When connecting to Azure EventHub set to 0. + # sasl_version = 1 + + # Disable Kafka metadata full fetch + # metadata_full = false + + ## Name of the consumer group. + # consumer_group = "telegraf_metrics_consumers" + + ## Compression codec represents the various compression codecs recognized by + ## Kafka in messages. + ## 0 : None + ## 1 : Gzip + ## 2 : Snappy + ## 3 : LZ4 + ## 4 : ZSTD + # compression_codec = 0 + ## Initial offset position; one of "oldest" or "newest". + # offset = "oldest" + + ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky". + # balance_strategy = "range" + + ## Maximum number of retries for metadata operations including + ## connecting. Sets Sarama library's Metadata.Retry.Max config value. If 0 or + ## unset, use the Sarama default of 3. + # metadata_retry_max = 0 + + ## Type of retry backoff. Valid options: "constant", "exponential" + # metadata_retry_type = "constant" + + ## Amount of time to wait before retrying. When metadata_retry_type is + ## "constant", each retry is delayed this amount. When "exponential", the + ## first retry is delayed this amount, and subsequent delays are doubled. If 0 + ## or unset, use the Sarama default of 250 ms + # metadata_retry_backoff = 0 + + ## Maximum amount of time to wait before retrying when metadata_retry_type is + ## "exponential". Ignored for other retry types. If 0, there is no backoff + ## limit. + # metadata_retry_max_duration = 0 + + ## When set to true, this turns each bootstrap broker address into a set of + ## IPs, then does a reverse lookup on each one to get its canonical hostname. + ## This list of hostnames then replaces the original address list. + # resolve_canonical_bootstrap_servers_only = false + + ## Maximum length of a message to consume, in bytes (default 0/unlimited); + ## larger messages are dropped + max_message_len = 1000000 + + ## Max undelivered messages + ## This plugin uses tracking metrics, which ensure messages are delivered to + ## outputs before acknowledging them to the original broker so that data + ## is not lost. This option sets the maximum number of messages to read from + ## the broker that have not been written by an output. + ## + ## This value needs to be picked with awareness of the agent's + ## metric_batch_size value as well. Setting max undelivered messages too high + ## can result in a constant stream of data batches to the output, while + ## setting it too low may mean the broker's messages are never flushed.
+ # max_undelivered_messages = 1000 + + ## Maximum amount of time the consumer should take to process messages. If + ## the debug log prints messages from sarama about 'abandoning subscription + ## to [topic] because consuming was taking too long', increase this value to + ## longer than the time taken by the output plugin(s). + ## + ## Note that the effective timeout could be between 'max_processing_time' and + ## '2 * max_processing_time'. + # max_processing_time = "100ms" + + ## The default number of message bytes to fetch from the broker in each + ## request (default 1MB). This should be larger than the majority of + ## your messages, or else the consumer will spend a lot of time + ## negotiating sizes and not actually consuming. Similar to the JVM's + ## `fetch.message.max.bytes`. + # consumer_fetch_default = "1MB" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + # data_format = "influx" diff --git a/plugins/inputs/cloud_pubsub/cloud_pubsub.go b/plugins/inputs/cloud_pubsub/cloud_pubsub.go index d91c55f66..b2fb56748 100644 --- a/plugins/inputs/cloud_pubsub/cloud_pubsub.go +++ b/plugins/inputs/cloud_pubsub/cloud_pubsub.go @@ -217,7 +217,7 @@ func (ps *PubSub) onMessage(ctx context.Context, msg message) error { return fmt.Errorf("unable to decode base64 message: %w", err) } - metrics, err := ps.parser.Parse(data) + metrics, err := ps.parser.Parse(data, "") if err != nil { msg.Ack() return fmt.Errorf("unable to parse message: %w", err) diff --git a/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push.go b/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push.go index d446d04e9..c7a4b936d 100644 --- a/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push.go +++ b/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push.go @@ -196,7 +196,7 @@ func (p *PubSubPush) serveWrite(res http.ResponseWriter, req *http.Request) { return } - metrics, err := p.Parse(sDec) + metrics, err := p.Parse(sDec, "") if err != nil { p.Log.Debug(err.Error()) res.WriteHeader(http.StatusBadRequest) diff --git a/plugins/inputs/directory_monitor/directory_monitor.go b/plugins/inputs/directory_monitor/directory_monitor.go index 2ef24a9fd..7cfee1c12 100644 --- a/plugins/inputs/directory_monitor/directory_monitor.go +++ b/plugins/inputs/directory_monitor/directory_monitor.go @@ -368,7 +368,7 @@ func (monitor *DirectoryMonitor) parseAtOnce(parser telegraf.Parser, reader io.R } func (monitor *DirectoryMonitor) parseMetrics(parser telegraf.Parser, line []byte, fileName string) (metrics []telegraf.Metric, err error) { - metrics, err = parser.Parse(line) + metrics, err = parser.Parse(line, "") if err != nil { if errors.Is(err, parsers.ErrEOF) { return nil, nil diff --git a/plugins/inputs/eventhub_consumer/eventhub_consumer.go b/plugins/inputs/eventhub_consumer/eventhub_consumer.go index 652def78c..3a49e3367 100644 --- a/plugins/inputs/eventhub_consumer/eventhub_consumer.go +++ b/plugins/inputs/eventhub_consumer/eventhub_consumer.go @@ -271,7 +271,7 @@ func deepCopyMetrics(in []telegraf.Metric) []telegraf.Metric { // CreateMetrics returns the Metrics from the Event. 
func (e *EventHub) createMetrics(event *eventhub.Event) ([]telegraf.Metric, error) { - metrics, err := e.parser.Parse(event.Data) + metrics, err := e.parser.Parse(event.Data, "") if err != nil { return nil, err } diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index ca87944c9..c755795b8 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -142,7 +142,7 @@ func (e *Exec) processCommand(acc telegraf.Accumulator, cmd string) error { return fmt.Errorf("exec: %w for command %q: %s", runErr, cmd, string(errBuf)) } - metrics, err := e.parser.Parse(out) + metrics, err := e.parser.Parse(out, "") if err != nil { return err } diff --git a/plugins/inputs/execd/execd.go b/plugins/inputs/execd/execd.go index 91eef2083..aabd336bf 100644 --- a/plugins/inputs/execd/execd.go +++ b/plugins/inputs/execd/execd.go @@ -108,7 +108,7 @@ func (e *Execd) cmdReadOut(out io.Reader) { continue } - metrics, err := e.parser.Parse(data) + metrics, err := e.parser.Parse(data, "") if err != nil { e.acc.AddError(fmt.Errorf("parse error: %w", err)) } diff --git a/plugins/inputs/file/file.go b/plugins/inputs/file/file.go index 3fba04e2e..877395c04 100644 --- a/plugins/inputs/file/file.go +++ b/plugins/inputs/file/file.go @@ -109,7 +109,7 @@ func (f *File) readMetric(filename string) ([]telegraf.Metric, error) { if err != nil { return nil, fmt.Errorf("could not instantiate parser: %w", err) } - metrics, err := parser.Parse(fileContents) + metrics, err := parser.Parse(fileContents, "") if err != nil { return metrics, fmt.Errorf("could not parse %q: %w", filename, err) } diff --git a/plugins/inputs/firehose/firehose.go b/plugins/inputs/firehose/firehose.go index 987864e0b..cbaa0f8e4 100644 --- a/plugins/inputs/firehose/firehose.go +++ b/plugins/inputs/firehose/firehose.go @@ -201,7 +201,7 @@ func (f *Firehose) handleRequest(req *http.Request) (*message, error) { // Parse the metrics var metrics []telegraf.Metric for _, record := range records { - m, err := f.parser.Parse(record) + m, err := f.parser.Parse(record, "") if err != nil { // respond with bad request status code to inform firehose about the failure msg.responseCode = http.StatusBadRequest diff --git a/plugins/inputs/google_cloud_storage/google_cloud_storage.go b/plugins/inputs/google_cloud_storage/google_cloud_storage.go index 2c45552ae..13ff45598 100644 --- a/plugins/inputs/google_cloud_storage/google_cloud_storage.go +++ b/plugins/inputs/google_cloud_storage/google_cloud_storage.go @@ -147,7 +147,7 @@ func (gcs *GCS) fetchedMetrics(r *storage.Reader) ([]telegraf.Metric, error) { return nil, err } - return gcs.parser.Parse(buf.Bytes()) + return gcs.parser.Parse(buf.Bytes(), "") } func (gcs *GCS) reachedThreshlod(processed int) bool { diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go index 5557d5bd1..b6b771bea 100644 --- a/plugins/inputs/http/http.go +++ b/plugins/inputs/http/http.go @@ -201,7 +201,7 @@ func (h *HTTP) gatherURL(acc telegraf.Accumulator, url string) error { if err != nil { return fmt.Errorf("instantiating parser failed: %w", err) } - metrics, err := parser.Parse(b) + metrics, err := parser.Parse(b, "") if err != nil { return fmt.Errorf("parsing metrics failed: %w", err) } diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go index e940620fb..a5de8ee55 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -278,7 +278,7 @@ func (h *HTTPListenerV2) 
serveWrite(res http.ResponseWriter, req *http.Request) return } - metrics, err := h.Parse(bytes) + metrics, err := h.Parse(bytes, "") if err != nil { h.Log.Debugf("Parse error: %s", err.Error()) if err := badRequest(res); err != nil { diff --git a/plugins/inputs/influxdb/influxdb_test.go b/plugins/inputs/influxdb/influxdb_test.go index ff324be6b..5c2136d05 100644 --- a/plugins/inputs/influxdb/influxdb_test.go +++ b/plugins/inputs/influxdb/influxdb_test.go @@ -215,7 +215,7 @@ func TestCloud1(t *testing.T) { buf, err := os.ReadFile("./testdata/cloud1.influx") require.NoError(t, err) - expected, err := parser.Parse(buf) + expected, err := parser.Parse(buf, "") require.NoError(t, err) // Check the output diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go index 52854d6f6..143fe0166 100644 --- a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go +++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go @@ -317,7 +317,7 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc { } } - metrics, err = parser.Parse(bytes) + metrics, err = parser.Parse(bytes, "") } else { parser := influx.Parser{} err = parser.Init() @@ -332,7 +332,7 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc { parser.SetTimePrecision(precision) } - metrics, err = parser.Parse(bytes) + metrics, err = parser.Parse(bytes, "") } if !errors.Is(err, io.EOF) && err != nil { diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index ac335eec0..9ff4ef3eb 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -525,7 +525,7 @@ func (h *consumerGroupHandler) handle(session sarama.ConsumerGroupSession, msg * len(msg.Value), h.maxMessageLen) } - metrics, err := h.parser.Parse(msg.Value) + metrics, err := h.parser.Parse(msg.Value, "") if err != nil { session.MarkMessage(msg, "") h.release() diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer.go b/plugins/inputs/kinesis_consumer/kinesis_consumer.go index 87b272e58..5510cfc55 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer.go @@ -210,7 +210,7 @@ func (k *KinesisConsumer) onMessage(acc telegraf.TrackingAccumulator, shard stri if err != nil { return err } - metrics, err := k.parser.Parse(data) + metrics, err := k.parser.Parse(data, "") if err != nil { return err } diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index bbf826d81..eda2197da 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -250,7 +250,7 @@ func (m *MQTTConsumer) onMessage(_ mqtt.Client, msg mqtt.Message) { m.payloadSize.Incr(int64(payloadBytes)) m.messagesRecv.Incr(1) - metrics, err := m.parser.Parse(msg.Payload()) + metrics, err := m.parser.Parse(msg.Payload(), "") if err != nil || len(metrics) == 0 { if len(metrics) == 0 { once.Do(func() { diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index 32f5b7e9f..966f130d7 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -64,7 +64,7 @@ type fakeParser struct{} // fakeParser satisfies telegraf.Parser var _ telegraf.Parser = &fakeParser{} -func (*fakeParser) Parse([]byte) ([]telegraf.Metric, error) { +func 
(*fakeParser) Parse([]byte, string) ([]telegraf.Metric, error) { panic("not implemented") }
@@ -716,7 +716,7 @@ func TestIntegration(t *testing.T) {
 	}
 	expected := make([]telegraf.Metric, 0, len(metrics))
 	for _, x := range metrics {
-		metrics, err := parser.Parse([]byte(x))
+		metrics, err := parser.Parse([]byte(x), "")
 		for i := range metrics {
 			metrics[i].AddTag("topic", topic)
 		}
@@ -949,7 +949,7 @@ func TestStartupErrorBehaviorRetryIntegration(t *testing.T) {
 	}
 	expected := make([]telegraf.Metric, 0, len(metrics))
 	for _, x := range metrics {
-		metrics, err := parser.Parse([]byte(x))
+		metrics, err := parser.Parse([]byte(x), "")
 		for i := range metrics {
 			metrics[i].AddTag("topic", topic)
 		}
diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go
index 43531cc53..2226926d4 100644
--- a/plugins/inputs/nats_consumer/nats_consumer.go
+++ b/plugins/inputs/nats_consumer/nats_consumer.go
@@ -228,7 +228,7 @@ func (n *NatsConsumer) receiver(ctx context.Context) {
 				<-sem
 				<-sem
 			case msg := <-n.in:
-				metrics, err := n.parser.Parse(msg.Data)
+				metrics, err := n.parser.Parse(msg.Data, "")
 				if err != nil {
 					n.Log.Errorf("Subject: %s, error: %s", msg.Subject, err.Error())
 					<-sem
diff --git a/plugins/inputs/nsq_consumer/nsq_consumer.go b/plugins/inputs/nsq_consumer/nsq_consumer.go
index 1516e4f2a..8feca6c08 100644
--- a/plugins/inputs/nsq_consumer/nsq_consumer.go
+++ b/plugins/inputs/nsq_consumer/nsq_consumer.go
@@ -89,7 +89,7 @@ func (n *NSQConsumer) Start(ac telegraf.Accumulator) error {
 	}
 	n.consumer.SetLogger(&logger{log: n.Log}, nsq.LogLevelInfo)
 	n.consumer.AddHandler(nsq.HandlerFunc(func(message *nsq.Message) error {
-		metrics, err := n.parser.Parse(message.Body)
+		metrics, err := n.parser.Parse(message.Body, "")
 		if err != nil {
 			acc.AddError(err)
 			// Remove the message from the queue
diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go
index 560a7fae4..d0e15820b 100644
--- a/plugins/inputs/prometheus/prometheus.go
+++ b/plugins/inputs/prometheus/prometheus.go
@@ -561,7 +561,7 @@ func (p *Prometheus) gatherURL(u urlAndAddress, acc telegraf.Accumulator) (map[s
 			Log: p.Log,
 		}
 	}
-	metrics, err := metricParser.Parse(body)
+	metrics, err := metricParser.Parse(body, "")
 	if err != nil {
 		return requestFields, tags, fmt.Errorf("error reading metrics for %q: %w", u.url, err)
 	}
diff --git a/plugins/inputs/socket_listener/socket_listener.go b/plugins/inputs/socket_listener/socket_listener.go
index 17d753147..b5955d21b 100644
--- a/plugins/inputs/socket_listener/socket_listener.go
+++ b/plugins/inputs/socket_listener/socket_listener.go
@@ -51,7 +51,7 @@ func (sl *SocketListener) Init() error {
 func (sl *SocketListener) Start(acc telegraf.Accumulator) error {
 	// Create the callbacks for parsing the data and recording issues
 	onData := func(_ net.Addr, data []byte, receiveTime time.Time) {
-		metrics, err := sl.parser.Parse(data)
+		metrics, err := sl.parser.Parse(data, "")
 		if err != nil {
 			acc.AddError(err)
 
diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go
index 46b268f78..9fa3a4d79 100644
--- a/plugins/inputs/tail/tail.go
+++ b/plugins/inputs/tail/tail.go
@@ -313,7 +313,7 @@ func (t *Tail) tailNewFiles() error {
 }
 
 func parseLine(parser telegraf.Parser, line string) ([]telegraf.Metric, error) {
-	m, err := parser.Parse([]byte(line))
+	m, err := parser.Parse([]byte(line), "")
 	if err != nil {
 		if errors.Is(err, parsers.ErrEOF) {
 			return nil, nil
diff --git a/plugins/outputs/cloud_pubsub/cloud_pubsub_test.go b/plugins/outputs/cloud_pubsub/cloud_pubsub_test.go
index 9b0f7519d..c5fb2f898 100644
--- a/plugins/outputs/cloud_pubsub/cloud_pubsub_test.go
+++ b/plugins/outputs/cloud_pubsub/cloud_pubsub_test.go
@@ -222,7 +222,7 @@ func verifyMetricPublished(t *testing.T, m telegraf.Metric, published map[string
 		data = v
 	}
 
-	parsed, err := p.Parse(data)
+	parsed, err := p.Parse(data, "")
 	if err != nil {
 		t.Fatalf("could not parse influxdb metric from published message: %s", string(data))
 	}
diff --git a/plugins/outputs/cloud_pubsub/topic_stubbed.go b/plugins/outputs/cloud_pubsub/topic_stubbed.go
index c47c0624f..04da92d75 100644
--- a/plugins/outputs/cloud_pubsub/topic_stubbed.go
+++ b/plugins/outputs/cloud_pubsub/topic_stubbed.go
@@ -207,7 +207,7 @@ func (t *stubTopic) parseIDs(msg *pubsub.Message) []string {
 		}
 		d = strData
 	}
-	metrics, err := p.Parse(d)
+	metrics, err := p.Parse(d, "")
 	if err != nil {
 		t.Fatalf("unexpected parsing error: %v", err)
 	}
diff --git a/plugins/outputs/mqtt/mqtt_test.go b/plugins/outputs/mqtt/mqtt_test.go
index 9e12e77fb..3753d7b19 100644
--- a/plugins/outputs/mqtt/mqtt_test.go
+++ b/plugins/outputs/mqtt/mqtt_test.go
@@ -802,7 +802,7 @@ func TestIntegrationMQTTLayoutHomieV4(t *testing.T) {
 
 func createMetricMessageHandler(acc telegraf.Accumulator, parser telegraf.Parser) paho.MessageHandler {
 	return func(_ paho.Client, msg paho.Message) {
-		metrics, err := parser.Parse(msg.Payload())
+		metrics, err := parser.Parse(msg.Payload(), "")
 		if err != nil {
 			acc.AddError(err)
 			return
diff --git a/plugins/parsers/all/phasor_binary.go b/plugins/parsers/all/phasor_binary.go
new file mode 100644
index 000000000..5297df1d1
--- /dev/null
+++ b/plugins/parsers/all/phasor_binary.go
@@ -0,0 +1,5 @@
+//go:build !custom || parsers || parsers.phasor_binary
+
+package all
+
+import _ "github.com/influxdata/telegraf/plugins/parsers/phasor_binary" // register plugin
diff --git a/plugins/parsers/avro/parser.go b/plugins/parsers/avro/parser.go
index 6735eeb34..6f7c547c0 100644
--- a/plugins/parsers/avro/parser.go
+++ b/plugins/parsers/avro/parser.go
@@ -82,7 +82,7 @@ func (p *Parser) Init() error {
 	return nil
 }
 
-func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
+func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) {
 	var schema string
 	var codec *goavro.Codec
 	var err error
@@ -145,7 +145,7 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
 }
 
 func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
-	metrics, err := p.Parse([]byte(line))
+	metrics, err := p.Parse([]byte(line), "")
 	if err != nil {
 		return nil, err
 	}
diff --git a/plugins/parsers/avro/parser_test.go b/plugins/parsers/avro/parser_test.go
index 43c44ac8c..7eafb238f 100644
--- a/plugins/parsers/avro/parser_test.go
+++ b/plugins/parsers/avro/parser_test.go
@@ -116,7 +116,7 @@ func BenchmarkParsing(b *testing.B) {
 	b.ResetTimer()
 	for n := 0; n < b.N; n++ {
 		//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
-		plugin.Parse(benchmarkData)
+		plugin.Parse(benchmarkData, "")
 	}
 }
 
@@ -150,7 +150,7 @@ func TestBenchmarkDataBinary(t *testing.T) {
 	require.NoError(t, err)
 
 	// Do the actual testing
-	actual, err := plugin.Parse(benchmarkData)
+	actual, err := plugin.Parse(benchmarkData, "")
 	require.NoError(t, err)
 	testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics())
 }
@@ -178,6 +178,6 @@
 	for n := 0; n < b.N; n++ {
 		//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
-		plugin.Parse(benchmarkData)
+		plugin.Parse(benchmarkData, "")
 	}
 }
diff --git a/plugins/parsers/binary/parser.go b/plugins/parsers/binary/parser.go
index 9e417f63d..98c3c4776 100644
--- a/plugins/parsers/binary/parser.go
+++ b/plugins/parsers/binary/parser.go
@@ -71,7 +71,7 @@ func (p *Parser) Init() error {
 	return nil
 }
 
-func (p *Parser) Parse(data []byte) ([]telegraf.Metric, error) {
+func (p *Parser) Parse(data []byte, extra string) ([]telegraf.Metric, error) {
 	t := time.Now()
 
 	// If the data is encoded in HEX, we need to decode it first
@@ -122,7 +122,7 @@ func (p *Parser) Parse(data []byte) ([]telegraf.Metric, error) {
 }
 
 func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
-	metrics, err := p.Parse([]byte(line))
+	metrics, err := p.Parse([]byte(line), "")
 	if err != nil {
 		return nil, err
 	}
diff --git a/plugins/parsers/binary/parser_test.go b/plugins/parsers/binary/parser_test.go
index 2ccd4c145..a59044842 100644
--- a/plugins/parsers/binary/parser_test.go
+++ b/plugins/parsers/binary/parser_test.go
@@ -196,7 +196,7 @@ func TestFilterMatchInvalid(t *testing.T) {
 				metricName: "binary",
 			}
 			require.NoError(t, parser.Init())
-			_, err := parser.Parse(testdata)
+			_, err := parser.Parse(testdata, "")
 			require.EqualError(t, err, tt.expected)
 		})
 	}
@@ -221,7 +221,7 @@ func TestFilterNoMatch(t *testing.T) {
 		data, err := generateBinary(testdata, internal.HostEndianness)
 		require.NoError(t, err)
 
-		_, err = parser.Parse(data)
+		_, err = parser.Parse(data, "")
 		require.EqualError(t, err, "no matching configuration")
 	})
 
@@ -242,7 +242,7 @@ func TestFilterNoMatch(t *testing.T) {
 		data, err := generateBinary(testdata, internal.HostEndianness)
 		require.NoError(t, err)
 
-		metrics, err := parser.Parse(data)
+		metrics, err := parser.Parse(data, "")
 		require.NoError(t, err)
 		require.Empty(t, metrics)
 	})
@@ -320,7 +320,7 @@ func TestFilterNone(t *testing.T) {
 			data, err := generateBinary(tt.data, order)
 			require.NoError(t, err)
 
-			metrics, err := parser.Parse(data)
+			metrics, err := parser.Parse(data, "")
 			require.NoError(t, err)
 			require.NotEmpty(t, metrics)
 		})
@@ -392,7 +392,7 @@ func TestFilterLength(t *testing.T) {
 			data, err := generateBinary(tt.data, internal.HostEndianness)
 			require.NoError(t, err)
 
-			metrics, err := parser.Parse(data)
+			metrics, err := parser.Parse(data, "")
 			require.NoError(t, err)
 			if tt.expected {
 				require.NotEmpty(t, metrics)
@@ -558,7 +558,7 @@ func TestFilterContent(t *testing.T) {
 
 		var metrics []telegraf.Metric
 		for _, data := range testdata {
-			m, err := parser.Parse(data)
+			m, err := parser.Parse(data, "")
 			require.NoError(t, err)
 			metrics = append(metrics, m...)
 		}
@@ -865,7 +865,7 @@ func TestParseInvalid(t *testing.T) {
 			data, err := generateBinary(tt.data, order)
 			require.NoError(t, err)
 
-			_, err = parser.Parse(data)
+			_, err = parser.Parse(data, "")
 			require.EqualError(t, err, tt.expected)
 		})
 	}
@@ -1390,7 +1390,7 @@ func TestParse(t *testing.T) {
 			data, err := generateBinary(tt.data, order)
 			require.NoError(t, err)
 
-			metrics, err := parser.Parse(data)
+			metrics, err := parser.Parse(data, "")
 			require.NoError(t, err)
 
 			var options []cmp.Option
@@ -1479,7 +1479,7 @@ func TestHexEncoding(t *testing.T) {
 	require.NoError(t, err)
 	encoded := hex.EncodeToString(data)
 
-	metrics, err := parser.Parse([]byte(encoded))
+	metrics, err := parser.Parse([]byte(encoded), "")
 	require.NoError(t, err)
 	require.NotEmpty(t, metrics)
 }
@@ -1564,7 +1564,7 @@ func TestBenchmarkData(t *testing.T) {
 
 	actual := make([]telegraf.Metric, 0, 2)
 	for _, buf := range benchmarkData {
-		m, err := plugin.Parse(buf)
+		m, err := plugin.Parse(buf, "")
 		require.NoError(t, err)
 		actual = append(actual, m...)
 	}
@@ -1609,6 +1609,6 @@ func BenchmarkParsing(b *testing.B) {
 	for n := 0; n < b.N; n++ {
 		//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
-		plugin.Parse(benchmarkData[n%2])
+		plugin.Parse(benchmarkData[n%2], "")
 	}
 }
diff --git a/plugins/parsers/collectd/parser.go b/plugins/parsers/collectd/parser.go
index 8a617a33d..381cb8045 100644
--- a/plugins/parsers/collectd/parser.go
+++ b/plugins/parsers/collectd/parser.go
@@ -65,7 +65,7 @@ func (p *Parser) Init() error {
 	return nil
 }
 
-func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
+func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) {
 	valueLists, err := network.Parse(buf, p.popts)
 	if err != nil {
 		return nil, fmt.Errorf("collectd parser error: %w", err)
@@ -91,7 +91,7 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
 }
 
 func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
-	metrics, err := p.Parse([]byte(line))
+	metrics, err := p.Parse([]byte(line), "")
 	if err != nil {
 		return nil, err
 	}
diff --git a/plugins/parsers/collectd/parser_test.go b/plugins/parsers/collectd/parser_test.go
index 362cd10bf..3d71b24e8 100644
--- a/plugins/parsers/collectd/parser_test.go
+++ b/plugins/parsers/collectd/parser_test.go
@@ -131,7 +131,7 @@ func TestParse(t *testing.T) {
 		parser := &Parser{}
 		require.NoError(t, parser.Init())
 
-		metrics, err := parser.Parse(bytes)
+		metrics, err := parser.Parse(bytes, "")
 		require.NoError(t, err)
 
 		assertEqualMetrics(t, tc.expected, metrics)
@@ -146,7 +146,7 @@ func TestParseMultiValueSplit(t *testing.T) {
 	parser := &Parser{ParseMultiValue: "split"}
 	require.NoError(t, parser.Init())
 
-	metrics, err := parser.Parse(bytes)
+	metrics, err := parser.Parse(bytes, "")
 	require.NoError(t, err)
 
 	require.Len(t, metrics, 2)
@@ -160,7 +160,7 @@ func TestParseMultiValueJoin(t *testing.T) {
 	parser := &Parser{ParseMultiValue: "join"}
 	require.NoError(t, parser.Init())
 
-	metrics, err := parser.Parse(bytes)
+	metrics, err := parser.Parse(bytes, "")
 	require.NoError(t, err)
 
 	require.Len(t, metrics, 1)
@@ -178,7 +178,7 @@ func TestParse_DefaultTags(t *testing.T) {
 		"foo": "bar",
 	})
 	require.NoError(t, err)
-	metrics, err := parser.Parse(bytes)
+	metrics, err := parser.Parse(bytes, "")
 	require.NoError(t, err)
 
 	require.Equal(t, "bar", metrics[0].Tags()["foo"])
@@ -198,7 +198,7 @@ func TestParse_SignSecurityLevel(t *testing.T) {
 	bytes, err := buf.Bytes()
 	require.NoError(t, err)
 
-	metrics, err := parser.Parse(bytes)
+	metrics, err := parser.Parse(bytes, "")
 	require.NoError(t, err)
 	assertEqualMetrics(t, singleMetric.expected, metrics)
 
@@ -209,7 +209,7 @@ func TestParse_SignSecurityLevel(t *testing.T) {
 	bytes, err = buf.Bytes()
 	require.NoError(t, err)
 
-	metrics, err = parser.Parse(bytes)
+	metrics, err = parser.Parse(bytes, "")
 	require.NoError(t, err)
 	assertEqualMetrics(t, singleMetric.expected, metrics)
 
@@ -219,7 +219,7 @@ func TestParse_SignSecurityLevel(t *testing.T) {
 	bytes, err = buf.Bytes()
 	require.NoError(t, err)
 
-	metrics, err = parser.Parse(bytes)
+	metrics, err = parser.Parse(bytes, "")
 	require.NoError(t, err)
 	require.Empty(t, metrics)
 
@@ -230,7 +230,7 @@ func TestParse_SignSecurityLevel(t *testing.T) {
 	bytes, err = buf.Bytes()
 	require.NoError(t, err)
 
-	_, err = parser.Parse(bytes)
+	_, err = parser.Parse(bytes, "")
 	require.Error(t, err)
 }
 
@@ -248,7 +248,7 @@ func TestParse_EncryptSecurityLevel(t *testing.T) {
 	bytes, err := buf.Bytes()
 	require.NoError(t, err)
 
-	metrics, err := parser.Parse(bytes)
+	metrics, err := parser.Parse(bytes, "")
 	require.NoError(t, err)
 	require.Empty(t, metrics)
 
@@ -259,7 +259,7 @@ func TestParse_EncryptSecurityLevel(t *testing.T) {
 	bytes, err = buf.Bytes()
 	require.NoError(t, err)
 
-	metrics, err = parser.Parse(bytes)
+	metrics, err = parser.Parse(bytes, "")
 	require.NoError(t, err)
 	assertEqualMetrics(t, singleMetric.expected, metrics)
 
@@ -269,7 +269,7 @@ func TestParse_EncryptSecurityLevel(t *testing.T) {
 	bytes, err = buf.Bytes()
 	require.NoError(t, err)
 
-	metrics, err = parser.Parse(bytes)
+	metrics, err = parser.Parse(bytes, "")
 	require.NoError(t, err)
 	require.Empty(t, metrics)
 
@@ -280,7 +280,7 @@ func TestParse_EncryptSecurityLevel(t *testing.T) {
 	bytes, err = buf.Bytes()
 	require.NoError(t, err)
 
-	_, err = parser.Parse(bytes)
+	_, err = parser.Parse(bytes, "")
 	require.Error(t, err)
 }
 
@@ -387,7 +387,7 @@ func TestBenchmarkData(t *testing.T) {
 	parser := &Parser{}
 	require.NoError(t, parser.Init())
 
-	actual, err := parser.Parse(bytes)
+	actual, err := parser.Parse(bytes, "")
 	require.NoError(t, err)
 
 	testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics())
@@ -405,6 +405,6 @@ func BenchmarkParsing(b *testing.B) {
 	b.ResetTimer()
 	for n := 0; n < b.N; n++ {
 		//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
-		parser.Parse(bytes)
+		parser.Parse(bytes, "")
 	}
 }
diff --git a/plugins/parsers/csv/parser.go b/plugins/parsers/csv/parser.go
index d44c38ccc..ba1d9ff1c 100644
--- a/plugins/parsers/csv/parser.go
+++ b/plugins/parsers/csv/parser.go
@@ -217,7 +217,7 @@ func validDelim(r rune) bool {
 	return r != 0 && r != '"' && r != '\r' && r != '\n' && utf8.ValidRune(r) && r != utf8.RuneError
 }
 
-func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
+func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) {
 	// Reset the parser according to the specified mode
 	if p.ResetMode == "always" {
 		p.Reset()
diff --git a/plugins/parsers/csv/parser_test.go b/plugins/parsers/csv/parser_test.go
index 122597b92..46adbcfea 100644
--- a/plugins/parsers/csv/parser_test.go
+++ b/plugins/parsers/csv/parser_test.go
@@ -43,7 +43,7 @@ func TestHeaderConcatenationCSV(t *testing.T) {
 1,2,3
 3.4,70,test_name`
 
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 	require.Equal(t, "test_name", metrics[0].Name())
 }
@@ -63,7 +63,7 @@ func TestHeaderOverride(t *testing.T) {
 		"first": 3.4,
 		"second": int64(70),
 	}
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 	require.Equal(t, "test_name", metrics[0].Name())
 	require.Equal(t, expectedFields, metrics[0].Fields())
@@ -78,7 +78,7 @@ func TestHeaderOverride(t *testing.T) {
 	}
 	err = p.Init()
 	require.NoError(t, err)
-	metrics, err = p.Parse([]byte(testCSVRows[0]))
+	metrics, err = p.Parse([]byte(testCSVRows[0]), "")
 	require.NoError(t, err)
 	require.Empty(t, metrics)
 	m, err := p.ParseLine(testCSVRows[1])
@@ -102,7 +102,7 @@ func TestTimestamp(t *testing.T) {
 	testCSV := `line1,line2,line3
 23/05/09 04:05:06 PM,70,test_name
 07/11/09 04:05:06 PM,80,test_name2`
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 
 	require.Equal(t, int64(1243094706000000000), metrics[0].Time().UnixNano())
@@ -124,7 +124,7 @@ func TestTimestampYYYYMMDDHHmm(t *testing.T) {
 	testCSV := `line1,line2,line3
 200905231605,70,test_name
 200907111605,80,test_name2`
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 
 	require.Equal(t, int64(1243094700000000000), metrics[0].Time().UnixNano())
@@ -143,7 +143,7 @@ func TestTimestampError(t *testing.T) {
 	testCSV := `line1,line2,line3
 23/05/09 04:05:06 PM,70,test_name
 07/11/09 04:05:06 PM,80,test_name2`
-	_, err = p.Parse([]byte(testCSV))
+	_, err = p.Parse([]byte(testCSV), "")
 	require.Equal(t, errors.New("timestamp format must be specified"), err)
 }
 
@@ -161,7 +161,7 @@ func TestTimestampUnixFormat(t *testing.T) {
 	testCSV := `line1,line2,line3
 1243094706,70,test_name
 1257609906,80,test_name2`
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 	require.Equal(t, int64(1243094706000000000), metrics[0].Time().UnixNano())
 	require.Equal(t, int64(1257609906000000000), metrics[1].Time().UnixNano())
@@ -181,7 +181,7 @@ func TestTimestampUnixMSFormat(t *testing.T) {
 	testCSV := `line1,line2,line3
 1243094706123,70,test_name
 1257609906123,80,test_name2`
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 	require.Equal(t, int64(1243094706123000000), metrics[0].Time().UnixNano())
 	require.Equal(t, int64(1257609906123000000), metrics[1].Time().UnixNano())
@@ -199,7 +199,7 @@ func TestQuotedCharacter(t *testing.T) {
 	testCSV := `line1,line2,line3
 "3,4",70,test_name`
 
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 	require.Equal(t, "3,4", metrics[0].Fields()["first"])
 }
@@ -217,7 +217,7 @@ func TestDelimiter(t *testing.T) {
 	testCSV := `line1%line2%line3
 3,4%70%test_name`
 
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 	require.Equal(t, "3,4", metrics[0].Fields()["first"])
 }
@@ -233,7 +233,7 @@ func TestNullDelimiter(t *testing.T) {
 	require.NoError(t, err)
 
 	testCSV := strings.Join([]string{"3.4", "70", "test_name"}, "\u0000")
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 	require.InDelta(t, float64(3.4), metrics[0].Fields()["first"], testutil.DefaultDelta)
 	require.Equal(t, int64(70), metrics[0].Fields()["second"])
@@ -260,7 +260,7 @@ func TestValueConversion(t *testing.T) {
 		"fourth": "hello",
 	}
 
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 
 	expectedMetric := metric.New("test_value", expectedTags, expectedFields, time.Unix(0, 0))
@@ -272,7 +272,7 @@
 	// Test explicit type conversion.
 	p.ColumnTypes = []string{"float", "int", "bool", "string"}
 
-	metrics, err = p.Parse([]byte(testCSV))
+	metrics, err = p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 
 	returnedMetric = metric.New(metrics[0].Name(), metrics[0].Tags(), metrics[0].Fields(), time.Unix(0, 0))
@@ -301,7 +301,7 @@ func TestSkipComment(t *testing.T) {
 		"fourth": "name_this",
 	}
 
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 	require.Equal(t, expectedFields, metrics[0].Fields())
 }
@@ -325,7 +325,7 @@ func TestTrimSpace(t *testing.T) {
 		"fourth": "hello",
 	}
 
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 	require.Equal(t, expectedFields, metrics[0].Fields())
 
@@ -340,7 +340,7 @@ func TestTrimSpace(t *testing.T) {
 		" 1 , 2 ,3\n" +
 		" test space , 80 ,test_name"
 
-	metrics, err = p.Parse([]byte(testCSV))
+	metrics, err = p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 	require.Equal(t, map[string]interface{}{"col1": "test space", "col2": int64(80), "col3": "test_name"}, metrics[0].Fields())
 }
@@ -367,7 +367,7 @@ abcdefgh 0 2 false
 		"fourth": true,
 	}
 
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 	require.Equal(t, expectedFields, metrics[1].Fields())
 }
@@ -393,7 +393,7 @@ hello,80,test_name2`
 	expectedTags := map[string]string{
 		"line1": "hello",
 	}
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 	require.Equal(t, "test_name2", metrics[0].Name())
 	require.Equal(t, expectedFields, metrics[0].Fields())
@@ -410,7 +410,7 @@ hello,80,test_name2`
 	require.NoError(t, err)
 
 	testCSVRows := []string{"garbage nonsense\r\n", "line1,line2,line3\r\n", "hello,80,test_name2\r\n"}
-	metrics, err = p.Parse([]byte(testCSVRows[0]))
+	metrics, err = p.Parse([]byte(testCSVRows[0]), "")
 	require.ErrorIs(t, err, parsers.ErrEOF)
 	require.Nil(t, metrics)
 	m, err := p.ParseLine(testCSVRows[1])
@@ -437,7 +437,7 @@ func TestSkipColumns(t *testing.T) {
 		"line1": int64(80),
 		"line2": "test_name",
 	}
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 	require.Equal(t, expectedFields, metrics[0].Fields())
 }
@@ -456,7 +456,7 @@ func TestSkipColumnsWithHeader(t *testing.T) {
 		trash,80,test_name`
 
 	// we should expect an error if we try to get col1
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 	require.Equal(t, map[string]interface{}{"col2": int64(80), "col3": "test_name"}, metrics[0].Fields())
 }
@@ -471,7 +471,7 @@ func TestMultiHeader(t *testing.T) {
 1,2
 80,test_name`
 
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 	require.Equal(t, map[string]interface{}{"col1": int64(80), "col2": "test_name"}, metrics[0].Fields())
 
@@ -484,7 +484,7 @@ func TestMultiHeader(t *testing.T) {
 	err = p.Init()
 	require.NoError(t, err)
 
-	metrics, err = p.Parse([]byte(testCSVRows[0]))
+	metrics, err = p.Parse([]byte(testCSVRows[0]), "")
 	require.ErrorIs(t, err, parsers.ErrEOF)
 	require.Nil(t, metrics)
 	m, err := p.ParseLine(testCSVRows[1])
@@ -507,7 +507,7 @@ func TestParseStream(t *testing.T) {
 	csvHeader := "a,b,c"
 	csvBody := "1,2,3"
 
-	metrics, err := p.Parse([]byte(csvHeader))
+	metrics, err := p.Parse([]byte(csvHeader), "")
 	require.NoError(t, err)
 	require.Empty(t, metrics)
 	m, err := p.ParseLine(csvBody)
@@ -537,7 +537,7 @@ func TestParseLineMultiMetricErrorMessage(t *testing.T) {
 	csvOneRow := "1,2,3"
 	csvTwoRows := "4,5,6\n7,8,9"
 
-	metrics, err := p.Parse([]byte(csvHeader))
+	metrics, err := p.Parse([]byte(csvHeader), "")
 	require.NoError(t, err)
 	require.Empty(t, metrics)
 	m, err := p.ParseLine(csvOneRow)
@@ -556,7 +556,7 @@ func TestParseLineMultiMetricErrorMessage(t *testing.T) {
 	m, err = p.ParseLine(csvTwoRows)
 	require.Errorf(t, err, "expected 1 metric found 2")
 	require.Nil(t, m)
-	metrics, err = p.Parse([]byte(csvTwoRows))
+	metrics, err = p.Parse([]byte(csvTwoRows), "")
 	require.NoError(t, err)
 	require.Len(t, metrics, 2)
 }
@@ -585,7 +585,7 @@ func TestTimestampUnixFloatPrecision(t *testing.T) {
 		),
 	}
 
-	metrics, err := p.Parse([]byte(data))
+	metrics, err := p.Parse([]byte(data), "")
 	require.NoError(t, err)
 	testutil.RequireMetricsEqual(t, expected, metrics)
 }
@@ -617,7 +617,7 @@ func TestSkipMeasurementColumn(t *testing.T) {
 		),
 	}
 
-	metrics, err := p.Parse([]byte(data))
+	metrics, err := p.Parse([]byte(data), "")
 	require.NoError(t, err)
 	testutil.RequireMetricsEqual(t, expected, metrics)
 }
@@ -649,7 +649,7 @@ func TestSkipTimestampColumn(t *testing.T) {
 		),
 	}
 
-	metrics, err := p.Parse([]byte(data))
+	metrics, err := p.Parse([]byte(data), "")
 	require.NoError(t, err)
 	testutil.RequireMetricsEqual(t, expected, metrics)
 }
@@ -670,7 +670,7 @@ func TestTimestampTimezone(t *testing.T) {
 	testCSV := `line1,line2,line3
 23/05/09 11:05:06 PM,70,test_name
 07/11/09 11:05:06 PM,80,test_name2`
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 
 	require.Equal(t, int64(1243094706000000000), metrics[0].Time().UnixNano())
@@ -689,7 +689,7 @@ func TestEmptyMeasurementName(t *testing.T) {
 	testCSV := `,b
 1,2`
 
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 
 	expected := []telegraf.Metric{
@@ -716,7 +716,7 @@ func TestNumericMeasurementName(t *testing.T) {
 	testCSV := `a,b
 1,2`
 
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 
 	expected := []telegraf.Metric{
@@ -742,7 +742,7 @@ func TestStaticMeasurementName(t *testing.T) {
 	testCSV := `a,b
 1,2`
 
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 
 	expected := []telegraf.Metric{
@@ -770,7 +770,7 @@ func TestSkipEmptyStringValue(t *testing.T) {
 	testCSV := `a,b
 1,""`
 
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 
 	expected := []telegraf.Metric{
@@ -797,7 +797,7 @@ func TestSkipSpecifiedStringValue(t *testing.T) {
 	testCSV := `a,b
 1,MM`
 
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 
 	expected := []telegraf.Metric{
@@ -839,7 +839,7 @@ corrupted_line
 		"b": int64(4),
 	}
 
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 	require.Equal(t, expectedFields0, metrics[0].Fields())
 	require.Equal(t, expectedFields1, metrics[1].Fields())
@@ -973,7 +973,7 @@ timestamp,type,name,status
 	}
 
 	// Set default Tags
 	p.SetDefaultTags(map[string]string{"test": "tag"})
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 	for i, m := range metrics {
 		require.Equal(t, expectedFields[i], m.Fields())
@@ -1067,7 +1067,7 @@ fourth=plain
 			require.NoError(t, p.Init())
 			p.SetDefaultTags(defaultTags)
 
-			metrics, err := p.Parse(csv)
+			metrics, err := p.Parse(csv, "")
 			require.NoError(t, err)
 			require.Len(t, metrics, 1)
 			require.EqualValues(t, tt.expectedTags, metrics[0].Tags())
@@ -1143,7 +1143,7 @@ timestamp,type,name,status
 	p.SetDefaultTags(map[string]string{"test": "tag"})
 
 	// Do the parsing the first time
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 	testutil.RequireMetricsEqual(t, expected, metrics)
 
@@ -1165,12 +1165,12 @@ timestamp,type,name,status
 			time.Date(2021, 12, 1, 19, 1, 0, 0, time.UTC),
 		),
 	}
-	metrics, err = p.Parse([]byte(additionalCSV))
+	metrics, err = p.Parse([]byte(additionalCSV), "")
 	require.NoError(t, err)
 	testutil.RequireMetricsEqual(t, additionalExpected, metrics)
 
 	// This should fail when not resetting but reading again due to the header etc
-	_, err = p.Parse([]byte(testCSV))
+	_, err = p.Parse([]byte(testCSV), "")
 	require.Error(
 		t,
 		err,
@@ -1346,13 +1346,13 @@ timestamp,type,name,status
 	p.SetDefaultTags(map[string]string{"test": "tag"})
 
 	// Do the parsing the first time
-	metrics, err := p.Parse([]byte(testCSV))
+	metrics, err := p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 	testutil.RequireMetricsEqual(t, expected, metrics)
 
 	// Parsing another data line should fail as it is interpreted as header
 	additionalCSV := "2021-12-01T19:01:00+00:00,Reader,R009,5\r\n"
-	metrics, err = p.Parse([]byte(additionalCSV))
+	metrics, err = p.Parse([]byte(additionalCSV), "")
 	require.ErrorIs(t, err, parsers.ErrEOF)
 	require.Nil(t, metrics)
 
@@ -1400,7 +1400,7 @@ timestamp,category,id,flag
 	}
 
 	// This should work as the parser is reset
-	metrics, err = p.Parse([]byte(testCSV))
+	metrics, err = p.Parse([]byte(testCSV), "")
 	require.NoError(t, err)
 	testutil.RequireMetricsEqual(t, expected, metrics)
 }
@@ -1556,7 +1556,7 @@ func TestBenchmarkData(t *testing.T) {
 		),
 	}
 
-	actual, err := plugin.Parse([]byte(benchmarkData))
+	actual, err := plugin.Parse([]byte(benchmarkData), "")
 	require.NoError(t, err)
 	testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics())
 }
@@ -1573,6 +1573,6 @@ func BenchmarkParsing(b *testing.B) {
 	for n := 0; n < b.N; n++ {
 		//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
-		plugin.Parse([]byte(benchmarkData))
+		plugin.Parse([]byte(benchmarkData), "")
 	}
 }
diff --git a/plugins/parsers/dropwizard/parser.go b/plugins/parsers/dropwizard/parser.go
index 03738abb0..0417dc1b0 100644
--- a/plugins/parsers/dropwizard/parser.go
+++ b/plugins/parsers/dropwizard/parser.go
@@ -36,7 +36,7 @@ type Parser struct {
 }
 
 // Parse parses the input bytes to an array of metrics
-func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
+func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) {
 	metrics := make([]telegraf.Metric, 0)
 
 	metricTime, err := p.parseTime(buf)
@@ -193,7 +193,7 @@ func (p *Parser) readDWMetrics(metricType string, dwms interface{}, metrics []te
 		}
 	}
 
-	parsed, err := p.seriesParser.Parse([]byte(measurementName))
+	parsed, err := p.seriesParser.Parse([]byte(measurementName), "")
 	var m telegraf.Metric
 	if err != nil || len(parsed) != 1 {
 		m = metric.New(measurementName, make(map[string]string), make(map[string]interface{}), tm)
diff --git a/plugins/parsers/dropwizard/parser_test.go b/plugins/parsers/dropwizard/parser_test.go
index d2af8266c..7e244fbb1 100644
--- a/plugins/parsers/dropwizard/parser_test.go
+++ b/plugins/parsers/dropwizard/parser_test.go
@@ -28,7 +28,7 @@ func TestParseValidEmptyJSON(t *testing.T) {
 	require.NoError(t, parser.Init())
 
 	// Most basic vanilla test
-	metrics, err := parser.Parse([]byte(validEmptyJSON))
+	metrics, err := parser.Parse([]byte(validEmptyJSON), "")
 	require.NoError(t, err)
 	require.Empty(t, metrics)
 }
@@ -53,7 +53,7 @@ func TestParseValidCounterJSON(t *testing.T) {
 	parser := &Parser{}
 	require.NoError(t, parser.Init())
 
-	metrics, err := parser.Parse([]byte(validCounterJSON))
+	metrics, err := parser.Parse([]byte(validCounterJSON), "")
 	require.NoError(t, err)
 	require.Len(t, metrics, 1)
 	require.Equal(t, "measurement", metrics[0].Name())
@@ -97,7 +97,7 @@ func TestParseValidEmbeddedCounterJSON(t *testing.T) {
 	}
 	require.NoError(t, parser.Init())
 
-	metrics, err := parser.Parse([]byte(validEmbeddedCounterJSON))
+	metrics, err := parser.Parse([]byte(validEmbeddedCounterJSON), "")
 	require.NoError(t, err)
 	require.Len(t, metrics, 1)
 	require.Equal(t, "measurement", metrics[0].Name())
@@ -119,7 +119,7 @@ func TestParseValidEmbeddedCounterJSON(t *testing.T) {
 		TimePath: "time",
 	}
 	require.NoError(t, parser2.Init())
-	metrics2, err2 := parser2.Parse([]byte(validEmbeddedCounterJSON))
+	metrics2, err2 := parser2.Parse([]byte(validEmbeddedCounterJSON), "")
 	require.NoError(t, err2)
 	require.Equal(t, map[string]string{"metric_type": "counter", "tag1": "green"}, metrics2[0].Tags())
 }
@@ -149,7 +149,7 @@ func TestParseValidMeterJSON1(t *testing.T) {
 	parser := &Parser{}
 	require.NoError(t, parser.Init())
 
-	metrics, err := parser.Parse([]byte(validMeterJSON1))
+	metrics, err := parser.Parse([]byte(validMeterJSON1), "")
 	require.NoError(t, err)
 	require.Len(t, metrics, 1)
 	require.Equal(t, "measurement1", metrics[0].Name())
@@ -190,7 +190,7 @@ func TestParseValidMeterJSON2(t *testing.T) {
 	parser := &Parser{}
 	require.NoError(t, parser.Init())
 
-	metrics, err := parser.Parse([]byte(validMeterJSON2))
+	metrics, err := parser.Parse([]byte(validMeterJSON2), "")
 	require.NoError(t, err)
 	require.Len(t, metrics, 1)
 	require.Equal(t, "measurement2", metrics[0].Name())
@@ -225,7 +225,7 @@ func TestParseValidGaugeJSON(t *testing.T) {
 	parser := &Parser{}
 	require.NoError(t, parser.Init())
 
-	metrics, err := parser.Parse([]byte(validGaugeJSON))
+	metrics, err := parser.Parse([]byte(validGaugeJSON), "")
 	require.NoError(t, err)
 	require.Len(t, metrics, 1)
 	require.Equal(t, "measurement", metrics[0].Name())
@@ -265,7 +265,7 @@ func TestParseValidHistogramJSON(t *testing.T) {
 	parser := &Parser{}
 	require.NoError(t, parser.Init())
 
-	metrics, err := parser.Parse([]byte(validHistogramJSON))
+	metrics, err := parser.Parse([]byte(validHistogramJSON), "")
 	require.NoError(t, err)
 	require.Len(t, metrics, 1)
 	require.Equal(t, "measurement", metrics[0].Name())
@@ -321,7 +321,7 @@ func TestParseValidTimerJSON(t *testing.T) {
 	parser := &Parser{}
 	require.NoError(t, parser.Init())
 
-	metrics, err := parser.Parse([]byte(validTimerJSON))
+	metrics, err := parser.Parse([]byte(validTimerJSON), "")
 	require.NoError(t, err)
 	require.Len(t, metrics, 1)
 	require.Equal(t, "measurement", metrics[0].Name())
@@ -373,7 +373,7 @@ func TestParseValidAllJSON(t *testing.T) {
 	parser := &Parser{}
 	require.NoError(t, parser.Init())
 
-	metrics, err := parser.Parse([]byte(validAllJSON))
+	metrics, err := parser.Parse([]byte(validAllJSON), "")
 	require.NoError(t, err)
 	require.Len(t, metrics, 5)
 }
@@ -387,7 +387,7 @@ func TestTagParsingProblems(t *testing.T) {
 	}
 	require.NoError(t, parser1.Init())
 
-	metrics1, err1 := parser1.Parse([]byte(validEmbeddedCounterJSON))
+	metrics1, err1 := parser1.Parse([]byte(validEmbeddedCounterJSON), "")
 	require.NoError(t, err1)
 	require.Len(t, metrics1, 1)
 	require.Equal(t, map[string]string{"metric_type": "counter"}, metrics1[0].Tags())
@@ -400,7 +400,7 @@ func TestTagParsingProblems(t *testing.T) {
 		Log: testutil.Logger{},
 	}
 	require.NoError(t, parser2.Init())
-	metrics2, err2 := parser2.Parse([]byte(validEmbeddedCounterJSON))
+	metrics2, err2 := parser2.Parse([]byte(validEmbeddedCounterJSON), "")
 	require.NoError(t, err2)
 	require.Len(t, metrics2, 1)
 	require.Equal(t, map[string]string{"metric_type": "counter", "tag1": "green"}, metrics2[0].Tags())
@@ -453,7 +453,7 @@ func TestParseSampleTemplateJSON(t *testing.T) {
 	}
 	require.NoError(t, parser.Init())
 
-	metrics, err := parser.Parse([]byte(sampleTemplateJSON))
+	metrics, err := parser.Parse([]byte(sampleTemplateJSON), "")
 	require.NoError(t, err)
 
 	require.Len(t, metrics, 11)
@@ -579,7 +579,7 @@ func TestDropWizard(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			parser := &Parser{}
 			require.NoError(t, parser.Init())
-			metrics, err := parser.Parse(tt.input)
+			metrics, err := parser.Parse(tt.input, "")
 			if tt.expectError {
 				require.Error(t, err)
 			} else {
@@ -636,7 +636,7 @@ func TestBenchmarkData(t *testing.T) {
 		),
 	}
 
-	actual, err := plugin.Parse([]byte(benchmarkData))
+	actual, err := plugin.Parse([]byte(benchmarkData), "")
 	require.NoError(t, err)
 	testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics())
 }
@@ -647,6 +647,6 @@ func BenchmarkParsing(b *testing.B) {
 	for n := 0; n < b.N; n++ {
 		//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
-		plugin.Parse([]byte(benchmarkData))
+		plugin.Parse([]byte(benchmarkData), "")
 	}
 }
diff --git a/plugins/parsers/form_urlencoded/parser.go b/plugins/parsers/form_urlencoded/parser.go
index 8e53ec02b..e74b37fb0 100644
--- a/plugins/parsers/form_urlencoded/parser.go
+++ b/plugins/parsers/form_urlencoded/parser.go
@@ -22,7 +22,7 @@ type Parser struct {
 }
 
 // Parse converts a slice of bytes in "application/x-www-form-urlencoded" format into metrics
-func (p Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
+func (p Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) {
 	buf = bytes.TrimSpace(buf)
 	if len(buf) == 0 {
 		return make([]telegraf.Metric, 0), nil
@@ -47,7 +47,7 @@ func (p Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
 
 // ParseLine delegates a single line of text to the Parse function
 func (p Parser) ParseLine(line string) (telegraf.Metric, error) {
-	metrics, err := p.Parse([]byte(line))
+	metrics, err := p.Parse([]byte(line), "")
 	if err != nil {
 		return nil, err
 	}
diff --git a/plugins/parsers/form_urlencoded/parser_test.go b/plugins/parsers/form_urlencoded/parser_test.go
index 05a975664..29fb6c0f5 100644
--- a/plugins/parsers/form_urlencoded/parser_test.go
+++ b/plugins/parsers/form_urlencoded/parser_test.go
@@ -24,7 +24,7 @@ func TestParseValidFormData(t *testing.T) {
 		MetricName: "form_urlencoded_test",
 	}
 
-	metrics, err := parser.Parse([]byte(validFormData))
+	metrics, err := parser.Parse([]byte(validFormData), "")
 	require.NoError(t, err)
 	require.Len(t, metrics, 1)
 	require.Equal(t, "form_urlencoded_test", metrics[0].Name())
@@ -56,7 +56,7 @@ func TestParseValidFormDataWithTags(t *testing.T) {
 		TagKeys: []string{"tag1", "tag2"},
 	}
 
-	metrics, err := parser.Parse([]byte(validFormData))
+	metrics, err := parser.Parse([]byte(validFormData), "")
 	require.NoError(t, err)
 	require.Len(t, metrics, 1)
 	require.Equal(t, "form_urlencoded_test", metrics[0].Name())
@@ -77,7 +77,7 @@ func TestParseValidFormDataDefaultTags(t *testing.T) {
 		DefaultTags: map[string]string{"tag4": "default"},
 	}
 
-	metrics, err := parser.Parse([]byte(validFormData))
+	metrics, err := parser.Parse([]byte(validFormData), "")
 	require.NoError(t, err)
 	require.Len(t, metrics, 1)
 	require.Equal(t, "form_urlencoded_test", metrics[0].Name())
@@ -99,7 +99,7 @@ func TestParseValidFormDataDefaultTagsOverride(t *testing.T) {
 		DefaultTags: map[string]string{"tag1": "default"},
 	}
 
-	metrics, err := parser.Parse([]byte(validFormData))
+	metrics, err := parser.Parse([]byte(validFormData), "")
 	require.NoError(t, err)
 	require.Len(t, metrics, 1)
 	require.Equal(t, "form_urlencoded_test", metrics[0].Name())
@@ -119,7 +119,7 @@ func TestParseEncodedFormData(t *testing.T) {
 		TagKeys: []string{"tag1"},
 	}
 
-	metrics, err := parser.Parse([]byte(encodedFormData))
+	metrics, err := parser.Parse([]byte(encodedFormData), "")
 	require.NoError(t, err)
 	require.Len(t, metrics, 1)
 	require.Equal(t, "form_urlencoded_test", metrics[0].Name())
@@ -136,7 +136,7 @@ func TestParseInvalidFormDataError(t *testing.T) {
 		MetricName: "form_urlencoded_test",
 	}
 
-	metrics, err := parser.Parse([]byte(notEscapedProperlyFormData))
+	metrics, err := parser.Parse([]byte(notEscapedProperlyFormData), "")
 	require.Error(t, err)
 	require.Empty(t, metrics)
 }
@@ -147,7 +147,7 @@ func TestParseInvalidFormDataEmptyKey(t *testing.T) {
 	}
 
 	// Empty key for field
-	metrics, err := parser.Parse([]byte(blankKeyFormData))
+	metrics, err := parser.Parse([]byte(blankKeyFormData), "")
 	require.NoError(t, err)
 	require.Len(t, metrics, 1)
 	require.Equal(t, map[string]string{}, metrics[0].Tags())
@@ -157,7 +157,7 @@ func TestParseInvalidFormDataEmptyKey(t *testing.T) {
 
 	// Empty key for tag
 	parser.TagKeys = []string{""}
-	metrics, err = parser.Parse([]byte(blankKeyFormData))
+	metrics, err = parser.Parse([]byte(blankKeyFormData), "")
 	require.NoError(t, err)
 	require.Len(t, metrics, 1)
 	require.Equal(t, map[string]string{}, metrics[0].Tags())
@@ -171,7 +171,7 @@ func TestParseInvalidFormDataEmptyString(t *testing.T) {
 		MetricName: "form_urlencoded_test",
 	}
 
-	metrics, err := parser.Parse([]byte(emptyFormData))
+	metrics, err := parser.Parse([]byte(emptyFormData), "")
 	require.NoError(t, err)
 	require.Empty(t, metrics)
 }
@@ -199,7 +199,7 @@ func TestBenchmarkData(t *testing.T) {
 		),
 	}
 
-	actual, err := plugin.Parse([]byte(benchmarkData))
+	actual, err := plugin.Parse([]byte(benchmarkData), "")
 	require.NoError(t, err)
 	testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics())
 }
@@ -212,6 +212,6 @@ func BenchmarkParsing(b *testing.B) {
 	for n := 0; n < b.N; n++ {
 		//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
-		plugin.Parse([]byte(benchmarkData))
+		plugin.Parse([]byte(benchmarkData), "")
 	}
 }
diff --git a/plugins/parsers/graphite/parser.go b/plugins/parsers/graphite/parser.go
index d7505133b..895acd243 100644
--- a/plugins/parsers/graphite/parser.go
+++ b/plugins/parsers/graphite/parser.go
@@ -48,7 +48,7 @@ func (p *Parser) Init() error {
 	return nil
 }
 
-func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
+func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) {
 	// parse even if the buffer begins with a newline
 	if len(buf) != 0 && buf[0] == '\n' {
 		buf = buf[1:]
diff --git a/plugins/parsers/graphite/parser_test.go b/plugins/parsers/graphite/parser_test.go
index ab41e33df..146fb10b5 100644
--- a/plugins/parsers/graphite/parser_test.go
+++ b/plugins/parsers/graphite/parser_test.go
@@ -30,7 +30,7 @@ func BenchmarkParse(b *testing.B) {
 	require.NoError(b, p.Init())
 
 	for i := 0; i < b.N; i++ {
-		_, err := p.Parse([]byte("servers.localhost.cpu.load 11 1435077219"))
+		_, err := p.Parse([]byte("servers.localhost.cpu.load 11 1435077219"), "")
 		require.NoError(b, err)
 	}
 }
@@ -387,7 +387,7 @@ func TestParse(t *testing.T) {
 		p := Parser{Templates: []string{test.template}}
 		require.NoError(t, p.Init())
 
-		metrics, err := p.Parse(test.input)
+		metrics, err := p.Parse(test.input, "")
 		if test.err != "" {
 			require.EqualError(t, err, test.err)
 			continue
diff --git a/plugins/parsers/grok/parser.go b/plugins/parsers/grok/parser.go
index 4a375df3e..42d1f46e7 100644
--- a/plugins/parsers/grok/parser.go
+++ b/plugins/parsers/grok/parser.go
@@ -381,7 +381,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
 	return metric.New(p.Measurement, tags, fields, p.tsModder.tsMod(timestamp)), nil
 }
 
-func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
+func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) {
 	metrics := make([]telegraf.Metric, 0)
 
 	if p.Multiline {
diff --git a/plugins/parsers/grok/parser_test.go b/plugins/parsers/grok/parser_test.go
index 51d42cf2f..871dd7244 100644
--- a/plugins/parsers/grok/parser_test.go
+++ b/plugins/parsers/grok/parser_test.go
@@ -21,7 +21,7 @@ func TestGrokParse(t *testing.T) {
 	err := parser.Compile()
 	require.NoError(t, err)
 
-	_, err = parser.Parse([]byte(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`))
+	_, err = parser.Parse([]byte(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`), "")
 	require.NoError(t, err)
 }
@@ -1021,7 +1021,7 @@ func TestMultilinePatterns(t *testing.T) {
 		Log: testutil.Logger{},
 	}
 	require.NoError(t, p.Compile())
-	actual, err := p.Parse(buf)
+	actual, err := p.Parse(buf, "")
 	require.NoError(t, err)
 	testutil.RequireMetricsEqual(t, expected, actual)
 }
@@ -1181,7 +1181,7 @@ func TestMultilineNilMetric(t *testing.T) {
 		Log: testutil.Logger{},
 	}
 	require.NoError(t, p.Compile())
-	actual, err := p.Parse(buf)
+	actual, err := p.Parse(buf, "")
 	require.NoError(t, err)
 	require.Empty(t, actual)
 }
@@ -1224,7 +1224,7 @@ func TestBenchmarkData(t *testing.T) {
 		),
 	}
 
-	actual, err := plugin.Parse([]byte(benchmarkData))
+	actual, err := plugin.Parse([]byte(benchmarkData), "")
 	require.NoError(t, err)
 	testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics())
 }
@@ -1238,6 +1238,6 @@ func BenchmarkParsing(b *testing.B) {
 	for n := 0; n < b.N; n++ {
 		//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
-		plugin.Parse([]byte(benchmarkData))
+		plugin.Parse([]byte(benchmarkData), "")
 	}
 }
diff --git a/plugins/parsers/influx/influx_upstream/parser.go b/plugins/parsers/influx/influx_upstream/parser.go
index 9a21458c3..3aef5f2d7 100644
--- a/plugins/parsers/influx/influx_upstream/parser.go
+++ b/plugins/parsers/influx/influx_upstream/parser.go
@@ -117,7 +117,7 @@ func (p *Parser) SetTimeFunc(f TimeFunc) {
 	p.defaultTime = f
 }
 
-func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) {
+func (p *Parser) Parse(input []byte, extra string) ([]telegraf.Metric, error) {
 	metrics := make([]telegraf.Metric, 0)
 	decoder := lineprotocol.NewDecoderWithBytes(input)
 
@@ -134,7 +134,7 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) {
 }
 
 func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
-	metrics, err := p.Parse([]byte(line))
+	metrics, err := p.Parse([]byte(line), "")
 	if err != nil {
 		return nil, err
 	}
diff --git a/plugins/parsers/influx/influx_upstream/parser_test.go b/plugins/parsers/influx/influx_upstream/parser_test.go
index 2bbb0aab1..3901a01f3 100644
--- a/plugins/parsers/influx/influx_upstream/parser_test.go
+++ b/plugins/parsers/influx/influx_upstream/parser_test.go
@@ -614,7 +614,7 @@ func TestParser(t *testing.T) {
 				parser.SetTimeFunc(tt.timeFunc)
 			}
 
-			metrics, err := parser.Parse(tt.input)
+			metrics, err := parser.Parse(tt.input, "")
 			if tt.err == nil {
 				require.NoError(t, err)
 			} else {
@@ -638,7 +638,7 @@ func BenchmarkParser(b *testing.B) {
 			parser := Parser{}
 			require.NoError(b, parser.Init())
 			for n := 0; n < b.N; n++ {
-				metrics, err := parser.Parse(tt.input)
+				metrics, err := parser.Parse(tt.input, "")
 				_ = err
 				_ = metrics
 			}
@@ -748,7 +748,7 @@ func TestSeriesParser(t *testing.T) {
 				parser.SetTimeFunc(tt.timeFunc)
 			}
 
-			metrics, err := parser.Parse(tt.input)
+			metrics, err := parser.Parse(tt.input, "")
 			require.Equal(t, tt.err, err)
 			if err != nil {
 				require.Equal(t, tt.err.Error(), err.Error())
@@ -854,7 +854,7 @@ func TestParserTimestampPrecision(t *testing.T) {
 			parser := Parser{InfluxTimestampPrecision: d}
 			require.NoError(t, parser.Init())
 
-			metrics, err := parser.Parse(tt.input)
+			metrics, err := parser.Parse(tt.input, "")
 			require.NoError(t, err)
 			require.Equal(t, tt.metrics, metrics)
 
@@ -905,7 +905,7 @@ func TestParserErrorString(t *testing.T) {
 			parser := Parser{}
 			require.NoError(t, parser.Init())
 
-			_, err := parser.Parse(tt.input)
+			_, err := parser.Parse(tt.input, "")
 			require.Equal(t, tt.errString, err.Error())
 		})
 	}
@@ -1057,7 +1057,7 @@ func TestBenchmarkData(t *testing.T) {
 	}
 
 	// Do the parsing
-	actual, err := plugin.Parse([]byte(benchmarkData))
+	actual, err := plugin.Parse([]byte(benchmarkData), "")
 	require.NoError(t, err)
 	testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics())
 }
@@ -1068,6 +1068,6 @@ func BenchmarkParsing(b *testing.B) {
 	for n := 0; n < b.N; n++ {
 		//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
-		plugin.Parse([]byte(benchmarkData))
+		plugin.Parse([]byte(benchmarkData), "")
 	}
 }
diff --git a/plugins/parsers/influx/parser.go b/plugins/parsers/influx/parser.go
index a4adf330b..fe2d2e94a 100644
--- a/plugins/parsers/influx/parser.go
+++ b/plugins/parsers/influx/parser.go
@@ -79,7 +79,7 @@ func (p *Parser) SetTimePrecision(u time.Duration) {
 	p.handler.SetTimePrecision(u)
 }
 
-func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) {
+func (p *Parser) Parse(input []byte, extra string) ([]telegraf.Metric, error) {
 	p.Lock()
 	defer p.Unlock()
 	metrics := make([]telegraf.Metric, 0)
@@ -115,7 +115,7 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) {
 }
 
 func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
-	metrics, err := p.Parse([]byte(line))
+	metrics, err := p.Parse([]byte(line), "")
 	if err != nil {
 		return nil, err
 	}
diff --git a/plugins/parsers/influx/parser_test.go b/plugins/parsers/influx/parser_test.go
index 17aef0975..22726717c 100644
--- a/plugins/parsers/influx/parser_test.go
+++ b/plugins/parsers/influx/parser_test.go
@@ -588,7 +588,7 @@ func TestParser(t *testing.T) {
 				parser.SetTimeFunc(tt.timeFunc)
 			}
 
-			metrics, err := parser.Parse(tt.input)
+			metrics, err := parser.Parse(tt.input, "")
 			require.Equal(t, tt.err, err)
 
 			require.Len(t, metrics, len(tt.metrics))
@@ -693,7 +693,7 @@ func TestParserTimestampPrecision(t *testing.T) {
 			parser := Parser{InfluxTimestampPrecision: d}
 			require.NoError(t, parser.Init())
 
-			metrics, err := parser.Parse(tt.input)
+			metrics, err := parser.Parse(tt.input, "")
 			require.NoError(t, err)
 			require.Equal(t, tt.metrics, metrics)
 
@@ -716,7 +716,7 @@ func BenchmarkParser(b *testing.B) {
 			parser := Parser{}
 			require.NoError(b, parser.Init())
 			for n := 0; n < b.N; n++ {
-				metrics, err := parser.Parse(tt.input)
+				metrics, err := parser.Parse(tt.input, "")
 				_ = err
 				_ = metrics
 			}
@@ -824,7 +824,7 @@ func TestSeriesParser(t *testing.T) {
 				parser.SetTimeFunc(tt.timeFunc)
 			}
 
-			metrics, err := parser.Parse(tt.input)
+			metrics, err := parser.Parse(tt.input, "")
 			require.Equal(t, tt.err, err)
 			if err != nil {
 				require.Equal(t, tt.err.Error(), err.Error())
@@ -872,7 +872,7 @@ func TestParserErrorString(t *testing.T) {
 			parser := Parser{}
 			require.NoError(t, parser.Init())
 
-			_, err := parser.Parse(tt.input)
+			_, err := parser.Parse(tt.input, "")
 			require.Equal(t, tt.errString, err.Error())
 		})
 	}
@@ -1024,7 +1024,7 @@ func TestBenchmarkData(t *testing.T) {
 	}
 
 	// Do the parsing
-	actual, err := plugin.Parse([]byte(benchmarkData))
+	actual, err := plugin.Parse([]byte(benchmarkData), "")
 	require.NoError(t, err)
 	testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics())
 }
@@ -1035,6 +1035,6 @@ func BenchmarkParsing(b *testing.B) {
 	for n := 0; n < b.N; n++ {
 		//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
-		plugin.Parse([]byte(benchmarkData))
+		plugin.Parse([]byte(benchmarkData), "")
 	}
 }
diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go
index d6c913732..e448277f3 100644
--- a/plugins/parsers/json/parser.go
+++ b/plugins/parsers/json/parser.go
@@ -181,7 +181,7 @@ func (p *Parser) Init() error {
 	return nil
 }
 
-func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
+func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) {
 	if p.Query != "" {
 		result := gjson.GetBytes(buf, p.Query)
 		buf = []byte(result.Raw)
@@ -220,7 +220,7 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
 }
 
 func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
-	metrics, err := p.Parse([]byte(line + "\n"))
+	metrics, err := p.Parse([]byte(line+"\n"), "")
 	if err != nil {
 		return nil, err
diff --git a/plugins/parsers/json/parser_test.go b/plugins/parsers/json/parser_test.go
index 56694ba9f..13265591c 100644
--- a/plugins/parsers/json/parser_test.go
+++ b/plugins/parsers/json/parser_test.go
@@ -116,7 +116,7 @@ func TestParseValidJSON(t *testing.T) {
 	require.NoError(t, parser.Init())
 
 	// Most basic vanilla test
-	actual, err := parser.Parse([]byte(validJSON))
+	actual, err := parser.Parse([]byte(validJSON), "")
 	require.NoError(t, err)
 	require.Len(t, actual, 1)
 	require.Equal(t, "json_test", actual[0].Name())
@@ -127,7 +127,7 @@ func TestParseValidJSON(t *testing.T) {
 	require.Equal(t, map[string]string{}, actual[0].Tags())
 
 	// Test that newlines are fine
-	actual, err = parser.Parse([]byte(validJSONNewline))
+	actual, err = parser.Parse([]byte(validJSONNewline), "")
 	require.NoError(t, err)
 	require.Len(t, actual, 1)
 	require.Equal(t, "json_test", actual[0].Name())
@@ -138,7 +138,7 @@ func TestParseValidJSON(t *testing.T) {
 	require.Equal(t, map[string]string{}, actual[0].Tags())
 
 	// Test that strings without TagKeys defined are ignored
-	actual, err = parser.Parse([]byte(validJSONTags))
+	actual, err = parser.Parse([]byte(validJSONTags), "")
 	require.NoError(t, err)
 	require.Len(t, actual, 1)
 	require.Equal(t, "json_test", actual[0].Name())
@@ -149,12 +149,12 @@ func TestParseValidJSON(t *testing.T) {
 	require.Equal(t, map[string]string{}, actual[0].Tags())
 
 	// Test that whitespace only will parse as an empty list of actual
-	actual, err = parser.Parse([]byte("\n\t"))
+	actual, err = parser.Parse([]byte("\n\t"), "")
 	require.NoError(t, err)
 	require.Empty(t, actual)
 
 	// Test that an empty string will parse as an empty list of actual
-	actual, err = parser.Parse([]byte(""))
+	actual, err = parser.Parse([]byte(""), "")
 	require.NoError(t, err)
 	require.Empty(t, actual)
 }
@@ -198,9 +198,9 @@ func TestParseInvalidJSON(t *testing.T) {
 	parser := &Parser{MetricName: "json_test"}
 	require.NoError(t, parser.Init())
 
-	_, err := parser.Parse([]byte(invalidJSON))
+	_, err := parser.Parse([]byte(invalidJSON), "")
 	require.Error(t, err)
-	_, err = parser.Parse([]byte(invalidJSON2))
+	_, err = parser.Parse([]byte(invalidJSON2), "")
 	require.Error(t, err)
 	_, err = parser.ParseLine(invalidJSON)
 	require.Error(t, err)
@@ -213,7 +213,7 @@ func TestParseJSONImplicitStrictness(t *testing.T) {
 	}
 	require.NoError(t, parserImplicitNoStrict.Init())
 
-	_, err := parserImplicitNoStrict.Parse([]byte(mixedValidityJSON))
+	_, err := parserImplicitNoStrict.Parse([]byte(mixedValidityJSON), "")
 	require.NoError(t, err)
 }
@@ -225,7 +225,7 @@ func TestParseJSONExplicitStrictnessFalse(t *testing.T) {
 	}
 	require.NoError(t, parserNoStrict.Init())
 
-	_, err := parserNoStrict.Parse([]byte(mixedValidityJSON))
+	_, err := parserNoStrict.Parse([]byte(mixedValidityJSON), "")
 	require.NoError(t, err)
 }
@@ -237,7 +237,7 @@ func TestParseJSONExplicitStrictnessTrue(t *testing.T) {
 	}
 	require.NoError(t, parserStrict.Init())
 
-	_, err := parserStrict.Parse([]byte(mixedValidityJSON))
+	_, err := parserStrict.Parse([]byte(mixedValidityJSON), "")
 	require.Error(t, err)
 }
@@ -249,7 +249,7 @@ func TestParseWithTagKeys(t *testing.T) {
 	}
 	require.NoError(t, parser.Init())
 
-	actual, err := parser.Parse([]byte(validJSONTags))
+	actual, err := parser.Parse([]byte(validJSONTags), "")
 	require.NoError(t, err)
 	require.Len(t, actual, 1)
 	require.Equal(t, "json_test", actual[0].Name())
@@ -266,7 +266,7 @@ func TestParseWithTagKeys(t *testing.T) {
 	}
 	require.NoError(t, parser.Init())
 
-	actual, err = parser.Parse([]byte(validJSONTags))
+	actual, err = parser.Parse([]byte(validJSONTags), "")
 	require.NoError(t, err)
 	require.Len(t, actual, 1)
 	require.Equal(t, "json_test", actual[0].Name())
@@ -285,7 +285,7 @@ func TestParseWithTagKeys(t *testing.T) {
 	}
 	require.NoError(t, parser.Init())
 
-	actual, err = parser.Parse([]byte(validJSONTags))
+	actual, err = parser.Parse([]byte(validJSONTags), "")
 	require.NoError(t, err)
 	require.Len(t, actual, 1)
 	require.Equal(t, "json_test", actual[0].Name())
@@ -363,7 +363,7 @@ func TestParseValidJSONDefaultTags(t *testing.T) {
 	require.NoError(t, parser.Init())
 
 	// Most basic vanilla test
-	actual, err := parser.Parse([]byte(validJSON))
+	actual, err := parser.Parse([]byte(validJSON), "")
 	require.NoError(t, err)
 	require.Len(t, actual, 1)
 	require.Equal(t, "json_test", actual[0].Name())
@@ -374,7 +374,7 @@ func TestParseValidJSONDefaultTags(t *testing.T) {
 	require.Equal(t, map[string]string{"t4g": "default"}, actual[0].Tags())
 
 	// Test that tagkeys and default tags are applied
-	actual, err = parser.Parse([]byte(validJSONTags))
+	actual, err = parser.Parse([]byte(validJSONTags), "")
 	require.NoError(t, err)
 	require.Len(t, actual, 1)
 	require.Equal(t, "json_test", actual[0].Name())
@@ -398,7 +398,7 @@ func TestParseValidJSONDefaultTagsOverride(t *testing.T) {
 	require.NoError(t, parser.Init())
 
 	// Most basic vanilla test
-	actual, err := parser.Parse([]byte(validJSON))
+	actual, err := parser.Parse([]byte(validJSON), "")
 	require.NoError(t, err)
 	require.Len(t, actual, 1)
 	require.Equal(t, "json_test", actual[0].Name())
@@ -409,7 +409,7 @@ func TestParseValidJSONDefaultTagsOverride(t *testing.T) {
 	require.Equal(t, map[string]string{"mytag": "default"}, actual[0].Tags())
 
 	// Test that tagkeys override default tags
-	actual, err = parser.Parse([]byte(validJSONTags))
+	actual, err = parser.Parse([]byte(validJSONTags), "")
 	require.NoError(t, err)
 	require.Len(t, actual, 1)
 	require.Equal(t, "json_test", actual[0].Name())
@@ -428,7 +428,7 @@ func TestParseValidJSONArray(t *testing.T) {
 	require.NoError(t, parser.Init())
 
 	// Most basic vanilla test
-	actual, err := parser.Parse([]byte(validJSONArray))
+	actual, err := parser.Parse([]byte(validJSONArray), "")
 	require.NoError(t, err)
 	require.Len(t, actual, 1)
 	require.Equal(t, "json_array_test", actual[0].Name())
@@ -439,7 +439,7 @@ func TestParseValidJSONArray(t *testing.T) {
 	require.Equal(t, map[string]string{}, actual[0].Tags())
 
 	// Basic multiple datapoints
-	actual, err = parser.Parse([]byte(validJSONArrayMultiple))
+	actual, err = parser.Parse([]byte(validJSONArrayMultiple), "")
 	require.NoError(t, err)
 	require.Len(t, actual, 2)
 	require.Equal(t, "json_array_test", actual[0].Name())
@@ -464,7 +464,7 @@ func TestParseArrayWithTagKeys(t *testing.T) {
 	}
 	require.NoError(t, parser.Init())
 
-	actual, err := parser.Parse([]byte(validJSONArrayTags))
+	actual, err := parser.Parse([]byte(validJSONArrayTags), "")
 	require.NoError(t, err)
 	require.Len(t, actual, 2)
 	require.Equal(t, "json_array_test", actual[0].Name())
@@ -488,7 +488,7 @@ func TestParseArrayWithTagKeys(t *testing.T) {
 	}
 	require.NoError(t, parser.Init())
 
-	actual, err = parser.Parse([]byte(validJSONArrayTags))
+	actual, err = parser.Parse([]byte(validJSONArrayTags), "")
 	require.NoError(t, err)
 	require.Len(t, actual, 2)
 	require.Equal(t, "json_array_test", actual[0].Name())
@@ -516,7 +516,7 @@ func TestParseArrayWithTagKeys(t *testing.T) {
 	}
 	require.NoError(t, parser.Init())
 
-	actual, err = parser.Parse([]byte(validJSONArrayTags))
+	actual, err = parser.Parse([]byte(validJSONArrayTags), "")
 	require.NoError(t, err)
 	require.Len(t, actual, 2)
 	require.Equal(t, "json_array_test", actual[0].Name())
@@ -547,7 +547,7 @@ func TestHttpJsonBOM(t *testing.T) {
 	require.NoError(t, parser.Init())
 
 	// Most basic vanilla test
-	_, err := parser.Parse(jsonBOM)
+	_, err := parser.Parse(jsonBOM, "")
 	require.NoError(t, err)
 }
@@ -577,7 +577,7 @@ func TestJSONParseNestedArray(t *testing.T) {
 	}
 	require.NoError(t, parser.Init())
 
-	actual, err := parser.Parse([]byte(testString))
+	actual, err := parser.Parse([]byte(testString), "")
 	require.Len(t, actual, 1)
 	require.NoError(t, err)
 
 	require.Len(t, actual[0].Tags(), 3)
@@ -606,7 +606,7 @@ func TestJSONQueryErrorOnArray(t *testing.T) {
 	}
 	require.NoError(t, parser.Init())
 
-	_, err := parser.Parse([]byte(testString))
+	_, err := parser.Parse([]byte(testString), "")
 	require.Error(t, err)
 }
@@ -640,7 +640,7 @@ func TestArrayOfObjects(t *testing.T) {
 	}
 	require.NoError(t, parser.Init())
 
-	actual, err := parser.Parse([]byte(testString))
+	actual, err := parser.Parse([]byte(testString), "")
 	require.NoError(t, err)
 	require.Len(t, actual, 3)
 }
@@ -668,7 +668,7 @@ func TestUseCaseJSONQuery(t *testing.T) {
 	}
 	require.NoError(t, parser.Init())
 
-	actual, err := parser.Parse([]byte(testString))
+	actual, err := parser.Parse([]byte(testString), "")
 	require.NoError(t, err)
 	require.Len(t, actual, 3)
 	require.Equal(t, "Murphy", actual[0].Fields()["last"])
@@ -703,7 +703,7 @@ func TestTimeParser(t *testing.T) {
 	}
 	require.NoError(t, parser.Init())
 
-	actual, err := parser.Parse([]byte(testString))
+	actual, err := parser.Parse([]byte(testString), "")
 	require.NoError(t, err)
 	require.Len(t, actual, 2)
 	require.NotEqual(t, actual[0].Time(), actual[1].Time())
@@ -722,7 +722,7 @@ func TestTimeParserWithTimezone(t *testing.T) {
 	}
 	require.NoError(t, parser.Init())
 
-	actual, err := parser.Parse([]byte(testString))
+	actual, err := parser.Parse([]byte(testString), "")
 	require.NoError(t, err)
 	require.Len(t, actual, 1)
 	require.EqualValues(t, int64(1136405040000000000), actual[0].Time().UnixNano())
@@ -757,7 +757,7 @@ func TestUnixTimeParser(t *testing.T) {
 	}
 	require.NoError(t, parser.Init())
 
-	actual, err := parser.Parse([]byte(testString))
+	actual, err := parser.Parse([]byte(testString), "")
 	require.NoError(t, err)
 	require.Len(t, actual, 2)
 	require.NotEqual(t, actual[0].Time(), actual[1].Time())
@@ -792,7 +792,7 @@ func TestUnixMsTimeParser(t *testing.T) {
 	}
 	require.NoError(t, parser.Init())
 
-	actual, err := parser.Parse([]byte(testString))
+	actual, err := parser.Parse([]byte(testString), "")
 	require.NoError(t, err)
 	require.Len(t, actual, 2)
 	require.NotEqual(t, actual[0].Time(), actual[1].Time())
@@ -816,7 +816,7 @@ func TestTimeErrors(t *testing.T) {
 	}
 	require.NoError(t, parser.Init())
 
-	actual, err := parser.Parse([]byte(testString))
+	actual, err := parser.Parse([]byte(testString), "")
 	require.Error(t, err)
 	require.Empty(t, actual)
 
@@ -836,7 +836,7 @@ func TestTimeErrors(t *testing.T) {
 	}
 	require.NoError(t, parser.Init())
 
-	actual, err = parser.Parse([]byte(testString2))
+	actual, err = parser.Parse([]byte(testString2), "")
 	require.Error(t, err)
 	require.Empty(t, actual)
 	require.Equal(t, errors.New("'json_time_key' could not be found"), err)
@@ -846,7 +846,7 @@ func TestShareTimestamp(t *testing.T) {
 	parser := &Parser{MetricName: "json_test"}
 	require.NoError(t, parser.Init())
 
-	actual, err := parser.Parse([]byte(validJSONArrayMultiple))
+	actual, err := parser.Parse([]byte(validJSONArrayMultiple), "")
 	require.NoError(t, err)
 	require.Len(t, actual, 2)
 	require.Equal(t, actual[0].Time(), actual[1].Time())
@@ -866,7 +866,7 @@ func TestNameKey(t *testing.T) {
 	parser := &Parser{NameKey: "b_c"}
 	require.NoError(t, parser.Init())
 
-	actual, err := parser.Parse([]byte(testString))
+	actual, err := parser.Parse([]byte(testString), "")
 	require.NoError(t, err)
 	require.Equal(t, "this is my name", actual[0].Name())
 }
@@ -877,7 +877,7 @@ func TestParseArrayWithWrongType(t *testing.T) {
 	parser := &Parser{}
 	require.NoError(t, parser.Init())
 
-	_, err := parser.Parse([]byte(data))
+	_, err := parser.Parse([]byte(data), "")
 	require.Error(t, err)
 }
@@ -994,7 +994,7 @@ func TestParse(t *testing.T) {
 			parser := tt.parser
 			require.NoError(t, parser.Init())
 
-			actual, err := parser.Parse(tt.input)
+			actual, err := parser.Parse(tt.input, "")
 			require.NoError(t, err)
 
 			testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.IgnoreTime())
@@ -1106,7 +1106,7 @@ func TestParseWithWildcardTagKeys(t *testing.T) {
 			parser := tt.parser
 			require.NoError(t, parser.Init())
 
-			actual, err := parser.Parse(tt.input)
+			actual, err := parser.Parse(tt.input, "")
 			require.NoError(t, err)
 			testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.IgnoreTime())
 		})
@@ -1382,7 +1382,7 @@ func TestParseArrayWithWildcardTagKeys(t *testing.T) {
 			parser := tt.parser
 			require.NoError(t, parser.Init())
 
-			actual, err := parser.Parse(tt.input)
+			actual, err := parser.Parse(tt.input, "")
 			require.NoError(t, err)
 			testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.IgnoreTime())
@@ -1434,7 +1434,7 @@ func TestBenchmarkData(t *testing.T) {
 	}
 
 	// Do the parsing
-	actual, err := plugin.Parse([]byte(benchmarkData))
+	actual, err := plugin.Parse([]byte(benchmarkData), "")
 	require.NoError(t, err)
 	testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics())
 }
@@ -1450,7 +1450,7 @@ func BenchmarkParsingSequential(b *testing.B) {
 	// Do the benchmarking
 	for n := 0; n < b.N; n++ {
 		//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
-		plugin.Parse([]byte(benchmarkData))
+		plugin.Parse([]byte(benchmarkData), "")
 	}
 }
@@ -1466,7 +1466,7 @@ func BenchmarkParsingParallel(b *testing.B) {
 	b.RunParallel(func(p *testing.PB) {
 		for p.Next() {
 			//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
-			plugin.Parse([]byte(benchmarkData))
+			plugin.Parse([]byte(benchmarkData), "")
 		}
 	})
 }
@@ -1488,6 +1488,6 @@ func FuzzParserJSON(f *testing.F) {
 	f.Fuzz(func(_ *testing.T, input []byte) {
 		//nolint:errcheck // fuzz testing can give lots of errors, but we just want to test for crashes
-		parser.Parse(input)
+		parser.Parse(input, "")
 	})
 }
diff --git a/plugins/parsers/json_v2/parser.go b/plugins/parsers/json_v2/parser.go
index 0f9d4aa5b..865220643 100644
--- a/plugins/parsers/json_v2/parser.go
+++ b/plugins/parsers/json_v2/parser.go
@@ -120,7 +120,7 @@ func (p *Parser) Init() error {
 	return nil
 }
 
-func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) {
+func (p *Parser) Parse(input []byte, extra string) ([]telegraf.Metric, error) {
 	// What we've done here is to put the entire former contents of Parse()
 	// into parseCriticalPath().
 	//
diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go
index 7544b1875..00133be8b 100644
--- a/plugins/parsers/json_v2/parser_test.go
+++ b/plugins/parsers/json_v2/parser_test.go
@@ -131,7 +131,7 @@ func BenchmarkParsingSequential(b *testing.B) {
 	// Do the benchmarking
 	for n := 0; n < b.N; n++ {
 		//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
-		plugin.Parse(input)
+		plugin.Parse(input, "")
 	}
 }
@@ -162,7 +162,7 @@ func BenchmarkParsingParallel(b *testing.B) {
 	b.RunParallel(func(p *testing.PB) {
 		for p.Next() {
 			//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
-			plugin.Parse(input)
+			plugin.Parse(input, "")
 		}
 	})
 }
diff --git a/plugins/parsers/logfmt/parser.go b/plugins/parsers/logfmt/parser.go
index bb6d650d3..66e6889d0 100644
--- a/plugins/parsers/logfmt/parser.go
+++ b/plugins/parsers/logfmt/parser.go
@@ -27,7 +27,7 @@ type Parser struct {
 }
 
 // Parse converts a slice of bytes in logfmt format to metrics.
-func (p *Parser) Parse(b []byte) ([]telegraf.Metric, error) {
+func (p *Parser) Parse(b []byte, extra string) ([]telegraf.Metric, error) {
 	reader := bytes.NewReader(b)
 	decoder := logfmt.NewDecoder(reader)
 	metrics := make([]telegraf.Metric, 0)
@@ -75,7 +75,7 @@ func (p *Parser) Parse(b []byte) ([]telegraf.Metric, error) {
 
 // ParseLine converts a single line of text in logfmt format to metrics.
func (p *Parser) ParseLine(s string) (telegraf.Metric, error) { - metrics, err := p.Parse([]byte(s)) + metrics, err := p.Parse([]byte(s), "") if err != nil { return nil, err } diff --git a/plugins/parsers/logfmt/parser_test.go b/plugins/parsers/logfmt/parser_test.go index 5853ff4f0..b4d9b94f3 100644 --- a/plugins/parsers/logfmt/parser_test.go +++ b/plugins/parsers/logfmt/parser_test.go @@ -128,7 +128,7 @@ func TestParse(t *testing.T) { l := Parser{ metricName: tt.measurement, } - got, err := l.Parse(tt.bytes) + got, err := l.Parse(tt.bytes, "") if (err != nil) != tt.wantErr { t.Errorf("Logfmt.Parse error = %v, wantErr %v", err, tt.wantErr) return @@ -316,7 +316,7 @@ func TestBenchmarkData(t *testing.T) { ), } - actual, err := plugin.Parse([]byte(benchmarkData)) + actual, err := plugin.Parse([]byte(benchmarkData), "") require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) } @@ -329,6 +329,6 @@ func BenchmarkParsing(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse([]byte(benchmarkData)) + plugin.Parse([]byte(benchmarkData), "") } } diff --git a/plugins/parsers/nagios/parser.go b/plugins/parsers/nagios/parser.go index 7472deed5..ffc049075 100644 --- a/plugins/parsers/nagios/parser.go +++ b/plugins/parsers/nagios/parser.go @@ -103,7 +103,7 @@ var ( ) func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { - metrics, err := p.Parse([]byte(line)) + metrics, err := p.Parse([]byte(line), "") return metrics[0], err } @@ -111,7 +111,7 @@ func (p *Parser) SetDefaultTags(tags map[string]string) { p.DefaultTags = tags } -func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { +func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { ts := time.Now().UTC() s := bufio.NewScanner(bytes.NewReader(buf)) diff --git a/plugins/parsers/nagios/parser_test.go b/plugins/parsers/nagios/parser_test.go index d0c4e1b44..7b47d8fc4 100644 --- a/plugins/parsers/nagios/parser_test.go +++ b/plugins/parsers/nagios/parser_test.go @@ -466,7 +466,7 @@ with three lines for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - metrics, err := parser.Parse([]byte(tt.input)) + metrics, err := parser.Parse([]byte(tt.input), "") tt.assertF(t, metrics, err) }) } @@ -562,7 +562,7 @@ func TestBenchmarkData(t *testing.T) { ), } - actual, err := plugin.Parse([]byte(benchmarkData)) + actual, err := plugin.Parse([]byte(benchmarkData), "") require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) } @@ -572,6 +572,6 @@ func BenchmarkParsing(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse([]byte(benchmarkData)) + plugin.Parse([]byte(benchmarkData), "") } } diff --git a/plugins/parsers/openmetrics/parser.go b/plugins/parsers/openmetrics/parser.go index 19f9d0dd8..39b6ddcf8 100644 --- a/plugins/parsers/openmetrics/parser.go +++ b/plugins/parsers/openmetrics/parser.go @@ -44,7 +44,7 @@ func (p *Parser) SetDefaultTags(tags map[string]string) { p.DefaultTags = tags } -func (p *Parser) Parse(data []byte) ([]telegraf.Metric, error) { +func (p *Parser) Parse(data []byte, extra string) ([]telegraf.Metric, error) { // Determine the metric transport-type derived from the response header contentType := p.Header.Get("Content-Type") var mediaType string @@ -102,7 +102,7 @@ func (p 
*Parser) Parse(data []byte) ([]telegraf.Metric, error) { } func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { - metrics, err := p.Parse([]byte(line)) + metrics, err := p.Parse([]byte(line), "") if err != nil { return nil, err } diff --git a/plugins/parsers/openmetrics/parser_test.go b/plugins/parsers/openmetrics/parser_test.go index fc4c2cf6f..0da8db43b 100644 --- a/plugins/parsers/openmetrics/parser_test.go +++ b/plugins/parsers/openmetrics/parser_test.go @@ -160,7 +160,7 @@ func BenchmarkParsingMetricVersion1(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse(benchmarkData) + plugin.Parse(benchmarkData, "") } } @@ -173,6 +173,6 @@ func BenchmarkParsingMetricVersion2(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse(benchmarkData) + plugin.Parse(benchmarkData, "") } } diff --git a/plugins/parsers/opentsdb/parser.go b/plugins/parsers/opentsdb/parser.go index 5c36ca3af..eb5efc3ce 100644 --- a/plugins/parsers/opentsdb/parser.go +++ b/plugins/parsers/opentsdb/parser.go @@ -20,7 +20,7 @@ type Parser struct { Log telegraf.Logger `toml:"-"` } -func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { +func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { var metrics []telegraf.Metric scanner := bufio.NewScanner(bytes.NewReader(buf)) diff --git a/plugins/parsers/opentsdb/parser_test.go b/plugins/parsers/opentsdb/parser_test.go index 4c5d4949c..03d302a2c 100644 --- a/plugins/parsers/opentsdb/parser_test.go +++ b/plugins/parsers/opentsdb/parser_test.go @@ -231,7 +231,7 @@ func TestParse(t *testing.T) { t.Run(tt.name, func(t *testing.T) { p := &Parser{Log: testutil.Logger{}} - actual, err := p.Parse(tt.input) + actual, err := p.Parse(tt.input, "") require.NoError(t, err) testutil.RequireMetricsEqual(t, tt.expected, actual) @@ -301,7 +301,7 @@ func TestParse_DefaultTags(t *testing.T) { p := &Parser{Log: testutil.Logger{}} p.SetDefaultTags(tt.defaultTags) - actual, err := p.Parse(tt.input) + actual, err := p.Parse(tt.input, "") require.NoError(t, err) testutil.RequireMetricsEqual(t, tt.expected, actual) @@ -343,7 +343,7 @@ func TestBenchmarkData(t *testing.T) { ), } - actual, err := plugin.Parse([]byte(benchmarkData)) + actual, err := plugin.Parse([]byte(benchmarkData), "") require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) } @@ -353,6 +353,6 @@ func BenchmarkParsing(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse([]byte(benchmarkData)) + plugin.Parse([]byte(benchmarkData), "") } } diff --git a/plugins/parsers/parquet/parser.go b/plugins/parsers/parquet/parser.go index f36b31fd6..f6b096d65 100644 --- a/plugins/parsers/parquet/parser.go +++ b/plugins/parsers/parquet/parser.go @@ -44,7 +44,7 @@ func (p *Parser) Init() error { return nil } -func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { +func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { reader := bytes.NewReader(buf) parquetReader, err := file.NewParquetReader(reader) if err != nil { @@ -121,7 +121,7 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { } func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { - metrics, err := p.Parse([]byte(line)) + metrics, err := p.Parse([]byte(line), "") 
	if err != nil {
		return nil, err
	}
diff --git a/plugins/parsers/parquet/parser_test.go b/plugins/parsers/parquet/parser_test.go
index b9b4635c9..011425af7 100644
--- a/plugins/parsers/parquet/parser_test.go
+++ b/plugins/parsers/parquet/parser_test.go
@@ -71,6 +71,6 @@ func BenchmarkParsing(b *testing.B) {
 	b.ResetTimer()
 	for n := 0; n < b.N; n++ {
 		//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
-		plugin.Parse(benchmarkData)
+		plugin.Parse(benchmarkData, "")
 	}
 }
diff --git a/plugins/parsers/phasor_binary/README.md b/plugins/parsers/phasor_binary/README.md
new file mode 100644
index 000000000..244f09d0f
--- /dev/null
+++ b/plugins/parsers/phasor_binary/README.md
@@ -0,0 +1,32 @@
+# Phasor Binary Format Parser Plugin
+
+The `phasor_binary` parser converts phasor binary payloads consumed from Kafka
+into Telegraf metrics. Because the input is a proprietary binary format, no
+textual sample data is included for testing.
+
+## Configuration
+
+```toml
+[[inputs.cl_kafka_consumer]]
+  ## Kafka brokers.
+  brokers = ["localhost:9092"]
+
+  ## Set the minimal supported Kafka version. Should be a string of four
+  ## dot-separated digits for 0.x versions and three digits for versions
+  ## starting from 1.0.0. This setting enables the use of new Kafka
+  ## features and APIs. Must be 0.10.2.0 (the default) or greater.
+  ## Please check the list of supported versions at
+  ## https://pkg.go.dev/github.com/Shopify/sarama#SupportedVersions
+  ## ex: kafka_version = "2.6.0"
+  ## ex: kafka_version = "0.10.2.0"
+  kafka_version = "3.9.0"
+
+  ## Topics to consume.
+  topics = ["telegraf"]
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "phasor_binary"
+```
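+
+## Metrics
+
+Each payload carries one second of data sampled at 50 points, so the parser
+emits 50 metrics per message, spaced 20 ms apart and sharing the payload's
+epoch second. The measurement is `current` (device type `0x01`) or `voltage`
+(device type `0x02`), and the `device` tag is the Kafka topic with any
+trailing `_Phasor` suffix removed. Analog channels yield `cN_amp`, `cN_pa`
+and `cN_rms` fields, switch channels yield `i1` through `i16`, and the
+device-specific block adds `p`, `q`, `s` and `pf` for current devices or
+`f`, `df` and `uab`/`ubc`/`uca` phasor fields for voltage devices.
+
+An abridged, illustrative output line (the values are made up):
+
+```text
+current,device=Feeder1 c1_amp=1.02,c1_pa=0.5,c1_rms=0.72,i1=0u,p=10.5 1530939936000000000
+```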
diff --git a/plugins/parsers/phasor_binary/parser.go b/plugins/parsers/phasor_binary/parser.go
new file mode 100644
index 000000000..119ea9eb5
--- /dev/null
+++ b/plugins/parsers/phasor_binary/parser.go
@@ -0,0 +1,204 @@
+package phasor_binary
+
+import (
+	"encoding/binary"
+	"errors"
+	"math"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"
+	"github.com/influxdata/telegraf/plugins/parsers"
+)
+
+// Parser implements the telegraf.Parser interface and holds the state
+// required to decode phasor binary payloads.
+type Parser struct {
+	Log telegraf.Logger
+
+	// pointFrequency is the number of data points per second in a payload
+	pointFrequency int
+
+	defaultMetricName string
+}
+
+const (
+	deviceTypeI = 0x01 // current device
+	deviceTypeU = 0x02 // voltage device
+
+	dataLengthI = 11306 // minimum payload length for current devices
+	dataLengthU = 14106 // minimum payload length for voltage devices
+)
+
+func (p *Parser) Init() error {
+	p.pointFrequency = 50
+	return nil
+}
+
+// Parse decodes one phasor payload; the second argument carries the Kafka
+// topic the payload was consumed from.
+func (p *Parser) Parse(data []byte, topic string) ([]telegraf.Metric, error) {
+	metrics, deviceType, err := p.checkHeaderAndInitMetrics(data, topic)
+	if err != nil {
+		return nil, err
+	}
+
+	p.fillAnalogChanMetrics(metrics, data, 6)
+	p.fillSwitchChanMetrics(metrics, data, 9606)
+
+	switch deviceType {
+	case deviceTypeI:
+		p.fillPQSPFChanMetrics(metrics, data, 9706)
+	case deviceTypeU:
+		p.fillFdFChanMetrics(metrics, data, 9706)
+		p.fillUABUBCUCAChanMetrics(metrics, data, 10506)
+	default:
+		return nil, errors.New("illegal device type")
+	}
+
+	return metrics, nil
+}
+
+// ParseLine is not supported because the format is binary, not line-oriented.
+func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
+	return nil, errors.New("not implemented")
+}
+
+// SetDefaultTags is a no-op; this parser derives its tags from the topic.
+func (p *Parser) SetDefaultTags(tags map[string]string) {}
+
+// Register the parser
+func init() {
+	parsers.Add("phasor_binary",
+		func(defaultMetricName string) telegraf.Parser {
+			return &Parser{defaultMetricName: defaultMetricName}
+		},
+	)
+}
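+
+// Payload layout, as implied by the offsets and lengths used below
+// (all multi-byte values little-endian, 50 points per channel):
+//
+//	0..3        uint32 epoch seconds shared by all points in the payload
+//	4           device type (0x01 current, 0x02 voltage)
+//	5           channel count (0x0e for current, 0x0f for voltage)
+//	6..9605     8 analog channels x 50 points x 3 float64 (amp, pa, rms)
+//	9606..9705  2 switch bytes x 50 points, 8 boolean channels per byte
+//	9706..      device-specific float64 blocks: p/q/s/pf (current) or
+//	            f/df followed by uab/ubc/uca phasors at 10506 (voltage)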
+
+// checkHeaderAndInitMetrics validates the payload header and pre-allocates
+// one metric per data point, tagged with the device derived from the topic.
+func (p *Parser) checkHeaderAndInitMetrics(data []byte, topic string) ([]telegraf.Metric, int, error) {
+	if len(data) < 6 {
+		return nil, 0, errors.New("no valid data")
+	}
+
+	second := int64(binary.LittleEndian.Uint32(data[:4]))
+	deviceType := int(data[4])
+	metrics := make([]telegraf.Metric, p.pointFrequency)
+
+	device, _ := strings.CutSuffix(topic, "_Phasor")
+	switch deviceType {
+	case deviceTypeI:
+		if len(data) < dataLengthI {
+			return nil, 0, errors.New("illegal current data length")
+		}
+		if data[5] != 0x0e { // 14, current channel number
+			return nil, 0, errors.New("illegal current channel number")
+		}
+
+		for i := range metrics {
+			metrics[i] = metric.New("current",
+				map[string]string{"device": device},
+				make(map[string]any, 44), // 3*8+2*8+4 fields
+				time.Unix(second, int64(i*1e9/p.pointFrequency)))
+		}
+	case deviceTypeU:
+		if len(data) < dataLengthU {
+			return nil, 0, errors.New("illegal voltage data length")
+		}
+		if data[5] != 0x0f { // 15, voltage channel number
+			return nil, 0, errors.New("illegal voltage channel number")
+		}
+
+		for i := range metrics {
+			metrics[i] = metric.New("voltage",
+				map[string]string{"device": device},
+				make(map[string]any, 51), // 3*8+2*8+2+3*3 fields
+				time.Unix(second, int64(i*1e9/p.pointFrequency)))
+		}
+	default:
+		return nil, 0, errors.New("illegal device type")
+	}
+
+	return metrics, deviceType, nil
+}
+
+// fillAnalogChanMetrics decodes the analog (voltage or current) channels
+// into amp, pa and rms fields per channel.
+func (p *Parser) fillAnalogChanMetrics(metrics []telegraf.Metric, data []byte, begin int) {
+	for ci := range 8 {
+		chanNo := strconv.Itoa(ci + 1)
+		for mj := range metrics {
+			b := begin + (ci*p.pointFrequency+mj)*24
+
+			amp := math.Float64frombits(binary.LittleEndian.Uint64(data[b : b+8]))
+			pa := math.Float64frombits(binary.LittleEndian.Uint64(data[b+8 : b+16]))
+			rms := math.Float64frombits(binary.LittleEndian.Uint64(data[b+16 : b+24]))
+
+			metrics[mj].AddField("c"+chanNo+"_amp", amp)
+			metrics[mj].AddField("c"+chanNo+"_pa", pa)
+			metrics[mj].AddField("c"+chanNo+"_rms", rms)
+		}
+	}
+}
+
+// fillSwitchChanMetrics unpacks the switch bytes into 16 boolean channels.
+func (p *Parser) fillSwitchChanMetrics(metrics []telegraf.Metric, data []byte, begin int) {
+	for ci := range 2 {
+		for mj := range metrics {
+			b := begin + ci*p.pointFrequency + mj
+			for bk := range 8 {
+				chanNo := strconv.Itoa(ci*8 + bk + 1)
+
+				metrics[mj].AddField("i"+chanNo, uint8((data[b]>>bk)&1))
+			}
+		}
+	}
+}
+
+// fillPQSPFChanMetrics decodes power fields for current devices.
+func (p *Parser) fillPQSPFChanMetrics(metrics []telegraf.Metric, data []byte, begin int) {
+	for ci, channel := range []string{"p", "q", "s", "pf"} {
+		for mj := range metrics {
+			b := begin + (ci*p.pointFrequency+mj)*8
+
+			metrics[mj].AddField(channel, math.Float64frombits(binary.LittleEndian.Uint64(data[b:b+8])))
+		}
+	}
+}
+
+// fillFdFChanMetrics decodes frequency fields for voltage devices.
+func (p *Parser) fillFdFChanMetrics(metrics []telegraf.Metric, data []byte, begin int) {
+	for ci, channel := range []string{"f", "df"} {
+		for mj := range metrics {
+			b := begin + (ci*p.pointFrequency+mj)*8
+
+			metrics[mj].AddField(channel, math.Float64frombits(binary.LittleEndian.Uint64(data[b:b+8])))
+		}
+	}
+}
+
+// fillUABUBCUCAChanMetrics decodes the line-to-line voltage phasors.
+func (p *Parser) fillUABUBCUCAChanMetrics(metrics []telegraf.Metric, data []byte, begin int) {
+	for ci, channel := range []string{"uab", "ubc", "uca"} {
+		for mj := range metrics {
+			b := begin + (ci*p.pointFrequency+mj)*24
+
+			amp := math.Float64frombits(binary.LittleEndian.Uint64(data[b : b+8]))
+			pa := math.Float64frombits(binary.LittleEndian.Uint64(data[b+8 : b+16]))
+			rms := math.Float64frombits(binary.LittleEndian.Uint64(data[b+16 : b+24]))
+
+			metrics[mj].AddField(channel+"_amp", amp)
+			metrics[mj].AddField(channel+"_pa", pa)
+			metrics[mj].AddField(channel+"_rms", rms)
+		}
+	}
+}
diff --git a/plugins/parsers/prometheus/parser.go b/plugins/parsers/prometheus/parser.go
index e2d5a937c..a65360535 100644
--- a/plugins/parsers/prometheus/parser.go
+++ b/plugins/parsers/prometheus/parser.go
@@ -30,7 +30,7 @@ func (p *Parser) SetDefaultTags(tags map[string]string) {
 	p.DefaultTags = tags
 }
 
-func (p *Parser) Parse(data []byte) ([]telegraf.Metric, error) {
+func (p *Parser) Parse(data []byte, extra string) ([]telegraf.Metric, error) {
 	// Determine the metric transport-type derived from the response header and
 	// create a matching decoder.
format := expfmt.NewFormat(expfmt.TypeProtoCompact) @@ -74,7 +74,7 @@ func (p *Parser) Parse(data []byte) ([]telegraf.Metric, error) { } func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { - metrics, err := p.Parse([]byte(line)) + metrics, err := p.Parse([]byte(line), "") if err != nil { return nil, err } diff --git a/plugins/parsers/prometheus/parser_test.go b/plugins/parsers/prometheus/parser_test.go index 2df3d23dc..a534cb197 100644 --- a/plugins/parsers/prometheus/parser_test.go +++ b/plugins/parsers/prometheus/parser_test.go @@ -160,7 +160,7 @@ func BenchmarkParsingMetricVersion1(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse(benchmarkData) + plugin.Parse(benchmarkData, "") } } @@ -173,6 +173,6 @@ func BenchmarkParsingMetricVersion2(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse(benchmarkData) + plugin.Parse(benchmarkData, "") } } diff --git a/plugins/parsers/prometheusremotewrite/parser.go b/plugins/parsers/prometheusremotewrite/parser.go index 3f1788b75..99a711975 100644 --- a/plugins/parsers/prometheusremotewrite/parser.go +++ b/plugins/parsers/prometheusremotewrite/parser.go @@ -15,7 +15,7 @@ type Parser struct { DefaultTags map[string]string } -func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { +func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { var err error var metrics []telegraf.Metric var req prompb.WriteRequest @@ -44,7 +44,7 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { } func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { - metrics, err := p.Parse([]byte(line)) + metrics, err := p.Parse([]byte(line), "") if err != nil { return nil, err } diff --git a/plugins/parsers/prometheusremotewrite/parser_test.go b/plugins/parsers/prometheusremotewrite/parser_test.go index 8aba5ad45..8a57a422c 100644 --- a/plugins/parsers/prometheusremotewrite/parser_test.go +++ b/plugins/parsers/prometheusremotewrite/parser_test.go @@ -76,7 +76,7 @@ func TestCases(t *testing.T) { require.NoError(t, err) // Act and assert - parsed, err := parser.Parse(inputBytes) + parsed, err := parser.Parse(inputBytes, "") require.NoError(t, err) require.Len(t, parsed, len(expected)) // Ignore type when comparing, because expected metrics are parsed from influx lines and thus always untyped @@ -97,7 +97,7 @@ func BenchmarkParsingMetricVersion1(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - parser.Parse(benchmarkData) + parser.Parse(benchmarkData, "") } } @@ -112,7 +112,7 @@ func BenchmarkParsingMetricVersion2(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - parser.Parse(benchmarkData) + parser.Parse(benchmarkData, "") } } @@ -170,7 +170,7 @@ func TestParse(t *testing.T) { DefaultTags: map[string]string{}, } - metrics, err := parser.Parse(inoutBytes) + metrics, err := parser.Parse(inoutBytes, "") require.NoError(t, err) require.Len(t, metrics, 2) testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) @@ -279,7 +279,7 @@ func TestHistograms(t *testing.T) { parser := Parser{ DefaultTags: map[string]string{}, } - metrics, err := parser.Parse(inoutBytes) + metrics, err := parser.Parse(inoutBytes, "") 
require.NoError(t, err) require.Len(t, metrics, 22) testutil.RequireMetricsSubset(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) @@ -323,7 +323,7 @@ func TestDefaultTags(t *testing.T) { }, } - metrics, err := parser.Parse(inoutBytes) + metrics, err := parser.Parse(inoutBytes, "") require.NoError(t, err) require.Len(t, metrics, 1) testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) @@ -365,7 +365,7 @@ func TestMetricsWithTimestamp(t *testing.T) { DefaultTags: map[string]string{}, } - metrics, err := parser.Parse(inoutBytes) + metrics, err := parser.Parse(inoutBytes, "") require.NoError(t, err) require.Len(t, metrics, 1) testutil.RequireMetricsEqual(t, expected, metrics, testutil.SortMetrics()) @@ -430,7 +430,7 @@ func TestBenchmarkData(t *testing.T) { require.NoError(t, err) plugin := &Parser{} - actual, err := plugin.Parse(benchmarkData) + actual, err := plugin.Parse(benchmarkData, "") require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) } @@ -444,6 +444,6 @@ func BenchmarkParsing(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse(benchmarkData) + plugin.Parse(benchmarkData, "") } } diff --git a/plugins/parsers/value/parser.go b/plugins/parsers/value/parser.go index 6d32126df..e39f6d6bb 100644 --- a/plugins/parsers/value/parser.go +++ b/plugins/parsers/value/parser.go @@ -45,7 +45,7 @@ func (v *Parser) Init() error { return nil } -func (v *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { +func (v *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { vStr := string(bytes.TrimSpace(bytes.Trim(buf, "\x00"))) // unless it's a string, separate out any fields in the buffer, @@ -96,7 +96,7 @@ func (v *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { } func (v *Parser) ParseLine(line string) (telegraf.Metric, error) { - metrics, err := v.Parse([]byte(line)) + metrics, err := v.Parse([]byte(line), "") if err != nil { return nil, err diff --git a/plugins/parsers/value/parser_test.go b/plugins/parsers/value/parser_test.go index afdf6a416..19c520e8c 100644 --- a/plugins/parsers/value/parser_test.go +++ b/plugins/parsers/value/parser_test.go @@ -106,7 +106,7 @@ func TestParseValidValues(t *testing.T) { DataType: tt.dtype, } require.NoError(t, plugin.Init()) - actual, err := plugin.Parse(tt.input) + actual, err := plugin.Parse(tt.input, "") require.NoError(t, err) require.Len(t, actual, 1) testutil.RequireMetricEqual(t, expected, actual[0], testutil.IgnoreTime()) @@ -188,7 +188,7 @@ func TestParseCustomFieldName(t *testing.T) { } require.NoError(t, parser.Init()) - metrics, err := parser.Parse([]byte(`55`)) + metrics, err := parser.Parse([]byte(`55`), "") require.NoError(t, err) require.Equal(t, map[string]interface{}{"penguin": int64(55)}, metrics[0].Fields()) } @@ -223,7 +223,7 @@ func TestParseInvalidValues(t *testing.T) { DataType: tt.dtype, } require.NoError(t, plugin.Init()) - actual, err := plugin.Parse(tt.input) + actual, err := plugin.Parse(tt.input, "") require.ErrorContains(t, err, "invalid syntax") require.Empty(t, actual) }) @@ -282,7 +282,7 @@ func TestParseValidValuesDefaultTags(t *testing.T) { require.NoError(t, plugin.Init()) plugin.SetDefaultTags(map[string]string{"test": "tag"}) - actual, err := plugin.Parse([]byte("55")) + actual, err := plugin.Parse([]byte("55"), "") require.NoError(t, err) require.Len(t, actual, 1) @@ -295,7 
+295,7 @@ func TestParseValuesWithNullCharacter(t *testing.T) { DataType: "integer", } require.NoError(t, parser.Init()) - metrics, err := parser.Parse([]byte("55\x00")) + metrics, err := parser.Parse([]byte("55\x00"), "") require.NoError(t, err) require.Len(t, metrics, 1) require.Equal(t, "value_test", metrics[0].Name()) @@ -330,7 +330,7 @@ func TestBenchmarkData(t *testing.T) { ), } - actual, err := plugin.Parse([]byte(benchmarkData)) + actual, err := plugin.Parse([]byte(benchmarkData), "") require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) } @@ -341,6 +341,6 @@ func BenchmarkParsing(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse([]byte(benchmarkData)) + plugin.Parse([]byte(benchmarkData), "") } } diff --git a/plugins/parsers/wavefront/parser.go b/plugins/parsers/wavefront/parser.go index 2b3968f94..d3c73b252 100644 --- a/plugins/parsers/wavefront/parser.go +++ b/plugins/parsers/wavefront/parser.go @@ -67,7 +67,7 @@ func (p *Parser) Init() error { func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { buf := []byte(line) - metrics, err := p.Parse(buf) + metrics, err := p.Parse(buf, "") if err != nil { return nil, err } @@ -79,7 +79,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { return nil, nil } -func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { +func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { pp := p.parsers.Get().(*PointParser) defer p.parsers.Put(pp) return pp.Parse(buf) diff --git a/plugins/parsers/wavefront/parser_test.go b/plugins/parsers/wavefront/parser_test.go index 95dbe51b4..d9e4525e7 100644 --- a/plugins/parsers/wavefront/parser_test.go +++ b/plugins/parsers/wavefront/parser_test.go @@ -15,25 +15,25 @@ func TestParse(t *testing.T) { parser := &Parser{} require.NoError(t, parser.Init()) - parsedMetrics, err := parser.Parse([]byte("test.metric 1")) + parsedMetrics, err := parser.Parse([]byte("test.metric 1"), "") require.NoError(t, err) testMetric := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) require.Equal(t, parsedMetrics[0].Name(), testMetric.Name()) require.Equal(t, parsedMetrics[0].Fields(), testMetric.Fields()) - parsedMetrics, err = parser.Parse([]byte("\u2206test.delta 1 1530939936")) + parsedMetrics, err = parser.Parse([]byte("\u2206test.delta 1 1530939936"), "") require.NoError(t, err) testMetric = metric.New("\u2206test.delta", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) require.EqualValues(t, parsedMetrics[0], testMetric) - parsedMetrics, err = parser.Parse([]byte("\u0394test.delta 1 1530939936")) + parsedMetrics, err = parser.Parse([]byte("\u0394test.delta 1 1530939936"), "") require.NoError(t, err) testMetric = metric.New("\u0394test.delta", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) require.EqualValues(t, parsedMetrics[0], testMetric) - parsedMetrics, err = parser.Parse([]byte("\u0394test.delta 1.234 1530939936 source=\"mysource\" tag2=value2")) + parsedMetrics, err = parser.Parse([]byte("\u0394test.delta 1.234 1530939936 source=\"mysource\" tag2=value2"), "") require.NoError(t, err) testMetric = metric.New( "\u0394test.delta", @@ -43,22 +43,22 @@ func TestParse(t *testing.T) { ) require.EqualValues(t, parsedMetrics[0], testMetric) - parsedMetrics, err = 
parser.Parse([]byte("test.metric 1 1530939936")) + parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936"), "") require.NoError(t, err) testMetric = metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) require.EqualValues(t, parsedMetrics[0], testMetric) - parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource")) + parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource"), "") require.NoError(t, err) testMetric = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) require.EqualValues(t, parsedMetrics[0], testMetric) - parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 source=\"mysource\"")) + parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 source=\"mysource\""), "") require.NoError(t, err) testMetric = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) require.EqualValues(t, parsedMetrics[0], testMetric) - parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2")) + parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2"), "") require.NoError(t, err) testMetric = metric.New( "test.metric", @@ -68,7 +68,7 @@ func TestParse(t *testing.T) { ) require.EqualValues(t, parsedMetrics[0], testMetric) - parsedMetrics, err = parser.Parse([]byte("\"test.metric\" -1.1234 1530939936 \"source\"=\"mysource\" tag2=value2")) + parsedMetrics, err = parser.Parse([]byte("\"test.metric\" -1.1234 1530939936 \"source\"=\"mysource\" tag2=value2"), "") require.NoError(t, err) testMetric = metric.New( "test.metric", @@ -78,7 +78,7 @@ func TestParse(t *testing.T) { ) require.EqualValues(t, parsedMetrics[0], testMetric) - parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234e04 1530939936 \"source\"=\"mysource\" tag2=value2")) + parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234e04 1530939936 \"source\"=\"mysource\" tag2=value2"), "") require.NoError(t, err) testMetric = metric.New( "test.metric", @@ -88,7 +88,7 @@ func TestParse(t *testing.T) { ) require.EqualValues(t, parsedMetrics[0], testMetric) - parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234e-04 1530939936 \"source\"=\"mysource\" tag2=value2")) + parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234e-04 1530939936 \"source\"=\"mysource\" tag2=value2"), "") require.NoError(t, err) testMetric = metric.New( "test.metric", @@ -98,7 +98,7 @@ func TestParse(t *testing.T) { ) require.EqualValues(t, parsedMetrics[0], testMetric) - parsedMetrics, err = parser.Parse([]byte("test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 ")) + parsedMetrics, err = parser.Parse([]byte("test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 "), "") require.NoError(t, err) testMetric = metric.New( "test.metric", @@ -159,7 +159,7 @@ func TestParseMultiple(t *testing.T) { parser := &Parser{} require.NoError(t, parser.Init()) - parsedMetrics, err := parser.Parse([]byte("test.metric 1\ntest.metric2 2 1530939936")) + parsedMetrics, err := parser.Parse([]byte("test.metric 1\ntest.metric2 2 1530939936"), "") require.NoError(t, err) testMetric1 := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) testMetric2 := 
metric.New("test.metric2", map[string]string{}, map[string]interface{}{"value": 2.}, time.Unix(1530939936, 0)) @@ -168,7 +168,7 @@ func TestParseMultiple(t *testing.T) { require.Equal(t, parsedMetrics[0].Fields(), testMetrics[0].Fields()) require.EqualValues(t, parsedMetrics[1], testMetrics[1]) - parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource\n\"test.metric\" 1.1234 1530939936 source=\"mysource\"")) + parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource\n\"test.metric\" 1.1234 1530939936 source=\"mysource\""), "") require.NoError(t, err) testMetric1 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) testMetric2 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) @@ -177,9 +177,10 @@ func TestParseMultiple(t *testing.T) { parsedMetrics, err = parser.Parse( []byte( - "\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2\n" + + "\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2\n"+ "test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 ", ), + "", ) require.NoError(t, err) testMetric1 = metric.New( @@ -199,6 +200,7 @@ func TestParseMultiple(t *testing.T) { parsedMetrics, err = parser.Parse( []byte("test.metric 1 1530939936 source=mysource\n\"test.metric\" 1.1234 1530939936 source=\"mysource\"\ntest.metric3 333 1530939936 tagit=valueit"), + "", ) require.NoError(t, err) testMetric1 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) @@ -227,31 +229,31 @@ func TestParseInvalid(t *testing.T) { parser := &Parser{} require.NoError(t, parser.Init()) - _, err := parser.Parse([]byte("test.metric")) + _, err := parser.Parse([]byte("test.metric"), "") require.Error(t, err) - _, err = parser.Parse([]byte("test.metric string")) + _, err = parser.Parse([]byte("test.metric string"), "") require.Error(t, err) - _, err = parser.Parse([]byte("test.metric 1 string")) + _, err = parser.Parse([]byte("test.metric 1 string"), "") require.Error(t, err) - _, err = parser.Parse([]byte("test.\u2206delta 1")) + _, err = parser.Parse([]byte("test.\u2206delta 1"), "") require.Error(t, err) - _, err = parser.Parse([]byte("test.metric 1 1530939936 tag_no_pair")) + _, err = parser.Parse([]byte("test.metric 1 1530939936 tag_no_pair"), "") require.Error(t, err) - _, err = parser.Parse([]byte("test.metric 1 1530939936 tag_broken_value=\"")) + _, err = parser.Parse([]byte("test.metric 1 1530939936 tag_broken_value=\""), "") require.Error(t, err) - _, err = parser.Parse([]byte("\"test.metric 1 1530939936")) + _, err = parser.Parse([]byte("\"test.metric 1 1530939936"), "") require.Error(t, err) - _, err = parser.Parse([]byte("test.metric 1 1530939936 tag1=val\\\"ue1")) + _, err = parser.Parse([]byte("test.metric 1 1530939936 tag1=val\\\"ue1"), "") require.Error(t, err) - _, err = parser.Parse([]byte("\"test.metric\" -1.12-34 1530939936 \"source\"=\"mysource\" tag2=value2")) + _, err = parser.Parse([]byte("\"test.metric\" -1.12-34 1530939936 \"source\"=\"mysource\" tag2=value2"), "") require.Error(t, err) } @@ -260,7 +262,7 @@ func TestParseDefaultTags(t *testing.T) { require.NoError(t, parser.Init()) parser.SetDefaultTags(map[string]string{"myDefault": "value1", "another": "test2"}) - parsedMetrics, err := parser.Parse([]byte("test.metric 1 1530939936")) + parsedMetrics, err := 
parser.Parse([]byte("test.metric 1 1530939936"), "") require.NoError(t, err) testMetric := metric.New( "test.metric", @@ -270,7 +272,7 @@ func TestParseDefaultTags(t *testing.T) { ) require.EqualValues(t, parsedMetrics[0], testMetric) - parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource")) + parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource"), "") require.NoError(t, err) testMetric = metric.New( "test.metric", @@ -280,7 +282,7 @@ func TestParseDefaultTags(t *testing.T) { ) require.EqualValues(t, parsedMetrics[0], testMetric) - parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 another=\"test3\"")) + parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 another=\"test3\""), "") require.NoError(t, err) testMetric = metric.New( "test.metric", @@ -326,7 +328,7 @@ func TestBenchmarkData(t *testing.T) { ), } - actual, err := plugin.Parse([]byte(benchmarkData)) + actual, err := plugin.Parse([]byte(benchmarkData), "") require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) } @@ -337,6 +339,6 @@ func BenchmarkParsing(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse([]byte(benchmarkData)) + plugin.Parse([]byte(benchmarkData), "") } } diff --git a/plugins/parsers/xpath/parser.go b/plugins/parsers/xpath/parser.go index cf2443922..86fbd8d23 100644 --- a/plugins/parsers/xpath/parser.go +++ b/plugins/parsers/xpath/parser.go @@ -196,7 +196,7 @@ func (p *Parser) Init() error { return nil } -func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { +func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { t := time.Now() // Parse the XML @@ -236,7 +236,7 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { } func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { - metrics, err := p.Parse([]byte(line)) + metrics, err := p.Parse([]byte(line), "") if err != nil { return nil, err } diff --git a/plugins/parsers/xpath/parser_test.go b/plugins/parsers/xpath/parser_test.go index af28561e0..8d5e78199 100644 --- a/plugins/parsers/xpath/parser_test.go +++ b/plugins/parsers/xpath/parser_test.go @@ -997,7 +997,7 @@ func TestParseMultiNodes(t *testing.T) { } require.NoError(t, parser.Init()) - actual, err := parser.Parse([]byte(tt.input)) + actual, err := parser.Parse([]byte(tt.input), "") require.NoError(t, err) testutil.RequireMetricsEqual(t, tt.expected, actual) @@ -1183,7 +1183,7 @@ func TestEmptySelection(t *testing.T) { } require.NoError(t, parser.Init()) - _, err := parser.Parse([]byte(tt.input)) + _, err := parser.Parse([]byte(tt.input), "") require.Error(t, err) require.Equal(t, "cannot parse with empty selection node", err.Error()) }) @@ -1257,7 +1257,7 @@ func TestEmptySelectionAllowed(t *testing.T) { } require.NoError(t, parser.Init()) - _, err := parser.Parse([]byte(tt.input)) + _, err := parser.Parse([]byte(tt.input), "") require.NoError(t, err) }) } @@ -1365,7 +1365,7 @@ func TestTestCases(t *testing.T) { Log: testutil.Logger{Name: "parsers.xml"}, } require.NoError(t, parser.Init()) - outputs, err := parser.Parse(content) + outputs, err := parser.Parse(content, "") if len(expectedErrors) == 0 { require.NoError(t, err) } @@ -1563,7 +1563,7 @@ func TestBenchmarkDataXML(t *testing.T) { } require.NoError(t, plugin.Init()) - actual, err := plugin.Parse([]byte(benchmarkDataXML)) + actual, err := 
plugin.Parse([]byte(benchmarkDataXML), "") require.NoError(t, err) testutil.RequireMetricsEqual(t, benchmarkExpectedMetrics, actual) } @@ -1579,7 +1579,7 @@ func BenchmarkParsingXML(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse([]byte(benchmarkDataXML)) + plugin.Parse([]byte(benchmarkDataXML), "") } } @@ -1626,7 +1626,7 @@ func TestBenchmarkDataJSON(t *testing.T) { } require.NoError(t, plugin.Init()) - actual, err := plugin.Parse([]byte(benchmarkDataJSON)) + actual, err := plugin.Parse([]byte(benchmarkDataJSON), "") require.NoError(t, err) testutil.RequireMetricsEqual(t, benchmarkExpectedMetrics, actual) } @@ -1642,7 +1642,7 @@ func BenchmarkParsingJSON(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse([]byte(benchmarkDataJSON)) + plugin.Parse([]byte(benchmarkDataJSON), "") } } @@ -1678,7 +1678,7 @@ func BenchmarkParsingProtobuf(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse(benchmarkData) + plugin.Parse(benchmarkData, "") } } @@ -1751,7 +1751,7 @@ func TestBenchmarkDataMsgPack(t *testing.T) { actual := make([]telegraf.Metric, 0, 2) for _, msg := range benchmarkDataMsgPack { - m, err := plugin.Parse(msg) + m, err := plugin.Parse(msg, "") require.NoError(t, err) actual = append(actual, m...) } @@ -1782,7 +1782,7 @@ func BenchmarkParsingMsgPack(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse(benchmarkDataMsgPack[n%2]) + plugin.Parse(benchmarkDataMsgPack[n%2], "") } } @@ -1815,6 +1815,6 @@ func BenchmarkParsingCBOR(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse(benchmarkData) + plugin.Parse(benchmarkData, "") } } diff --git a/plugins/processors/dedup/dedup.go b/plugins/processors/dedup/dedup.go index e77b14b5d..b4747d9fc 100644 --- a/plugins/processors/dedup/dedup.go +++ b/plugins/processors/dedup/dedup.go @@ -143,7 +143,7 @@ func (d *Dedup) SetState(state interface{}) error { if !ok { return fmt.Errorf("state has wrong type %T", state) } - metrics, err := p.Parse(data) + metrics, err := p.Parse(data, "") if err == nil { d.Apply(metrics...) 
} diff --git a/plugins/processors/execd/execd.go b/plugins/processors/execd/execd.go index 75b72978f..196ec8176 100644 --- a/plugins/processors/execd/execd.go +++ b/plugins/processors/execd/execd.go @@ -113,7 +113,7 @@ func (e *Execd) cmdReadOut(out io.Reader) { scanner.Buffer(scanBuf, 262144) for scanner.Scan() { - metrics, err := e.parser.Parse(scanner.Bytes()) + metrics, err := e.parser.Parse(scanner.Bytes(), "") if err != nil { e.Log.Errorf("Parse error: %s", err) } diff --git a/plugins/processors/parser/parser.go b/plugins/processors/parser/parser.go index c417a5a94..3725c58fd 100644 --- a/plugins/processors/parser/parser.go +++ b/plugins/processors/parser/parser.go @@ -85,7 +85,7 @@ func (p *Parser) Apply(metrics ...telegraf.Metric) []telegraf.Metric { value = decoded[:n] } - fromFieldMetric, err := p.parser.Parse(value) + fromFieldMetric, err := p.parser.Parse(value, "") if err != nil { p.Log.Errorf("could not parse field %s: %v", field.Key, err) continue @@ -178,7 +178,7 @@ func mergeWithTimestamp(base telegraf.Metric, metrics []telegraf.Metric) telegra } func (p *Parser) parseValue(value string) ([]telegraf.Metric, error) { - return p.parser.Parse([]byte(value)) + return p.parser.Parse([]byte(value), "") } func toBytes(value interface{}) ([]byte, error) { diff --git a/testutil/file.go b/testutil/file.go index 086def3a0..c461bbf06 100644 --- a/testutil/file.go +++ b/testutil/file.go @@ -103,7 +103,7 @@ func ParseMetricsFromFile(filename string, parser telegraf.Parser) ([]telegraf.M continue } - nonutc, err := parser.Parse(line) + nonutc, err := parser.Parse(line, "") if err != nil { return nil, fmt.Errorf("unable to parse metric in %q failed: %w", line, err) } diff --git a/testutil/plugin_input/plugin.go b/testutil/plugin_input/plugin.go index 5f000ce01..c32eddd0a 100644 --- a/testutil/plugin_input/plugin.go +++ b/testutil/plugin_input/plugin.go @@ -150,7 +150,7 @@ func (p *Plugin) Gather(acc telegraf.Accumulator) error { if err != nil { return err } - metrics, err := p.Parser.Parse(data) + metrics, err := p.Parser.Parse(data, "") if err != nil { return err }
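
For reference, a minimal sketch of how a topic-aware caller is expected to use
the widened `Parse(buf []byte, extra string)` interface; `msg` and `acc` are
illustrative names, not part of this change, and most existing call sites
simply pass `""`:

```go
// Forward the Kafka topic as the extra argument so that topic-aware parsers
// such as phasor_binary can derive per-device tags from it; parsers that do
// not use the argument simply ignore it.
metrics, err := parser.Parse(msg.Value, msg.Topic)
if err != nil {
	acc.AddError(err)
	return
}
for _, m := range metrics {
	acc.AddMetric(m)
}
```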