From f7d0087df9b56d93f52e143d2950fa262dfadf85 Mon Sep 17 00:00:00 2001
From: zhuxu
Date: Fri, 21 Nov 2025 19:30:20 +0800
Subject: [PATCH] use topic tag refresh metrics

---
 models/running_parsers.go | 4 +-
 parser.go | 2 +-
 plugins/common/socket/socket_test.go | 8 +-
 plugins/inputs/amqp_consumer/amqp_consumer.go | 2 +-
 .../amqp_consumer/amqp_consumer_test.go | 4 +-
 .../cl_kafka_consumer/kafka_consumer.go | 5 +-
 plugins/inputs/cloud_pubsub/cloud_pubsub.go | 2 +-
 .../cloud_pubsub_push/cloud_pubsub_push.go | 2 +-
 .../directory_monitor/directory_monitor.go | 2 +-
 .../eventhub_consumer/eventhub_consumer.go | 2 +-
 plugins/inputs/exec/exec.go | 2 +-
 plugins/inputs/execd/execd.go | 2 +-
 plugins/inputs/file/file.go | 2 +-
 plugins/inputs/firehose/firehose.go | 2 +-
 .../google_cloud_storage.go | 2 +-
 plugins/inputs/http/http.go | 2 +-
 .../http_listener_v2/http_listener_v2.go | 2 +-
 plugins/inputs/influxdb/influxdb_test.go | 2 +-
 .../influxdb_v2_listener.go | 4 +-
 .../inputs/kafka_consumer/kafka_consumer.go | 2 +-
 .../kinesis_consumer/kinesis_consumer.go | 2 +-
 plugins/inputs/mqtt_consumer/mqtt_consumer.go | 2 +-
 .../mqtt_consumer/mqtt_consumer_test.go | 6 +-
 plugins/inputs/nats_consumer/nats_consumer.go | 2 +-
 plugins/inputs/nsq_consumer/nsq_consumer.go | 2 +-
 plugins/inputs/prometheus/prometheus.go | 2 +-
 .../inputs/socket_listener/socket_listener.go | 2 +-
 plugins/inputs/tail/tail.go | 2 +-
 .../outputs/cloud_pubsub/cloud_pubsub_test.go | 2 +-
 plugins/outputs/cloud_pubsub/topic_stubbed.go | 2 +-
 plugins/outputs/mqtt/mqtt_test.go | 2 +-
 plugins/parsers/avro/parser.go | 4 +-
 plugins/parsers/avro/parser_test.go | 6 +-
 plugins/parsers/binary/parser.go | 4 +-
 plugins/parsers/binary/parser_test.go | 22 ++---
 plugins/parsers/collectd/parser.go | 4 +-
 plugins/parsers/collectd/parser_test.go | 28 +++---
 plugins/parsers/csv/parser.go | 2 +-
 plugins/parsers/csv/parser_test.go | 92 +++++++++----------
 plugins/parsers/dropwizard/parser.go | 4 +-
 plugins/parsers/dropwizard/parser_test.go | 32 +++----
 plugins/parsers/form_urlencoded/parser.go | 4 +-
 .../parsers/form_urlencoded/parser_test.go | 22 ++---
 plugins/parsers/graphite/parser.go | 2 +-
 plugins/parsers/graphite/parser_test.go | 4 +-
 plugins/parsers/grok/parser.go | 2 +-
 plugins/parsers/grok/parser_test.go | 10 +-
 .../parsers/influx/influx_upstream/parser.go | 4 +-
 .../influx/influx_upstream/parser_test.go | 14 +--
 plugins/parsers/influx/parser.go | 4 +-
 plugins/parsers/influx/parser_test.go | 14 +--
 plugins/parsers/json/parser.go | 4 +-
 plugins/parsers/json/parser_test.go | 86 ++++++++---------
 plugins/parsers/json_v2/parser.go | 2 +-
 plugins/parsers/json_v2/parser_test.go | 4 +-
 plugins/parsers/logfmt/parser.go | 4 +-
 plugins/parsers/logfmt/parser_test.go | 6 +-
 plugins/parsers/nagios/parser.go | 4 +-
 plugins/parsers/nagios/parser_test.go | 6 +-
 plugins/parsers/openmetrics/parser.go | 4 +-
 plugins/parsers/openmetrics/parser_test.go | 4 +-
 plugins/parsers/opentsdb/parser.go | 2 +-
 plugins/parsers/opentsdb/parser_test.go | 8 +-
 plugins/parsers/parquet/parser.go | 4 +-
 plugins/parsers/parquet/parser_test.go | 2 +-
 plugins/parsers/phasor_binary/parser.go | 68 +++++++-------
 plugins/parsers/phasor_binary/parser_test.go | 2 +-
 plugins/parsers/prometheus/parser.go | 4 +-
 plugins/parsers/prometheus/parser_test.go | 4 +-
 .../parsers/prometheusremotewrite/parser.go | 4 +-
 .../prometheusremotewrite/parser_test.go | 18 ++--
 plugins/parsers/value/parser.go | 4 +-
 plugins/parsers/value/parser_test.go | 14 +--
 plugins/parsers/wavefront/parser.go | 4 +-
 plugins/parsers/wavefront/parser_test.go | 60 ++++++------
 plugins/parsers/xpath/parser.go | 4 +-
 plugins/parsers/xpath/parser_test.go | 24 ++---
 plugins/processors/dedup/dedup.go | 2 +-
 plugins/processors/execd/execd.go | 2 +-
 plugins/processors/parser/parser.go | 4 +-
 testutil/file.go | 2 +-
 testutil/plugin_input/plugin.go | 2 +-
 82 files changed, 360 insertions(+), 361 deletions(-)

diff --git a/models/running_parsers.go b/models/running_parsers.go
index f9034e92c..7595b1171 100644
--- a/models/running_parsers.go
+++ b/models/running_parsers.go
@@ -73,9 +73,9 @@ func (r *RunningParser) Init() error {
 	return nil
 }
 
-func (r *RunningParser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) {
+func (r *RunningParser) Parse(buf []byte) ([]telegraf.Metric, error) {
 	start := time.Now()
-	m, err := r.Parser.Parse(buf, extra)
+	m, err := r.Parser.Parse(buf)
 	elapsed := time.Since(start)
 	r.ParseTime.Incr(elapsed.Nanoseconds())
 	r.MetricsParsed.Incr(int64(len(m)))
diff --git a/parser.go b/parser.go
index 6b05ac2e2..6111886df 100644
--- a/parser.go
+++ b/parser.go
@@ -7,7 +7,7 @@ type Parser interface {
 	// and parses it into telegraf metrics
 	//
 	// Must be thread-safe.
-	Parse(buf []byte, extra string) ([]Metric, error)
+	Parse(buf []byte) ([]Metric, error)
 
 	// ParseLine takes a single string metric
 	// ie, "cpu.usage.idle 90"
diff --git a/plugins/common/socket/socket_test.go b/plugins/common/socket/socket_test.go
index 0ecd07983..e979f8a36 100644
--- a/plugins/common/socket/socket_test.go
+++ b/plugins/common/socket/socket_test.go
@@ -154,7 +154,7 @@ func TestListenData(t *testing.T) {
 
 			var acc testutil.Accumulator
 			onData := func(remote net.Addr, data []byte, _ time.Time) {
-				m, err := parser.Parse(data, "")
+				m, err := parser.Parse(data)
 				require.NoError(t, err)
 				addr, _, err := net.SplitHostPort(remote.String())
 				if err != nil {
@@ -358,7 +358,7 @@ func TestListenConnection(t *testing.T) {
 			onConnection := func(remote net.Addr, reader io.ReadCloser) {
 				data, err := io.ReadAll(reader)
 				require.NoError(t, err)
-				m, err := parser.Parse(data, "")
+				m, err := parser.Parse(data)
 				require.NoError(t, err)
 				addr, _, err := net.SplitHostPort(remote.String())
 				if err != nil {
@@ -451,7 +451,7 @@ func TestClosingConnections(t *testing.T) {
 
 	var acc testutil.Accumulator
 	onData := func(_ net.Addr, data []byte, _ time.Time) {
-		m, err := parser.Parse(data, "")
+		m, err := parser.Parse(data)
 		require.NoError(t, err)
 		acc.AddMetrics(m)
 	}
@@ -667,7 +667,7 @@ func TestNoSplitter(t *testing.T) {
 	onConnection := func(remote net.Addr, reader io.ReadCloser) {
 		data, err := io.ReadAll(reader)
 		require.NoError(t, err)
-		m, err := parser.Parse(data, "")
+		m, err := parser.Parse(data)
 		require.NoError(t, err)
 		addr, _, err := net.SplitHostPort(remote.String())
 		if err != nil {
diff --git a/plugins/inputs/amqp_consumer/amqp_consumer.go b/plugins/inputs/amqp_consumer/amqp_consumer.go
index 53edf9946..e5a32eab5 100644
--- a/plugins/inputs/amqp_consumer/amqp_consumer.go
+++ b/plugins/inputs/amqp_consumer/amqp_consumer.go
@@ -454,7 +454,7 @@ func (a *AMQPConsumer) onMessage(acc telegraf.TrackingAccumulator, d amqp.Delive
 		return err
 	}
 
-	metrics, err := a.parser.Parse(body, "")
+	metrics, err := a.parser.Parse(body)
 	if err != nil {
 		onError()
 		return err
diff --git a/plugins/inputs/amqp_consumer/amqp_consumer_test.go b/plugins/inputs/amqp_consumer/amqp_consumer_test.go
index 9a56493c9..5dfaf37f1 100644
--- a/plugins/inputs/amqp_consumer/amqp_consumer_test.go
+++ b/plugins/inputs/amqp_consumer/amqp_consumer_test.go
@@ -124,7 +124,7 @@ func TestIntegration(t *testing.T) {
 	}
 	expected := make([]telegraf.Metric, 0, len(metrics))
 	for _, x := range metrics {
-		m, err := parser.Parse([]byte(x), "")
+		m, err := parser.Parse([]byte(x))
 		require.NoError(t, err)
 		expected = append(expected, m...)
 	}
@@ -343,7 +343,7 @@ func TestStartupErrorBehaviorRetry(t *testing.T) {
 	}
 	expected := make([]telegraf.Metric, 0, len(metrics))
 	for _, x := range metrics {
-		m, err := parser.Parse([]byte(x), "")
+		m, err := parser.Parse([]byte(x))
 		require.NoError(t, err)
 		expected = append(expected, m...)
 	}
diff --git a/plugins/inputs/cl_kafka_consumer/kafka_consumer.go b/plugins/inputs/cl_kafka_consumer/kafka_consumer.go
index 68269b74e..cbd39afc5 100644
--- a/plugins/inputs/cl_kafka_consumer/kafka_consumer.go
+++ b/plugins/inputs/cl_kafka_consumer/kafka_consumer.go
@@ -525,7 +525,7 @@ func (h *consumerGroupHandler) handle(session sarama.ConsumerGroupSession, msg *
 			len(msg.Value), h.maxMessageLen)
 	}
 
-	metrics, err := h.parser.Parse(msg.Value, msg.Topic)
+	metrics, err := h.parser.Parse(msg.Value)
 	if err != nil {
 		session.MarkMessage(msg, "")
 		h.release()
@@ -560,8 +560,9 @@ func (h *consumerGroupHandler) handle(session sarama.ConsumerGroupSession, msg *
 
 	// Add topic name as tag with topicTag name specified in the config
 	if len(h.topicTag) > 0 {
+		device, _ := strings.CutSuffix(msg.Topic, "_Phasor")
 		for _, metric := range metrics {
-			metric.AddTag(h.topicTag, msg.Topic)
+			metric.AddTag(h.topicTag, device)
 		}
 	}
diff --git a/plugins/inputs/cloud_pubsub/cloud_pubsub.go b/plugins/inputs/cloud_pubsub/cloud_pubsub.go
index b2fb56748..d91c55f66 100644
--- a/plugins/inputs/cloud_pubsub/cloud_pubsub.go
+++ b/plugins/inputs/cloud_pubsub/cloud_pubsub.go
@@ -217,7 +217,7 @@ func (ps *PubSub) onMessage(ctx context.Context, msg message) error {
 		return fmt.Errorf("unable to decode base64 message: %w", err)
 	}
-	metrics, err := ps.parser.Parse(data, "")
+	metrics, err := ps.parser.Parse(data)
 	if err != nil {
 		msg.Ack()
 		return fmt.Errorf("unable to parse message: %w", err)
 	}
diff --git a/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push.go b/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push.go
index c7a4b936d..d446d04e9 100644
--- a/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push.go
+++ b/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push.go
@@ -196,7 +196,7 @@ func (p *PubSubPush) serveWrite(res http.ResponseWriter, req *http.Request) {
 		return
 	}
 
-	metrics, err := p.Parse(sDec, "")
+	metrics, err := p.Parse(sDec)
 	if err != nil {
 		p.Log.Debug(err.Error())
 		res.WriteHeader(http.StatusBadRequest)
diff --git a/plugins/inputs/directory_monitor/directory_monitor.go b/plugins/inputs/directory_monitor/directory_monitor.go
index 7cfee1c12..2ef24a9fd 100644
--- a/plugins/inputs/directory_monitor/directory_monitor.go
+++ b/plugins/inputs/directory_monitor/directory_monitor.go
@@ -368,7 +368,7 @@ func (monitor *DirectoryMonitor) parseAtOnce(parser telegraf.Parser, reader io.R
 }
 
 func (monitor *DirectoryMonitor) parseMetrics(parser telegraf.Parser, line []byte, fileName string) (metrics []telegraf.Metric, err error) {
-	metrics, err = parser.Parse(line, "")
+	metrics, err = parser.Parse(line)
 	if err != nil {
 		if errors.Is(err, parsers.ErrEOF) {
 			return nil, nil
diff --git a/plugins/inputs/eventhub_consumer/eventhub_consumer.go b/plugins/inputs/eventhub_consumer/eventhub_consumer.go
index 3a49e3367..652def78c 100644
--- a/plugins/inputs/eventhub_consumer/eventhub_consumer.go
+++ b/plugins/inputs/eventhub_consumer/eventhub_consumer.go
@@ -271,7 +271,7 @@ func deepCopyMetrics(in []telegraf.Metric) 
[]telegraf.Metric { // CreateMetrics returns the Metrics from the Event. func (e *EventHub) createMetrics(event *eventhub.Event) ([]telegraf.Metric, error) { - metrics, err := e.parser.Parse(event.Data, "") + metrics, err := e.parser.Parse(event.Data) if err != nil { return nil, err } diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index c755795b8..ca87944c9 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -142,7 +142,7 @@ func (e *Exec) processCommand(acc telegraf.Accumulator, cmd string) error { return fmt.Errorf("exec: %w for command %q: %s", runErr, cmd, string(errBuf)) } - metrics, err := e.parser.Parse(out, "") + metrics, err := e.parser.Parse(out) if err != nil { return err } diff --git a/plugins/inputs/execd/execd.go b/plugins/inputs/execd/execd.go index aabd336bf..91eef2083 100644 --- a/plugins/inputs/execd/execd.go +++ b/plugins/inputs/execd/execd.go @@ -108,7 +108,7 @@ func (e *Execd) cmdReadOut(out io.Reader) { continue } - metrics, err := e.parser.Parse(data, "") + metrics, err := e.parser.Parse(data) if err != nil { e.acc.AddError(fmt.Errorf("parse error: %w", err)) } diff --git a/plugins/inputs/file/file.go b/plugins/inputs/file/file.go index 877395c04..3fba04e2e 100644 --- a/plugins/inputs/file/file.go +++ b/plugins/inputs/file/file.go @@ -109,7 +109,7 @@ func (f *File) readMetric(filename string) ([]telegraf.Metric, error) { if err != nil { return nil, fmt.Errorf("could not instantiate parser: %w", err) } - metrics, err := parser.Parse(fileContents, "") + metrics, err := parser.Parse(fileContents) if err != nil { return metrics, fmt.Errorf("could not parse %q: %w", filename, err) } diff --git a/plugins/inputs/firehose/firehose.go b/plugins/inputs/firehose/firehose.go index cbaa0f8e4..987864e0b 100644 --- a/plugins/inputs/firehose/firehose.go +++ b/plugins/inputs/firehose/firehose.go @@ -201,7 +201,7 @@ func (f *Firehose) handleRequest(req *http.Request) (*message, error) { // Parse the metrics var metrics []telegraf.Metric for _, record := range records { - m, err := f.parser.Parse(record, "") + m, err := f.parser.Parse(record) if err != nil { // respond with bad request status code to inform firehose about the failure msg.responseCode = http.StatusBadRequest diff --git a/plugins/inputs/google_cloud_storage/google_cloud_storage.go b/plugins/inputs/google_cloud_storage/google_cloud_storage.go index 13ff45598..2c45552ae 100644 --- a/plugins/inputs/google_cloud_storage/google_cloud_storage.go +++ b/plugins/inputs/google_cloud_storage/google_cloud_storage.go @@ -147,7 +147,7 @@ func (gcs *GCS) fetchedMetrics(r *storage.Reader) ([]telegraf.Metric, error) { return nil, err } - return gcs.parser.Parse(buf.Bytes(), "") + return gcs.parser.Parse(buf.Bytes()) } func (gcs *GCS) reachedThreshlod(processed int) bool { diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go index b6b771bea..5557d5bd1 100644 --- a/plugins/inputs/http/http.go +++ b/plugins/inputs/http/http.go @@ -201,7 +201,7 @@ func (h *HTTP) gatherURL(acc telegraf.Accumulator, url string) error { if err != nil { return fmt.Errorf("instantiating parser failed: %w", err) } - metrics, err := parser.Parse(b, "") + metrics, err := parser.Parse(b) if err != nil { return fmt.Errorf("parsing metrics failed: %w", err) } diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go index a5de8ee55..e940620fb 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2.go +++ 
b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -278,7 +278,7 @@ func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) return } - metrics, err := h.Parse(bytes, "") + metrics, err := h.Parse(bytes) if err != nil { h.Log.Debugf("Parse error: %s", err.Error()) if err := badRequest(res); err != nil { diff --git a/plugins/inputs/influxdb/influxdb_test.go b/plugins/inputs/influxdb/influxdb_test.go index 5c2136d05..ff324be6b 100644 --- a/plugins/inputs/influxdb/influxdb_test.go +++ b/plugins/inputs/influxdb/influxdb_test.go @@ -215,7 +215,7 @@ func TestCloud1(t *testing.T) { buf, err := os.ReadFile("./testdata/cloud1.influx") require.NoError(t, err) - expected, err := parser.Parse(buf, "") + expected, err := parser.Parse(buf) require.NoError(t, err) // Check the output diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go index 143fe0166..52854d6f6 100644 --- a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go +++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go @@ -317,7 +317,7 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc { } } - metrics, err = parser.Parse(bytes, "") + metrics, err = parser.Parse(bytes) } else { parser := influx.Parser{} err = parser.Init() @@ -332,7 +332,7 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc { parser.SetTimePrecision(precision) } - metrics, err = parser.Parse(bytes, "") + metrics, err = parser.Parse(bytes) } if !errors.Is(err, io.EOF) && err != nil { diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 9ff4ef3eb..ac335eec0 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -525,7 +525,7 @@ func (h *consumerGroupHandler) handle(session sarama.ConsumerGroupSession, msg * len(msg.Value), h.maxMessageLen) } - metrics, err := h.parser.Parse(msg.Value, "") + metrics, err := h.parser.Parse(msg.Value) if err != nil { session.MarkMessage(msg, "") h.release() diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer.go b/plugins/inputs/kinesis_consumer/kinesis_consumer.go index 5510cfc55..87b272e58 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer.go @@ -210,7 +210,7 @@ func (k *KinesisConsumer) onMessage(acc telegraf.TrackingAccumulator, shard stri if err != nil { return err } - metrics, err := k.parser.Parse(data, "") + metrics, err := k.parser.Parse(data) if err != nil { return err } diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index eda2197da..bbf826d81 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -250,7 +250,7 @@ func (m *MQTTConsumer) onMessage(_ mqtt.Client, msg mqtt.Message) { m.payloadSize.Incr(int64(payloadBytes)) m.messagesRecv.Incr(1) - metrics, err := m.parser.Parse(msg.Payload(), "") + metrics, err := m.parser.Parse(msg.Payload()) if err != nil || len(metrics) == 0 { if len(metrics) == 0 { once.Do(func() { diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index 966f130d7..32f5b7e9f 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -64,7 +64,7 @@ type fakeParser struct{} // fakeParser satisfies telegraf.Parser var _ 
telegraf.Parser = &fakeParser{} -func (*fakeParser) Parse([]byte, string) ([]telegraf.Metric, error) { +func (*fakeParser) Parse([]byte) ([]telegraf.Metric, error) { panic("not implemented") } @@ -716,7 +716,7 @@ func TestIntegration(t *testing.T) { } expected := make([]telegraf.Metric, 0, len(metrics)) for _, x := range metrics { - metrics, err := parser.Parse([]byte(x), "") + metrics, err := parser.Parse([]byte(x)) for i := range metrics { metrics[i].AddTag("topic", topic) } @@ -949,7 +949,7 @@ func TestStartupErrorBehaviorRetryIntegration(t *testing.T) { } expected := make([]telegraf.Metric, 0, len(metrics)) for _, x := range metrics { - metrics, err := parser.Parse([]byte(x), "") + metrics, err := parser.Parse([]byte(x)) for i := range metrics { metrics[i].AddTag("topic", topic) } diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index 2226926d4..43531cc53 100644 --- a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -228,7 +228,7 @@ func (n *NatsConsumer) receiver(ctx context.Context) { <-sem <-sem case msg := <-n.in: - metrics, err := n.parser.Parse(msg.Data, "") + metrics, err := n.parser.Parse(msg.Data) if err != nil { n.Log.Errorf("Subject: %s, error: %s", msg.Subject, err.Error()) <-sem diff --git a/plugins/inputs/nsq_consumer/nsq_consumer.go b/plugins/inputs/nsq_consumer/nsq_consumer.go index 8feca6c08..1516e4f2a 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer.go @@ -89,7 +89,7 @@ func (n *NSQConsumer) Start(ac telegraf.Accumulator) error { } n.consumer.SetLogger(&logger{log: n.Log}, nsq.LogLevelInfo) n.consumer.AddHandler(nsq.HandlerFunc(func(message *nsq.Message) error { - metrics, err := n.parser.Parse(message.Body, "") + metrics, err := n.parser.Parse(message.Body) if err != nil { acc.AddError(err) // Remove the message from the queue diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index d0e15820b..560a7fae4 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -561,7 +561,7 @@ func (p *Prometheus) gatherURL(u urlAndAddress, acc telegraf.Accumulator) (map[s Log: p.Log, } } - metrics, err := metricParser.Parse(body, "") + metrics, err := metricParser.Parse(body) if err != nil { return requestFields, tags, fmt.Errorf("error reading metrics for %q: %w", u.url, err) } diff --git a/plugins/inputs/socket_listener/socket_listener.go b/plugins/inputs/socket_listener/socket_listener.go index b5955d21b..17d753147 100644 --- a/plugins/inputs/socket_listener/socket_listener.go +++ b/plugins/inputs/socket_listener/socket_listener.go @@ -51,7 +51,7 @@ func (sl *SocketListener) Init() error { func (sl *SocketListener) Start(acc telegraf.Accumulator) error { // Create the callbacks for parsing the data and recording issues onData := func(_ net.Addr, data []byte, receiveTime time.Time) { - metrics, err := sl.parser.Parse(data, "") + metrics, err := sl.parser.Parse(data) if err != nil { acc.AddError(err) diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index 9fa3a4d79..46b268f78 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -313,7 +313,7 @@ func (t *Tail) tailNewFiles() error { } func parseLine(parser telegraf.Parser, line string) ([]telegraf.Metric, error) { - m, err := parser.Parse([]byte(line), "") + m, err := parser.Parse([]byte(line)) if err != nil { if errors.Is(err, 
parsers.ErrEOF) { return nil, nil diff --git a/plugins/outputs/cloud_pubsub/cloud_pubsub_test.go b/plugins/outputs/cloud_pubsub/cloud_pubsub_test.go index c5fb2f898..9b0f7519d 100644 --- a/plugins/outputs/cloud_pubsub/cloud_pubsub_test.go +++ b/plugins/outputs/cloud_pubsub/cloud_pubsub_test.go @@ -222,7 +222,7 @@ func verifyMetricPublished(t *testing.T, m telegraf.Metric, published map[string data = v } - parsed, err := p.Parse(data, "") + parsed, err := p.Parse(data) if err != nil { t.Fatalf("could not parse influxdb metric from published message: %s", string(data)) } diff --git a/plugins/outputs/cloud_pubsub/topic_stubbed.go b/plugins/outputs/cloud_pubsub/topic_stubbed.go index 04da92d75..c47c0624f 100644 --- a/plugins/outputs/cloud_pubsub/topic_stubbed.go +++ b/plugins/outputs/cloud_pubsub/topic_stubbed.go @@ -207,7 +207,7 @@ func (t *stubTopic) parseIDs(msg *pubsub.Message) []string { } d = strData } - metrics, err := p.Parse(d, "") + metrics, err := p.Parse(d) if err != nil { t.Fatalf("unexpected parsing error: %v", err) } diff --git a/plugins/outputs/mqtt/mqtt_test.go b/plugins/outputs/mqtt/mqtt_test.go index 3753d7b19..9e12e77fb 100644 --- a/plugins/outputs/mqtt/mqtt_test.go +++ b/plugins/outputs/mqtt/mqtt_test.go @@ -802,7 +802,7 @@ func TestIntegrationMQTTLayoutHomieV4(t *testing.T) { func createMetricMessageHandler(acc telegraf.Accumulator, parser telegraf.Parser) paho.MessageHandler { return func(_ paho.Client, msg paho.Message) { - metrics, err := parser.Parse(msg.Payload(), "") + metrics, err := parser.Parse(msg.Payload()) if err != nil { acc.AddError(err) return diff --git a/plugins/parsers/avro/parser.go b/plugins/parsers/avro/parser.go index 6f7c547c0..6735eeb34 100644 --- a/plugins/parsers/avro/parser.go +++ b/plugins/parsers/avro/parser.go @@ -82,7 +82,7 @@ func (p *Parser) Init() error { return nil } -func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { +func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { var schema string var codec *goavro.Codec var err error @@ -145,7 +145,7 @@ func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { } func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { - metrics, err := p.Parse([]byte(line), "") + metrics, err := p.Parse([]byte(line)) if err != nil { return nil, err } diff --git a/plugins/parsers/avro/parser_test.go b/plugins/parsers/avro/parser_test.go index 7eafb238f..43c44ac8c 100644 --- a/plugins/parsers/avro/parser_test.go +++ b/plugins/parsers/avro/parser_test.go @@ -116,7 +116,7 @@ func BenchmarkParsing(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse(benchmarkData, "") + plugin.Parse(benchmarkData) } } @@ -150,7 +150,7 @@ func TestBenchmarkDataBinary(t *testing.T) { require.NoError(t, err) // Do the actual testing - actual, err := plugin.Parse(benchmarkData, "") + actual, err := plugin.Parse(benchmarkData) require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) } @@ -178,6 +178,6 @@ func BenchmarkParsingBinary(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse(benchmarkData, "") + plugin.Parse(benchmarkData) } } diff --git a/plugins/parsers/binary/parser.go b/plugins/parsers/binary/parser.go index 98c3c4776..9e417f63d 100644 --- a/plugins/parsers/binary/parser.go +++ 
b/plugins/parsers/binary/parser.go @@ -71,7 +71,7 @@ func (p *Parser) Init() error { return nil } -func (p *Parser) Parse(data []byte, extra string) ([]telegraf.Metric, error) { +func (p *Parser) Parse(data []byte) ([]telegraf.Metric, error) { t := time.Now() // If the data is encoded in HEX, we need to decode it first @@ -122,7 +122,7 @@ func (p *Parser) Parse(data []byte, extra string) ([]telegraf.Metric, error) { } func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { - metrics, err := p.Parse([]byte(line), "") + metrics, err := p.Parse([]byte(line)) if err != nil { return nil, err } diff --git a/plugins/parsers/binary/parser_test.go b/plugins/parsers/binary/parser_test.go index a59044842..2ccd4c145 100644 --- a/plugins/parsers/binary/parser_test.go +++ b/plugins/parsers/binary/parser_test.go @@ -196,7 +196,7 @@ func TestFilterMatchInvalid(t *testing.T) { metricName: "binary", } require.NoError(t, parser.Init()) - _, err := parser.Parse(testdata, "") + _, err := parser.Parse(testdata) require.EqualError(t, err, tt.expected) }) } @@ -221,7 +221,7 @@ func TestFilterNoMatch(t *testing.T) { data, err := generateBinary(testdata, internal.HostEndianness) require.NoError(t, err) - _, err = parser.Parse(data, "") + _, err = parser.Parse(data) require.EqualError(t, err, "no matching configuration") }) @@ -242,7 +242,7 @@ func TestFilterNoMatch(t *testing.T) { data, err := generateBinary(testdata, internal.HostEndianness) require.NoError(t, err) - metrics, err := parser.Parse(data, "") + metrics, err := parser.Parse(data) require.NoError(t, err) require.Empty(t, metrics) }) @@ -320,7 +320,7 @@ func TestFilterNone(t *testing.T) { data, err := generateBinary(tt.data, order) require.NoError(t, err) - metrics, err := parser.Parse(data, "") + metrics, err := parser.Parse(data) require.NoError(t, err) require.NotEmpty(t, metrics) }) @@ -392,7 +392,7 @@ func TestFilterLength(t *testing.T) { data, err := generateBinary(tt.data, internal.HostEndianness) require.NoError(t, err) - metrics, err := parser.Parse(data, "") + metrics, err := parser.Parse(data) require.NoError(t, err) if tt.expected { require.NotEmpty(t, metrics) @@ -558,7 +558,7 @@ func TestFilterContent(t *testing.T) { var metrics []telegraf.Metric for _, data := range testdata { - m, err := parser.Parse(data, "") + m, err := parser.Parse(data) require.NoError(t, err) metrics = append(metrics, m...) } @@ -865,7 +865,7 @@ func TestParseInvalid(t *testing.T) { data, err := generateBinary(tt.data, order) require.NoError(t, err) - _, err = parser.Parse(data, "") + _, err = parser.Parse(data) require.EqualError(t, err, tt.expected) }) } @@ -1390,7 +1390,7 @@ func TestParse(t *testing.T) { data, err := generateBinary(tt.data, order) require.NoError(t, err) - metrics, err := parser.Parse(data, "") + metrics, err := parser.Parse(data) require.NoError(t, err) var options []cmp.Option @@ -1479,7 +1479,7 @@ func TestHexEncoding(t *testing.T) { require.NoError(t, err) encoded := hex.EncodeToString(data) - metrics, err := parser.Parse([]byte(encoded), "") + metrics, err := parser.Parse([]byte(encoded)) require.NoError(t, err) require.NotEmpty(t, metrics) } @@ -1564,7 +1564,7 @@ func TestBenchmarkData(t *testing.T) { actual := make([]telegraf.Metric, 0, 2) for _, buf := range benchmarkData { - m, err := plugin.Parse(buf, "") + m, err := plugin.Parse(buf) require.NoError(t, err) actual = append(actual, m...) 
} @@ -1609,6 +1609,6 @@ func BenchmarkParsing(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse(benchmarkData[n%2], "") + plugin.Parse(benchmarkData[n%2]) } } diff --git a/plugins/parsers/collectd/parser.go b/plugins/parsers/collectd/parser.go index 381cb8045..8a617a33d 100644 --- a/plugins/parsers/collectd/parser.go +++ b/plugins/parsers/collectd/parser.go @@ -65,7 +65,7 @@ func (p *Parser) Init() error { return nil } -func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { +func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { valueLists, err := network.Parse(buf, p.popts) if err != nil { return nil, fmt.Errorf("collectd parser error: %w", err) @@ -91,7 +91,7 @@ func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { } func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { - metrics, err := p.Parse([]byte(line), "") + metrics, err := p.Parse([]byte(line)) if err != nil { return nil, err } diff --git a/plugins/parsers/collectd/parser_test.go b/plugins/parsers/collectd/parser_test.go index 3d71b24e8..362cd10bf 100644 --- a/plugins/parsers/collectd/parser_test.go +++ b/plugins/parsers/collectd/parser_test.go @@ -131,7 +131,7 @@ func TestParse(t *testing.T) { parser := &Parser{} require.NoError(t, parser.Init()) - metrics, err := parser.Parse(bytes, "") + metrics, err := parser.Parse(bytes) require.NoError(t, err) assertEqualMetrics(t, tc.expected, metrics) @@ -146,7 +146,7 @@ func TestParseMultiValueSplit(t *testing.T) { parser := &Parser{ParseMultiValue: "split"} require.NoError(t, parser.Init()) - metrics, err := parser.Parse(bytes, "") + metrics, err := parser.Parse(bytes) require.NoError(t, err) require.Len(t, metrics, 2) @@ -160,7 +160,7 @@ func TestParseMultiValueJoin(t *testing.T) { parser := &Parser{ParseMultiValue: "join"} require.NoError(t, parser.Init()) - metrics, err := parser.Parse(bytes, "") + metrics, err := parser.Parse(bytes) require.NoError(t, err) require.Len(t, metrics, 1) @@ -178,7 +178,7 @@ func TestParse_DefaultTags(t *testing.T) { "foo": "bar", }) require.NoError(t, err) - metrics, err := parser.Parse(bytes, "") + metrics, err := parser.Parse(bytes) require.NoError(t, err) require.Equal(t, "bar", metrics[0].Tags()["foo"]) @@ -198,7 +198,7 @@ func TestParse_SignSecurityLevel(t *testing.T) { bytes, err := buf.Bytes() require.NoError(t, err) - metrics, err := parser.Parse(bytes, "") + metrics, err := parser.Parse(bytes) require.NoError(t, err) assertEqualMetrics(t, singleMetric.expected, metrics) @@ -209,7 +209,7 @@ func TestParse_SignSecurityLevel(t *testing.T) { bytes, err = buf.Bytes() require.NoError(t, err) - metrics, err = parser.Parse(bytes, "") + metrics, err = parser.Parse(bytes) require.NoError(t, err) assertEqualMetrics(t, singleMetric.expected, metrics) @@ -219,7 +219,7 @@ func TestParse_SignSecurityLevel(t *testing.T) { bytes, err = buf.Bytes() require.NoError(t, err) - metrics, err = parser.Parse(bytes, "") + metrics, err = parser.Parse(bytes) require.NoError(t, err) require.Empty(t, metrics) @@ -230,7 +230,7 @@ func TestParse_SignSecurityLevel(t *testing.T) { bytes, err = buf.Bytes() require.NoError(t, err) - _, err = parser.Parse(bytes, "") + _, err = parser.Parse(bytes) require.Error(t, err) } @@ -248,7 +248,7 @@ func TestParse_EncryptSecurityLevel(t *testing.T) { bytes, err := buf.Bytes() require.NoError(t, err) - metrics, err := parser.Parse(bytes, "") + metrics, err := 
parser.Parse(bytes) require.NoError(t, err) require.Empty(t, metrics) @@ -259,7 +259,7 @@ func TestParse_EncryptSecurityLevel(t *testing.T) { bytes, err = buf.Bytes() require.NoError(t, err) - metrics, err = parser.Parse(bytes, "") + metrics, err = parser.Parse(bytes) require.NoError(t, err) assertEqualMetrics(t, singleMetric.expected, metrics) @@ -269,7 +269,7 @@ func TestParse_EncryptSecurityLevel(t *testing.T) { bytes, err = buf.Bytes() require.NoError(t, err) - metrics, err = parser.Parse(bytes, "") + metrics, err = parser.Parse(bytes) require.NoError(t, err) require.Empty(t, metrics) @@ -280,7 +280,7 @@ func TestParse_EncryptSecurityLevel(t *testing.T) { bytes, err = buf.Bytes() require.NoError(t, err) - _, err = parser.Parse(bytes, "") + _, err = parser.Parse(bytes) require.Error(t, err) } @@ -387,7 +387,7 @@ func TestBenchmarkData(t *testing.T) { parser := &Parser{} require.NoError(t, parser.Init()) - actual, err := parser.Parse(bytes, "") + actual, err := parser.Parse(bytes) require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) @@ -405,6 +405,6 @@ func BenchmarkParsing(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - parser.Parse(bytes, "") + parser.Parse(bytes) } } diff --git a/plugins/parsers/csv/parser.go b/plugins/parsers/csv/parser.go index ba1d9ff1c..d44c38ccc 100644 --- a/plugins/parsers/csv/parser.go +++ b/plugins/parsers/csv/parser.go @@ -217,7 +217,7 @@ func validDelim(r rune) bool { return r != 0 && r != '"' && r != '\r' && r != '\n' && utf8.ValidRune(r) && r != utf8.RuneError } -func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { +func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { // Reset the parser according to the specified mode if p.ResetMode == "always" { p.Reset() diff --git a/plugins/parsers/csv/parser_test.go b/plugins/parsers/csv/parser_test.go index 46adbcfea..122597b92 100644 --- a/plugins/parsers/csv/parser_test.go +++ b/plugins/parsers/csv/parser_test.go @@ -43,7 +43,7 @@ func TestHeaderConcatenationCSV(t *testing.T) { 1,2,3 3.4,70,test_name` - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) require.Equal(t, "test_name", metrics[0].Name()) } @@ -63,7 +63,7 @@ func TestHeaderOverride(t *testing.T) { "first": 3.4, "second": int64(70), } - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) require.Equal(t, "test_name", metrics[0].Name()) require.Equal(t, expectedFields, metrics[0].Fields()) @@ -78,7 +78,7 @@ func TestHeaderOverride(t *testing.T) { } err = p.Init() require.NoError(t, err) - metrics, err = p.Parse([]byte(testCSVRows[0]), "") + metrics, err = p.Parse([]byte(testCSVRows[0])) require.NoError(t, err) require.Empty(t, metrics) m, err := p.ParseLine(testCSVRows[1]) @@ -102,7 +102,7 @@ func TestTimestamp(t *testing.T) { testCSV := `line1,line2,line3 23/05/09 04:05:06 PM,70,test_name 07/11/09 04:05:06 PM,80,test_name2` - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) require.Equal(t, int64(1243094706000000000), metrics[0].Time().UnixNano()) @@ -124,7 +124,7 @@ func TestTimestampYYYYMMDDHHmm(t *testing.T) { testCSV := `line1,line2,line3 200905231605,70,test_name 200907111605,80,test_name2` - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := 
p.Parse([]byte(testCSV)) require.NoError(t, err) require.Equal(t, int64(1243094700000000000), metrics[0].Time().UnixNano()) @@ -143,7 +143,7 @@ func TestTimestampError(t *testing.T) { testCSV := `line1,line2,line3 23/05/09 04:05:06 PM,70,test_name 07/11/09 04:05:06 PM,80,test_name2` - _, err = p.Parse([]byte(testCSV), "") + _, err = p.Parse([]byte(testCSV)) require.Equal(t, errors.New("timestamp format must be specified"), err) } @@ -161,7 +161,7 @@ func TestTimestampUnixFormat(t *testing.T) { testCSV := `line1,line2,line3 1243094706,70,test_name 1257609906,80,test_name2` - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) require.Equal(t, int64(1243094706000000000), metrics[0].Time().UnixNano()) require.Equal(t, int64(1257609906000000000), metrics[1].Time().UnixNano()) @@ -181,7 +181,7 @@ func TestTimestampUnixMSFormat(t *testing.T) { testCSV := `line1,line2,line3 1243094706123,70,test_name 1257609906123,80,test_name2` - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) require.Equal(t, int64(1243094706123000000), metrics[0].Time().UnixNano()) require.Equal(t, int64(1257609906123000000), metrics[1].Time().UnixNano()) @@ -199,7 +199,7 @@ func TestQuotedCharacter(t *testing.T) { testCSV := `line1,line2,line3 "3,4",70,test_name` - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) require.Equal(t, "3,4", metrics[0].Fields()["first"]) } @@ -217,7 +217,7 @@ func TestDelimiter(t *testing.T) { testCSV := `line1%line2%line3 3,4%70%test_name` - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) require.Equal(t, "3,4", metrics[0].Fields()["first"]) } @@ -233,7 +233,7 @@ func TestNullDelimiter(t *testing.T) { require.NoError(t, err) testCSV := strings.Join([]string{"3.4", "70", "test_name"}, "\u0000") - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) require.InDelta(t, float64(3.4), metrics[0].Fields()["first"], testutil.DefaultDelta) require.Equal(t, int64(70), metrics[0].Fields()["second"]) @@ -260,7 +260,7 @@ func TestValueConversion(t *testing.T) { "fourth": "hello", } - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) expectedMetric := metric.New("test_value", expectedTags, expectedFields, time.Unix(0, 0)) @@ -272,7 +272,7 @@ func TestValueConversion(t *testing.T) { // Test explicit type conversion. 
p.ColumnTypes = []string{"float", "int", "bool", "string"} - metrics, err = p.Parse([]byte(testCSV), "") + metrics, err = p.Parse([]byte(testCSV)) require.NoError(t, err) returnedMetric = metric.New(metrics[0].Name(), metrics[0].Tags(), metrics[0].Fields(), time.Unix(0, 0)) @@ -301,7 +301,7 @@ func TestSkipComment(t *testing.T) { "fourth": "name_this", } - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) require.Equal(t, expectedFields, metrics[0].Fields()) } @@ -325,7 +325,7 @@ func TestTrimSpace(t *testing.T) { "fourth": "hello", } - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) require.Equal(t, expectedFields, metrics[0].Fields()) @@ -340,7 +340,7 @@ func TestTrimSpace(t *testing.T) { " 1 , 2 ,3\n" + " test space , 80 ,test_name" - metrics, err = p.Parse([]byte(testCSV), "") + metrics, err = p.Parse([]byte(testCSV)) require.NoError(t, err) require.Equal(t, map[string]interface{}{"col1": "test space", "col2": int64(80), "col3": "test_name"}, metrics[0].Fields()) } @@ -367,7 +367,7 @@ abcdefgh 0 2 false "fourth": true, } - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) require.Equal(t, expectedFields, metrics[1].Fields()) } @@ -393,7 +393,7 @@ hello,80,test_name2` expectedTags := map[string]string{ "line1": "hello", } - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) require.Equal(t, "test_name2", metrics[0].Name()) require.Equal(t, expectedFields, metrics[0].Fields()) @@ -410,7 +410,7 @@ hello,80,test_name2` require.NoError(t, err) testCSVRows := []string{"garbage nonsense\r\n", "line1,line2,line3\r\n", "hello,80,test_name2\r\n"} - metrics, err = p.Parse([]byte(testCSVRows[0]), "") + metrics, err = p.Parse([]byte(testCSVRows[0])) require.ErrorIs(t, err, parsers.ErrEOF) require.Nil(t, metrics) m, err := p.ParseLine(testCSVRows[1]) @@ -437,7 +437,7 @@ func TestSkipColumns(t *testing.T) { "line1": int64(80), "line2": "test_name", } - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) require.Equal(t, expectedFields, metrics[0].Fields()) } @@ -456,7 +456,7 @@ func TestSkipColumnsWithHeader(t *testing.T) { trash,80,test_name` // we should expect an error if we try to get col1 - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) require.Equal(t, map[string]interface{}{"col2": int64(80), "col3": "test_name"}, metrics[0].Fields()) } @@ -471,7 +471,7 @@ func TestMultiHeader(t *testing.T) { 1,2 80,test_name` - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) require.Equal(t, map[string]interface{}{"col1": int64(80), "col2": "test_name"}, metrics[0].Fields()) @@ -484,7 +484,7 @@ func TestMultiHeader(t *testing.T) { err = p.Init() require.NoError(t, err) - metrics, err = p.Parse([]byte(testCSVRows[0]), "") + metrics, err = p.Parse([]byte(testCSVRows[0])) require.ErrorIs(t, err, parsers.ErrEOF) require.Nil(t, metrics) m, err := p.ParseLine(testCSVRows[1]) @@ -507,7 +507,7 @@ func TestParseStream(t *testing.T) { csvHeader := "a,b,c" csvBody := "1,2,3" - metrics, err := p.Parse([]byte(csvHeader), "") + metrics, err := p.Parse([]byte(csvHeader)) require.NoError(t, err) require.Empty(t, metrics) m, err := p.ParseLine(csvBody) @@ -537,7 +537,7 @@ func 
TestParseLineMultiMetricErrorMessage(t *testing.T) { csvOneRow := "1,2,3" csvTwoRows := "4,5,6\n7,8,9" - metrics, err := p.Parse([]byte(csvHeader), "") + metrics, err := p.Parse([]byte(csvHeader)) require.NoError(t, err) require.Empty(t, metrics) m, err := p.ParseLine(csvOneRow) @@ -556,7 +556,7 @@ func TestParseLineMultiMetricErrorMessage(t *testing.T) { m, err = p.ParseLine(csvTwoRows) require.Errorf(t, err, "expected 1 metric found 2") require.Nil(t, m) - metrics, err = p.Parse([]byte(csvTwoRows), "") + metrics, err = p.Parse([]byte(csvTwoRows)) require.NoError(t, err) require.Len(t, metrics, 2) } @@ -585,7 +585,7 @@ func TestTimestampUnixFloatPrecision(t *testing.T) { ), } - metrics, err := p.Parse([]byte(data), "") + metrics, err := p.Parse([]byte(data)) require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, metrics) } @@ -617,7 +617,7 @@ func TestSkipMeasurementColumn(t *testing.T) { ), } - metrics, err := p.Parse([]byte(data), "") + metrics, err := p.Parse([]byte(data)) require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, metrics) } @@ -649,7 +649,7 @@ func TestSkipTimestampColumn(t *testing.T) { ), } - metrics, err := p.Parse([]byte(data), "") + metrics, err := p.Parse([]byte(data)) require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, metrics) } @@ -670,7 +670,7 @@ func TestTimestampTimezone(t *testing.T) { testCSV := `line1,line2,line3 23/05/09 11:05:06 PM,70,test_name 07/11/09 11:05:06 PM,80,test_name2` - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) require.Equal(t, int64(1243094706000000000), metrics[0].Time().UnixNano()) @@ -689,7 +689,7 @@ func TestEmptyMeasurementName(t *testing.T) { testCSV := `,b 1,2` - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) expected := []telegraf.Metric{ @@ -716,7 +716,7 @@ func TestNumericMeasurementName(t *testing.T) { testCSV := `a,b 1,2` - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) expected := []telegraf.Metric{ @@ -742,7 +742,7 @@ func TestStaticMeasurementName(t *testing.T) { testCSV := `a,b 1,2` - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) expected := []telegraf.Metric{ @@ -770,7 +770,7 @@ func TestSkipEmptyStringValue(t *testing.T) { testCSV := `a,b 1,""` - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) expected := []telegraf.Metric{ @@ -797,7 +797,7 @@ func TestSkipSpecifiedStringValue(t *testing.T) { testCSV := `a,b 1,MM` - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) expected := []telegraf.Metric{ @@ -839,7 +839,7 @@ corrupted_line "b": int64(4), } - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) require.Equal(t, expectedFields0, metrics[0].Fields()) require.Equal(t, expectedFields1, metrics[1].Fields()) @@ -973,7 +973,7 @@ timestamp,type,name,status } // Set default Tags p.SetDefaultTags(map[string]string{"test": "tag"}) - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) for i, m := range metrics { require.Equal(t, expectedFields[i], m.Fields()) @@ -1067,7 +1067,7 @@ fourth=plain require.NoError(t, p.Init()) p.SetDefaultTags(defaultTags) - metrics, err := p.Parse(csv, "") + 
metrics, err := p.Parse(csv) require.NoError(t, err) require.Len(t, metrics, 1) require.EqualValues(t, tt.expectedTags, metrics[0].Tags()) @@ -1143,7 +1143,7 @@ timestamp,type,name,status p.SetDefaultTags(map[string]string{"test": "tag"}) // Do the parsing the first time - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, metrics) @@ -1165,12 +1165,12 @@ timestamp,type,name,status time.Date(2021, 12, 1, 19, 1, 0, 0, time.UTC), ), } - metrics, err = p.Parse([]byte(additionalCSV), "") + metrics, err = p.Parse([]byte(additionalCSV)) require.NoError(t, err) testutil.RequireMetricsEqual(t, additionalExpected, metrics) // This should fail when not resetting but reading again due to the header etc - _, err = p.Parse([]byte(testCSV), "") + _, err = p.Parse([]byte(testCSV)) require.Error( t, err, @@ -1346,13 +1346,13 @@ timestamp,type,name,status p.SetDefaultTags(map[string]string{"test": "tag"}) // Do the parsing the first time - metrics, err := p.Parse([]byte(testCSV), "") + metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, metrics) // Parsing another data line should fail as it is interpreted as header additionalCSV := "2021-12-01T19:01:00+00:00,Reader,R009,5\r\n" - metrics, err = p.Parse([]byte(additionalCSV), "") + metrics, err = p.Parse([]byte(additionalCSV)) require.ErrorIs(t, err, parsers.ErrEOF) require.Nil(t, metrics) @@ -1400,7 +1400,7 @@ timestamp,category,id,flag } // This should work as the parser is reset - metrics, err = p.Parse([]byte(testCSV), "") + metrics, err = p.Parse([]byte(testCSV)) require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, metrics) } @@ -1556,7 +1556,7 @@ func TestBenchmarkData(t *testing.T) { ), } - actual, err := plugin.Parse([]byte(benchmarkData), "") + actual, err := plugin.Parse([]byte(benchmarkData)) require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) } @@ -1573,6 +1573,6 @@ func BenchmarkParsing(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse([]byte(benchmarkData), "") + plugin.Parse([]byte(benchmarkData)) } } diff --git a/plugins/parsers/dropwizard/parser.go b/plugins/parsers/dropwizard/parser.go index 0417dc1b0..03738abb0 100644 --- a/plugins/parsers/dropwizard/parser.go +++ b/plugins/parsers/dropwizard/parser.go @@ -36,7 +36,7 @@ type Parser struct { } // Parse parses the input bytes to an array of metrics -func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { +func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { metrics := make([]telegraf.Metric, 0) metricTime, err := p.parseTime(buf) @@ -193,7 +193,7 @@ func (p *Parser) readDWMetrics(metricType string, dwms interface{}, metrics []te } } - parsed, err := p.seriesParser.Parse([]byte(measurementName), "") + parsed, err := p.seriesParser.Parse([]byte(measurementName)) var m telegraf.Metric if err != nil || len(parsed) != 1 { m = metric.New(measurementName, make(map[string]string), make(map[string]interface{}), tm) diff --git a/plugins/parsers/dropwizard/parser_test.go b/plugins/parsers/dropwizard/parser_test.go index 7e244fbb1..d2af8266c 100644 --- a/plugins/parsers/dropwizard/parser_test.go +++ b/plugins/parsers/dropwizard/parser_test.go @@ -28,7 +28,7 @@ func TestParseValidEmptyJSON(t *testing.T) { require.NoError(t, parser.Init()) // Most basic 
vanilla test - metrics, err := parser.Parse([]byte(validEmptyJSON), "") + metrics, err := parser.Parse([]byte(validEmptyJSON)) require.NoError(t, err) require.Empty(t, metrics) } @@ -53,7 +53,7 @@ func TestParseValidCounterJSON(t *testing.T) { parser := &Parser{} require.NoError(t, parser.Init()) - metrics, err := parser.Parse([]byte(validCounterJSON), "") + metrics, err := parser.Parse([]byte(validCounterJSON)) require.NoError(t, err) require.Len(t, metrics, 1) require.Equal(t, "measurement", metrics[0].Name()) @@ -97,7 +97,7 @@ func TestParseValidEmbeddedCounterJSON(t *testing.T) { } require.NoError(t, parser.Init()) - metrics, err := parser.Parse([]byte(validEmbeddedCounterJSON), "") + metrics, err := parser.Parse([]byte(validEmbeddedCounterJSON)) require.NoError(t, err) require.Len(t, metrics, 1) require.Equal(t, "measurement", metrics[0].Name()) @@ -119,7 +119,7 @@ func TestParseValidEmbeddedCounterJSON(t *testing.T) { TimePath: "time", } require.NoError(t, parser2.Init()) - metrics2, err2 := parser2.Parse([]byte(validEmbeddedCounterJSON), "") + metrics2, err2 := parser2.Parse([]byte(validEmbeddedCounterJSON)) require.NoError(t, err2) require.Equal(t, map[string]string{"metric_type": "counter", "tag1": "green"}, metrics2[0].Tags()) } @@ -149,7 +149,7 @@ func TestParseValidMeterJSON1(t *testing.T) { parser := &Parser{} require.NoError(t, parser.Init()) - metrics, err := parser.Parse([]byte(validMeterJSON1), "") + metrics, err := parser.Parse([]byte(validMeterJSON1)) require.NoError(t, err) require.Len(t, metrics, 1) require.Equal(t, "measurement1", metrics[0].Name()) @@ -190,7 +190,7 @@ func TestParseValidMeterJSON2(t *testing.T) { parser := &Parser{} require.NoError(t, parser.Init()) - metrics, err := parser.Parse([]byte(validMeterJSON2), "") + metrics, err := parser.Parse([]byte(validMeterJSON2)) require.NoError(t, err) require.Len(t, metrics, 1) require.Equal(t, "measurement2", metrics[0].Name()) @@ -225,7 +225,7 @@ func TestParseValidGaugeJSON(t *testing.T) { parser := &Parser{} require.NoError(t, parser.Init()) - metrics, err := parser.Parse([]byte(validGaugeJSON), "") + metrics, err := parser.Parse([]byte(validGaugeJSON)) require.NoError(t, err) require.Len(t, metrics, 1) require.Equal(t, "measurement", metrics[0].Name()) @@ -265,7 +265,7 @@ func TestParseValidHistogramJSON(t *testing.T) { parser := &Parser{} require.NoError(t, parser.Init()) - metrics, err := parser.Parse([]byte(validHistogramJSON), "") + metrics, err := parser.Parse([]byte(validHistogramJSON)) require.NoError(t, err) require.Len(t, metrics, 1) require.Equal(t, "measurement", metrics[0].Name()) @@ -321,7 +321,7 @@ func TestParseValidTimerJSON(t *testing.T) { parser := &Parser{} require.NoError(t, parser.Init()) - metrics, err := parser.Parse([]byte(validTimerJSON), "") + metrics, err := parser.Parse([]byte(validTimerJSON)) require.NoError(t, err) require.Len(t, metrics, 1) require.Equal(t, "measurement", metrics[0].Name()) @@ -373,7 +373,7 @@ func TestParseValidAllJSON(t *testing.T) { parser := &Parser{} require.NoError(t, parser.Init()) - metrics, err := parser.Parse([]byte(validAllJSON), "") + metrics, err := parser.Parse([]byte(validAllJSON)) require.NoError(t, err) require.Len(t, metrics, 5) } @@ -387,7 +387,7 @@ func TestTagParsingProblems(t *testing.T) { } require.NoError(t, parser1.Init()) - metrics1, err1 := parser1.Parse([]byte(validEmbeddedCounterJSON), "") + metrics1, err1 := parser1.Parse([]byte(validEmbeddedCounterJSON)) require.NoError(t, err1) require.Len(t, metrics1, 1) require.Equal(t, 
map[string]string{"metric_type": "counter"}, metrics1[0].Tags()) @@ -400,7 +400,7 @@ func TestTagParsingProblems(t *testing.T) { Log: testutil.Logger{}, } require.NoError(t, parser2.Init()) - metrics2, err2 := parser2.Parse([]byte(validEmbeddedCounterJSON), "") + metrics2, err2 := parser2.Parse([]byte(validEmbeddedCounterJSON)) require.NoError(t, err2) require.Len(t, metrics2, 1) require.Equal(t, map[string]string{"metric_type": "counter", "tag1": "green"}, metrics2[0].Tags()) @@ -453,7 +453,7 @@ func TestParseSampleTemplateJSON(t *testing.T) { } require.NoError(t, parser.Init()) - metrics, err := parser.Parse([]byte(sampleTemplateJSON), "") + metrics, err := parser.Parse([]byte(sampleTemplateJSON)) require.NoError(t, err) require.Len(t, metrics, 11) @@ -579,7 +579,7 @@ func TestDropWizard(t *testing.T) { t.Run(tt.name, func(t *testing.T) { parser := &Parser{} require.NoError(t, parser.Init()) - metrics, err := parser.Parse(tt.input, "") + metrics, err := parser.Parse(tt.input) if tt.expectError { require.Error(t, err) } else { @@ -636,7 +636,7 @@ func TestBenchmarkData(t *testing.T) { ), } - actual, err := plugin.Parse([]byte(benchmarkData), "") + actual, err := plugin.Parse([]byte(benchmarkData)) require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) } @@ -647,6 +647,6 @@ func BenchmarkParsing(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse([]byte(benchmarkData), "") + plugin.Parse([]byte(benchmarkData)) } } diff --git a/plugins/parsers/form_urlencoded/parser.go b/plugins/parsers/form_urlencoded/parser.go index e74b37fb0..8e53ec02b 100644 --- a/plugins/parsers/form_urlencoded/parser.go +++ b/plugins/parsers/form_urlencoded/parser.go @@ -22,7 +22,7 @@ type Parser struct { } // Parse converts a slice of bytes in "application/x-www-form-urlencoded" format into metrics -func (p Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { +func (p Parser) Parse(buf []byte) ([]telegraf.Metric, error) { buf = bytes.TrimSpace(buf) if len(buf) == 0 { return make([]telegraf.Metric, 0), nil @@ -47,7 +47,7 @@ func (p Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { // ParseLine delegates a single line of text to the Parse function func (p Parser) ParseLine(line string) (telegraf.Metric, error) { - metrics, err := p.Parse([]byte(line), "") + metrics, err := p.Parse([]byte(line)) if err != nil { return nil, err } diff --git a/plugins/parsers/form_urlencoded/parser_test.go b/plugins/parsers/form_urlencoded/parser_test.go index 29fb6c0f5..05a975664 100644 --- a/plugins/parsers/form_urlencoded/parser_test.go +++ b/plugins/parsers/form_urlencoded/parser_test.go @@ -24,7 +24,7 @@ func TestParseValidFormData(t *testing.T) { MetricName: "form_urlencoded_test", } - metrics, err := parser.Parse([]byte(validFormData), "") + metrics, err := parser.Parse([]byte(validFormData)) require.NoError(t, err) require.Len(t, metrics, 1) require.Equal(t, "form_urlencoded_test", metrics[0].Name()) @@ -56,7 +56,7 @@ func TestParseValidFormDataWithTags(t *testing.T) { TagKeys: []string{"tag1", "tag2"}, } - metrics, err := parser.Parse([]byte(validFormData), "") + metrics, err := parser.Parse([]byte(validFormData)) require.NoError(t, err) require.Len(t, metrics, 1) require.Equal(t, "form_urlencoded_test", metrics[0].Name()) @@ -77,7 +77,7 @@ func TestParseValidFormDataDefaultTags(t *testing.T) { DefaultTags: map[string]string{"tag4": 
"default"}, } - metrics, err := parser.Parse([]byte(validFormData), "") + metrics, err := parser.Parse([]byte(validFormData)) require.NoError(t, err) require.Len(t, metrics, 1) require.Equal(t, "form_urlencoded_test", metrics[0].Name()) @@ -99,7 +99,7 @@ func TestParseValidFormDataDefaultTagsOverride(t *testing.T) { DefaultTags: map[string]string{"tag1": "default"}, } - metrics, err := parser.Parse([]byte(validFormData), "") + metrics, err := parser.Parse([]byte(validFormData)) require.NoError(t, err) require.Len(t, metrics, 1) require.Equal(t, "form_urlencoded_test", metrics[0].Name()) @@ -119,7 +119,7 @@ func TestParseEncodedFormData(t *testing.T) { TagKeys: []string{"tag1"}, } - metrics, err := parser.Parse([]byte(encodedFormData), "") + metrics, err := parser.Parse([]byte(encodedFormData)) require.NoError(t, err) require.Len(t, metrics, 1) require.Equal(t, "form_urlencoded_test", metrics[0].Name()) @@ -136,7 +136,7 @@ func TestParseInvalidFormDataError(t *testing.T) { MetricName: "form_urlencoded_test", } - metrics, err := parser.Parse([]byte(notEscapedProperlyFormData), "") + metrics, err := parser.Parse([]byte(notEscapedProperlyFormData)) require.Error(t, err) require.Empty(t, metrics) } @@ -147,7 +147,7 @@ func TestParseInvalidFormDataEmptyKey(t *testing.T) { } // Empty key for field - metrics, err := parser.Parse([]byte(blankKeyFormData), "") + metrics, err := parser.Parse([]byte(blankKeyFormData)) require.NoError(t, err) require.Len(t, metrics, 1) require.Equal(t, map[string]string{}, metrics[0].Tags()) @@ -157,7 +157,7 @@ func TestParseInvalidFormDataEmptyKey(t *testing.T) { // Empty key for tag parser.TagKeys = []string{""} - metrics, err = parser.Parse([]byte(blankKeyFormData), "") + metrics, err = parser.Parse([]byte(blankKeyFormData)) require.NoError(t, err) require.Len(t, metrics, 1) require.Equal(t, map[string]string{}, metrics[0].Tags()) @@ -171,7 +171,7 @@ func TestParseInvalidFormDataEmptyString(t *testing.T) { MetricName: "form_urlencoded_test", } - metrics, err := parser.Parse([]byte(emptyFormData), "") + metrics, err := parser.Parse([]byte(emptyFormData)) require.NoError(t, err) require.Empty(t, metrics) } @@ -199,7 +199,7 @@ func TestBenchmarkData(t *testing.T) { ), } - actual, err := plugin.Parse([]byte(benchmarkData), "") + actual, err := plugin.Parse([]byte(benchmarkData)) require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) } @@ -212,6 +212,6 @@ func BenchmarkParsing(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse([]byte(benchmarkData), "") + plugin.Parse([]byte(benchmarkData)) } } diff --git a/plugins/parsers/graphite/parser.go b/plugins/parsers/graphite/parser.go index 895acd243..d7505133b 100644 --- a/plugins/parsers/graphite/parser.go +++ b/plugins/parsers/graphite/parser.go @@ -48,7 +48,7 @@ func (p *Parser) Init() error { return nil } -func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { +func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { // parse even if the buffer begins with a newline if len(buf) != 0 && buf[0] == '\n' { buf = buf[1:] diff --git a/plugins/parsers/graphite/parser_test.go b/plugins/parsers/graphite/parser_test.go index 146fb10b5..ab41e33df 100644 --- a/plugins/parsers/graphite/parser_test.go +++ b/plugins/parsers/graphite/parser_test.go @@ -30,7 +30,7 @@ func BenchmarkParse(b *testing.B) { require.NoError(b, p.Init()) for i := 0; i < 
b.N; i++ { - _, err := p.Parse([]byte("servers.localhost.cpu.load 11 1435077219"), "") + _, err := p.Parse([]byte("servers.localhost.cpu.load 11 1435077219")) require.NoError(b, err) } } @@ -387,7 +387,7 @@ func TestParse(t *testing.T) { p := Parser{Templates: []string{test.template}} require.NoError(t, p.Init()) - metrics, err := p.Parse(test.input, "") + metrics, err := p.Parse(test.input) if test.err != "" { require.EqualError(t, err, test.err) continue diff --git a/plugins/parsers/grok/parser.go b/plugins/parsers/grok/parser.go index 42d1f46e7..4a375df3e 100644 --- a/plugins/parsers/grok/parser.go +++ b/plugins/parsers/grok/parser.go @@ -381,7 +381,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { return metric.New(p.Measurement, tags, fields, p.tsModder.tsMod(timestamp)), nil } -func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { +func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { metrics := make([]telegraf.Metric, 0) if p.Multiline { diff --git a/plugins/parsers/grok/parser_test.go b/plugins/parsers/grok/parser_test.go index 871dd7244..51d42cf2f 100644 --- a/plugins/parsers/grok/parser_test.go +++ b/plugins/parsers/grok/parser_test.go @@ -21,7 +21,7 @@ func TestGrokParse(t *testing.T) { err := parser.Compile() require.NoError(t, err) - _, err = parser.Parse([]byte(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`), "") + _, err = parser.Parse([]byte(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`)) require.NoError(t, err) } @@ -1021,7 +1021,7 @@ func TestMultilinePatterns(t *testing.T) { Log: testutil.Logger{}, } require.NoError(t, p.Compile()) - actual, err := p.Parse(buf, "") + actual, err := p.Parse(buf) require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, actual) } @@ -1181,7 +1181,7 @@ func TestMultilineNilMetric(t *testing.T) { Log: testutil.Logger{}, } require.NoError(t, p.Compile()) - actual, err := p.Parse(buf, "") + actual, err := p.Parse(buf) require.NoError(t, err) require.Empty(t, actual) } @@ -1224,7 +1224,7 @@ func TestBenchmarkData(t *testing.T) { ), } - actual, err := plugin.Parse([]byte(benchmarkData), "") + actual, err := plugin.Parse([]byte(benchmarkData)) require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) } @@ -1238,6 +1238,6 @@ func BenchmarkParsing(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse([]byte(benchmarkData), "") + plugin.Parse([]byte(benchmarkData)) } } diff --git a/plugins/parsers/influx/influx_upstream/parser.go b/plugins/parsers/influx/influx_upstream/parser.go index 3aef5f2d7..9a21458c3 100644 --- a/plugins/parsers/influx/influx_upstream/parser.go +++ b/plugins/parsers/influx/influx_upstream/parser.go @@ -117,7 +117,7 @@ func (p *Parser) SetTimeFunc(f TimeFunc) { p.defaultTime = f } -func (p *Parser) Parse(input []byte, extra string) ([]telegraf.Metric, error) { +func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { metrics := make([]telegraf.Metric, 0) decoder := lineprotocol.NewDecoderWithBytes(input) @@ -134,7 +134,7 @@ func (p *Parser) Parse(input []byte, extra string) ([]telegraf.Metric, error) { } func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { - metrics, err := p.Parse([]byte(line), "") + metrics, err := p.Parse([]byte(line)) if err != nil { return nil, err } diff --git 
a/plugins/parsers/influx/influx_upstream/parser_test.go b/plugins/parsers/influx/influx_upstream/parser_test.go index 3901a01f3..2bbb0aab1 100644 --- a/plugins/parsers/influx/influx_upstream/parser_test.go +++ b/plugins/parsers/influx/influx_upstream/parser_test.go @@ -614,7 +614,7 @@ func TestParser(t *testing.T) { parser.SetTimeFunc(tt.timeFunc) } - metrics, err := parser.Parse(tt.input, "") + metrics, err := parser.Parse(tt.input) if tt.err == nil { require.NoError(t, err) } else { @@ -638,7 +638,7 @@ func BenchmarkParser(b *testing.B) { parser := Parser{} require.NoError(b, parser.Init()) for n := 0; n < b.N; n++ { - metrics, err := parser.Parse(tt.input, "") + metrics, err := parser.Parse(tt.input) _ = err _ = metrics } @@ -748,7 +748,7 @@ func TestSeriesParser(t *testing.T) { parser.SetTimeFunc(tt.timeFunc) } - metrics, err := parser.Parse(tt.input, "") + metrics, err := parser.Parse(tt.input) require.Equal(t, tt.err, err) if err != nil { require.Equal(t, tt.err.Error(), err.Error()) @@ -854,7 +854,7 @@ func TestParserTimestampPrecision(t *testing.T) { parser := Parser{InfluxTimestampPrecision: d} require.NoError(t, parser.Init()) - metrics, err := parser.Parse(tt.input, "") + metrics, err := parser.Parse(tt.input) require.NoError(t, err) require.Equal(t, tt.metrics, metrics) @@ -905,7 +905,7 @@ func TestParserErrorString(t *testing.T) { parser := Parser{} require.NoError(t, parser.Init()) - _, err := parser.Parse(tt.input, "") + _, err := parser.Parse(tt.input) require.Equal(t, tt.errString, err.Error()) }) } @@ -1057,7 +1057,7 @@ func TestBenchmarkData(t *testing.T) { } // Do the parsing - actual, err := plugin.Parse([]byte(benchmarkData), "") + actual, err := plugin.Parse([]byte(benchmarkData)) require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) } @@ -1068,6 +1068,6 @@ func BenchmarkParsing(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse([]byte(benchmarkData), "") + plugin.Parse([]byte(benchmarkData)) } } diff --git a/plugins/parsers/influx/parser.go b/plugins/parsers/influx/parser.go index fe2d2e94a..a4adf330b 100644 --- a/plugins/parsers/influx/parser.go +++ b/plugins/parsers/influx/parser.go @@ -79,7 +79,7 @@ func (p *Parser) SetTimePrecision(u time.Duration) { p.handler.SetTimePrecision(u) } -func (p *Parser) Parse(input []byte, extra string) ([]telegraf.Metric, error) { +func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { p.Lock() defer p.Unlock() metrics := make([]telegraf.Metric, 0) @@ -115,7 +115,7 @@ func (p *Parser) Parse(input []byte, extra string) ([]telegraf.Metric, error) { } func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { - metrics, err := p.Parse([]byte(line), "") + metrics, err := p.Parse([]byte(line)) if err != nil { return nil, err } diff --git a/plugins/parsers/influx/parser_test.go b/plugins/parsers/influx/parser_test.go index 22726717c..17aef0975 100644 --- a/plugins/parsers/influx/parser_test.go +++ b/plugins/parsers/influx/parser_test.go @@ -588,7 +588,7 @@ func TestParser(t *testing.T) { parser.SetTimeFunc(tt.timeFunc) } - metrics, err := parser.Parse(tt.input, "") + metrics, err := parser.Parse(tt.input) require.Equal(t, tt.err, err) require.Len(t, metrics, len(tt.metrics)) @@ -693,7 +693,7 @@ func TestParserTimestampPrecision(t *testing.T) { parser := Parser{InfluxTimestampPrecision: d} require.NoError(t, parser.Init()) - metrics, err := 
parser.Parse(tt.input, "") + metrics, err := parser.Parse(tt.input) require.NoError(t, err) require.Equal(t, tt.metrics, metrics) @@ -716,7 +716,7 @@ func BenchmarkParser(b *testing.B) { parser := Parser{} require.NoError(b, parser.Init()) for n := 0; n < b.N; n++ { - metrics, err := parser.Parse(tt.input, "") + metrics, err := parser.Parse(tt.input) _ = err _ = metrics } @@ -824,7 +824,7 @@ func TestSeriesParser(t *testing.T) { parser.SetTimeFunc(tt.timeFunc) } - metrics, err := parser.Parse(tt.input, "") + metrics, err := parser.Parse(tt.input) require.Equal(t, tt.err, err) if err != nil { require.Equal(t, tt.err.Error(), err.Error()) @@ -872,7 +872,7 @@ func TestParserErrorString(t *testing.T) { parser := Parser{} require.NoError(t, parser.Init()) - _, err := parser.Parse(tt.input, "") + _, err := parser.Parse(tt.input) require.Equal(t, tt.errString, err.Error()) }) } @@ -1024,7 +1024,7 @@ func TestBenchmarkData(t *testing.T) { } // Do the parsing - actual, err := plugin.Parse([]byte(benchmarkData), "") + actual, err := plugin.Parse([]byte(benchmarkData)) require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) } @@ -1035,6 +1035,6 @@ func BenchmarkParsing(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse([]byte(benchmarkData), "") + plugin.Parse([]byte(benchmarkData)) } } diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go index e448277f3..d6c913732 100644 --- a/plugins/parsers/json/parser.go +++ b/plugins/parsers/json/parser.go @@ -181,7 +181,7 @@ func (p *Parser) Init() error { return nil } -func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { +func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { if p.Query != "" { result := gjson.GetBytes(buf, p.Query) buf = []byte(result.Raw) @@ -220,7 +220,7 @@ func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { } func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { - metrics, err := p.Parse([]byte(line+"\n"), "") + metrics, err := p.Parse([]byte(line + "\n")) if err != nil { return nil, err diff --git a/plugins/parsers/json/parser_test.go b/plugins/parsers/json/parser_test.go index 13265591c..56694ba9f 100644 --- a/plugins/parsers/json/parser_test.go +++ b/plugins/parsers/json/parser_test.go @@ -116,7 +116,7 @@ func TestParseValidJSON(t *testing.T) { require.NoError(t, parser.Init()) // Most basic vanilla test - actual, err := parser.Parse([]byte(validJSON), "") + actual, err := parser.Parse([]byte(validJSON)) require.NoError(t, err) require.Len(t, actual, 1) require.Equal(t, "json_test", actual[0].Name()) @@ -127,7 +127,7 @@ func TestParseValidJSON(t *testing.T) { require.Equal(t, map[string]string{}, actual[0].Tags()) // Test that newlines are fine - actual, err = parser.Parse([]byte(validJSONNewline), "") + actual, err = parser.Parse([]byte(validJSONNewline)) require.NoError(t, err) require.Len(t, actual, 1) require.Equal(t, "json_test", actual[0].Name()) @@ -138,7 +138,7 @@ func TestParseValidJSON(t *testing.T) { require.Equal(t, map[string]string{}, actual[0].Tags()) // Test that strings without TagKeys defined are ignored - actual, err = parser.Parse([]byte(validJSONTags), "") + actual, err = parser.Parse([]byte(validJSONTags)) require.NoError(t, err) require.Len(t, actual, 1) require.Equal(t, "json_test", actual[0].Name()) @@ -149,12 +149,12 @@ func TestParseValidJSON(t 
*testing.T) { require.Equal(t, map[string]string{}, actual[0].Tags()) // Test that whitespace only will parse as an empty list of actual - actual, err = parser.Parse([]byte("\n\t"), "") + actual, err = parser.Parse([]byte("\n\t")) require.NoError(t, err) require.Empty(t, actual) // Test that an empty string will parse as an empty list of actual - actual, err = parser.Parse([]byte(""), "") + actual, err = parser.Parse([]byte("")) require.NoError(t, err) require.Empty(t, actual) } @@ -198,9 +198,9 @@ func TestParseInvalidJSON(t *testing.T) { parser := &Parser{MetricName: "json_test"} require.NoError(t, parser.Init()) - _, err := parser.Parse([]byte(invalidJSON), "") + _, err := parser.Parse([]byte(invalidJSON)) require.Error(t, err) - _, err = parser.Parse([]byte(invalidJSON2), "") + _, err = parser.Parse([]byte(invalidJSON2)) require.Error(t, err) _, err = parser.ParseLine(invalidJSON) require.Error(t, err) @@ -213,7 +213,7 @@ func TestParseJSONImplicitStrictness(t *testing.T) { } require.NoError(t, parserImplicitNoStrict.Init()) - _, err := parserImplicitNoStrict.Parse([]byte(mixedValidityJSON), "") + _, err := parserImplicitNoStrict.Parse([]byte(mixedValidityJSON)) require.NoError(t, err) } @@ -225,7 +225,7 @@ func TestParseJSONExplicitStrictnessFalse(t *testing.T) { } require.NoError(t, parserNoStrict.Init()) - _, err := parserNoStrict.Parse([]byte(mixedValidityJSON), "") + _, err := parserNoStrict.Parse([]byte(mixedValidityJSON)) require.NoError(t, err) } @@ -237,7 +237,7 @@ func TestParseJSONExplicitStrictnessTrue(t *testing.T) { } require.NoError(t, parserStrict.Init()) - _, err := parserStrict.Parse([]byte(mixedValidityJSON), "") + _, err := parserStrict.Parse([]byte(mixedValidityJSON)) require.Error(t, err) } @@ -249,7 +249,7 @@ func TestParseWithTagKeys(t *testing.T) { } require.NoError(t, parser.Init()) - actual, err := parser.Parse([]byte(validJSONTags), "") + actual, err := parser.Parse([]byte(validJSONTags)) require.NoError(t, err) require.Len(t, actual, 1) require.Equal(t, "json_test", actual[0].Name()) @@ -266,7 +266,7 @@ func TestParseWithTagKeys(t *testing.T) { } require.NoError(t, parser.Init()) - actual, err = parser.Parse([]byte(validJSONTags), "") + actual, err = parser.Parse([]byte(validJSONTags)) require.NoError(t, err) require.Len(t, actual, 1) require.Equal(t, "json_test", actual[0].Name()) @@ -285,7 +285,7 @@ func TestParseWithTagKeys(t *testing.T) { } require.NoError(t, parser.Init()) - actual, err = parser.Parse([]byte(validJSONTags), "") + actual, err = parser.Parse([]byte(validJSONTags)) require.NoError(t, err) require.Len(t, actual, 1) require.Equal(t, "json_test", actual[0].Name()) @@ -363,7 +363,7 @@ func TestParseValidJSONDefaultTags(t *testing.T) { require.NoError(t, parser.Init()) // Most basic vanilla test - actual, err := parser.Parse([]byte(validJSON), "") + actual, err := parser.Parse([]byte(validJSON)) require.NoError(t, err) require.Len(t, actual, 1) require.Equal(t, "json_test", actual[0].Name()) @@ -374,7 +374,7 @@ func TestParseValidJSONDefaultTags(t *testing.T) { require.Equal(t, map[string]string{"t4g": "default"}, actual[0].Tags()) // Test that tagkeys and default tags are applied - actual, err = parser.Parse([]byte(validJSONTags), "") + actual, err = parser.Parse([]byte(validJSONTags)) require.NoError(t, err) require.Len(t, actual, 1) require.Equal(t, "json_test", actual[0].Name()) @@ -398,7 +398,7 @@ func TestParseValidJSONDefaultTagsOverride(t *testing.T) { require.NoError(t, parser.Init()) // Most basic vanilla test - actual, err := 
parser.Parse([]byte(validJSON), "") + actual, err := parser.Parse([]byte(validJSON)) require.NoError(t, err) require.Len(t, actual, 1) require.Equal(t, "json_test", actual[0].Name()) @@ -409,7 +409,7 @@ func TestParseValidJSONDefaultTagsOverride(t *testing.T) { require.Equal(t, map[string]string{"mytag": "default"}, actual[0].Tags()) // Test that tagkeys override default tags - actual, err = parser.Parse([]byte(validJSONTags), "") + actual, err = parser.Parse([]byte(validJSONTags)) require.NoError(t, err) require.Len(t, actual, 1) require.Equal(t, "json_test", actual[0].Name()) @@ -428,7 +428,7 @@ func TestParseValidJSONArray(t *testing.T) { require.NoError(t, parser.Init()) // Most basic vanilla test - actual, err := parser.Parse([]byte(validJSONArray), "") + actual, err := parser.Parse([]byte(validJSONArray)) require.NoError(t, err) require.Len(t, actual, 1) require.Equal(t, "json_array_test", actual[0].Name()) @@ -439,7 +439,7 @@ func TestParseValidJSONArray(t *testing.T) { require.Equal(t, map[string]string{}, actual[0].Tags()) // Basic multiple datapoints - actual, err = parser.Parse([]byte(validJSONArrayMultiple), "") + actual, err = parser.Parse([]byte(validJSONArrayMultiple)) require.NoError(t, err) require.Len(t, actual, 2) require.Equal(t, "json_array_test", actual[0].Name()) @@ -464,7 +464,7 @@ func TestParseArrayWithTagKeys(t *testing.T) { } require.NoError(t, parser.Init()) - actual, err := parser.Parse([]byte(validJSONArrayTags), "") + actual, err := parser.Parse([]byte(validJSONArrayTags)) require.NoError(t, err) require.Len(t, actual, 2) require.Equal(t, "json_array_test", actual[0].Name()) @@ -488,7 +488,7 @@ func TestParseArrayWithTagKeys(t *testing.T) { } require.NoError(t, parser.Init()) - actual, err = parser.Parse([]byte(validJSONArrayTags), "") + actual, err = parser.Parse([]byte(validJSONArrayTags)) require.NoError(t, err) require.Len(t, actual, 2) require.Equal(t, "json_array_test", actual[0].Name()) @@ -516,7 +516,7 @@ func TestParseArrayWithTagKeys(t *testing.T) { } require.NoError(t, parser.Init()) - actual, err = parser.Parse([]byte(validJSONArrayTags), "") + actual, err = parser.Parse([]byte(validJSONArrayTags)) require.NoError(t, err) require.Len(t, actual, 2) require.Equal(t, "json_array_test", actual[0].Name()) @@ -547,7 +547,7 @@ func TestHttpJsonBOM(t *testing.T) { require.NoError(t, parser.Init()) // Most basic vanilla test - _, err := parser.Parse(jsonBOM, "") + _, err := parser.Parse(jsonBOM) require.NoError(t, err) } @@ -577,7 +577,7 @@ func TestJSONParseNestedArray(t *testing.T) { } require.NoError(t, parser.Init()) - actual, err := parser.Parse([]byte(testString), "") + actual, err := parser.Parse([]byte(testString)) require.Len(t, actual, 1) require.NoError(t, err) require.Len(t, actual[0].Tags(), 3) @@ -606,7 +606,7 @@ func TestJSONQueryErrorOnArray(t *testing.T) { } require.NoError(t, parser.Init()) - _, err := parser.Parse([]byte(testString), "") + _, err := parser.Parse([]byte(testString)) require.Error(t, err) } @@ -640,7 +640,7 @@ func TestArrayOfObjects(t *testing.T) { } require.NoError(t, parser.Init()) - actual, err := parser.Parse([]byte(testString), "") + actual, err := parser.Parse([]byte(testString)) require.NoError(t, err) require.Len(t, actual, 3) } @@ -668,7 +668,7 @@ func TestUseCaseJSONQuery(t *testing.T) { } require.NoError(t, parser.Init()) - actual, err := parser.Parse([]byte(testString), "") + actual, err := parser.Parse([]byte(testString)) require.NoError(t, err) require.Len(t, actual, 3) require.Equal(t, "Murphy", 
actual[0].Fields()["last"]) @@ -703,7 +703,7 @@ func TestTimeParser(t *testing.T) { } require.NoError(t, parser.Init()) - actual, err := parser.Parse([]byte(testString), "") + actual, err := parser.Parse([]byte(testString)) require.NoError(t, err) require.Len(t, actual, 2) require.NotEqual(t, actual[0].Time(), actual[1].Time()) @@ -722,7 +722,7 @@ func TestTimeParserWithTimezone(t *testing.T) { } require.NoError(t, parser.Init()) - actual, err := parser.Parse([]byte(testString), "") + actual, err := parser.Parse([]byte(testString)) require.NoError(t, err) require.Len(t, actual, 1) require.EqualValues(t, int64(1136405040000000000), actual[0].Time().UnixNano()) @@ -757,7 +757,7 @@ func TestUnixTimeParser(t *testing.T) { } require.NoError(t, parser.Init()) - actual, err := parser.Parse([]byte(testString), "") + actual, err := parser.Parse([]byte(testString)) require.NoError(t, err) require.Len(t, actual, 2) require.NotEqual(t, actual[0].Time(), actual[1].Time()) @@ -792,7 +792,7 @@ func TestUnixMsTimeParser(t *testing.T) { } require.NoError(t, parser.Init()) - actual, err := parser.Parse([]byte(testString), "") + actual, err := parser.Parse([]byte(testString)) require.NoError(t, err) require.Len(t, actual, 2) require.NotEqual(t, actual[0].Time(), actual[1].Time()) @@ -816,7 +816,7 @@ func TestTimeErrors(t *testing.T) { } require.NoError(t, parser.Init()) - actual, err := parser.Parse([]byte(testString), "") + actual, err := parser.Parse([]byte(testString)) require.Error(t, err) require.Empty(t, actual) @@ -836,7 +836,7 @@ func TestTimeErrors(t *testing.T) { } require.NoError(t, parser.Init()) - actual, err = parser.Parse([]byte(testString2), "") + actual, err = parser.Parse([]byte(testString2)) require.Error(t, err) require.Empty(t, actual) require.Equal(t, errors.New("'json_time_key' could not be found"), err) @@ -846,7 +846,7 @@ func TestShareTimestamp(t *testing.T) { parser := &Parser{MetricName: "json_test"} require.NoError(t, parser.Init()) - actual, err := parser.Parse([]byte(validJSONArrayMultiple), "") + actual, err := parser.Parse([]byte(validJSONArrayMultiple)) require.NoError(t, err) require.Len(t, actual, 2) require.Equal(t, actual[0].Time(), actual[1].Time()) @@ -866,7 +866,7 @@ func TestNameKey(t *testing.T) { parser := &Parser{NameKey: "b_c"} require.NoError(t, parser.Init()) - actual, err := parser.Parse([]byte(testString), "") + actual, err := parser.Parse([]byte(testString)) require.NoError(t, err) require.Equal(t, "this is my name", actual[0].Name()) } @@ -877,7 +877,7 @@ func TestParseArrayWithWrongType(t *testing.T) { parser := &Parser{} require.NoError(t, parser.Init()) - _, err := parser.Parse([]byte(data), "") + _, err := parser.Parse([]byte(data)) require.Error(t, err) } @@ -994,7 +994,7 @@ func TestParse(t *testing.T) { parser := tt.parser require.NoError(t, parser.Init()) - actual, err := parser.Parse(tt.input, "") + actual, err := parser.Parse(tt.input) require.NoError(t, err) testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.IgnoreTime()) @@ -1106,7 +1106,7 @@ func TestParseWithWildcardTagKeys(t *testing.T) { parser := tt.parser require.NoError(t, parser.Init()) - actual, err := parser.Parse(tt.input, "") + actual, err := parser.Parse(tt.input) require.NoError(t, err) testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.IgnoreTime()) }) @@ -1382,7 +1382,7 @@ func TestParseArrayWithWildcardTagKeys(t *testing.T) { parser := tt.parser require.NoError(t, parser.Init()) - actual, err := parser.Parse(tt.input, "") + actual, err := 
parser.Parse(tt.input) require.NoError(t, err) testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.IgnoreTime()) @@ -1434,7 +1434,7 @@ func TestBenchmarkData(t *testing.T) { } // Do the parsing - actual, err := plugin.Parse([]byte(benchmarkData), "") + actual, err := plugin.Parse([]byte(benchmarkData)) require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) } @@ -1450,7 +1450,7 @@ func BenchmarkParsingSequential(b *testing.B) { // Do the benchmarking for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse([]byte(benchmarkData), "") + plugin.Parse([]byte(benchmarkData)) } } @@ -1466,7 +1466,7 @@ func BenchmarkParsingParallel(b *testing.B) { b.RunParallel(func(p *testing.PB) { for p.Next() { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse([]byte(benchmarkData), "") + plugin.Parse([]byte(benchmarkData)) } }) } @@ -1488,6 +1488,6 @@ func FuzzParserJSON(f *testing.F) { f.Fuzz(func(_ *testing.T, input []byte) { //nolint:errcheck // fuzz testing can give lots of errors, but we just want to test for crashes - parser.Parse(input, "") + parser.Parse(input) }) } diff --git a/plugins/parsers/json_v2/parser.go b/plugins/parsers/json_v2/parser.go index 865220643..0f9d4aa5b 100644 --- a/plugins/parsers/json_v2/parser.go +++ b/plugins/parsers/json_v2/parser.go @@ -120,7 +120,7 @@ func (p *Parser) Init() error { return nil } -func (p *Parser) Parse(input []byte, extra string) ([]telegraf.Metric, error) { +func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { // What we've done here is to put the entire former contents of Parse() // into parseCriticalPath(). // diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go index 00133be8b..7544b1875 100644 --- a/plugins/parsers/json_v2/parser_test.go +++ b/plugins/parsers/json_v2/parser_test.go @@ -131,7 +131,7 @@ func BenchmarkParsingSequential(b *testing.B) { // Do the benchmarking for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse(input, "") + plugin.Parse(input) } } @@ -162,7 +162,7 @@ func BenchmarkParsingParallel(b *testing.B) { b.RunParallel(func(p *testing.PB) { for p.Next() { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse(input, "") + plugin.Parse(input) } }) } diff --git a/plugins/parsers/logfmt/parser.go b/plugins/parsers/logfmt/parser.go index 66e6889d0..bb6d650d3 100644 --- a/plugins/parsers/logfmt/parser.go +++ b/plugins/parsers/logfmt/parser.go @@ -27,7 +27,7 @@ type Parser struct { } // Parse converts a slice of bytes in logfmt format to metrics. -func (p *Parser) Parse(b []byte, extra string) ([]telegraf.Metric, error) { +func (p *Parser) Parse(b []byte) ([]telegraf.Metric, error) { reader := bytes.NewReader(b) decoder := logfmt.NewDecoder(reader) metrics := make([]telegraf.Metric, 0) @@ -75,7 +75,7 @@ func (p *Parser) Parse(b []byte, extra string) ([]telegraf.Metric, error) { // ParseLine converts a single line of text in logfmt format to metrics. 
func (p *Parser) ParseLine(s string) (telegraf.Metric, error) { - metrics, err := p.Parse([]byte(s), "") + metrics, err := p.Parse([]byte(s)) if err != nil { return nil, err } diff --git a/plugins/parsers/logfmt/parser_test.go b/plugins/parsers/logfmt/parser_test.go index b4d9b94f3..5853ff4f0 100644 --- a/plugins/parsers/logfmt/parser_test.go +++ b/plugins/parsers/logfmt/parser_test.go @@ -128,7 +128,7 @@ func TestParse(t *testing.T) { l := Parser{ metricName: tt.measurement, } - got, err := l.Parse(tt.bytes, "") + got, err := l.Parse(tt.bytes) if (err != nil) != tt.wantErr { t.Errorf("Logfmt.Parse error = %v, wantErr %v", err, tt.wantErr) return @@ -316,7 +316,7 @@ func TestBenchmarkData(t *testing.T) { ), } - actual, err := plugin.Parse([]byte(benchmarkData), "") + actual, err := plugin.Parse([]byte(benchmarkData)) require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) } @@ -329,6 +329,6 @@ func BenchmarkParsing(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse([]byte(benchmarkData), "") + plugin.Parse([]byte(benchmarkData)) } } diff --git a/plugins/parsers/nagios/parser.go b/plugins/parsers/nagios/parser.go index ffc049075..7472deed5 100644 --- a/plugins/parsers/nagios/parser.go +++ b/plugins/parsers/nagios/parser.go @@ -103,7 +103,7 @@ var ( ) func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { - metrics, err := p.Parse([]byte(line), "") + metrics, err := p.Parse([]byte(line)) return metrics[0], err } @@ -111,7 +111,7 @@ func (p *Parser) SetDefaultTags(tags map[string]string) { p.DefaultTags = tags } -func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { +func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { ts := time.Now().UTC() s := bufio.NewScanner(bytes.NewReader(buf)) diff --git a/plugins/parsers/nagios/parser_test.go b/plugins/parsers/nagios/parser_test.go index 7b47d8fc4..d0c4e1b44 100644 --- a/plugins/parsers/nagios/parser_test.go +++ b/plugins/parsers/nagios/parser_test.go @@ -466,7 +466,7 @@ with three lines for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - metrics, err := parser.Parse([]byte(tt.input), "") + metrics, err := parser.Parse([]byte(tt.input)) tt.assertF(t, metrics, err) }) } @@ -562,7 +562,7 @@ func TestBenchmarkData(t *testing.T) { ), } - actual, err := plugin.Parse([]byte(benchmarkData), "") + actual, err := plugin.Parse([]byte(benchmarkData)) require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) } @@ -572,6 +572,6 @@ func BenchmarkParsing(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse([]byte(benchmarkData), "") + plugin.Parse([]byte(benchmarkData)) } } diff --git a/plugins/parsers/openmetrics/parser.go b/plugins/parsers/openmetrics/parser.go index 39b6ddcf8..19f9d0dd8 100644 --- a/plugins/parsers/openmetrics/parser.go +++ b/plugins/parsers/openmetrics/parser.go @@ -44,7 +44,7 @@ func (p *Parser) SetDefaultTags(tags map[string]string) { p.DefaultTags = tags } -func (p *Parser) Parse(data []byte, extra string) ([]telegraf.Metric, error) { +func (p *Parser) Parse(data []byte) ([]telegraf.Metric, error) { // Determine the metric transport-type derived from the response header contentType := p.Header.Get("Content-Type") var mediaType string @@ -102,7 +102,7 @@ func (p 
*Parser) Parse(data []byte, extra string) ([]telegraf.Metric, error) { } func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { - metrics, err := p.Parse([]byte(line), "") + metrics, err := p.Parse([]byte(line)) if err != nil { return nil, err } diff --git a/plugins/parsers/openmetrics/parser_test.go b/plugins/parsers/openmetrics/parser_test.go index 0da8db43b..fc4c2cf6f 100644 --- a/plugins/parsers/openmetrics/parser_test.go +++ b/plugins/parsers/openmetrics/parser_test.go @@ -160,7 +160,7 @@ func BenchmarkParsingMetricVersion1(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse(benchmarkData, "") + plugin.Parse(benchmarkData) } } @@ -173,6 +173,6 @@ func BenchmarkParsingMetricVersion2(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse(benchmarkData, "") + plugin.Parse(benchmarkData) } } diff --git a/plugins/parsers/opentsdb/parser.go b/plugins/parsers/opentsdb/parser.go index eb5efc3ce..5c36ca3af 100644 --- a/plugins/parsers/opentsdb/parser.go +++ b/plugins/parsers/opentsdb/parser.go @@ -20,7 +20,7 @@ type Parser struct { Log telegraf.Logger `toml:"-"` } -func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { +func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { var metrics []telegraf.Metric scanner := bufio.NewScanner(bytes.NewReader(buf)) diff --git a/plugins/parsers/opentsdb/parser_test.go b/plugins/parsers/opentsdb/parser_test.go index 03d302a2c..4c5d4949c 100644 --- a/plugins/parsers/opentsdb/parser_test.go +++ b/plugins/parsers/opentsdb/parser_test.go @@ -231,7 +231,7 @@ func TestParse(t *testing.T) { t.Run(tt.name, func(t *testing.T) { p := &Parser{Log: testutil.Logger{}} - actual, err := p.Parse(tt.input, "") + actual, err := p.Parse(tt.input) require.NoError(t, err) testutil.RequireMetricsEqual(t, tt.expected, actual) @@ -301,7 +301,7 @@ func TestParse_DefaultTags(t *testing.T) { p := &Parser{Log: testutil.Logger{}} p.SetDefaultTags(tt.defaultTags) - actual, err := p.Parse(tt.input, "") + actual, err := p.Parse(tt.input) require.NoError(t, err) testutil.RequireMetricsEqual(t, tt.expected, actual) @@ -343,7 +343,7 @@ func TestBenchmarkData(t *testing.T) { ), } - actual, err := plugin.Parse([]byte(benchmarkData), "") + actual, err := plugin.Parse([]byte(benchmarkData)) require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) } @@ -353,6 +353,6 @@ func BenchmarkParsing(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse([]byte(benchmarkData), "") + plugin.Parse([]byte(benchmarkData)) } } diff --git a/plugins/parsers/parquet/parser.go b/plugins/parsers/parquet/parser.go index f6b096d65..f36b31fd6 100644 --- a/plugins/parsers/parquet/parser.go +++ b/plugins/parsers/parquet/parser.go @@ -44,7 +44,7 @@ func (p *Parser) Init() error { return nil } -func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { +func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { reader := bytes.NewReader(buf) parquetReader, err := file.NewParquetReader(reader) if err != nil { @@ -121,7 +121,7 @@ func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { } func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { - metrics, err := p.Parse([]byte(line), "") + metrics, 
err := p.Parse([]byte(line)) if err != nil { return nil, err } diff --git a/plugins/parsers/parquet/parser_test.go b/plugins/parsers/parquet/parser_test.go index 011425af7..b9b4635c9 100644 --- a/plugins/parsers/parquet/parser_test.go +++ b/plugins/parsers/parquet/parser_test.go @@ -71,6 +71,6 @@ func BenchmarkParsing(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse(benchmarkData, "") + plugin.Parse(benchmarkData) } } diff --git a/plugins/parsers/phasor_binary/parser.go b/plugins/parsers/phasor_binary/parser.go index 48230580b..de1cb0aec 100644 --- a/plugins/parsers/phasor_binary/parser.go +++ b/plugins/parsers/phasor_binary/parser.go @@ -5,7 +5,6 @@ import ( "errors" "math" "strconv" - "strings" "time" "github.com/influxdata/telegraf" @@ -46,21 +45,23 @@ func (p *Parser) Init() error { return nil } -func (p *Parser) Parse(data []byte, topic string) ([]telegraf.Metric, error) { - metrics, deviceType, err := p.checkHeaderAndInitMetrics(data, topic) +func (p *Parser) Parse(data []byte) ([]telegraf.Metric, error) { + metrics, deviceType, err := p.checkHeaderAndInitMetrics(data) if err != nil { return nil, err } - p.fillAnalogChanMetrics(metrics, data, 6) - p.fillSwitchChanMetrics(metrics, data, 9606) + p.fillAnalogChanMetrics(metrics, data[6:]) + p.fillSwitchChanMetrics(metrics, data[9606:]) switch deviceType { case deviceTypeI: - p.fillPQSPFChanMetrics(metrics, data, 9706) + p.fillPQSPFChanMetrics(metrics, data[9706:]) + case deviceTypeU: - p.fillFdFChanMetrics(metrics, data, 9706) - p.fillUABUBCUCAChanMetrics(metrics, data, 10506) + p.fillFdFChanMetrics(metrics, data[9706:]) + p.fillUABUBCUCAChanMetrics(metrics, data[10506:]) + default: return nil, errors.New("illegal device type") } @@ -85,8 +86,8 @@ func init() { ) } -// simply check the data, and initialize metrics with data and topic -func (p *Parser) checkHeaderAndInitMetrics(data []byte, topic string) ([]telegraf.Metric, int, error) { +// simply check the data, and initialize metrics +func (p *Parser) checkHeaderAndInitMetrics(data []byte) ([]telegraf.Metric, int, error) { if len(data) < 6 { return nil, 0, errors.New("no valid data") } @@ -95,7 +96,6 @@ func (p *Parser) checkHeaderAndInitMetrics(data []byte, topic string) ([]telegra deviceType := int(data[4]) metrics := make([]telegraf.Metric, p.pointFrequency) - device, _ := strings.CutSuffix(topic, "_Phasor") switch deviceType { case deviceTypeI: if len(data) < dataLengthI { @@ -107,7 +107,7 @@ func (p *Parser) checkHeaderAndInitMetrics(data []byte, topic string) ([]telegra for i := range metrics { metrics[i] = metric.New("current", - map[string]string{"device": device}, + map[string]string{}, make(map[string]any, 44), // 3*8+2*8+4 time.Unix(second, int64(i*1e9/p.pointFrequency))) } @@ -121,7 +121,7 @@ func (p *Parser) checkHeaderAndInitMetrics(data []byte, topic string) ([]telegra for i := range metrics { metrics[i] = metric.New("voltage", - map[string]string{"device": device}, + map[string]string{}, make(map[string]any, 49), // 3*8+2*8+2+3*3 time.Unix(second, int64(i*1e9/p.pointFrequency))) } @@ -133,12 +133,12 @@ func (p *Parser) checkHeaderAndInitMetrics(data []byte, topic string) ([]telegra } // yc metrics -func (p *Parser) fillAnalogChanMetrics(metrics []telegraf.Metric, data []byte, begin int) { +func (p *Parser) fillAnalogChanMetrics(metrics []telegraf.Metric, data []byte) { for ci := range 8 { chanNo := strconv.Itoa(ci + 1) for mj := range metrics { - b := 
begin + (ci*p.pointFrequency+mj)*24 + b := (ci*p.pointFrequency + mj) * 24 amp := math.Float64frombits(binary.LittleEndian.Uint64(data[b : b+8])) pa := math.Float64frombits(binary.LittleEndian.Uint64(data[b+8 : b+16])) @@ -152,11 +152,11 @@ func (p *Parser) fillAnalogChanMetrics(metrics []telegraf.Metric, data []byte, b } // yx metrics -func (p *Parser) fillSwitchChanMetrics(metrics []telegraf.Metric, data []byte, begin int) { +func (p *Parser) fillSwitchChanMetrics(metrics []telegraf.Metric, data []byte) { for ci := range 2 { for mj := range metrics { - b := begin + ci*p.pointFrequency + mj + b := ci*p.pointFrequency + mj for bk := range 8 { chanNo := strconv.Itoa(ci*8 + bk + 1) @@ -166,24 +166,12 @@ func (p *Parser) fillSwitchChanMetrics(metrics []telegraf.Metric, data []byte, b } } -// current relative metrics -func (p *Parser) fillPQSPFChanMetrics(metrics []telegraf.Metric, data []byte, begin int) { +// current metrics +func (p *Parser) fillPQSPFChanMetrics(metrics []telegraf.Metric, data []byte) { for ci, channel := range []string{"p", "q", "s", "pf"} { for mj := range metrics { - b := begin + (ci*p.pointFrequency+mj)*8 - - metrics[mj].AddField(channel, math.Float64frombits(binary.LittleEndian.Uint64(data[b:b+8]))) - } - } -} - -// voltage relative metrics -func (p *Parser) fillFdFChanMetrics(metrics []telegraf.Metric, data []byte, begin int) { - - for ci, channel := range []string{"f", "df"} { - for mj := range metrics { - b := begin + (ci*p.pointFrequency+mj)*8 + b := (ci*p.pointFrequency + mj) * 8 metrics[mj].AddField(channel, math.Float64frombits(binary.LittleEndian.Uint64(data[b:b+8]))) } @@ -191,11 +179,23 @@ func (p *Parser) fillFdFChanMetrics(metrics []telegraf.Metric, data []byte, begi } // voltage metrics -func (p *Parser) fillUABUBCUCAChanMetrics(metrics []telegraf.Metric, data []byte, begin int) { +func (p *Parser) fillFdFChanMetrics(metrics []telegraf.Metric, data []byte) { + + for ci, channel := range []string{"f", "df"} { + for mj := range metrics { + b := (ci*p.pointFrequency + mj) * 8 + + metrics[mj].AddField(channel, math.Float64frombits(binary.LittleEndian.Uint64(data[b:b+8]))) + } + } +} + +// voltage metrics +func (p *Parser) fillUABUBCUCAChanMetrics(metrics []telegraf.Metric, data []byte) { for ci, channel := range []string{"uab", "ubc", "uca"} { for mj := range metrics { - b := begin + (ci*p.pointFrequency+mj)*24 + b := (ci*p.pointFrequency + mj) * 24 amp := math.Float64frombits(binary.LittleEndian.Uint64(data[b : b+8])) pa := math.Float64frombits(binary.LittleEndian.Uint64(data[b+8 : b+16])) diff --git a/plugins/parsers/phasor_binary/parser_test.go b/plugins/parsers/phasor_binary/parser_test.go index 63097b3ca..067d10359 100644 --- a/plugins/parsers/phasor_binary/parser_test.go +++ b/plugins/parsers/phasor_binary/parser_test.go @@ -158,7 +158,7 @@ func TestParse(t *testing.T) { parser := new(Parser) parser.Init() - actual, err := parser.Parse(data, topic) + actual, err := parser.Parse(data) if err != nil { t.FailNow() } diff --git a/plugins/parsers/prometheus/parser.go b/plugins/parsers/prometheus/parser.go index a65360535..e2d5a937c 100644 --- a/plugins/parsers/prometheus/parser.go +++ b/plugins/parsers/prometheus/parser.go @@ -30,7 +30,7 @@ func (p *Parser) SetDefaultTags(tags map[string]string) { p.DefaultTags = tags } -func (p *Parser) Parse(data []byte, extra string) ([]telegraf.Metric, error) { +func (p *Parser) Parse(data []byte) ([]telegraf.Metric, error) { // Determine the metric transport-type derived from the response header and // create a matching 
decoder. format := expfmt.NewFormat(expfmt.TypeProtoCompact) @@ -74,7 +74,7 @@ func (p *Parser) Parse(data []byte, extra string) ([]telegraf.Metric, error) { } func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { - metrics, err := p.Parse([]byte(line), "") + metrics, err := p.Parse([]byte(line)) if err != nil { return nil, err } diff --git a/plugins/parsers/prometheus/parser_test.go b/plugins/parsers/prometheus/parser_test.go index a534cb197..2df3d23dc 100644 --- a/plugins/parsers/prometheus/parser_test.go +++ b/plugins/parsers/prometheus/parser_test.go @@ -160,7 +160,7 @@ func BenchmarkParsingMetricVersion1(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse(benchmarkData, "") + plugin.Parse(benchmarkData) } } @@ -173,6 +173,6 @@ func BenchmarkParsingMetricVersion2(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse(benchmarkData, "") + plugin.Parse(benchmarkData) } } diff --git a/plugins/parsers/prometheusremotewrite/parser.go b/plugins/parsers/prometheusremotewrite/parser.go index 99a711975..3f1788b75 100644 --- a/plugins/parsers/prometheusremotewrite/parser.go +++ b/plugins/parsers/prometheusremotewrite/parser.go @@ -15,7 +15,7 @@ type Parser struct { DefaultTags map[string]string } -func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { +func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { var err error var metrics []telegraf.Metric var req prompb.WriteRequest @@ -44,7 +44,7 @@ func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { } func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { - metrics, err := p.Parse([]byte(line), "") + metrics, err := p.Parse([]byte(line)) if err != nil { return nil, err } diff --git a/plugins/parsers/prometheusremotewrite/parser_test.go b/plugins/parsers/prometheusremotewrite/parser_test.go index 8a57a422c..8aba5ad45 100644 --- a/plugins/parsers/prometheusremotewrite/parser_test.go +++ b/plugins/parsers/prometheusremotewrite/parser_test.go @@ -76,7 +76,7 @@ func TestCases(t *testing.T) { require.NoError(t, err) // Act and assert - parsed, err := parser.Parse(inputBytes, "") + parsed, err := parser.Parse(inputBytes) require.NoError(t, err) require.Len(t, parsed, len(expected)) // Ignore type when comparing, because expected metrics are parsed from influx lines and thus always untyped @@ -97,7 +97,7 @@ func BenchmarkParsingMetricVersion1(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - parser.Parse(benchmarkData, "") + parser.Parse(benchmarkData) } } @@ -112,7 +112,7 @@ func BenchmarkParsingMetricVersion2(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - parser.Parse(benchmarkData, "") + parser.Parse(benchmarkData) } } @@ -170,7 +170,7 @@ func TestParse(t *testing.T) { DefaultTags: map[string]string{}, } - metrics, err := parser.Parse(inoutBytes, "") + metrics, err := parser.Parse(inoutBytes) require.NoError(t, err) require.Len(t, metrics, 2) testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) @@ -279,7 +279,7 @@ func TestHistograms(t *testing.T) { parser := Parser{ DefaultTags: map[string]string{}, } - metrics, err := parser.Parse(inoutBytes, "") + 
metrics, err := parser.Parse(inoutBytes) require.NoError(t, err) require.Len(t, metrics, 22) testutil.RequireMetricsSubset(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) @@ -323,7 +323,7 @@ func TestDefaultTags(t *testing.T) { }, } - metrics, err := parser.Parse(inoutBytes, "") + metrics, err := parser.Parse(inoutBytes) require.NoError(t, err) require.Len(t, metrics, 1) testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) @@ -365,7 +365,7 @@ func TestMetricsWithTimestamp(t *testing.T) { DefaultTags: map[string]string{}, } - metrics, err := parser.Parse(inoutBytes, "") + metrics, err := parser.Parse(inoutBytes) require.NoError(t, err) require.Len(t, metrics, 1) testutil.RequireMetricsEqual(t, expected, metrics, testutil.SortMetrics()) @@ -430,7 +430,7 @@ func TestBenchmarkData(t *testing.T) { require.NoError(t, err) plugin := &Parser{} - actual, err := plugin.Parse(benchmarkData, "") + actual, err := plugin.Parse(benchmarkData) require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) } @@ -444,6 +444,6 @@ func BenchmarkParsing(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse(benchmarkData, "") + plugin.Parse(benchmarkData) } } diff --git a/plugins/parsers/value/parser.go b/plugins/parsers/value/parser.go index e39f6d6bb..6d32126df 100644 --- a/plugins/parsers/value/parser.go +++ b/plugins/parsers/value/parser.go @@ -45,7 +45,7 @@ func (v *Parser) Init() error { return nil } -func (v *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { +func (v *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { vStr := string(bytes.TrimSpace(bytes.Trim(buf, "\x00"))) // unless it's a string, separate out any fields in the buffer, @@ -96,7 +96,7 @@ func (v *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { } func (v *Parser) ParseLine(line string) (telegraf.Metric, error) { - metrics, err := v.Parse([]byte(line), "") + metrics, err := v.Parse([]byte(line)) if err != nil { return nil, err diff --git a/plugins/parsers/value/parser_test.go b/plugins/parsers/value/parser_test.go index 19c520e8c..afdf6a416 100644 --- a/plugins/parsers/value/parser_test.go +++ b/plugins/parsers/value/parser_test.go @@ -106,7 +106,7 @@ func TestParseValidValues(t *testing.T) { DataType: tt.dtype, } require.NoError(t, plugin.Init()) - actual, err := plugin.Parse(tt.input, "") + actual, err := plugin.Parse(tt.input) require.NoError(t, err) require.Len(t, actual, 1) testutil.RequireMetricEqual(t, expected, actual[0], testutil.IgnoreTime()) @@ -188,7 +188,7 @@ func TestParseCustomFieldName(t *testing.T) { } require.NoError(t, parser.Init()) - metrics, err := parser.Parse([]byte(`55`), "") + metrics, err := parser.Parse([]byte(`55`)) require.NoError(t, err) require.Equal(t, map[string]interface{}{"penguin": int64(55)}, metrics[0].Fields()) } @@ -223,7 +223,7 @@ func TestParseInvalidValues(t *testing.T) { DataType: tt.dtype, } require.NoError(t, plugin.Init()) - actual, err := plugin.Parse(tt.input, "") + actual, err := plugin.Parse(tt.input) require.ErrorContains(t, err, "invalid syntax") require.Empty(t, actual) }) @@ -282,7 +282,7 @@ func TestParseValidValuesDefaultTags(t *testing.T) { require.NoError(t, plugin.Init()) plugin.SetDefaultTags(map[string]string{"test": "tag"}) - actual, err := plugin.Parse([]byte("55"), "") + actual, err := plugin.Parse([]byte("55")) 
require.NoError(t, err) require.Len(t, actual, 1) @@ -295,7 +295,7 @@ func TestParseValuesWithNullCharacter(t *testing.T) { DataType: "integer", } require.NoError(t, parser.Init()) - metrics, err := parser.Parse([]byte("55\x00"), "") + metrics, err := parser.Parse([]byte("55\x00")) require.NoError(t, err) require.Len(t, metrics, 1) require.Equal(t, "value_test", metrics[0].Name()) @@ -330,7 +330,7 @@ func TestBenchmarkData(t *testing.T) { ), } - actual, err := plugin.Parse([]byte(benchmarkData), "") + actual, err := plugin.Parse([]byte(benchmarkData)) require.NoError(t, err) testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) } @@ -341,6 +341,6 @@ func BenchmarkParsing(b *testing.B) { for n := 0; n < b.N; n++ { //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations - plugin.Parse([]byte(benchmarkData), "") + plugin.Parse([]byte(benchmarkData)) } } diff --git a/plugins/parsers/wavefront/parser.go b/plugins/parsers/wavefront/parser.go index d3c73b252..2b3968f94 100644 --- a/plugins/parsers/wavefront/parser.go +++ b/plugins/parsers/wavefront/parser.go @@ -67,7 +67,7 @@ func (p *Parser) Init() error { func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { buf := []byte(line) - metrics, err := p.Parse(buf, "") + metrics, err := p.Parse(buf) if err != nil { return nil, err } @@ -79,7 +79,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { return nil, nil } -func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { +func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { pp := p.parsers.Get().(*PointParser) defer p.parsers.Put(pp) return pp.Parse(buf) diff --git a/plugins/parsers/wavefront/parser_test.go b/plugins/parsers/wavefront/parser_test.go index d9e4525e7..95dbe51b4 100644 --- a/plugins/parsers/wavefront/parser_test.go +++ b/plugins/parsers/wavefront/parser_test.go @@ -15,25 +15,25 @@ func TestParse(t *testing.T) { parser := &Parser{} require.NoError(t, parser.Init()) - parsedMetrics, err := parser.Parse([]byte("test.metric 1"), "") + parsedMetrics, err := parser.Parse([]byte("test.metric 1")) require.NoError(t, err) testMetric := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) require.Equal(t, parsedMetrics[0].Name(), testMetric.Name()) require.Equal(t, parsedMetrics[0].Fields(), testMetric.Fields()) - parsedMetrics, err = parser.Parse([]byte("\u2206test.delta 1 1530939936"), "") + parsedMetrics, err = parser.Parse([]byte("\u2206test.delta 1 1530939936")) require.NoError(t, err) testMetric = metric.New("\u2206test.delta", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) require.EqualValues(t, parsedMetrics[0], testMetric) - parsedMetrics, err = parser.Parse([]byte("\u0394test.delta 1 1530939936"), "") + parsedMetrics, err = parser.Parse([]byte("\u0394test.delta 1 1530939936")) require.NoError(t, err) testMetric = metric.New("\u0394test.delta", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) require.EqualValues(t, parsedMetrics[0], testMetric) - parsedMetrics, err = parser.Parse([]byte("\u0394test.delta 1.234 1530939936 source=\"mysource\" tag2=value2"), "") + parsedMetrics, err = parser.Parse([]byte("\u0394test.delta 1.234 1530939936 source=\"mysource\" tag2=value2")) require.NoError(t, err) testMetric = metric.New( "\u0394test.delta", @@ -43,22 +43,22 @@ func TestParse(t *testing.T) { ) require.EqualValues(t, 
parsedMetrics[0], testMetric) - parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936"), "") + parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936")) require.NoError(t, err) testMetric = metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) require.EqualValues(t, parsedMetrics[0], testMetric) - parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource"), "") + parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource")) require.NoError(t, err) testMetric = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) require.EqualValues(t, parsedMetrics[0], testMetric) - parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 source=\"mysource\""), "") + parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 source=\"mysource\"")) require.NoError(t, err) testMetric = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) require.EqualValues(t, parsedMetrics[0], testMetric) - parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2"), "") + parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2")) require.NoError(t, err) testMetric = metric.New( "test.metric", @@ -68,7 +68,7 @@ func TestParse(t *testing.T) { ) require.EqualValues(t, parsedMetrics[0], testMetric) - parsedMetrics, err = parser.Parse([]byte("\"test.metric\" -1.1234 1530939936 \"source\"=\"mysource\" tag2=value2"), "") + parsedMetrics, err = parser.Parse([]byte("\"test.metric\" -1.1234 1530939936 \"source\"=\"mysource\" tag2=value2")) require.NoError(t, err) testMetric = metric.New( "test.metric", @@ -78,7 +78,7 @@ func TestParse(t *testing.T) { ) require.EqualValues(t, parsedMetrics[0], testMetric) - parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234e04 1530939936 \"source\"=\"mysource\" tag2=value2"), "") + parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234e04 1530939936 \"source\"=\"mysource\" tag2=value2")) require.NoError(t, err) testMetric = metric.New( "test.metric", @@ -88,7 +88,7 @@ func TestParse(t *testing.T) { ) require.EqualValues(t, parsedMetrics[0], testMetric) - parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234e-04 1530939936 \"source\"=\"mysource\" tag2=value2"), "") + parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234e-04 1530939936 \"source\"=\"mysource\" tag2=value2")) require.NoError(t, err) testMetric = metric.New( "test.metric", @@ -98,7 +98,7 @@ func TestParse(t *testing.T) { ) require.EqualValues(t, parsedMetrics[0], testMetric) - parsedMetrics, err = parser.Parse([]byte("test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 "), "") + parsedMetrics, err = parser.Parse([]byte("test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 ")) require.NoError(t, err) testMetric = metric.New( "test.metric", @@ -159,7 +159,7 @@ func TestParseMultiple(t *testing.T) { parser := &Parser{} require.NoError(t, parser.Init()) - parsedMetrics, err := parser.Parse([]byte("test.metric 1\ntest.metric2 2 1530939936"), "") + parsedMetrics, err := parser.Parse([]byte("test.metric 1\ntest.metric2 2 1530939936")) require.NoError(t, err) testMetric1 := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, 
 time.Unix(0, 0))
 	testMetric2 := metric.New("test.metric2", map[string]string{}, map[string]interface{}{"value": 2.}, time.Unix(1530939936, 0))
@@ -168,7 +168,7 @@ func TestParseMultiple(t *testing.T) {
 	require.Equal(t, parsedMetrics[0].Fields(), testMetrics[0].Fields())
 	require.EqualValues(t, parsedMetrics[1], testMetrics[1])
-	parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource\n\"test.metric\" 1.1234 1530939936 source=\"mysource\""), "")
+	parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource\n\"test.metric\" 1.1234 1530939936 source=\"mysource\""))
 	require.NoError(t, err)
 	testMetric1 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
 	testMetric2 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
@@ -177,10 +177,9 @@ func TestParseMultiple(t *testing.T) {
 	parsedMetrics, err = parser.Parse(
 		[]byte(
-			"\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2\n"+
+			"\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2\n" +
 				"test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 ",
 		),
-		"",
 	)
 	require.NoError(t, err)
 	testMetric1 = metric.New(
@@ -200,7 +199,6 @@ func TestParseMultiple(t *testing.T) {
 	parsedMetrics, err = parser.Parse(
 		[]byte("test.metric 1 1530939936 source=mysource\n\"test.metric\" 1.1234 1530939936 source=\"mysource\"\ntest.metric3 333 1530939936 tagit=valueit"),
-		"",
 	)
 	require.NoError(t, err)
 	testMetric1 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
@@ -229,31 +227,31 @@ func TestParseInvalid(t *testing.T) {
 	parser := &Parser{}
 	require.NoError(t, parser.Init())
-	_, err := parser.Parse([]byte("test.metric"), "")
+	_, err := parser.Parse([]byte("test.metric"))
 	require.Error(t, err)
-	_, err = parser.Parse([]byte("test.metric string"), "")
+	_, err = parser.Parse([]byte("test.metric string"))
 	require.Error(t, err)
-	_, err = parser.Parse([]byte("test.metric 1 string"), "")
+	_, err = parser.Parse([]byte("test.metric 1 string"))
 	require.Error(t, err)
-	_, err = parser.Parse([]byte("test.\u2206delta 1"), "")
+	_, err = parser.Parse([]byte("test.\u2206delta 1"))
 	require.Error(t, err)
-	_, err = parser.Parse([]byte("test.metric 1 1530939936 tag_no_pair"), "")
+	_, err = parser.Parse([]byte("test.metric 1 1530939936 tag_no_pair"))
 	require.Error(t, err)
-	_, err = parser.Parse([]byte("test.metric 1 1530939936 tag_broken_value=\""), "")
+	_, err = parser.Parse([]byte("test.metric 1 1530939936 tag_broken_value=\""))
 	require.Error(t, err)
-	_, err = parser.Parse([]byte("\"test.metric 1 1530939936"), "")
+	_, err = parser.Parse([]byte("\"test.metric 1 1530939936"))
 	require.Error(t, err)
-	_, err = parser.Parse([]byte("test.metric 1 1530939936 tag1=val\\\"ue1"), "")
+	_, err = parser.Parse([]byte("test.metric 1 1530939936 tag1=val\\\"ue1"))
 	require.Error(t, err)
-	_, err = parser.Parse([]byte("\"test.metric\" -1.12-34 1530939936 \"source\"=\"mysource\" tag2=value2"), "")
+	_, err = parser.Parse([]byte("\"test.metric\" -1.12-34 1530939936 \"source\"=\"mysource\" tag2=value2"))
 	require.Error(t, err)
 }
@@ -262,7 +260,7 @@ func TestParseDefaultTags(t *testing.T) {
 	require.NoError(t, parser.Init())
 	parser.SetDefaultTags(map[string]string{"myDefault": "value1", "another": "test2"})
-	parsedMetrics, err := parser.Parse([]byte("test.metric 1 1530939936"), "")
+	parsedMetrics, err := parser.Parse([]byte("test.metric 1 1530939936"))
 	require.NoError(t, err)
 	testMetric := metric.New(
 		"test.metric",
@@ -272,7 +270,7 @@ func TestParseDefaultTags(t *testing.T) {
 	)
 	require.EqualValues(t, parsedMetrics[0], testMetric)
-	parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource"), "")
+	parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource"))
 	require.NoError(t, err)
 	testMetric = metric.New(
 		"test.metric",
@@ -282,7 +280,7 @@ func TestParseDefaultTags(t *testing.T) {
 	)
 	require.EqualValues(t, parsedMetrics[0], testMetric)
-	parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 another=\"test3\""), "")
+	parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 another=\"test3\""))
 	require.NoError(t, err)
 	testMetric = metric.New(
 		"test.metric",
@@ -328,7 +326,7 @@ func TestBenchmarkData(t *testing.T) {
 		),
 	}
-	actual, err := plugin.Parse([]byte(benchmarkData), "")
+	actual, err := plugin.Parse([]byte(benchmarkData))
 	require.NoError(t, err)
 	testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics())
 }
@@ -339,6 +337,6 @@ func BenchmarkParsing(b *testing.B) {
 	for n := 0; n < b.N; n++ {
 		//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
-		plugin.Parse([]byte(benchmarkData), "")
+		plugin.Parse([]byte(benchmarkData))
 	}
 }
diff --git a/plugins/parsers/xpath/parser.go b/plugins/parsers/xpath/parser.go
index 86fbd8d23..cf2443922 100644
--- a/plugins/parsers/xpath/parser.go
+++ b/plugins/parsers/xpath/parser.go
@@ -196,7 +196,7 @@ func (p *Parser) Init() error {
 	return nil
 }
-func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) {
+func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
 	t := time.Now()
 	// Parse the XML
@@ -236,7 +236,7 @@ func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) {
 }
 func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
-	metrics, err := p.Parse([]byte(line), "")
+	metrics, err := p.Parse([]byte(line))
 	if err != nil {
 		return nil, err
 	}
diff --git a/plugins/parsers/xpath/parser_test.go b/plugins/parsers/xpath/parser_test.go
index 8d5e78199..af28561e0 100644
--- a/plugins/parsers/xpath/parser_test.go
+++ b/plugins/parsers/xpath/parser_test.go
@@ -997,7 +997,7 @@ func TestParseMultiNodes(t *testing.T) {
 			}
 			require.NoError(t, parser.Init())
-			actual, err := parser.Parse([]byte(tt.input), "")
+			actual, err := parser.Parse([]byte(tt.input))
 			require.NoError(t, err)
 			testutil.RequireMetricsEqual(t, tt.expected, actual)
@@ -1183,7 +1183,7 @@ func TestEmptySelection(t *testing.T) {
 			}
 			require.NoError(t, parser.Init())
-			_, err := parser.Parse([]byte(tt.input), "")
+			_, err := parser.Parse([]byte(tt.input))
 			require.Error(t, err)
 			require.Equal(t, "cannot parse with empty selection node", err.Error())
 		})
@@ -1257,7 +1257,7 @@ func TestEmptySelectionAllowed(t *testing.T) {
 			}
 			require.NoError(t, parser.Init())
-			_, err := parser.Parse([]byte(tt.input), "")
+			_, err := parser.Parse([]byte(tt.input))
 			require.NoError(t, err)
 		})
 	}
@@ -1365,7 +1365,7 @@ func TestTestCases(t *testing.T) {
 				Log: testutil.Logger{Name: "parsers.xml"},
 			}
 			require.NoError(t, parser.Init())
-			outputs, err := parser.Parse(content, "")
+			outputs, err := parser.Parse(content)
 			if len(expectedErrors) == 0 {
 				require.NoError(t, err)
 			}
@@ -1563,7 +1563,7 @@ func TestBenchmarkDataXML(t *testing.T) {
 	}
 	require.NoError(t, plugin.Init())
-	actual, err := plugin.Parse([]byte(benchmarkDataXML), "")
+	actual, err := plugin.Parse([]byte(benchmarkDataXML))
 	require.NoError(t, err)
 	testutil.RequireMetricsEqual(t, benchmarkExpectedMetrics, actual)
 }
@@ -1579,7 +1579,7 @@ func BenchmarkParsingXML(b *testing.B) {
 	for n := 0; n < b.N; n++ {
 		//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
-		plugin.Parse([]byte(benchmarkDataXML), "")
+		plugin.Parse([]byte(benchmarkDataXML))
 	}
 }
@@ -1626,7 +1626,7 @@ func TestBenchmarkDataJSON(t *testing.T) {
 	}
 	require.NoError(t, plugin.Init())
-	actual, err := plugin.Parse([]byte(benchmarkDataJSON), "")
+	actual, err := plugin.Parse([]byte(benchmarkDataJSON))
 	require.NoError(t, err)
 	testutil.RequireMetricsEqual(t, benchmarkExpectedMetrics, actual)
 }
@@ -1642,7 +1642,7 @@ func BenchmarkParsingJSON(b *testing.B) {
 	for n := 0; n < b.N; n++ {
 		//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
-		plugin.Parse([]byte(benchmarkDataJSON), "")
+		plugin.Parse([]byte(benchmarkDataJSON))
 	}
 }
@@ -1678,7 +1678,7 @@ func BenchmarkParsingProtobuf(b *testing.B) {
 	for n := 0; n < b.N; n++ {
 		//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
-		plugin.Parse(benchmarkData, "")
+		plugin.Parse(benchmarkData)
 	}
 }
@@ -1751,7 +1751,7 @@ func TestBenchmarkDataMsgPack(t *testing.T) {
 	actual := make([]telegraf.Metric, 0, 2)
 	for _, msg := range benchmarkDataMsgPack {
-		m, err := plugin.Parse(msg, "")
+		m, err := plugin.Parse(msg)
 		require.NoError(t, err)
 		actual = append(actual, m...)
 	}
@@ -1782,7 +1782,7 @@ func BenchmarkParsingMsgPack(b *testing.B) {
 	for n := 0; n < b.N; n++ {
 		//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
-		plugin.Parse(benchmarkDataMsgPack[n%2], "")
+		plugin.Parse(benchmarkDataMsgPack[n%2])
 	}
 }
@@ -1815,6 +1815,6 @@ func BenchmarkParsingCBOR(b *testing.B) {
 	for n := 0; n < b.N; n++ {
 		//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
-		plugin.Parse(benchmarkData, "")
+		plugin.Parse(benchmarkData)
 	}
 }
diff --git a/plugins/processors/dedup/dedup.go b/plugins/processors/dedup/dedup.go
index b4747d9fc..e77b14b5d 100644
--- a/plugins/processors/dedup/dedup.go
+++ b/plugins/processors/dedup/dedup.go
@@ -143,7 +143,7 @@ func (d *Dedup) SetState(state interface{}) error {
 	if !ok {
 		return fmt.Errorf("state has wrong type %T", state)
 	}
-	metrics, err := p.Parse(data, "")
+	metrics, err := p.Parse(data)
 	if err == nil {
 		d.Apply(metrics...)
 	}
diff --git a/plugins/processors/execd/execd.go b/plugins/processors/execd/execd.go
index 196ec8176..75b72978f 100644
--- a/plugins/processors/execd/execd.go
+++ b/plugins/processors/execd/execd.go
@@ -113,7 +113,7 @@ func (e *Execd) cmdReadOut(out io.Reader) {
 	scanner.Buffer(scanBuf, 262144)
 	for scanner.Scan() {
-		metrics, err := e.parser.Parse(scanner.Bytes(), "")
+		metrics, err := e.parser.Parse(scanner.Bytes())
 		if err != nil {
 			e.Log.Errorf("Parse error: %s", err)
 		}
diff --git a/plugins/processors/parser/parser.go b/plugins/processors/parser/parser.go
index 3725c58fd..c417a5a94 100644
--- a/plugins/processors/parser/parser.go
+++ b/plugins/processors/parser/parser.go
@@ -85,7 +85,7 @@ func (p *Parser) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
 			value = decoded[:n]
 		}
-		fromFieldMetric, err := p.parser.Parse(value, "")
+		fromFieldMetric, err := p.parser.Parse(value)
 		if err != nil {
 			p.Log.Errorf("could not parse field %s: %v", field.Key, err)
 			continue
@@ -178,7 +178,7 @@ func mergeWithTimestamp(base telegraf.Metric, metrics []telegraf.Metric) telegra
 }
 func (p *Parser) parseValue(value string) ([]telegraf.Metric, error) {
-	return p.parser.Parse([]byte(value), "")
+	return p.parser.Parse([]byte(value))
 }
 func toBytes(value interface{}) ([]byte, error) {
diff --git a/testutil/file.go b/testutil/file.go
index c461bbf06..086def3a0 100644
--- a/testutil/file.go
+++ b/testutil/file.go
@@ -103,7 +103,7 @@ func ParseMetricsFromFile(filename string, parser telegraf.Parser) ([]telegraf.M
 			continue
 		}
-		nonutc, err := parser.Parse(line, "")
+		nonutc, err := parser.Parse(line)
 		if err != nil {
 			return nil, fmt.Errorf("unable to parse metric in %q failed: %w", line, err)
 		}
diff --git a/testutil/plugin_input/plugin.go b/testutil/plugin_input/plugin.go
index c32eddd0a..5f000ce01 100644
--- a/testutil/plugin_input/plugin.go
+++ b/testutil/plugin_input/plugin.go
@@ -150,7 +150,7 @@ func (p *Plugin) Gather(acc telegraf.Accumulator) error {
 	if err != nil {
 		return err
 	}
-	metrics, err := p.Parser.Parse(data, "")
+	metrics, err := p.Parser.Parse(data)
 	if err != nil {
 		return err
 	}
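
A minimal sketch of the call pattern after this change, for reference only: the parser is constructed and initialized the same way the tests above do, and Parse receives just the raw payload. The wavefront package and the github.com/influxdata/telegraf import path below are assumptions for illustration, not part of this patch.

package main

import (
	"fmt"
	"log"

	// Assumed import path; adjust to the actual module layout.
	"github.com/influxdata/telegraf/plugins/parsers/wavefront"
)

func main() {
	// Construct and initialize the parser as in the wavefront tests above.
	parser := &wavefront.Parser{}
	if err := parser.Init(); err != nil {
		log.Fatal(err)
	}
	parser.SetDefaultTags(map[string]string{"myDefault": "value1"})

	// Parse now takes only the raw bytes; the former extra string argument is gone.
	metrics, err := parser.Parse([]byte("test.metric 1 1530939936 source=mysource"))
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range metrics {
		fmt.Println(m.Name(), m.Tags(), m.Fields())
	}
}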