Remove the extra string parameter from Parser.Parse and strip the "_Phasor" suffix from the Kafka topic before applying it as the topic tag

This commit is contained in:
zhuxu 2025-11-21 19:30:20 +08:00
parent 8d7371b5ba
commit f7d0087df9
82 changed files with 360 additions and 361 deletions

View File

@ -73,9 +73,9 @@ func (r *RunningParser) Init() error {
return nil return nil
} }
func (r *RunningParser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { func (r *RunningParser) Parse(buf []byte) ([]telegraf.Metric, error) {
start := time.Now() start := time.Now()
m, err := r.Parser.Parse(buf, extra) m, err := r.Parser.Parse(buf)
elapsed := time.Since(start) elapsed := time.Since(start)
r.ParseTime.Incr(elapsed.Nanoseconds()) r.ParseTime.Incr(elapsed.Nanoseconds())
r.MetricsParsed.Incr(int64(len(m))) r.MetricsParsed.Incr(int64(len(m)))

View File

@ -7,7 +7,7 @@ type Parser interface {
// and parses it into telegraf metrics // and parses it into telegraf metrics
// //
// Must be thread-safe. // Must be thread-safe.
Parse(buf []byte, extra string) ([]Metric, error) Parse(buf []byte) ([]Metric, error)
// ParseLine takes a single string metric // ParseLine takes a single string metric
// ie, "cpu.usage.idle 90" // ie, "cpu.usage.idle 90"

View File

@ -154,7 +154,7 @@ func TestListenData(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
onData := func(remote net.Addr, data []byte, _ time.Time) { onData := func(remote net.Addr, data []byte, _ time.Time) {
m, err := parser.Parse(data, "") m, err := parser.Parse(data)
require.NoError(t, err) require.NoError(t, err)
addr, _, err := net.SplitHostPort(remote.String()) addr, _, err := net.SplitHostPort(remote.String())
if err != nil { if err != nil {
@ -358,7 +358,7 @@ func TestListenConnection(t *testing.T) {
onConnection := func(remote net.Addr, reader io.ReadCloser) { onConnection := func(remote net.Addr, reader io.ReadCloser) {
data, err := io.ReadAll(reader) data, err := io.ReadAll(reader)
require.NoError(t, err) require.NoError(t, err)
m, err := parser.Parse(data, "") m, err := parser.Parse(data)
require.NoError(t, err) require.NoError(t, err)
addr, _, err := net.SplitHostPort(remote.String()) addr, _, err := net.SplitHostPort(remote.String())
if err != nil { if err != nil {
@ -451,7 +451,7 @@ func TestClosingConnections(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
onData := func(_ net.Addr, data []byte, _ time.Time) { onData := func(_ net.Addr, data []byte, _ time.Time) {
m, err := parser.Parse(data, "") m, err := parser.Parse(data)
require.NoError(t, err) require.NoError(t, err)
acc.AddMetrics(m) acc.AddMetrics(m)
} }
@ -667,7 +667,7 @@ func TestNoSplitter(t *testing.T) {
onConnection := func(remote net.Addr, reader io.ReadCloser) { onConnection := func(remote net.Addr, reader io.ReadCloser) {
data, err := io.ReadAll(reader) data, err := io.ReadAll(reader)
require.NoError(t, err) require.NoError(t, err)
m, err := parser.Parse(data, "") m, err := parser.Parse(data)
require.NoError(t, err) require.NoError(t, err)
addr, _, err := net.SplitHostPort(remote.String()) addr, _, err := net.SplitHostPort(remote.String())
if err != nil { if err != nil {

View File

@ -454,7 +454,7 @@ func (a *AMQPConsumer) onMessage(acc telegraf.TrackingAccumulator, d amqp.Delive
return err return err
} }
metrics, err := a.parser.Parse(body, "") metrics, err := a.parser.Parse(body)
if err != nil { if err != nil {
onError() onError()
return err return err

View File

@ -124,7 +124,7 @@ func TestIntegration(t *testing.T) {
} }
expected := make([]telegraf.Metric, 0, len(metrics)) expected := make([]telegraf.Metric, 0, len(metrics))
for _, x := range metrics { for _, x := range metrics {
m, err := parser.Parse([]byte(x), "") m, err := parser.Parse([]byte(x))
require.NoError(t, err) require.NoError(t, err)
expected = append(expected, m...) expected = append(expected, m...)
} }
@ -343,7 +343,7 @@ func TestStartupErrorBehaviorRetry(t *testing.T) {
} }
expected := make([]telegraf.Metric, 0, len(metrics)) expected := make([]telegraf.Metric, 0, len(metrics))
for _, x := range metrics { for _, x := range metrics {
m, err := parser.Parse([]byte(x), "") m, err := parser.Parse([]byte(x))
require.NoError(t, err) require.NoError(t, err)
expected = append(expected, m...) expected = append(expected, m...)
} }

View File

@ -525,7 +525,7 @@ func (h *consumerGroupHandler) handle(session sarama.ConsumerGroupSession, msg *
len(msg.Value), h.maxMessageLen) len(msg.Value), h.maxMessageLen)
} }
metrics, err := h.parser.Parse(msg.Value, msg.Topic) metrics, err := h.parser.Parse(msg.Value)
if err != nil { if err != nil {
session.MarkMessage(msg, "") session.MarkMessage(msg, "")
h.release() h.release()
@ -560,8 +560,9 @@ func (h *consumerGroupHandler) handle(session sarama.ConsumerGroupSession, msg *
// Add topic name as tag with topicTag name specified in the config // Add topic name as tag with topicTag name specified in the config
if len(h.topicTag) > 0 { if len(h.topicTag) > 0 {
device, _ := strings.CutSuffix(msg.Topic, "_Phasor")
for _, metric := range metrics { for _, metric := range metrics {
metric.AddTag(h.topicTag, msg.Topic) metric.AddTag(h.topicTag, device)
} }
} }

View File

@ -217,7 +217,7 @@ func (ps *PubSub) onMessage(ctx context.Context, msg message) error {
return fmt.Errorf("unable to decode base64 message: %w", err) return fmt.Errorf("unable to decode base64 message: %w", err)
} }
metrics, err := ps.parser.Parse(data, "") metrics, err := ps.parser.Parse(data)
if err != nil { if err != nil {
msg.Ack() msg.Ack()
return fmt.Errorf("unable to parse message: %w", err) return fmt.Errorf("unable to parse message: %w", err)

View File

@ -196,7 +196,7 @@ func (p *PubSubPush) serveWrite(res http.ResponseWriter, req *http.Request) {
return return
} }
metrics, err := p.Parse(sDec, "") metrics, err := p.Parse(sDec)
if err != nil { if err != nil {
p.Log.Debug(err.Error()) p.Log.Debug(err.Error())
res.WriteHeader(http.StatusBadRequest) res.WriteHeader(http.StatusBadRequest)

View File

@ -368,7 +368,7 @@ func (monitor *DirectoryMonitor) parseAtOnce(parser telegraf.Parser, reader io.R
} }
func (monitor *DirectoryMonitor) parseMetrics(parser telegraf.Parser, line []byte, fileName string) (metrics []telegraf.Metric, err error) { func (monitor *DirectoryMonitor) parseMetrics(parser telegraf.Parser, line []byte, fileName string) (metrics []telegraf.Metric, err error) {
metrics, err = parser.Parse(line, "") metrics, err = parser.Parse(line)
if err != nil { if err != nil {
if errors.Is(err, parsers.ErrEOF) { if errors.Is(err, parsers.ErrEOF) {
return nil, nil return nil, nil

View File

@ -271,7 +271,7 @@ func deepCopyMetrics(in []telegraf.Metric) []telegraf.Metric {
// CreateMetrics returns the Metrics from the Event. // CreateMetrics returns the Metrics from the Event.
func (e *EventHub) createMetrics(event *eventhub.Event) ([]telegraf.Metric, error) { func (e *EventHub) createMetrics(event *eventhub.Event) ([]telegraf.Metric, error) {
metrics, err := e.parser.Parse(event.Data, "") metrics, err := e.parser.Parse(event.Data)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -142,7 +142,7 @@ func (e *Exec) processCommand(acc telegraf.Accumulator, cmd string) error {
return fmt.Errorf("exec: %w for command %q: %s", runErr, cmd, string(errBuf)) return fmt.Errorf("exec: %w for command %q: %s", runErr, cmd, string(errBuf))
} }
metrics, err := e.parser.Parse(out, "") metrics, err := e.parser.Parse(out)
if err != nil { if err != nil {
return err return err
} }

View File

@ -108,7 +108,7 @@ func (e *Execd) cmdReadOut(out io.Reader) {
continue continue
} }
metrics, err := e.parser.Parse(data, "") metrics, err := e.parser.Parse(data)
if err != nil { if err != nil {
e.acc.AddError(fmt.Errorf("parse error: %w", err)) e.acc.AddError(fmt.Errorf("parse error: %w", err))
} }

View File

@ -109,7 +109,7 @@ func (f *File) readMetric(filename string) ([]telegraf.Metric, error) {
if err != nil { if err != nil {
return nil, fmt.Errorf("could not instantiate parser: %w", err) return nil, fmt.Errorf("could not instantiate parser: %w", err)
} }
metrics, err := parser.Parse(fileContents, "") metrics, err := parser.Parse(fileContents)
if err != nil { if err != nil {
return metrics, fmt.Errorf("could not parse %q: %w", filename, err) return metrics, fmt.Errorf("could not parse %q: %w", filename, err)
} }

View File

@ -201,7 +201,7 @@ func (f *Firehose) handleRequest(req *http.Request) (*message, error) {
// Parse the metrics // Parse the metrics
var metrics []telegraf.Metric var metrics []telegraf.Metric
for _, record := range records { for _, record := range records {
m, err := f.parser.Parse(record, "") m, err := f.parser.Parse(record)
if err != nil { if err != nil {
// respond with bad request status code to inform firehose about the failure // respond with bad request status code to inform firehose about the failure
msg.responseCode = http.StatusBadRequest msg.responseCode = http.StatusBadRequest

View File

@ -147,7 +147,7 @@ func (gcs *GCS) fetchedMetrics(r *storage.Reader) ([]telegraf.Metric, error) {
return nil, err return nil, err
} }
return gcs.parser.Parse(buf.Bytes(), "") return gcs.parser.Parse(buf.Bytes())
} }
func (gcs *GCS) reachedThreshlod(processed int) bool { func (gcs *GCS) reachedThreshlod(processed int) bool {

View File

@ -201,7 +201,7 @@ func (h *HTTP) gatherURL(acc telegraf.Accumulator, url string) error {
if err != nil { if err != nil {
return fmt.Errorf("instantiating parser failed: %w", err) return fmt.Errorf("instantiating parser failed: %w", err)
} }
metrics, err := parser.Parse(b, "") metrics, err := parser.Parse(b)
if err != nil { if err != nil {
return fmt.Errorf("parsing metrics failed: %w", err) return fmt.Errorf("parsing metrics failed: %w", err)
} }

View File

@ -278,7 +278,7 @@ func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request)
return return
} }
metrics, err := h.Parse(bytes, "") metrics, err := h.Parse(bytes)
if err != nil { if err != nil {
h.Log.Debugf("Parse error: %s", err.Error()) h.Log.Debugf("Parse error: %s", err.Error())
if err := badRequest(res); err != nil { if err := badRequest(res); err != nil {

View File

@ -215,7 +215,7 @@ func TestCloud1(t *testing.T) {
buf, err := os.ReadFile("./testdata/cloud1.influx") buf, err := os.ReadFile("./testdata/cloud1.influx")
require.NoError(t, err) require.NoError(t, err)
expected, err := parser.Parse(buf, "") expected, err := parser.Parse(buf)
require.NoError(t, err) require.NoError(t, err)
// Check the output // Check the output

View File

@ -317,7 +317,7 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc {
} }
} }
metrics, err = parser.Parse(bytes, "") metrics, err = parser.Parse(bytes)
} else { } else {
parser := influx.Parser{} parser := influx.Parser{}
err = parser.Init() err = parser.Init()
@ -332,7 +332,7 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc {
parser.SetTimePrecision(precision) parser.SetTimePrecision(precision)
} }
metrics, err = parser.Parse(bytes, "") metrics, err = parser.Parse(bytes)
} }
if !errors.Is(err, io.EOF) && err != nil { if !errors.Is(err, io.EOF) && err != nil {

View File

@ -525,7 +525,7 @@ func (h *consumerGroupHandler) handle(session sarama.ConsumerGroupSession, msg *
len(msg.Value), h.maxMessageLen) len(msg.Value), h.maxMessageLen)
} }
metrics, err := h.parser.Parse(msg.Value, "") metrics, err := h.parser.Parse(msg.Value)
if err != nil { if err != nil {
session.MarkMessage(msg, "") session.MarkMessage(msg, "")
h.release() h.release()

View File

@ -210,7 +210,7 @@ func (k *KinesisConsumer) onMessage(acc telegraf.TrackingAccumulator, shard stri
if err != nil { if err != nil {
return err return err
} }
metrics, err := k.parser.Parse(data, "") metrics, err := k.parser.Parse(data)
if err != nil { if err != nil {
return err return err
} }

View File

@ -250,7 +250,7 @@ func (m *MQTTConsumer) onMessage(_ mqtt.Client, msg mqtt.Message) {
m.payloadSize.Incr(int64(payloadBytes)) m.payloadSize.Incr(int64(payloadBytes))
m.messagesRecv.Incr(1) m.messagesRecv.Incr(1)
metrics, err := m.parser.Parse(msg.Payload(), "") metrics, err := m.parser.Parse(msg.Payload())
if err != nil || len(metrics) == 0 { if err != nil || len(metrics) == 0 {
if len(metrics) == 0 { if len(metrics) == 0 {
once.Do(func() { once.Do(func() {

View File

@ -64,7 +64,7 @@ type fakeParser struct{}
// fakeParser satisfies telegraf.Parser // fakeParser satisfies telegraf.Parser
var _ telegraf.Parser = &fakeParser{} var _ telegraf.Parser = &fakeParser{}
func (*fakeParser) Parse([]byte, string) ([]telegraf.Metric, error) { func (*fakeParser) Parse([]byte) ([]telegraf.Metric, error) {
panic("not implemented") panic("not implemented")
} }
@ -716,7 +716,7 @@ func TestIntegration(t *testing.T) {
} }
expected := make([]telegraf.Metric, 0, len(metrics)) expected := make([]telegraf.Metric, 0, len(metrics))
for _, x := range metrics { for _, x := range metrics {
metrics, err := parser.Parse([]byte(x), "") metrics, err := parser.Parse([]byte(x))
for i := range metrics { for i := range metrics {
metrics[i].AddTag("topic", topic) metrics[i].AddTag("topic", topic)
} }
@ -949,7 +949,7 @@ func TestStartupErrorBehaviorRetryIntegration(t *testing.T) {
} }
expected := make([]telegraf.Metric, 0, len(metrics)) expected := make([]telegraf.Metric, 0, len(metrics))
for _, x := range metrics { for _, x := range metrics {
metrics, err := parser.Parse([]byte(x), "") metrics, err := parser.Parse([]byte(x))
for i := range metrics { for i := range metrics {
metrics[i].AddTag("topic", topic) metrics[i].AddTag("topic", topic)
} }

View File

@ -228,7 +228,7 @@ func (n *NatsConsumer) receiver(ctx context.Context) {
<-sem <-sem
<-sem <-sem
case msg := <-n.in: case msg := <-n.in:
metrics, err := n.parser.Parse(msg.Data, "") metrics, err := n.parser.Parse(msg.Data)
if err != nil { if err != nil {
n.Log.Errorf("Subject: %s, error: %s", msg.Subject, err.Error()) n.Log.Errorf("Subject: %s, error: %s", msg.Subject, err.Error())
<-sem <-sem

View File

@ -89,7 +89,7 @@ func (n *NSQConsumer) Start(ac telegraf.Accumulator) error {
} }
n.consumer.SetLogger(&logger{log: n.Log}, nsq.LogLevelInfo) n.consumer.SetLogger(&logger{log: n.Log}, nsq.LogLevelInfo)
n.consumer.AddHandler(nsq.HandlerFunc(func(message *nsq.Message) error { n.consumer.AddHandler(nsq.HandlerFunc(func(message *nsq.Message) error {
metrics, err := n.parser.Parse(message.Body, "") metrics, err := n.parser.Parse(message.Body)
if err != nil { if err != nil {
acc.AddError(err) acc.AddError(err)
// Remove the message from the queue // Remove the message from the queue

View File

@ -561,7 +561,7 @@ func (p *Prometheus) gatherURL(u urlAndAddress, acc telegraf.Accumulator) (map[s
Log: p.Log, Log: p.Log,
} }
} }
metrics, err := metricParser.Parse(body, "") metrics, err := metricParser.Parse(body)
if err != nil { if err != nil {
return requestFields, tags, fmt.Errorf("error reading metrics for %q: %w", u.url, err) return requestFields, tags, fmt.Errorf("error reading metrics for %q: %w", u.url, err)
} }

View File

@ -51,7 +51,7 @@ func (sl *SocketListener) Init() error {
func (sl *SocketListener) Start(acc telegraf.Accumulator) error { func (sl *SocketListener) Start(acc telegraf.Accumulator) error {
// Create the callbacks for parsing the data and recording issues // Create the callbacks for parsing the data and recording issues
onData := func(_ net.Addr, data []byte, receiveTime time.Time) { onData := func(_ net.Addr, data []byte, receiveTime time.Time) {
metrics, err := sl.parser.Parse(data, "") metrics, err := sl.parser.Parse(data)
if err != nil { if err != nil {
acc.AddError(err) acc.AddError(err)

View File

@ -313,7 +313,7 @@ func (t *Tail) tailNewFiles() error {
} }
func parseLine(parser telegraf.Parser, line string) ([]telegraf.Metric, error) { func parseLine(parser telegraf.Parser, line string) ([]telegraf.Metric, error) {
m, err := parser.Parse([]byte(line), "") m, err := parser.Parse([]byte(line))
if err != nil { if err != nil {
if errors.Is(err, parsers.ErrEOF) { if errors.Is(err, parsers.ErrEOF) {
return nil, nil return nil, nil

View File

@ -222,7 +222,7 @@ func verifyMetricPublished(t *testing.T, m telegraf.Metric, published map[string
data = v data = v
} }
parsed, err := p.Parse(data, "") parsed, err := p.Parse(data)
if err != nil { if err != nil {
t.Fatalf("could not parse influxdb metric from published message: %s", string(data)) t.Fatalf("could not parse influxdb metric from published message: %s", string(data))
} }

View File

@ -207,7 +207,7 @@ func (t *stubTopic) parseIDs(msg *pubsub.Message) []string {
} }
d = strData d = strData
} }
metrics, err := p.Parse(d, "") metrics, err := p.Parse(d)
if err != nil { if err != nil {
t.Fatalf("unexpected parsing error: %v", err) t.Fatalf("unexpected parsing error: %v", err)
} }

View File

@ -802,7 +802,7 @@ func TestIntegrationMQTTLayoutHomieV4(t *testing.T) {
func createMetricMessageHandler(acc telegraf.Accumulator, parser telegraf.Parser) paho.MessageHandler { func createMetricMessageHandler(acc telegraf.Accumulator, parser telegraf.Parser) paho.MessageHandler {
return func(_ paho.Client, msg paho.Message) { return func(_ paho.Client, msg paho.Message) {
metrics, err := parser.Parse(msg.Payload(), "") metrics, err := parser.Parse(msg.Payload())
if err != nil { if err != nil {
acc.AddError(err) acc.AddError(err)
return return

View File

@ -82,7 +82,7 @@ func (p *Parser) Init() error {
return nil return nil
} }
func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
var schema string var schema string
var codec *goavro.Codec var codec *goavro.Codec
var err error var err error
@ -145,7 +145,7 @@ func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) {
} }
func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
metrics, err := p.Parse([]byte(line), "") metrics, err := p.Parse([]byte(line))
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -116,7 +116,7 @@ func BenchmarkParsing(b *testing.B) {
b.ResetTimer() b.ResetTimer()
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse(benchmarkData, "") plugin.Parse(benchmarkData)
} }
} }
@ -150,7 +150,7 @@ func TestBenchmarkDataBinary(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
// Do the actual testing // Do the actual testing
actual, err := plugin.Parse(benchmarkData, "") actual, err := plugin.Parse(benchmarkData)
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics())
} }
@ -178,6 +178,6 @@ func BenchmarkParsingBinary(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse(benchmarkData, "") plugin.Parse(benchmarkData)
} }
} }

View File

@ -71,7 +71,7 @@ func (p *Parser) Init() error {
return nil return nil
} }
func (p *Parser) Parse(data []byte, extra string) ([]telegraf.Metric, error) { func (p *Parser) Parse(data []byte) ([]telegraf.Metric, error) {
t := time.Now() t := time.Now()
// If the data is encoded in HEX, we need to decode it first // If the data is encoded in HEX, we need to decode it first
@ -122,7 +122,7 @@ func (p *Parser) Parse(data []byte, extra string) ([]telegraf.Metric, error) {
} }
func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
metrics, err := p.Parse([]byte(line), "") metrics, err := p.Parse([]byte(line))
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -196,7 +196,7 @@ func TestFilterMatchInvalid(t *testing.T) {
metricName: "binary", metricName: "binary",
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
_, err := parser.Parse(testdata, "") _, err := parser.Parse(testdata)
require.EqualError(t, err, tt.expected) require.EqualError(t, err, tt.expected)
}) })
} }
@ -221,7 +221,7 @@ func TestFilterNoMatch(t *testing.T) {
data, err := generateBinary(testdata, internal.HostEndianness) data, err := generateBinary(testdata, internal.HostEndianness)
require.NoError(t, err) require.NoError(t, err)
_, err = parser.Parse(data, "") _, err = parser.Parse(data)
require.EqualError(t, err, "no matching configuration") require.EqualError(t, err, "no matching configuration")
}) })
@ -242,7 +242,7 @@ func TestFilterNoMatch(t *testing.T) {
data, err := generateBinary(testdata, internal.HostEndianness) data, err := generateBinary(testdata, internal.HostEndianness)
require.NoError(t, err) require.NoError(t, err)
metrics, err := parser.Parse(data, "") metrics, err := parser.Parse(data)
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, metrics) require.Empty(t, metrics)
}) })
@ -320,7 +320,7 @@ func TestFilterNone(t *testing.T) {
data, err := generateBinary(tt.data, order) data, err := generateBinary(tt.data, order)
require.NoError(t, err) require.NoError(t, err)
metrics, err := parser.Parse(data, "") metrics, err := parser.Parse(data)
require.NoError(t, err) require.NoError(t, err)
require.NotEmpty(t, metrics) require.NotEmpty(t, metrics)
}) })
@ -392,7 +392,7 @@ func TestFilterLength(t *testing.T) {
data, err := generateBinary(tt.data, internal.HostEndianness) data, err := generateBinary(tt.data, internal.HostEndianness)
require.NoError(t, err) require.NoError(t, err)
metrics, err := parser.Parse(data, "") metrics, err := parser.Parse(data)
require.NoError(t, err) require.NoError(t, err)
if tt.expected { if tt.expected {
require.NotEmpty(t, metrics) require.NotEmpty(t, metrics)
@ -558,7 +558,7 @@ func TestFilterContent(t *testing.T) {
var metrics []telegraf.Metric var metrics []telegraf.Metric
for _, data := range testdata { for _, data := range testdata {
m, err := parser.Parse(data, "") m, err := parser.Parse(data)
require.NoError(t, err) require.NoError(t, err)
metrics = append(metrics, m...) metrics = append(metrics, m...)
} }
@ -865,7 +865,7 @@ func TestParseInvalid(t *testing.T) {
data, err := generateBinary(tt.data, order) data, err := generateBinary(tt.data, order)
require.NoError(t, err) require.NoError(t, err)
_, err = parser.Parse(data, "") _, err = parser.Parse(data)
require.EqualError(t, err, tt.expected) require.EqualError(t, err, tt.expected)
}) })
} }
@ -1390,7 +1390,7 @@ func TestParse(t *testing.T) {
data, err := generateBinary(tt.data, order) data, err := generateBinary(tt.data, order)
require.NoError(t, err) require.NoError(t, err)
metrics, err := parser.Parse(data, "") metrics, err := parser.Parse(data)
require.NoError(t, err) require.NoError(t, err)
var options []cmp.Option var options []cmp.Option
@ -1479,7 +1479,7 @@ func TestHexEncoding(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
encoded := hex.EncodeToString(data) encoded := hex.EncodeToString(data)
metrics, err := parser.Parse([]byte(encoded), "") metrics, err := parser.Parse([]byte(encoded))
require.NoError(t, err) require.NoError(t, err)
require.NotEmpty(t, metrics) require.NotEmpty(t, metrics)
} }
@ -1564,7 +1564,7 @@ func TestBenchmarkData(t *testing.T) {
actual := make([]telegraf.Metric, 0, 2) actual := make([]telegraf.Metric, 0, 2)
for _, buf := range benchmarkData { for _, buf := range benchmarkData {
m, err := plugin.Parse(buf, "") m, err := plugin.Parse(buf)
require.NoError(t, err) require.NoError(t, err)
actual = append(actual, m...) actual = append(actual, m...)
} }
@ -1609,6 +1609,6 @@ func BenchmarkParsing(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse(benchmarkData[n%2], "") plugin.Parse(benchmarkData[n%2])
} }
} }

View File

@ -65,7 +65,7 @@ func (p *Parser) Init() error {
return nil return nil
} }
func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
valueLists, err := network.Parse(buf, p.popts) valueLists, err := network.Parse(buf, p.popts)
if err != nil { if err != nil {
return nil, fmt.Errorf("collectd parser error: %w", err) return nil, fmt.Errorf("collectd parser error: %w", err)
@ -91,7 +91,7 @@ func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) {
} }
func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
metrics, err := p.Parse([]byte(line), "") metrics, err := p.Parse([]byte(line))
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -131,7 +131,7 @@ func TestParse(t *testing.T) {
parser := &Parser{} parser := &Parser{}
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
metrics, err := parser.Parse(bytes, "") metrics, err := parser.Parse(bytes)
require.NoError(t, err) require.NoError(t, err)
assertEqualMetrics(t, tc.expected, metrics) assertEqualMetrics(t, tc.expected, metrics)
@ -146,7 +146,7 @@ func TestParseMultiValueSplit(t *testing.T) {
parser := &Parser{ParseMultiValue: "split"} parser := &Parser{ParseMultiValue: "split"}
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
metrics, err := parser.Parse(bytes, "") metrics, err := parser.Parse(bytes)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 2) require.Len(t, metrics, 2)
@ -160,7 +160,7 @@ func TestParseMultiValueJoin(t *testing.T) {
parser := &Parser{ParseMultiValue: "join"} parser := &Parser{ParseMultiValue: "join"}
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
metrics, err := parser.Parse(bytes, "") metrics, err := parser.Parse(bytes)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 1) require.Len(t, metrics, 1)
@ -178,7 +178,7 @@ func TestParse_DefaultTags(t *testing.T) {
"foo": "bar", "foo": "bar",
}) })
require.NoError(t, err) require.NoError(t, err)
metrics, err := parser.Parse(bytes, "") metrics, err := parser.Parse(bytes)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, "bar", metrics[0].Tags()["foo"]) require.Equal(t, "bar", metrics[0].Tags()["foo"])
@ -198,7 +198,7 @@ func TestParse_SignSecurityLevel(t *testing.T) {
bytes, err := buf.Bytes() bytes, err := buf.Bytes()
require.NoError(t, err) require.NoError(t, err)
metrics, err := parser.Parse(bytes, "") metrics, err := parser.Parse(bytes)
require.NoError(t, err) require.NoError(t, err)
assertEqualMetrics(t, singleMetric.expected, metrics) assertEqualMetrics(t, singleMetric.expected, metrics)
@ -209,7 +209,7 @@ func TestParse_SignSecurityLevel(t *testing.T) {
bytes, err = buf.Bytes() bytes, err = buf.Bytes()
require.NoError(t, err) require.NoError(t, err)
metrics, err = parser.Parse(bytes, "") metrics, err = parser.Parse(bytes)
require.NoError(t, err) require.NoError(t, err)
assertEqualMetrics(t, singleMetric.expected, metrics) assertEqualMetrics(t, singleMetric.expected, metrics)
@ -219,7 +219,7 @@ func TestParse_SignSecurityLevel(t *testing.T) {
bytes, err = buf.Bytes() bytes, err = buf.Bytes()
require.NoError(t, err) require.NoError(t, err)
metrics, err = parser.Parse(bytes, "") metrics, err = parser.Parse(bytes)
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, metrics) require.Empty(t, metrics)
@ -230,7 +230,7 @@ func TestParse_SignSecurityLevel(t *testing.T) {
bytes, err = buf.Bytes() bytes, err = buf.Bytes()
require.NoError(t, err) require.NoError(t, err)
_, err = parser.Parse(bytes, "") _, err = parser.Parse(bytes)
require.Error(t, err) require.Error(t, err)
} }
@ -248,7 +248,7 @@ func TestParse_EncryptSecurityLevel(t *testing.T) {
bytes, err := buf.Bytes() bytes, err := buf.Bytes()
require.NoError(t, err) require.NoError(t, err)
metrics, err := parser.Parse(bytes, "") metrics, err := parser.Parse(bytes)
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, metrics) require.Empty(t, metrics)
@ -259,7 +259,7 @@ func TestParse_EncryptSecurityLevel(t *testing.T) {
bytes, err = buf.Bytes() bytes, err = buf.Bytes()
require.NoError(t, err) require.NoError(t, err)
metrics, err = parser.Parse(bytes, "") metrics, err = parser.Parse(bytes)
require.NoError(t, err) require.NoError(t, err)
assertEqualMetrics(t, singleMetric.expected, metrics) assertEqualMetrics(t, singleMetric.expected, metrics)
@ -269,7 +269,7 @@ func TestParse_EncryptSecurityLevel(t *testing.T) {
bytes, err = buf.Bytes() bytes, err = buf.Bytes()
require.NoError(t, err) require.NoError(t, err)
metrics, err = parser.Parse(bytes, "") metrics, err = parser.Parse(bytes)
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, metrics) require.Empty(t, metrics)
@ -280,7 +280,7 @@ func TestParse_EncryptSecurityLevel(t *testing.T) {
bytes, err = buf.Bytes() bytes, err = buf.Bytes()
require.NoError(t, err) require.NoError(t, err)
_, err = parser.Parse(bytes, "") _, err = parser.Parse(bytes)
require.Error(t, err) require.Error(t, err)
} }
@ -387,7 +387,7 @@ func TestBenchmarkData(t *testing.T) {
parser := &Parser{} parser := &Parser{}
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
actual, err := parser.Parse(bytes, "") actual, err := parser.Parse(bytes)
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics())
@ -405,6 +405,6 @@ func BenchmarkParsing(b *testing.B) {
b.ResetTimer() b.ResetTimer()
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
parser.Parse(bytes, "") parser.Parse(bytes)
} }
} }

View File

@ -217,7 +217,7 @@ func validDelim(r rune) bool {
return r != 0 && r != '"' && r != '\r' && r != '\n' && utf8.ValidRune(r) && r != utf8.RuneError return r != 0 && r != '"' && r != '\r' && r != '\n' && utf8.ValidRune(r) && r != utf8.RuneError
} }
func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
// Reset the parser according to the specified mode // Reset the parser according to the specified mode
if p.ResetMode == "always" { if p.ResetMode == "always" {
p.Reset() p.Reset()

View File

@ -43,7 +43,7 @@ func TestHeaderConcatenationCSV(t *testing.T) {
1,2,3 1,2,3
3.4,70,test_name` 3.4,70,test_name`
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, "test_name", metrics[0].Name()) require.Equal(t, "test_name", metrics[0].Name())
} }
@ -63,7 +63,7 @@ func TestHeaderOverride(t *testing.T) {
"first": 3.4, "first": 3.4,
"second": int64(70), "second": int64(70),
} }
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, "test_name", metrics[0].Name()) require.Equal(t, "test_name", metrics[0].Name())
require.Equal(t, expectedFields, metrics[0].Fields()) require.Equal(t, expectedFields, metrics[0].Fields())
@ -78,7 +78,7 @@ func TestHeaderOverride(t *testing.T) {
} }
err = p.Init() err = p.Init()
require.NoError(t, err) require.NoError(t, err)
metrics, err = p.Parse([]byte(testCSVRows[0]), "") metrics, err = p.Parse([]byte(testCSVRows[0]))
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, metrics) require.Empty(t, metrics)
m, err := p.ParseLine(testCSVRows[1]) m, err := p.ParseLine(testCSVRows[1])
@ -102,7 +102,7 @@ func TestTimestamp(t *testing.T) {
testCSV := `line1,line2,line3 testCSV := `line1,line2,line3
23/05/09 04:05:06 PM,70,test_name 23/05/09 04:05:06 PM,70,test_name
07/11/09 04:05:06 PM,80,test_name2` 07/11/09 04:05:06 PM,80,test_name2`
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, int64(1243094706000000000), metrics[0].Time().UnixNano()) require.Equal(t, int64(1243094706000000000), metrics[0].Time().UnixNano())
@ -124,7 +124,7 @@ func TestTimestampYYYYMMDDHHmm(t *testing.T) {
testCSV := `line1,line2,line3 testCSV := `line1,line2,line3
200905231605,70,test_name 200905231605,70,test_name
200907111605,80,test_name2` 200907111605,80,test_name2`
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, int64(1243094700000000000), metrics[0].Time().UnixNano()) require.Equal(t, int64(1243094700000000000), metrics[0].Time().UnixNano())
@ -143,7 +143,7 @@ func TestTimestampError(t *testing.T) {
testCSV := `line1,line2,line3 testCSV := `line1,line2,line3
23/05/09 04:05:06 PM,70,test_name 23/05/09 04:05:06 PM,70,test_name
07/11/09 04:05:06 PM,80,test_name2` 07/11/09 04:05:06 PM,80,test_name2`
_, err = p.Parse([]byte(testCSV), "") _, err = p.Parse([]byte(testCSV))
require.Equal(t, errors.New("timestamp format must be specified"), err) require.Equal(t, errors.New("timestamp format must be specified"), err)
} }
@ -161,7 +161,7 @@ func TestTimestampUnixFormat(t *testing.T) {
testCSV := `line1,line2,line3 testCSV := `line1,line2,line3
1243094706,70,test_name 1243094706,70,test_name
1257609906,80,test_name2` 1257609906,80,test_name2`
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, int64(1243094706000000000), metrics[0].Time().UnixNano()) require.Equal(t, int64(1243094706000000000), metrics[0].Time().UnixNano())
require.Equal(t, int64(1257609906000000000), metrics[1].Time().UnixNano()) require.Equal(t, int64(1257609906000000000), metrics[1].Time().UnixNano())
@ -181,7 +181,7 @@ func TestTimestampUnixMSFormat(t *testing.T) {
testCSV := `line1,line2,line3 testCSV := `line1,line2,line3
1243094706123,70,test_name 1243094706123,70,test_name
1257609906123,80,test_name2` 1257609906123,80,test_name2`
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, int64(1243094706123000000), metrics[0].Time().UnixNano()) require.Equal(t, int64(1243094706123000000), metrics[0].Time().UnixNano())
require.Equal(t, int64(1257609906123000000), metrics[1].Time().UnixNano()) require.Equal(t, int64(1257609906123000000), metrics[1].Time().UnixNano())
@ -199,7 +199,7 @@ func TestQuotedCharacter(t *testing.T) {
testCSV := `line1,line2,line3 testCSV := `line1,line2,line3
"3,4",70,test_name` "3,4",70,test_name`
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, "3,4", metrics[0].Fields()["first"]) require.Equal(t, "3,4", metrics[0].Fields()["first"])
} }
@ -217,7 +217,7 @@ func TestDelimiter(t *testing.T) {
testCSV := `line1%line2%line3 testCSV := `line1%line2%line3
3,4%70%test_name` 3,4%70%test_name`
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, "3,4", metrics[0].Fields()["first"]) require.Equal(t, "3,4", metrics[0].Fields()["first"])
} }
@ -233,7 +233,7 @@ func TestNullDelimiter(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
testCSV := strings.Join([]string{"3.4", "70", "test_name"}, "\u0000") testCSV := strings.Join([]string{"3.4", "70", "test_name"}, "\u0000")
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
require.InDelta(t, float64(3.4), metrics[0].Fields()["first"], testutil.DefaultDelta) require.InDelta(t, float64(3.4), metrics[0].Fields()["first"], testutil.DefaultDelta)
require.Equal(t, int64(70), metrics[0].Fields()["second"]) require.Equal(t, int64(70), metrics[0].Fields()["second"])
@ -260,7 +260,7 @@ func TestValueConversion(t *testing.T) {
"fourth": "hello", "fourth": "hello",
} }
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
expectedMetric := metric.New("test_value", expectedTags, expectedFields, time.Unix(0, 0)) expectedMetric := metric.New("test_value", expectedTags, expectedFields, time.Unix(0, 0))
@ -272,7 +272,7 @@ func TestValueConversion(t *testing.T) {
// Test explicit type conversion. // Test explicit type conversion.
p.ColumnTypes = []string{"float", "int", "bool", "string"} p.ColumnTypes = []string{"float", "int", "bool", "string"}
metrics, err = p.Parse([]byte(testCSV), "") metrics, err = p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
returnedMetric = metric.New(metrics[0].Name(), metrics[0].Tags(), metrics[0].Fields(), time.Unix(0, 0)) returnedMetric = metric.New(metrics[0].Name(), metrics[0].Tags(), metrics[0].Fields(), time.Unix(0, 0))
@ -301,7 +301,7 @@ func TestSkipComment(t *testing.T) {
"fourth": "name_this", "fourth": "name_this",
} }
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, expectedFields, metrics[0].Fields()) require.Equal(t, expectedFields, metrics[0].Fields())
} }
@ -325,7 +325,7 @@ func TestTrimSpace(t *testing.T) {
"fourth": "hello", "fourth": "hello",
} }
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, expectedFields, metrics[0].Fields()) require.Equal(t, expectedFields, metrics[0].Fields())
@ -340,7 +340,7 @@ func TestTrimSpace(t *testing.T) {
" 1 , 2 ,3\n" + " 1 , 2 ,3\n" +
" test space , 80 ,test_name" " test space , 80 ,test_name"
metrics, err = p.Parse([]byte(testCSV), "") metrics, err = p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, map[string]interface{}{"col1": "test space", "col2": int64(80), "col3": "test_name"}, metrics[0].Fields()) require.Equal(t, map[string]interface{}{"col1": "test space", "col2": int64(80), "col3": "test_name"}, metrics[0].Fields())
} }
@ -367,7 +367,7 @@ abcdefgh 0 2 false
"fourth": true, "fourth": true,
} }
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, expectedFields, metrics[1].Fields()) require.Equal(t, expectedFields, metrics[1].Fields())
} }
@ -393,7 +393,7 @@ hello,80,test_name2`
expectedTags := map[string]string{ expectedTags := map[string]string{
"line1": "hello", "line1": "hello",
} }
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, "test_name2", metrics[0].Name()) require.Equal(t, "test_name2", metrics[0].Name())
require.Equal(t, expectedFields, metrics[0].Fields()) require.Equal(t, expectedFields, metrics[0].Fields())
@ -410,7 +410,7 @@ hello,80,test_name2`
require.NoError(t, err) require.NoError(t, err)
testCSVRows := []string{"garbage nonsense\r\n", "line1,line2,line3\r\n", "hello,80,test_name2\r\n"} testCSVRows := []string{"garbage nonsense\r\n", "line1,line2,line3\r\n", "hello,80,test_name2\r\n"}
metrics, err = p.Parse([]byte(testCSVRows[0]), "") metrics, err = p.Parse([]byte(testCSVRows[0]))
require.ErrorIs(t, err, parsers.ErrEOF) require.ErrorIs(t, err, parsers.ErrEOF)
require.Nil(t, metrics) require.Nil(t, metrics)
m, err := p.ParseLine(testCSVRows[1]) m, err := p.ParseLine(testCSVRows[1])
@ -437,7 +437,7 @@ func TestSkipColumns(t *testing.T) {
"line1": int64(80), "line1": int64(80),
"line2": "test_name", "line2": "test_name",
} }
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, expectedFields, metrics[0].Fields()) require.Equal(t, expectedFields, metrics[0].Fields())
} }
@ -456,7 +456,7 @@ func TestSkipColumnsWithHeader(t *testing.T) {
trash,80,test_name` trash,80,test_name`
// we should expect an error if we try to get col1 // we should expect an error if we try to get col1
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, map[string]interface{}{"col2": int64(80), "col3": "test_name"}, metrics[0].Fields()) require.Equal(t, map[string]interface{}{"col2": int64(80), "col3": "test_name"}, metrics[0].Fields())
} }
@ -471,7 +471,7 @@ func TestMultiHeader(t *testing.T) {
1,2 1,2
80,test_name` 80,test_name`
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, map[string]interface{}{"col1": int64(80), "col2": "test_name"}, metrics[0].Fields()) require.Equal(t, map[string]interface{}{"col1": int64(80), "col2": "test_name"}, metrics[0].Fields())
@ -484,7 +484,7 @@ func TestMultiHeader(t *testing.T) {
err = p.Init() err = p.Init()
require.NoError(t, err) require.NoError(t, err)
metrics, err = p.Parse([]byte(testCSVRows[0]), "") metrics, err = p.Parse([]byte(testCSVRows[0]))
require.ErrorIs(t, err, parsers.ErrEOF) require.ErrorIs(t, err, parsers.ErrEOF)
require.Nil(t, metrics) require.Nil(t, metrics)
m, err := p.ParseLine(testCSVRows[1]) m, err := p.ParseLine(testCSVRows[1])
@ -507,7 +507,7 @@ func TestParseStream(t *testing.T) {
csvHeader := "a,b,c" csvHeader := "a,b,c"
csvBody := "1,2,3" csvBody := "1,2,3"
metrics, err := p.Parse([]byte(csvHeader), "") metrics, err := p.Parse([]byte(csvHeader))
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, metrics) require.Empty(t, metrics)
m, err := p.ParseLine(csvBody) m, err := p.ParseLine(csvBody)
@ -537,7 +537,7 @@ func TestParseLineMultiMetricErrorMessage(t *testing.T) {
csvOneRow := "1,2,3" csvOneRow := "1,2,3"
csvTwoRows := "4,5,6\n7,8,9" csvTwoRows := "4,5,6\n7,8,9"
metrics, err := p.Parse([]byte(csvHeader), "") metrics, err := p.Parse([]byte(csvHeader))
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, metrics) require.Empty(t, metrics)
m, err := p.ParseLine(csvOneRow) m, err := p.ParseLine(csvOneRow)
@ -556,7 +556,7 @@ func TestParseLineMultiMetricErrorMessage(t *testing.T) {
m, err = p.ParseLine(csvTwoRows) m, err = p.ParseLine(csvTwoRows)
require.Errorf(t, err, "expected 1 metric found 2") require.Errorf(t, err, "expected 1 metric found 2")
require.Nil(t, m) require.Nil(t, m)
metrics, err = p.Parse([]byte(csvTwoRows), "") metrics, err = p.Parse([]byte(csvTwoRows))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 2) require.Len(t, metrics, 2)
} }
@ -585,7 +585,7 @@ func TestTimestampUnixFloatPrecision(t *testing.T) {
), ),
} }
metrics, err := p.Parse([]byte(data), "") metrics, err := p.Parse([]byte(data))
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, expected, metrics) testutil.RequireMetricsEqual(t, expected, metrics)
} }
@ -617,7 +617,7 @@ func TestSkipMeasurementColumn(t *testing.T) {
), ),
} }
metrics, err := p.Parse([]byte(data), "") metrics, err := p.Parse([]byte(data))
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, expected, metrics) testutil.RequireMetricsEqual(t, expected, metrics)
} }
@ -649,7 +649,7 @@ func TestSkipTimestampColumn(t *testing.T) {
), ),
} }
metrics, err := p.Parse([]byte(data), "") metrics, err := p.Parse([]byte(data))
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, expected, metrics) testutil.RequireMetricsEqual(t, expected, metrics)
} }
@ -670,7 +670,7 @@ func TestTimestampTimezone(t *testing.T) {
testCSV := `line1,line2,line3 testCSV := `line1,line2,line3
23/05/09 11:05:06 PM,70,test_name 23/05/09 11:05:06 PM,70,test_name
07/11/09 11:05:06 PM,80,test_name2` 07/11/09 11:05:06 PM,80,test_name2`
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, int64(1243094706000000000), metrics[0].Time().UnixNano()) require.Equal(t, int64(1243094706000000000), metrics[0].Time().UnixNano())
@ -689,7 +689,7 @@ func TestEmptyMeasurementName(t *testing.T) {
testCSV := `,b testCSV := `,b
1,2` 1,2`
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
expected := []telegraf.Metric{ expected := []telegraf.Metric{
@ -716,7 +716,7 @@ func TestNumericMeasurementName(t *testing.T) {
testCSV := `a,b testCSV := `a,b
1,2` 1,2`
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
expected := []telegraf.Metric{ expected := []telegraf.Metric{
@ -742,7 +742,7 @@ func TestStaticMeasurementName(t *testing.T) {
testCSV := `a,b testCSV := `a,b
1,2` 1,2`
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
expected := []telegraf.Metric{ expected := []telegraf.Metric{
@ -770,7 +770,7 @@ func TestSkipEmptyStringValue(t *testing.T) {
testCSV := `a,b testCSV := `a,b
1,""` 1,""`
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
expected := []telegraf.Metric{ expected := []telegraf.Metric{
@ -797,7 +797,7 @@ func TestSkipSpecifiedStringValue(t *testing.T) {
testCSV := `a,b testCSV := `a,b
1,MM` 1,MM`
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
expected := []telegraf.Metric{ expected := []telegraf.Metric{
@ -839,7 +839,7 @@ corrupted_line
"b": int64(4), "b": int64(4),
} }
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, expectedFields0, metrics[0].Fields()) require.Equal(t, expectedFields0, metrics[0].Fields())
require.Equal(t, expectedFields1, metrics[1].Fields()) require.Equal(t, expectedFields1, metrics[1].Fields())
@ -973,7 +973,7 @@ timestamp,type,name,status
} }
// Set default Tags // Set default Tags
p.SetDefaultTags(map[string]string{"test": "tag"}) p.SetDefaultTags(map[string]string{"test": "tag"})
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
for i, m := range metrics { for i, m := range metrics {
require.Equal(t, expectedFields[i], m.Fields()) require.Equal(t, expectedFields[i], m.Fields())
@ -1067,7 +1067,7 @@ fourth=plain
require.NoError(t, p.Init()) require.NoError(t, p.Init())
p.SetDefaultTags(defaultTags) p.SetDefaultTags(defaultTags)
metrics, err := p.Parse(csv, "") metrics, err := p.Parse(csv)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 1) require.Len(t, metrics, 1)
require.EqualValues(t, tt.expectedTags, metrics[0].Tags()) require.EqualValues(t, tt.expectedTags, metrics[0].Tags())
@ -1143,7 +1143,7 @@ timestamp,type,name,status
p.SetDefaultTags(map[string]string{"test": "tag"}) p.SetDefaultTags(map[string]string{"test": "tag"})
// Do the parsing the first time // Do the parsing the first time
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, expected, metrics) testutil.RequireMetricsEqual(t, expected, metrics)
@ -1165,12 +1165,12 @@ timestamp,type,name,status
time.Date(2021, 12, 1, 19, 1, 0, 0, time.UTC), time.Date(2021, 12, 1, 19, 1, 0, 0, time.UTC),
), ),
} }
metrics, err = p.Parse([]byte(additionalCSV), "") metrics, err = p.Parse([]byte(additionalCSV))
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, additionalExpected, metrics) testutil.RequireMetricsEqual(t, additionalExpected, metrics)
// This should fail when not resetting but reading again due to the header etc // This should fail when not resetting but reading again due to the header etc
_, err = p.Parse([]byte(testCSV), "") _, err = p.Parse([]byte(testCSV))
require.Error( require.Error(
t, t,
err, err,
@ -1346,13 +1346,13 @@ timestamp,type,name,status
p.SetDefaultTags(map[string]string{"test": "tag"}) p.SetDefaultTags(map[string]string{"test": "tag"})
// Do the parsing the first time // Do the parsing the first time
metrics, err := p.Parse([]byte(testCSV), "") metrics, err := p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, expected, metrics) testutil.RequireMetricsEqual(t, expected, metrics)
// Parsing another data line should fail as it is interpreted as header // Parsing another data line should fail as it is interpreted as header
additionalCSV := "2021-12-01T19:01:00+00:00,Reader,R009,5\r\n" additionalCSV := "2021-12-01T19:01:00+00:00,Reader,R009,5\r\n"
metrics, err = p.Parse([]byte(additionalCSV), "") metrics, err = p.Parse([]byte(additionalCSV))
require.ErrorIs(t, err, parsers.ErrEOF) require.ErrorIs(t, err, parsers.ErrEOF)
require.Nil(t, metrics) require.Nil(t, metrics)
@ -1400,7 +1400,7 @@ timestamp,category,id,flag
} }
// This should work as the parser is reset // This should work as the parser is reset
metrics, err = p.Parse([]byte(testCSV), "") metrics, err = p.Parse([]byte(testCSV))
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, expected, metrics) testutil.RequireMetricsEqual(t, expected, metrics)
} }
@ -1556,7 +1556,7 @@ func TestBenchmarkData(t *testing.T) {
), ),
} }
actual, err := plugin.Parse([]byte(benchmarkData), "") actual, err := plugin.Parse([]byte(benchmarkData))
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics())
} }
@ -1573,6 +1573,6 @@ func BenchmarkParsing(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse([]byte(benchmarkData), "") plugin.Parse([]byte(benchmarkData))
} }
} }

View File

@ -36,7 +36,7 @@ type Parser struct {
} }
// Parse parses the input bytes to an array of metrics // Parse parses the input bytes to an array of metrics
func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
metrics := make([]telegraf.Metric, 0) metrics := make([]telegraf.Metric, 0)
metricTime, err := p.parseTime(buf) metricTime, err := p.parseTime(buf)
@ -193,7 +193,7 @@ func (p *Parser) readDWMetrics(metricType string, dwms interface{}, metrics []te
} }
} }
parsed, err := p.seriesParser.Parse([]byte(measurementName), "") parsed, err := p.seriesParser.Parse([]byte(measurementName))
var m telegraf.Metric var m telegraf.Metric
if err != nil || len(parsed) != 1 { if err != nil || len(parsed) != 1 {
m = metric.New(measurementName, make(map[string]string), make(map[string]interface{}), tm) m = metric.New(measurementName, make(map[string]string), make(map[string]interface{}), tm)

View File

@ -28,7 +28,7 @@ func TestParseValidEmptyJSON(t *testing.T) {
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
// Most basic vanilla test // Most basic vanilla test
metrics, err := parser.Parse([]byte(validEmptyJSON), "") metrics, err := parser.Parse([]byte(validEmptyJSON))
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, metrics) require.Empty(t, metrics)
} }
@ -53,7 +53,7 @@ func TestParseValidCounterJSON(t *testing.T) {
parser := &Parser{} parser := &Parser{}
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
metrics, err := parser.Parse([]byte(validCounterJSON), "") metrics, err := parser.Parse([]byte(validCounterJSON))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 1) require.Len(t, metrics, 1)
require.Equal(t, "measurement", metrics[0].Name()) require.Equal(t, "measurement", metrics[0].Name())
@ -97,7 +97,7 @@ func TestParseValidEmbeddedCounterJSON(t *testing.T) {
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
metrics, err := parser.Parse([]byte(validEmbeddedCounterJSON), "") metrics, err := parser.Parse([]byte(validEmbeddedCounterJSON))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 1) require.Len(t, metrics, 1)
require.Equal(t, "measurement", metrics[0].Name()) require.Equal(t, "measurement", metrics[0].Name())
@ -119,7 +119,7 @@ func TestParseValidEmbeddedCounterJSON(t *testing.T) {
TimePath: "time", TimePath: "time",
} }
require.NoError(t, parser2.Init()) require.NoError(t, parser2.Init())
metrics2, err2 := parser2.Parse([]byte(validEmbeddedCounterJSON), "") metrics2, err2 := parser2.Parse([]byte(validEmbeddedCounterJSON))
require.NoError(t, err2) require.NoError(t, err2)
require.Equal(t, map[string]string{"metric_type": "counter", "tag1": "green"}, metrics2[0].Tags()) require.Equal(t, map[string]string{"metric_type": "counter", "tag1": "green"}, metrics2[0].Tags())
} }
@ -149,7 +149,7 @@ func TestParseValidMeterJSON1(t *testing.T) {
parser := &Parser{} parser := &Parser{}
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
metrics, err := parser.Parse([]byte(validMeterJSON1), "") metrics, err := parser.Parse([]byte(validMeterJSON1))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 1) require.Len(t, metrics, 1)
require.Equal(t, "measurement1", metrics[0].Name()) require.Equal(t, "measurement1", metrics[0].Name())
@ -190,7 +190,7 @@ func TestParseValidMeterJSON2(t *testing.T) {
parser := &Parser{} parser := &Parser{}
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
metrics, err := parser.Parse([]byte(validMeterJSON2), "") metrics, err := parser.Parse([]byte(validMeterJSON2))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 1) require.Len(t, metrics, 1)
require.Equal(t, "measurement2", metrics[0].Name()) require.Equal(t, "measurement2", metrics[0].Name())
@ -225,7 +225,7 @@ func TestParseValidGaugeJSON(t *testing.T) {
parser := &Parser{} parser := &Parser{}
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
metrics, err := parser.Parse([]byte(validGaugeJSON), "") metrics, err := parser.Parse([]byte(validGaugeJSON))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 1) require.Len(t, metrics, 1)
require.Equal(t, "measurement", metrics[0].Name()) require.Equal(t, "measurement", metrics[0].Name())
@ -265,7 +265,7 @@ func TestParseValidHistogramJSON(t *testing.T) {
parser := &Parser{} parser := &Parser{}
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
metrics, err := parser.Parse([]byte(validHistogramJSON), "") metrics, err := parser.Parse([]byte(validHistogramJSON))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 1) require.Len(t, metrics, 1)
require.Equal(t, "measurement", metrics[0].Name()) require.Equal(t, "measurement", metrics[0].Name())
@ -321,7 +321,7 @@ func TestParseValidTimerJSON(t *testing.T) {
parser := &Parser{} parser := &Parser{}
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
metrics, err := parser.Parse([]byte(validTimerJSON), "") metrics, err := parser.Parse([]byte(validTimerJSON))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 1) require.Len(t, metrics, 1)
require.Equal(t, "measurement", metrics[0].Name()) require.Equal(t, "measurement", metrics[0].Name())
@ -373,7 +373,7 @@ func TestParseValidAllJSON(t *testing.T) {
parser := &Parser{} parser := &Parser{}
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
metrics, err := parser.Parse([]byte(validAllJSON), "") metrics, err := parser.Parse([]byte(validAllJSON))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 5) require.Len(t, metrics, 5)
} }
@ -387,7 +387,7 @@ func TestTagParsingProblems(t *testing.T) {
} }
require.NoError(t, parser1.Init()) require.NoError(t, parser1.Init())
metrics1, err1 := parser1.Parse([]byte(validEmbeddedCounterJSON), "") metrics1, err1 := parser1.Parse([]byte(validEmbeddedCounterJSON))
require.NoError(t, err1) require.NoError(t, err1)
require.Len(t, metrics1, 1) require.Len(t, metrics1, 1)
require.Equal(t, map[string]string{"metric_type": "counter"}, metrics1[0].Tags()) require.Equal(t, map[string]string{"metric_type": "counter"}, metrics1[0].Tags())
@ -400,7 +400,7 @@ func TestTagParsingProblems(t *testing.T) {
Log: testutil.Logger{}, Log: testutil.Logger{},
} }
require.NoError(t, parser2.Init()) require.NoError(t, parser2.Init())
metrics2, err2 := parser2.Parse([]byte(validEmbeddedCounterJSON), "") metrics2, err2 := parser2.Parse([]byte(validEmbeddedCounterJSON))
require.NoError(t, err2) require.NoError(t, err2)
require.Len(t, metrics2, 1) require.Len(t, metrics2, 1)
require.Equal(t, map[string]string{"metric_type": "counter", "tag1": "green"}, metrics2[0].Tags()) require.Equal(t, map[string]string{"metric_type": "counter", "tag1": "green"}, metrics2[0].Tags())
@ -453,7 +453,7 @@ func TestParseSampleTemplateJSON(t *testing.T) {
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
metrics, err := parser.Parse([]byte(sampleTemplateJSON), "") metrics, err := parser.Parse([]byte(sampleTemplateJSON))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 11) require.Len(t, metrics, 11)
@ -579,7 +579,7 @@ func TestDropWizard(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
parser := &Parser{} parser := &Parser{}
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
metrics, err := parser.Parse(tt.input, "") metrics, err := parser.Parse(tt.input)
if tt.expectError { if tt.expectError {
require.Error(t, err) require.Error(t, err)
} else { } else {
@ -636,7 +636,7 @@ func TestBenchmarkData(t *testing.T) {
), ),
} }
actual, err := plugin.Parse([]byte(benchmarkData), "") actual, err := plugin.Parse([]byte(benchmarkData))
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics())
} }
@ -647,6 +647,6 @@ func BenchmarkParsing(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse([]byte(benchmarkData), "") plugin.Parse([]byte(benchmarkData))
} }
} }

View File

@ -22,7 +22,7 @@ type Parser struct {
} }
// Parse converts a slice of bytes in "application/x-www-form-urlencoded" format into metrics // Parse converts a slice of bytes in "application/x-www-form-urlencoded" format into metrics
func (p Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { func (p Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
buf = bytes.TrimSpace(buf) buf = bytes.TrimSpace(buf)
if len(buf) == 0 { if len(buf) == 0 {
return make([]telegraf.Metric, 0), nil return make([]telegraf.Metric, 0), nil
@ -47,7 +47,7 @@ func (p Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) {
// ParseLine delegates a single line of text to the Parse function // ParseLine delegates a single line of text to the Parse function
func (p Parser) ParseLine(line string) (telegraf.Metric, error) { func (p Parser) ParseLine(line string) (telegraf.Metric, error) {
metrics, err := p.Parse([]byte(line), "") metrics, err := p.Parse([]byte(line))
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -24,7 +24,7 @@ func TestParseValidFormData(t *testing.T) {
MetricName: "form_urlencoded_test", MetricName: "form_urlencoded_test",
} }
metrics, err := parser.Parse([]byte(validFormData), "") metrics, err := parser.Parse([]byte(validFormData))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 1) require.Len(t, metrics, 1)
require.Equal(t, "form_urlencoded_test", metrics[0].Name()) require.Equal(t, "form_urlencoded_test", metrics[0].Name())
@ -56,7 +56,7 @@ func TestParseValidFormDataWithTags(t *testing.T) {
TagKeys: []string{"tag1", "tag2"}, TagKeys: []string{"tag1", "tag2"},
} }
metrics, err := parser.Parse([]byte(validFormData), "") metrics, err := parser.Parse([]byte(validFormData))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 1) require.Len(t, metrics, 1)
require.Equal(t, "form_urlencoded_test", metrics[0].Name()) require.Equal(t, "form_urlencoded_test", metrics[0].Name())
@ -77,7 +77,7 @@ func TestParseValidFormDataDefaultTags(t *testing.T) {
DefaultTags: map[string]string{"tag4": "default"}, DefaultTags: map[string]string{"tag4": "default"},
} }
metrics, err := parser.Parse([]byte(validFormData), "") metrics, err := parser.Parse([]byte(validFormData))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 1) require.Len(t, metrics, 1)
require.Equal(t, "form_urlencoded_test", metrics[0].Name()) require.Equal(t, "form_urlencoded_test", metrics[0].Name())
@ -99,7 +99,7 @@ func TestParseValidFormDataDefaultTagsOverride(t *testing.T) {
DefaultTags: map[string]string{"tag1": "default"}, DefaultTags: map[string]string{"tag1": "default"},
} }
metrics, err := parser.Parse([]byte(validFormData), "") metrics, err := parser.Parse([]byte(validFormData))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 1) require.Len(t, metrics, 1)
require.Equal(t, "form_urlencoded_test", metrics[0].Name()) require.Equal(t, "form_urlencoded_test", metrics[0].Name())
@ -119,7 +119,7 @@ func TestParseEncodedFormData(t *testing.T) {
TagKeys: []string{"tag1"}, TagKeys: []string{"tag1"},
} }
metrics, err := parser.Parse([]byte(encodedFormData), "") metrics, err := parser.Parse([]byte(encodedFormData))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 1) require.Len(t, metrics, 1)
require.Equal(t, "form_urlencoded_test", metrics[0].Name()) require.Equal(t, "form_urlencoded_test", metrics[0].Name())
@ -136,7 +136,7 @@ func TestParseInvalidFormDataError(t *testing.T) {
MetricName: "form_urlencoded_test", MetricName: "form_urlencoded_test",
} }
metrics, err := parser.Parse([]byte(notEscapedProperlyFormData), "") metrics, err := parser.Parse([]byte(notEscapedProperlyFormData))
require.Error(t, err) require.Error(t, err)
require.Empty(t, metrics) require.Empty(t, metrics)
} }
@ -147,7 +147,7 @@ func TestParseInvalidFormDataEmptyKey(t *testing.T) {
} }
// Empty key for field // Empty key for field
metrics, err := parser.Parse([]byte(blankKeyFormData), "") metrics, err := parser.Parse([]byte(blankKeyFormData))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 1) require.Len(t, metrics, 1)
require.Equal(t, map[string]string{}, metrics[0].Tags()) require.Equal(t, map[string]string{}, metrics[0].Tags())
@ -157,7 +157,7 @@ func TestParseInvalidFormDataEmptyKey(t *testing.T) {
// Empty key for tag // Empty key for tag
parser.TagKeys = []string{""} parser.TagKeys = []string{""}
metrics, err = parser.Parse([]byte(blankKeyFormData), "") metrics, err = parser.Parse([]byte(blankKeyFormData))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 1) require.Len(t, metrics, 1)
require.Equal(t, map[string]string{}, metrics[0].Tags()) require.Equal(t, map[string]string{}, metrics[0].Tags())
@ -171,7 +171,7 @@ func TestParseInvalidFormDataEmptyString(t *testing.T) {
MetricName: "form_urlencoded_test", MetricName: "form_urlencoded_test",
} }
metrics, err := parser.Parse([]byte(emptyFormData), "") metrics, err := parser.Parse([]byte(emptyFormData))
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, metrics) require.Empty(t, metrics)
} }
@ -199,7 +199,7 @@ func TestBenchmarkData(t *testing.T) {
), ),
} }
actual, err := plugin.Parse([]byte(benchmarkData), "") actual, err := plugin.Parse([]byte(benchmarkData))
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics())
} }
@ -212,6 +212,6 @@ func BenchmarkParsing(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse([]byte(benchmarkData), "") plugin.Parse([]byte(benchmarkData))
} }
} }

View File

@ -48,7 +48,7 @@ func (p *Parser) Init() error {
return nil return nil
} }
func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
// parse even if the buffer begins with a newline // parse even if the buffer begins with a newline
if len(buf) != 0 && buf[0] == '\n' { if len(buf) != 0 && buf[0] == '\n' {
buf = buf[1:] buf = buf[1:]

View File

@ -30,7 +30,7 @@ func BenchmarkParse(b *testing.B) {
require.NoError(b, p.Init()) require.NoError(b, p.Init())
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
_, err := p.Parse([]byte("servers.localhost.cpu.load 11 1435077219"), "") _, err := p.Parse([]byte("servers.localhost.cpu.load 11 1435077219"))
require.NoError(b, err) require.NoError(b, err)
} }
} }
@ -387,7 +387,7 @@ func TestParse(t *testing.T) {
p := Parser{Templates: []string{test.template}} p := Parser{Templates: []string{test.template}}
require.NoError(t, p.Init()) require.NoError(t, p.Init())
metrics, err := p.Parse(test.input, "") metrics, err := p.Parse(test.input)
if test.err != "" { if test.err != "" {
require.EqualError(t, err, test.err) require.EqualError(t, err, test.err)
continue continue

View File

@ -381,7 +381,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
return metric.New(p.Measurement, tags, fields, p.tsModder.tsMod(timestamp)), nil return metric.New(p.Measurement, tags, fields, p.tsModder.tsMod(timestamp)), nil
} }
func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
metrics := make([]telegraf.Metric, 0) metrics := make([]telegraf.Metric, 0)
if p.Multiline { if p.Multiline {

View File

@ -21,7 +21,7 @@ func TestGrokParse(t *testing.T) {
err := parser.Compile() err := parser.Compile()
require.NoError(t, err) require.NoError(t, err)
_, err = parser.Parse([]byte(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`), "") _, err = parser.Parse([]byte(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`))
require.NoError(t, err) require.NoError(t, err)
} }
@ -1021,7 +1021,7 @@ func TestMultilinePatterns(t *testing.T) {
Log: testutil.Logger{}, Log: testutil.Logger{},
} }
require.NoError(t, p.Compile()) require.NoError(t, p.Compile())
actual, err := p.Parse(buf, "") actual, err := p.Parse(buf)
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, expected, actual) testutil.RequireMetricsEqual(t, expected, actual)
} }
@ -1181,7 +1181,7 @@ func TestMultilineNilMetric(t *testing.T) {
Log: testutil.Logger{}, Log: testutil.Logger{},
} }
require.NoError(t, p.Compile()) require.NoError(t, p.Compile())
actual, err := p.Parse(buf, "") actual, err := p.Parse(buf)
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, actual) require.Empty(t, actual)
} }
@ -1224,7 +1224,7 @@ func TestBenchmarkData(t *testing.T) {
), ),
} }
actual, err := plugin.Parse([]byte(benchmarkData), "") actual, err := plugin.Parse([]byte(benchmarkData))
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics())
} }
@ -1238,6 +1238,6 @@ func BenchmarkParsing(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse([]byte(benchmarkData), "") plugin.Parse([]byte(benchmarkData))
} }
} }

View File

@ -117,7 +117,7 @@ func (p *Parser) SetTimeFunc(f TimeFunc) {
p.defaultTime = f p.defaultTime = f
} }
func (p *Parser) Parse(input []byte, extra string) ([]telegraf.Metric, error) { func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) {
metrics := make([]telegraf.Metric, 0) metrics := make([]telegraf.Metric, 0)
decoder := lineprotocol.NewDecoderWithBytes(input) decoder := lineprotocol.NewDecoderWithBytes(input)
@ -134,7 +134,7 @@ func (p *Parser) Parse(input []byte, extra string) ([]telegraf.Metric, error) {
} }
func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
metrics, err := p.Parse([]byte(line), "") metrics, err := p.Parse([]byte(line))
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -614,7 +614,7 @@ func TestParser(t *testing.T) {
parser.SetTimeFunc(tt.timeFunc) parser.SetTimeFunc(tt.timeFunc)
} }
metrics, err := parser.Parse(tt.input, "") metrics, err := parser.Parse(tt.input)
if tt.err == nil { if tt.err == nil {
require.NoError(t, err) require.NoError(t, err)
} else { } else {
@ -638,7 +638,7 @@ func BenchmarkParser(b *testing.B) {
parser := Parser{} parser := Parser{}
require.NoError(b, parser.Init()) require.NoError(b, parser.Init())
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
metrics, err := parser.Parse(tt.input, "") metrics, err := parser.Parse(tt.input)
_ = err _ = err
_ = metrics _ = metrics
} }
@ -748,7 +748,7 @@ func TestSeriesParser(t *testing.T) {
parser.SetTimeFunc(tt.timeFunc) parser.SetTimeFunc(tt.timeFunc)
} }
metrics, err := parser.Parse(tt.input, "") metrics, err := parser.Parse(tt.input)
require.Equal(t, tt.err, err) require.Equal(t, tt.err, err)
if err != nil { if err != nil {
require.Equal(t, tt.err.Error(), err.Error()) require.Equal(t, tt.err.Error(), err.Error())
@ -854,7 +854,7 @@ func TestParserTimestampPrecision(t *testing.T) {
parser := Parser{InfluxTimestampPrecision: d} parser := Parser{InfluxTimestampPrecision: d}
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
metrics, err := parser.Parse(tt.input, "") metrics, err := parser.Parse(tt.input)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, tt.metrics, metrics) require.Equal(t, tt.metrics, metrics)
@ -905,7 +905,7 @@ func TestParserErrorString(t *testing.T) {
parser := Parser{} parser := Parser{}
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
_, err := parser.Parse(tt.input, "") _, err := parser.Parse(tt.input)
require.Equal(t, tt.errString, err.Error()) require.Equal(t, tt.errString, err.Error())
}) })
} }
@ -1057,7 +1057,7 @@ func TestBenchmarkData(t *testing.T) {
} }
// Do the parsing // Do the parsing
actual, err := plugin.Parse([]byte(benchmarkData), "") actual, err := plugin.Parse([]byte(benchmarkData))
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics())
} }
@ -1068,6 +1068,6 @@ func BenchmarkParsing(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse([]byte(benchmarkData), "") plugin.Parse([]byte(benchmarkData))
} }
} }

View File

@ -79,7 +79,7 @@ func (p *Parser) SetTimePrecision(u time.Duration) {
p.handler.SetTimePrecision(u) p.handler.SetTimePrecision(u)
} }
func (p *Parser) Parse(input []byte, extra string) ([]telegraf.Metric, error) { func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) {
p.Lock() p.Lock()
defer p.Unlock() defer p.Unlock()
metrics := make([]telegraf.Metric, 0) metrics := make([]telegraf.Metric, 0)
@ -115,7 +115,7 @@ func (p *Parser) Parse(input []byte, extra string) ([]telegraf.Metric, error) {
} }
func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
metrics, err := p.Parse([]byte(line), "") metrics, err := p.Parse([]byte(line))
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -588,7 +588,7 @@ func TestParser(t *testing.T) {
parser.SetTimeFunc(tt.timeFunc) parser.SetTimeFunc(tt.timeFunc)
} }
metrics, err := parser.Parse(tt.input, "") metrics, err := parser.Parse(tt.input)
require.Equal(t, tt.err, err) require.Equal(t, tt.err, err)
require.Len(t, metrics, len(tt.metrics)) require.Len(t, metrics, len(tt.metrics))
@ -693,7 +693,7 @@ func TestParserTimestampPrecision(t *testing.T) {
parser := Parser{InfluxTimestampPrecision: d} parser := Parser{InfluxTimestampPrecision: d}
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
metrics, err := parser.Parse(tt.input, "") metrics, err := parser.Parse(tt.input)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, tt.metrics, metrics) require.Equal(t, tt.metrics, metrics)
@ -716,7 +716,7 @@ func BenchmarkParser(b *testing.B) {
parser := Parser{} parser := Parser{}
require.NoError(b, parser.Init()) require.NoError(b, parser.Init())
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
metrics, err := parser.Parse(tt.input, "") metrics, err := parser.Parse(tt.input)
_ = err _ = err
_ = metrics _ = metrics
} }
@ -824,7 +824,7 @@ func TestSeriesParser(t *testing.T) {
parser.SetTimeFunc(tt.timeFunc) parser.SetTimeFunc(tt.timeFunc)
} }
metrics, err := parser.Parse(tt.input, "") metrics, err := parser.Parse(tt.input)
require.Equal(t, tt.err, err) require.Equal(t, tt.err, err)
if err != nil { if err != nil {
require.Equal(t, tt.err.Error(), err.Error()) require.Equal(t, tt.err.Error(), err.Error())
@ -872,7 +872,7 @@ func TestParserErrorString(t *testing.T) {
parser := Parser{} parser := Parser{}
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
_, err := parser.Parse(tt.input, "") _, err := parser.Parse(tt.input)
require.Equal(t, tt.errString, err.Error()) require.Equal(t, tt.errString, err.Error())
}) })
} }
@ -1024,7 +1024,7 @@ func TestBenchmarkData(t *testing.T) {
} }
// Do the parsing // Do the parsing
actual, err := plugin.Parse([]byte(benchmarkData), "") actual, err := plugin.Parse([]byte(benchmarkData))
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics())
} }
@ -1035,6 +1035,6 @@ func BenchmarkParsing(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse([]byte(benchmarkData), "") plugin.Parse([]byte(benchmarkData))
} }
} }

View File

@ -181,7 +181,7 @@ func (p *Parser) Init() error {
return nil return nil
} }
func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
if p.Query != "" { if p.Query != "" {
result := gjson.GetBytes(buf, p.Query) result := gjson.GetBytes(buf, p.Query)
buf = []byte(result.Raw) buf = []byte(result.Raw)
@ -220,7 +220,7 @@ func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) {
} }
func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
metrics, err := p.Parse([]byte(line+"\n"), "") metrics, err := p.Parse([]byte(line + "\n"))
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -116,7 +116,7 @@ func TestParseValidJSON(t *testing.T) {
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
// Most basic vanilla test // Most basic vanilla test
actual, err := parser.Parse([]byte(validJSON), "") actual, err := parser.Parse([]byte(validJSON))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual, 1) require.Len(t, actual, 1)
require.Equal(t, "json_test", actual[0].Name()) require.Equal(t, "json_test", actual[0].Name())
@ -127,7 +127,7 @@ func TestParseValidJSON(t *testing.T) {
require.Equal(t, map[string]string{}, actual[0].Tags()) require.Equal(t, map[string]string{}, actual[0].Tags())
// Test that newlines are fine // Test that newlines are fine
actual, err = parser.Parse([]byte(validJSONNewline), "") actual, err = parser.Parse([]byte(validJSONNewline))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual, 1) require.Len(t, actual, 1)
require.Equal(t, "json_test", actual[0].Name()) require.Equal(t, "json_test", actual[0].Name())
@ -138,7 +138,7 @@ func TestParseValidJSON(t *testing.T) {
require.Equal(t, map[string]string{}, actual[0].Tags()) require.Equal(t, map[string]string{}, actual[0].Tags())
// Test that strings without TagKeys defined are ignored // Test that strings without TagKeys defined are ignored
actual, err = parser.Parse([]byte(validJSONTags), "") actual, err = parser.Parse([]byte(validJSONTags))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual, 1) require.Len(t, actual, 1)
require.Equal(t, "json_test", actual[0].Name()) require.Equal(t, "json_test", actual[0].Name())
@ -149,12 +149,12 @@ func TestParseValidJSON(t *testing.T) {
require.Equal(t, map[string]string{}, actual[0].Tags()) require.Equal(t, map[string]string{}, actual[0].Tags())
// Test that whitespace only will parse as an empty list of actual // Test that whitespace only will parse as an empty list of actual
actual, err = parser.Parse([]byte("\n\t"), "") actual, err = parser.Parse([]byte("\n\t"))
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, actual) require.Empty(t, actual)
// Test that an empty string will parse as an empty list of actual // Test that an empty string will parse as an empty list of actual
actual, err = parser.Parse([]byte(""), "") actual, err = parser.Parse([]byte(""))
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, actual) require.Empty(t, actual)
} }
@ -198,9 +198,9 @@ func TestParseInvalidJSON(t *testing.T) {
parser := &Parser{MetricName: "json_test"} parser := &Parser{MetricName: "json_test"}
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
_, err := parser.Parse([]byte(invalidJSON), "") _, err := parser.Parse([]byte(invalidJSON))
require.Error(t, err) require.Error(t, err)
_, err = parser.Parse([]byte(invalidJSON2), "") _, err = parser.Parse([]byte(invalidJSON2))
require.Error(t, err) require.Error(t, err)
_, err = parser.ParseLine(invalidJSON) _, err = parser.ParseLine(invalidJSON)
require.Error(t, err) require.Error(t, err)
@ -213,7 +213,7 @@ func TestParseJSONImplicitStrictness(t *testing.T) {
} }
require.NoError(t, parserImplicitNoStrict.Init()) require.NoError(t, parserImplicitNoStrict.Init())
_, err := parserImplicitNoStrict.Parse([]byte(mixedValidityJSON), "") _, err := parserImplicitNoStrict.Parse([]byte(mixedValidityJSON))
require.NoError(t, err) require.NoError(t, err)
} }
@ -225,7 +225,7 @@ func TestParseJSONExplicitStrictnessFalse(t *testing.T) {
} }
require.NoError(t, parserNoStrict.Init()) require.NoError(t, parserNoStrict.Init())
_, err := parserNoStrict.Parse([]byte(mixedValidityJSON), "") _, err := parserNoStrict.Parse([]byte(mixedValidityJSON))
require.NoError(t, err) require.NoError(t, err)
} }
@ -237,7 +237,7 @@ func TestParseJSONExplicitStrictnessTrue(t *testing.T) {
} }
require.NoError(t, parserStrict.Init()) require.NoError(t, parserStrict.Init())
_, err := parserStrict.Parse([]byte(mixedValidityJSON), "") _, err := parserStrict.Parse([]byte(mixedValidityJSON))
require.Error(t, err) require.Error(t, err)
} }
@ -249,7 +249,7 @@ func TestParseWithTagKeys(t *testing.T) {
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
actual, err := parser.Parse([]byte(validJSONTags), "") actual, err := parser.Parse([]byte(validJSONTags))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual, 1) require.Len(t, actual, 1)
require.Equal(t, "json_test", actual[0].Name()) require.Equal(t, "json_test", actual[0].Name())
@ -266,7 +266,7 @@ func TestParseWithTagKeys(t *testing.T) {
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
actual, err = parser.Parse([]byte(validJSONTags), "") actual, err = parser.Parse([]byte(validJSONTags))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual, 1) require.Len(t, actual, 1)
require.Equal(t, "json_test", actual[0].Name()) require.Equal(t, "json_test", actual[0].Name())
@ -285,7 +285,7 @@ func TestParseWithTagKeys(t *testing.T) {
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
actual, err = parser.Parse([]byte(validJSONTags), "") actual, err = parser.Parse([]byte(validJSONTags))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual, 1) require.Len(t, actual, 1)
require.Equal(t, "json_test", actual[0].Name()) require.Equal(t, "json_test", actual[0].Name())
@ -363,7 +363,7 @@ func TestParseValidJSONDefaultTags(t *testing.T) {
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
// Most basic vanilla test // Most basic vanilla test
actual, err := parser.Parse([]byte(validJSON), "") actual, err := parser.Parse([]byte(validJSON))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual, 1) require.Len(t, actual, 1)
require.Equal(t, "json_test", actual[0].Name()) require.Equal(t, "json_test", actual[0].Name())
@ -374,7 +374,7 @@ func TestParseValidJSONDefaultTags(t *testing.T) {
require.Equal(t, map[string]string{"t4g": "default"}, actual[0].Tags()) require.Equal(t, map[string]string{"t4g": "default"}, actual[0].Tags())
// Test that tagkeys and default tags are applied // Test that tagkeys and default tags are applied
actual, err = parser.Parse([]byte(validJSONTags), "") actual, err = parser.Parse([]byte(validJSONTags))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual, 1) require.Len(t, actual, 1)
require.Equal(t, "json_test", actual[0].Name()) require.Equal(t, "json_test", actual[0].Name())
@ -398,7 +398,7 @@ func TestParseValidJSONDefaultTagsOverride(t *testing.T) {
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
// Most basic vanilla test // Most basic vanilla test
actual, err := parser.Parse([]byte(validJSON), "") actual, err := parser.Parse([]byte(validJSON))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual, 1) require.Len(t, actual, 1)
require.Equal(t, "json_test", actual[0].Name()) require.Equal(t, "json_test", actual[0].Name())
@ -409,7 +409,7 @@ func TestParseValidJSONDefaultTagsOverride(t *testing.T) {
require.Equal(t, map[string]string{"mytag": "default"}, actual[0].Tags()) require.Equal(t, map[string]string{"mytag": "default"}, actual[0].Tags())
// Test that tagkeys override default tags // Test that tagkeys override default tags
actual, err = parser.Parse([]byte(validJSONTags), "") actual, err = parser.Parse([]byte(validJSONTags))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual, 1) require.Len(t, actual, 1)
require.Equal(t, "json_test", actual[0].Name()) require.Equal(t, "json_test", actual[0].Name())
@ -428,7 +428,7 @@ func TestParseValidJSONArray(t *testing.T) {
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
// Most basic vanilla test // Most basic vanilla test
actual, err := parser.Parse([]byte(validJSONArray), "") actual, err := parser.Parse([]byte(validJSONArray))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual, 1) require.Len(t, actual, 1)
require.Equal(t, "json_array_test", actual[0].Name()) require.Equal(t, "json_array_test", actual[0].Name())
@ -439,7 +439,7 @@ func TestParseValidJSONArray(t *testing.T) {
require.Equal(t, map[string]string{}, actual[0].Tags()) require.Equal(t, map[string]string{}, actual[0].Tags())
// Basic multiple datapoints // Basic multiple datapoints
actual, err = parser.Parse([]byte(validJSONArrayMultiple), "") actual, err = parser.Parse([]byte(validJSONArrayMultiple))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual, 2) require.Len(t, actual, 2)
require.Equal(t, "json_array_test", actual[0].Name()) require.Equal(t, "json_array_test", actual[0].Name())
@ -464,7 +464,7 @@ func TestParseArrayWithTagKeys(t *testing.T) {
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
actual, err := parser.Parse([]byte(validJSONArrayTags), "") actual, err := parser.Parse([]byte(validJSONArrayTags))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual, 2) require.Len(t, actual, 2)
require.Equal(t, "json_array_test", actual[0].Name()) require.Equal(t, "json_array_test", actual[0].Name())
@ -488,7 +488,7 @@ func TestParseArrayWithTagKeys(t *testing.T) {
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
actual, err = parser.Parse([]byte(validJSONArrayTags), "") actual, err = parser.Parse([]byte(validJSONArrayTags))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual, 2) require.Len(t, actual, 2)
require.Equal(t, "json_array_test", actual[0].Name()) require.Equal(t, "json_array_test", actual[0].Name())
@ -516,7 +516,7 @@ func TestParseArrayWithTagKeys(t *testing.T) {
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
actual, err = parser.Parse([]byte(validJSONArrayTags), "") actual, err = parser.Parse([]byte(validJSONArrayTags))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual, 2) require.Len(t, actual, 2)
require.Equal(t, "json_array_test", actual[0].Name()) require.Equal(t, "json_array_test", actual[0].Name())
@ -547,7 +547,7 @@ func TestHttpJsonBOM(t *testing.T) {
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
// Most basic vanilla test // Most basic vanilla test
_, err := parser.Parse(jsonBOM, "") _, err := parser.Parse(jsonBOM)
require.NoError(t, err) require.NoError(t, err)
} }
@ -577,7 +577,7 @@ func TestJSONParseNestedArray(t *testing.T) {
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
actual, err := parser.Parse([]byte(testString), "") actual, err := parser.Parse([]byte(testString))
require.Len(t, actual, 1) require.Len(t, actual, 1)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual[0].Tags(), 3) require.Len(t, actual[0].Tags(), 3)
@ -606,7 +606,7 @@ func TestJSONQueryErrorOnArray(t *testing.T) {
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
_, err := parser.Parse([]byte(testString), "") _, err := parser.Parse([]byte(testString))
require.Error(t, err) require.Error(t, err)
} }
@ -640,7 +640,7 @@ func TestArrayOfObjects(t *testing.T) {
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
actual, err := parser.Parse([]byte(testString), "") actual, err := parser.Parse([]byte(testString))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual, 3) require.Len(t, actual, 3)
} }
@ -668,7 +668,7 @@ func TestUseCaseJSONQuery(t *testing.T) {
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
actual, err := parser.Parse([]byte(testString), "") actual, err := parser.Parse([]byte(testString))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual, 3) require.Len(t, actual, 3)
require.Equal(t, "Murphy", actual[0].Fields()["last"]) require.Equal(t, "Murphy", actual[0].Fields()["last"])
@ -703,7 +703,7 @@ func TestTimeParser(t *testing.T) {
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
actual, err := parser.Parse([]byte(testString), "") actual, err := parser.Parse([]byte(testString))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual, 2) require.Len(t, actual, 2)
require.NotEqual(t, actual[0].Time(), actual[1].Time()) require.NotEqual(t, actual[0].Time(), actual[1].Time())
@ -722,7 +722,7 @@ func TestTimeParserWithTimezone(t *testing.T) {
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
actual, err := parser.Parse([]byte(testString), "") actual, err := parser.Parse([]byte(testString))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual, 1) require.Len(t, actual, 1)
require.EqualValues(t, int64(1136405040000000000), actual[0].Time().UnixNano()) require.EqualValues(t, int64(1136405040000000000), actual[0].Time().UnixNano())
@ -757,7 +757,7 @@ func TestUnixTimeParser(t *testing.T) {
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
actual, err := parser.Parse([]byte(testString), "") actual, err := parser.Parse([]byte(testString))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual, 2) require.Len(t, actual, 2)
require.NotEqual(t, actual[0].Time(), actual[1].Time()) require.NotEqual(t, actual[0].Time(), actual[1].Time())
@ -792,7 +792,7 @@ func TestUnixMsTimeParser(t *testing.T) {
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
actual, err := parser.Parse([]byte(testString), "") actual, err := parser.Parse([]byte(testString))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual, 2) require.Len(t, actual, 2)
require.NotEqual(t, actual[0].Time(), actual[1].Time()) require.NotEqual(t, actual[0].Time(), actual[1].Time())
@ -816,7 +816,7 @@ func TestTimeErrors(t *testing.T) {
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
actual, err := parser.Parse([]byte(testString), "") actual, err := parser.Parse([]byte(testString))
require.Error(t, err) require.Error(t, err)
require.Empty(t, actual) require.Empty(t, actual)
@ -836,7 +836,7 @@ func TestTimeErrors(t *testing.T) {
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
actual, err = parser.Parse([]byte(testString2), "") actual, err = parser.Parse([]byte(testString2))
require.Error(t, err) require.Error(t, err)
require.Empty(t, actual) require.Empty(t, actual)
require.Equal(t, errors.New("'json_time_key' could not be found"), err) require.Equal(t, errors.New("'json_time_key' could not be found"), err)
@ -846,7 +846,7 @@ func TestShareTimestamp(t *testing.T) {
parser := &Parser{MetricName: "json_test"} parser := &Parser{MetricName: "json_test"}
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
actual, err := parser.Parse([]byte(validJSONArrayMultiple), "") actual, err := parser.Parse([]byte(validJSONArrayMultiple))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual, 2) require.Len(t, actual, 2)
require.Equal(t, actual[0].Time(), actual[1].Time()) require.Equal(t, actual[0].Time(), actual[1].Time())
@ -866,7 +866,7 @@ func TestNameKey(t *testing.T) {
parser := &Parser{NameKey: "b_c"} parser := &Parser{NameKey: "b_c"}
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
actual, err := parser.Parse([]byte(testString), "") actual, err := parser.Parse([]byte(testString))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, "this is my name", actual[0].Name()) require.Equal(t, "this is my name", actual[0].Name())
} }
@ -877,7 +877,7 @@ func TestParseArrayWithWrongType(t *testing.T) {
parser := &Parser{} parser := &Parser{}
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
_, err := parser.Parse([]byte(data), "") _, err := parser.Parse([]byte(data))
require.Error(t, err) require.Error(t, err)
} }
@ -994,7 +994,7 @@ func TestParse(t *testing.T) {
parser := tt.parser parser := tt.parser
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
actual, err := parser.Parse(tt.input, "") actual, err := parser.Parse(tt.input)
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.IgnoreTime()) testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.IgnoreTime())
@ -1106,7 +1106,7 @@ func TestParseWithWildcardTagKeys(t *testing.T) {
parser := tt.parser parser := tt.parser
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
actual, err := parser.Parse(tt.input, "") actual, err := parser.Parse(tt.input)
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.IgnoreTime()) testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.IgnoreTime())
}) })
@ -1382,7 +1382,7 @@ func TestParseArrayWithWildcardTagKeys(t *testing.T) {
parser := tt.parser parser := tt.parser
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
actual, err := parser.Parse(tt.input, "") actual, err := parser.Parse(tt.input)
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.IgnoreTime()) testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.IgnoreTime())
@ -1434,7 +1434,7 @@ func TestBenchmarkData(t *testing.T) {
} }
// Do the parsing // Do the parsing
actual, err := plugin.Parse([]byte(benchmarkData), "") actual, err := plugin.Parse([]byte(benchmarkData))
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics())
} }
@ -1450,7 +1450,7 @@ func BenchmarkParsingSequential(b *testing.B) {
// Do the benchmarking // Do the benchmarking
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse([]byte(benchmarkData), "") plugin.Parse([]byte(benchmarkData))
} }
} }
@ -1466,7 +1466,7 @@ func BenchmarkParsingParallel(b *testing.B) {
b.RunParallel(func(p *testing.PB) { b.RunParallel(func(p *testing.PB) {
for p.Next() { for p.Next() {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse([]byte(benchmarkData), "") plugin.Parse([]byte(benchmarkData))
} }
}) })
} }
@ -1488,6 +1488,6 @@ func FuzzParserJSON(f *testing.F) {
f.Fuzz(func(_ *testing.T, input []byte) { f.Fuzz(func(_ *testing.T, input []byte) {
//nolint:errcheck // fuzz testing can give lots of errors, but we just want to test for crashes //nolint:errcheck // fuzz testing can give lots of errors, but we just want to test for crashes
parser.Parse(input, "") parser.Parse(input)
}) })
} }

View File

@ -120,7 +120,7 @@ func (p *Parser) Init() error {
return nil return nil
} }
func (p *Parser) Parse(input []byte, extra string) ([]telegraf.Metric, error) { func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) {
// What we've done here is to put the entire former contents of Parse() // What we've done here is to put the entire former contents of Parse()
// into parseCriticalPath(). // into parseCriticalPath().
// //

View File

@ -131,7 +131,7 @@ func BenchmarkParsingSequential(b *testing.B) {
// Do the benchmarking // Do the benchmarking
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse(input, "") plugin.Parse(input)
} }
} }
@ -162,7 +162,7 @@ func BenchmarkParsingParallel(b *testing.B) {
b.RunParallel(func(p *testing.PB) { b.RunParallel(func(p *testing.PB) {
for p.Next() { for p.Next() {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse(input, "") plugin.Parse(input)
} }
}) })
} }

View File

@ -27,7 +27,7 @@ type Parser struct {
} }
// Parse converts a slice of bytes in logfmt format to metrics. // Parse converts a slice of bytes in logfmt format to metrics.
func (p *Parser) Parse(b []byte, extra string) ([]telegraf.Metric, error) { func (p *Parser) Parse(b []byte) ([]telegraf.Metric, error) {
reader := bytes.NewReader(b) reader := bytes.NewReader(b)
decoder := logfmt.NewDecoder(reader) decoder := logfmt.NewDecoder(reader)
metrics := make([]telegraf.Metric, 0) metrics := make([]telegraf.Metric, 0)
@ -75,7 +75,7 @@ func (p *Parser) Parse(b []byte, extra string) ([]telegraf.Metric, error) {
// ParseLine converts a single line of text in logfmt format to metrics. // ParseLine converts a single line of text in logfmt format to metrics.
func (p *Parser) ParseLine(s string) (telegraf.Metric, error) { func (p *Parser) ParseLine(s string) (telegraf.Metric, error) {
metrics, err := p.Parse([]byte(s), "") metrics, err := p.Parse([]byte(s))
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -128,7 +128,7 @@ func TestParse(t *testing.T) {
l := Parser{ l := Parser{
metricName: tt.measurement, metricName: tt.measurement,
} }
got, err := l.Parse(tt.bytes, "") got, err := l.Parse(tt.bytes)
if (err != nil) != tt.wantErr { if (err != nil) != tt.wantErr {
t.Errorf("Logfmt.Parse error = %v, wantErr %v", err, tt.wantErr) t.Errorf("Logfmt.Parse error = %v, wantErr %v", err, tt.wantErr)
return return
@ -316,7 +316,7 @@ func TestBenchmarkData(t *testing.T) {
), ),
} }
actual, err := plugin.Parse([]byte(benchmarkData), "") actual, err := plugin.Parse([]byte(benchmarkData))
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics())
} }
@ -329,6 +329,6 @@ func BenchmarkParsing(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse([]byte(benchmarkData), "") plugin.Parse([]byte(benchmarkData))
} }
} }

View File

@ -103,7 +103,7 @@ var (
) )
func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
metrics, err := p.Parse([]byte(line), "") metrics, err := p.Parse([]byte(line))
return metrics[0], err return metrics[0], err
} }
@ -111,7 +111,7 @@ func (p *Parser) SetDefaultTags(tags map[string]string) {
p.DefaultTags = tags p.DefaultTags = tags
} }
func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
ts := time.Now().UTC() ts := time.Now().UTC()
s := bufio.NewScanner(bytes.NewReader(buf)) s := bufio.NewScanner(bytes.NewReader(buf))

View File

@ -466,7 +466,7 @@ with three lines
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
metrics, err := parser.Parse([]byte(tt.input), "") metrics, err := parser.Parse([]byte(tt.input))
tt.assertF(t, metrics, err) tt.assertF(t, metrics, err)
}) })
} }
@ -562,7 +562,7 @@ func TestBenchmarkData(t *testing.T) {
), ),
} }
actual, err := plugin.Parse([]byte(benchmarkData), "") actual, err := plugin.Parse([]byte(benchmarkData))
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics())
} }
@ -572,6 +572,6 @@ func BenchmarkParsing(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse([]byte(benchmarkData), "") plugin.Parse([]byte(benchmarkData))
} }
} }

View File

@ -44,7 +44,7 @@ func (p *Parser) SetDefaultTags(tags map[string]string) {
p.DefaultTags = tags p.DefaultTags = tags
} }
func (p *Parser) Parse(data []byte, extra string) ([]telegraf.Metric, error) { func (p *Parser) Parse(data []byte) ([]telegraf.Metric, error) {
// Determine the metric transport-type derived from the response header // Determine the metric transport-type derived from the response header
contentType := p.Header.Get("Content-Type") contentType := p.Header.Get("Content-Type")
var mediaType string var mediaType string
@ -102,7 +102,7 @@ func (p *Parser) Parse(data []byte, extra string) ([]telegraf.Metric, error) {
} }
func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
metrics, err := p.Parse([]byte(line), "") metrics, err := p.Parse([]byte(line))
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -160,7 +160,7 @@ func BenchmarkParsingMetricVersion1(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse(benchmarkData, "") plugin.Parse(benchmarkData)
} }
} }
@ -173,6 +173,6 @@ func BenchmarkParsingMetricVersion2(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse(benchmarkData, "") plugin.Parse(benchmarkData)
} }
} }

View File

@ -20,7 +20,7 @@ type Parser struct {
Log telegraf.Logger `toml:"-"` Log telegraf.Logger `toml:"-"`
} }
func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
var metrics []telegraf.Metric var metrics []telegraf.Metric
scanner := bufio.NewScanner(bytes.NewReader(buf)) scanner := bufio.NewScanner(bytes.NewReader(buf))

View File

@ -231,7 +231,7 @@ func TestParse(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
p := &Parser{Log: testutil.Logger{}} p := &Parser{Log: testutil.Logger{}}
actual, err := p.Parse(tt.input, "") actual, err := p.Parse(tt.input)
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, tt.expected, actual) testutil.RequireMetricsEqual(t, tt.expected, actual)
@ -301,7 +301,7 @@ func TestParse_DefaultTags(t *testing.T) {
p := &Parser{Log: testutil.Logger{}} p := &Parser{Log: testutil.Logger{}}
p.SetDefaultTags(tt.defaultTags) p.SetDefaultTags(tt.defaultTags)
actual, err := p.Parse(tt.input, "") actual, err := p.Parse(tt.input)
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, tt.expected, actual) testutil.RequireMetricsEqual(t, tt.expected, actual)
@ -343,7 +343,7 @@ func TestBenchmarkData(t *testing.T) {
), ),
} }
actual, err := plugin.Parse([]byte(benchmarkData), "") actual, err := plugin.Parse([]byte(benchmarkData))
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics())
} }
@ -353,6 +353,6 @@ func BenchmarkParsing(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse([]byte(benchmarkData), "") plugin.Parse([]byte(benchmarkData))
} }
} }

View File

@ -44,7 +44,7 @@ func (p *Parser) Init() error {
return nil return nil
} }
func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
reader := bytes.NewReader(buf) reader := bytes.NewReader(buf)
parquetReader, err := file.NewParquetReader(reader) parquetReader, err := file.NewParquetReader(reader)
if err != nil { if err != nil {
@ -121,7 +121,7 @@ func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) {
} }
func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
metrics, err := p.Parse([]byte(line), "") metrics, err := p.Parse([]byte(line))
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -71,6 +71,6 @@ func BenchmarkParsing(b *testing.B) {
b.ResetTimer() b.ResetTimer()
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse(benchmarkData, "") plugin.Parse(benchmarkData)
} }
} }

View File

@ -5,7 +5,6 @@ import (
"errors" "errors"
"math" "math"
"strconv" "strconv"
"strings"
"time" "time"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
@ -46,21 +45,23 @@ func (p *Parser) Init() error {
return nil return nil
} }
func (p *Parser) Parse(data []byte, topic string) ([]telegraf.Metric, error) { func (p *Parser) Parse(data []byte) ([]telegraf.Metric, error) {
metrics, deviceType, err := p.checkHeaderAndInitMetrics(data, topic) metrics, deviceType, err := p.checkHeaderAndInitMetrics(data)
if err != nil { if err != nil {
return nil, err return nil, err
} }
p.fillAnalogChanMetrics(metrics, data, 6) p.fillAnalogChanMetrics(metrics, data[6:])
p.fillSwitchChanMetrics(metrics, data, 9606) p.fillSwitchChanMetrics(metrics, data[9606:])
switch deviceType { switch deviceType {
case deviceTypeI: case deviceTypeI:
p.fillPQSPFChanMetrics(metrics, data, 9706) p.fillPQSPFChanMetrics(metrics, data[9706:])
case deviceTypeU: case deviceTypeU:
p.fillFdFChanMetrics(metrics, data, 9706) p.fillFdFChanMetrics(metrics, data[9706:])
p.fillUABUBCUCAChanMetrics(metrics, data, 10506) p.fillUABUBCUCAChanMetrics(metrics, data[10506:])
default: default:
return nil, errors.New("illegal device type") return nil, errors.New("illegal device type")
} }
@ -85,8 +86,8 @@ func init() {
) )
} }
// simply check the data, and initialize metrics with data and topic // simply check the data, and initialize metrics
func (p *Parser) checkHeaderAndInitMetrics(data []byte, topic string) ([]telegraf.Metric, int, error) { func (p *Parser) checkHeaderAndInitMetrics(data []byte) ([]telegraf.Metric, int, error) {
if len(data) < 6 { if len(data) < 6 {
return nil, 0, errors.New("no valid data") return nil, 0, errors.New("no valid data")
} }
@ -95,7 +96,6 @@ func (p *Parser) checkHeaderAndInitMetrics(data []byte, topic string) ([]telegra
deviceType := int(data[4]) deviceType := int(data[4])
metrics := make([]telegraf.Metric, p.pointFrequency) metrics := make([]telegraf.Metric, p.pointFrequency)
device, _ := strings.CutSuffix(topic, "_Phasor")
switch deviceType { switch deviceType {
case deviceTypeI: case deviceTypeI:
if len(data) < dataLengthI { if len(data) < dataLengthI {
@ -107,7 +107,7 @@ func (p *Parser) checkHeaderAndInitMetrics(data []byte, topic string) ([]telegra
for i := range metrics { for i := range metrics {
metrics[i] = metric.New("current", metrics[i] = metric.New("current",
map[string]string{"device": device}, map[string]string{},
make(map[string]any, 44), // 3*8+2*8+4 make(map[string]any, 44), // 3*8+2*8+4
time.Unix(second, int64(i*1e9/p.pointFrequency))) time.Unix(second, int64(i*1e9/p.pointFrequency)))
} }
@ -121,7 +121,7 @@ func (p *Parser) checkHeaderAndInitMetrics(data []byte, topic string) ([]telegra
for i := range metrics { for i := range metrics {
metrics[i] = metric.New("voltage", metrics[i] = metric.New("voltage",
map[string]string{"device": device}, map[string]string{},
make(map[string]any, 49), // 3*8+2*8+2+3*3 make(map[string]any, 49), // 3*8+2*8+2+3*3
time.Unix(second, int64(i*1e9/p.pointFrequency))) time.Unix(second, int64(i*1e9/p.pointFrequency)))
} }
@ -133,12 +133,12 @@ func (p *Parser) checkHeaderAndInitMetrics(data []byte, topic string) ([]telegra
} }
// yc metrics // yc metrics
func (p *Parser) fillAnalogChanMetrics(metrics []telegraf.Metric, data []byte, begin int) { func (p *Parser) fillAnalogChanMetrics(metrics []telegraf.Metric, data []byte) {
for ci := range 8 { for ci := range 8 {
chanNo := strconv.Itoa(ci + 1) chanNo := strconv.Itoa(ci + 1)
for mj := range metrics { for mj := range metrics {
b := begin + (ci*p.pointFrequency+mj)*24 b := (ci*p.pointFrequency + mj) * 24
amp := math.Float64frombits(binary.LittleEndian.Uint64(data[b : b+8])) amp := math.Float64frombits(binary.LittleEndian.Uint64(data[b : b+8]))
pa := math.Float64frombits(binary.LittleEndian.Uint64(data[b+8 : b+16])) pa := math.Float64frombits(binary.LittleEndian.Uint64(data[b+8 : b+16]))
@ -152,11 +152,11 @@ func (p *Parser) fillAnalogChanMetrics(metrics []telegraf.Metric, data []byte, b
} }
// yx metrics // yx metrics
func (p *Parser) fillSwitchChanMetrics(metrics []telegraf.Metric, data []byte, begin int) { func (p *Parser) fillSwitchChanMetrics(metrics []telegraf.Metric, data []byte) {
for ci := range 2 { for ci := range 2 {
for mj := range metrics { for mj := range metrics {
b := begin + ci*p.pointFrequency + mj b := ci*p.pointFrequency + mj
for bk := range 8 { for bk := range 8 {
chanNo := strconv.Itoa(ci*8 + bk + 1) chanNo := strconv.Itoa(ci*8 + bk + 1)
@ -166,24 +166,12 @@ func (p *Parser) fillSwitchChanMetrics(metrics []telegraf.Metric, data []byte, b
} }
} }
// current relative metrics // current metrics
func (p *Parser) fillPQSPFChanMetrics(metrics []telegraf.Metric, data []byte, begin int) { func (p *Parser) fillPQSPFChanMetrics(metrics []telegraf.Metric, data []byte) {
for ci, channel := range []string{"p", "q", "s", "pf"} { for ci, channel := range []string{"p", "q", "s", "pf"} {
for mj := range metrics { for mj := range metrics {
b := begin + (ci*p.pointFrequency+mj)*8 b := (ci*p.pointFrequency + mj) * 8
metrics[mj].AddField(channel, math.Float64frombits(binary.LittleEndian.Uint64(data[b:b+8])))
}
}
}
// voltage relative metrics
func (p *Parser) fillFdFChanMetrics(metrics []telegraf.Metric, data []byte, begin int) {
for ci, channel := range []string{"f", "df"} {
for mj := range metrics {
b := begin + (ci*p.pointFrequency+mj)*8
metrics[mj].AddField(channel, math.Float64frombits(binary.LittleEndian.Uint64(data[b:b+8]))) metrics[mj].AddField(channel, math.Float64frombits(binary.LittleEndian.Uint64(data[b:b+8])))
} }
@ -191,11 +179,23 @@ func (p *Parser) fillFdFChanMetrics(metrics []telegraf.Metric, data []byte, begi
} }
// voltage metrics // voltage metrics
func (p *Parser) fillUABUBCUCAChanMetrics(metrics []telegraf.Metric, data []byte, begin int) { func (p *Parser) fillFdFChanMetrics(metrics []telegraf.Metric, data []byte) {
for ci, channel := range []string{"f", "df"} {
for mj := range metrics {
b := (ci*p.pointFrequency + mj) * 8
metrics[mj].AddField(channel, math.Float64frombits(binary.LittleEndian.Uint64(data[b:b+8])))
}
}
}
// voltage metrics
func (p *Parser) fillUABUBCUCAChanMetrics(metrics []telegraf.Metric, data []byte) {
for ci, channel := range []string{"uab", "ubc", "uca"} { for ci, channel := range []string{"uab", "ubc", "uca"} {
for mj := range metrics { for mj := range metrics {
b := begin + (ci*p.pointFrequency+mj)*24 b := (ci*p.pointFrequency + mj) * 24
amp := math.Float64frombits(binary.LittleEndian.Uint64(data[b : b+8])) amp := math.Float64frombits(binary.LittleEndian.Uint64(data[b : b+8]))
pa := math.Float64frombits(binary.LittleEndian.Uint64(data[b+8 : b+16])) pa := math.Float64frombits(binary.LittleEndian.Uint64(data[b+8 : b+16]))

View File

@ -158,7 +158,7 @@ func TestParse(t *testing.T) {
parser := new(Parser) parser := new(Parser)
parser.Init() parser.Init()
actual, err := parser.Parse(data, topic) actual, err := parser.Parse(data)
if err != nil { if err != nil {
t.FailNow() t.FailNow()
} }

View File

@ -30,7 +30,7 @@ func (p *Parser) SetDefaultTags(tags map[string]string) {
p.DefaultTags = tags p.DefaultTags = tags
} }
func (p *Parser) Parse(data []byte, extra string) ([]telegraf.Metric, error) { func (p *Parser) Parse(data []byte) ([]telegraf.Metric, error) {
// Determine the metric transport-type derived from the response header and // Determine the metric transport-type derived from the response header and
// create a matching decoder. // create a matching decoder.
format := expfmt.NewFormat(expfmt.TypeProtoCompact) format := expfmt.NewFormat(expfmt.TypeProtoCompact)
@ -74,7 +74,7 @@ func (p *Parser) Parse(data []byte, extra string) ([]telegraf.Metric, error) {
} }
func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
metrics, err := p.Parse([]byte(line), "") metrics, err := p.Parse([]byte(line))
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -160,7 +160,7 @@ func BenchmarkParsingMetricVersion1(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse(benchmarkData, "") plugin.Parse(benchmarkData)
} }
} }
@ -173,6 +173,6 @@ func BenchmarkParsingMetricVersion2(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse(benchmarkData, "") plugin.Parse(benchmarkData)
} }
} }

View File

@ -15,7 +15,7 @@ type Parser struct {
DefaultTags map[string]string DefaultTags map[string]string
} }
func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
var err error var err error
var metrics []telegraf.Metric var metrics []telegraf.Metric
var req prompb.WriteRequest var req prompb.WriteRequest
@ -44,7 +44,7 @@ func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) {
} }
func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
metrics, err := p.Parse([]byte(line), "") metrics, err := p.Parse([]byte(line))
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -76,7 +76,7 @@ func TestCases(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
// Act and assert // Act and assert
parsed, err := parser.Parse(inputBytes, "") parsed, err := parser.Parse(inputBytes)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, parsed, len(expected)) require.Len(t, parsed, len(expected))
// Ignore type when comparing, because expected metrics are parsed from influx lines and thus always untyped // Ignore type when comparing, because expected metrics are parsed from influx lines and thus always untyped
@ -97,7 +97,7 @@ func BenchmarkParsingMetricVersion1(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
parser.Parse(benchmarkData, "") parser.Parse(benchmarkData)
} }
} }
@ -112,7 +112,7 @@ func BenchmarkParsingMetricVersion2(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
parser.Parse(benchmarkData, "") parser.Parse(benchmarkData)
} }
} }
@ -170,7 +170,7 @@ func TestParse(t *testing.T) {
DefaultTags: map[string]string{}, DefaultTags: map[string]string{},
} }
metrics, err := parser.Parse(inoutBytes, "") metrics, err := parser.Parse(inoutBytes)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 2) require.Len(t, metrics, 2)
testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics())
@ -279,7 +279,7 @@ func TestHistograms(t *testing.T) {
parser := Parser{ parser := Parser{
DefaultTags: map[string]string{}, DefaultTags: map[string]string{},
} }
metrics, err := parser.Parse(inoutBytes, "") metrics, err := parser.Parse(inoutBytes)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 22) require.Len(t, metrics, 22)
testutil.RequireMetricsSubset(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) testutil.RequireMetricsSubset(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics())
@ -323,7 +323,7 @@ func TestDefaultTags(t *testing.T) {
}, },
} }
metrics, err := parser.Parse(inoutBytes, "") metrics, err := parser.Parse(inoutBytes)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 1) require.Len(t, metrics, 1)
testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics())
@ -365,7 +365,7 @@ func TestMetricsWithTimestamp(t *testing.T) {
DefaultTags: map[string]string{}, DefaultTags: map[string]string{},
} }
metrics, err := parser.Parse(inoutBytes, "") metrics, err := parser.Parse(inoutBytes)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 1) require.Len(t, metrics, 1)
testutil.RequireMetricsEqual(t, expected, metrics, testutil.SortMetrics()) testutil.RequireMetricsEqual(t, expected, metrics, testutil.SortMetrics())
@ -430,7 +430,7 @@ func TestBenchmarkData(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
plugin := &Parser{} plugin := &Parser{}
actual, err := plugin.Parse(benchmarkData, "") actual, err := plugin.Parse(benchmarkData)
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics())
} }
@ -444,6 +444,6 @@ func BenchmarkParsing(b *testing.B) {
b.ResetTimer() b.ResetTimer()
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse(benchmarkData, "") plugin.Parse(benchmarkData)
} }
} }

View File

@ -45,7 +45,7 @@ func (v *Parser) Init() error {
return nil return nil
} }
func (v *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { func (v *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
vStr := string(bytes.TrimSpace(bytes.Trim(buf, "\x00"))) vStr := string(bytes.TrimSpace(bytes.Trim(buf, "\x00")))
// unless it's a string, separate out any fields in the buffer, // unless it's a string, separate out any fields in the buffer,
@ -96,7 +96,7 @@ func (v *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) {
} }
func (v *Parser) ParseLine(line string) (telegraf.Metric, error) { func (v *Parser) ParseLine(line string) (telegraf.Metric, error) {
metrics, err := v.Parse([]byte(line), "") metrics, err := v.Parse([]byte(line))
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -106,7 +106,7 @@ func TestParseValidValues(t *testing.T) {
DataType: tt.dtype, DataType: tt.dtype,
} }
require.NoError(t, plugin.Init()) require.NoError(t, plugin.Init())
actual, err := plugin.Parse(tt.input, "") actual, err := plugin.Parse(tt.input)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual, 1) require.Len(t, actual, 1)
testutil.RequireMetricEqual(t, expected, actual[0], testutil.IgnoreTime()) testutil.RequireMetricEqual(t, expected, actual[0], testutil.IgnoreTime())
@ -188,7 +188,7 @@ func TestParseCustomFieldName(t *testing.T) {
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
metrics, err := parser.Parse([]byte(`55`), "") metrics, err := parser.Parse([]byte(`55`))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, map[string]interface{}{"penguin": int64(55)}, metrics[0].Fields()) require.Equal(t, map[string]interface{}{"penguin": int64(55)}, metrics[0].Fields())
} }
@ -223,7 +223,7 @@ func TestParseInvalidValues(t *testing.T) {
DataType: tt.dtype, DataType: tt.dtype,
} }
require.NoError(t, plugin.Init()) require.NoError(t, plugin.Init())
actual, err := plugin.Parse(tt.input, "") actual, err := plugin.Parse(tt.input)
require.ErrorContains(t, err, "invalid syntax") require.ErrorContains(t, err, "invalid syntax")
require.Empty(t, actual) require.Empty(t, actual)
}) })
@ -282,7 +282,7 @@ func TestParseValidValuesDefaultTags(t *testing.T) {
require.NoError(t, plugin.Init()) require.NoError(t, plugin.Init())
plugin.SetDefaultTags(map[string]string{"test": "tag"}) plugin.SetDefaultTags(map[string]string{"test": "tag"})
actual, err := plugin.Parse([]byte("55"), "") actual, err := plugin.Parse([]byte("55"))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, actual, 1) require.Len(t, actual, 1)
@ -295,7 +295,7 @@ func TestParseValuesWithNullCharacter(t *testing.T) {
DataType: "integer", DataType: "integer",
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
metrics, err := parser.Parse([]byte("55\x00"), "") metrics, err := parser.Parse([]byte("55\x00"))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, metrics, 1) require.Len(t, metrics, 1)
require.Equal(t, "value_test", metrics[0].Name()) require.Equal(t, "value_test", metrics[0].Name())
@ -330,7 +330,7 @@ func TestBenchmarkData(t *testing.T) {
), ),
} }
actual, err := plugin.Parse([]byte(benchmarkData), "") actual, err := plugin.Parse([]byte(benchmarkData))
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics())
} }
@ -341,6 +341,6 @@ func BenchmarkParsing(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse([]byte(benchmarkData), "") plugin.Parse([]byte(benchmarkData))
} }
} }

View File

@ -67,7 +67,7 @@ func (p *Parser) Init() error {
func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
buf := []byte(line) buf := []byte(line)
metrics, err := p.Parse(buf, "") metrics, err := p.Parse(buf)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -79,7 +79,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
return nil, nil return nil, nil
} }
func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
pp := p.parsers.Get().(*PointParser) pp := p.parsers.Get().(*PointParser)
defer p.parsers.Put(pp) defer p.parsers.Put(pp)
return pp.Parse(buf) return pp.Parse(buf)

View File

@ -15,25 +15,25 @@ func TestParse(t *testing.T) {
parser := &Parser{} parser := &Parser{}
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
parsedMetrics, err := parser.Parse([]byte("test.metric 1"), "") parsedMetrics, err := parser.Parse([]byte("test.metric 1"))
require.NoError(t, err) require.NoError(t, err)
testMetric := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) testMetric := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0))
require.Equal(t, parsedMetrics[0].Name(), testMetric.Name()) require.Equal(t, parsedMetrics[0].Name(), testMetric.Name())
require.Equal(t, parsedMetrics[0].Fields(), testMetric.Fields()) require.Equal(t, parsedMetrics[0].Fields(), testMetric.Fields())
parsedMetrics, err = parser.Parse([]byte("\u2206test.delta 1 1530939936"), "") parsedMetrics, err = parser.Parse([]byte("\u2206test.delta 1 1530939936"))
require.NoError(t, err) require.NoError(t, err)
testMetric = metric.New("\u2206test.delta", map[string]string{}, testMetric = metric.New("\u2206test.delta", map[string]string{},
map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
require.EqualValues(t, parsedMetrics[0], testMetric) require.EqualValues(t, parsedMetrics[0], testMetric)
parsedMetrics, err = parser.Parse([]byte("\u0394test.delta 1 1530939936"), "") parsedMetrics, err = parser.Parse([]byte("\u0394test.delta 1 1530939936"))
require.NoError(t, err) require.NoError(t, err)
testMetric = metric.New("\u0394test.delta", map[string]string{}, testMetric = metric.New("\u0394test.delta", map[string]string{},
map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
require.EqualValues(t, parsedMetrics[0], testMetric) require.EqualValues(t, parsedMetrics[0], testMetric)
parsedMetrics, err = parser.Parse([]byte("\u0394test.delta 1.234 1530939936 source=\"mysource\" tag2=value2"), "") parsedMetrics, err = parser.Parse([]byte("\u0394test.delta 1.234 1530939936 source=\"mysource\" tag2=value2"))
require.NoError(t, err) require.NoError(t, err)
testMetric = metric.New( testMetric = metric.New(
"\u0394test.delta", "\u0394test.delta",
@ -43,22 +43,22 @@ func TestParse(t *testing.T) {
) )
require.EqualValues(t, parsedMetrics[0], testMetric) require.EqualValues(t, parsedMetrics[0], testMetric)
parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936"), "") parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936"))
require.NoError(t, err) require.NoError(t, err)
testMetric = metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) testMetric = metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
require.EqualValues(t, parsedMetrics[0], testMetric) require.EqualValues(t, parsedMetrics[0], testMetric)
parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource"), "") parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource"))
require.NoError(t, err) require.NoError(t, err)
testMetric = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) testMetric = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
require.EqualValues(t, parsedMetrics[0], testMetric) require.EqualValues(t, parsedMetrics[0], testMetric)
parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 source=\"mysource\""), "") parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 source=\"mysource\""))
require.NoError(t, err) require.NoError(t, err)
testMetric = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) testMetric = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
require.EqualValues(t, parsedMetrics[0], testMetric) require.EqualValues(t, parsedMetrics[0], testMetric)
parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2"), "") parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2"))
require.NoError(t, err) require.NoError(t, err)
testMetric = metric.New( testMetric = metric.New(
"test.metric", "test.metric",
@ -68,7 +68,7 @@ func TestParse(t *testing.T) {
) )
require.EqualValues(t, parsedMetrics[0], testMetric) require.EqualValues(t, parsedMetrics[0], testMetric)
parsedMetrics, err = parser.Parse([]byte("\"test.metric\" -1.1234 1530939936 \"source\"=\"mysource\" tag2=value2"), "") parsedMetrics, err = parser.Parse([]byte("\"test.metric\" -1.1234 1530939936 \"source\"=\"mysource\" tag2=value2"))
require.NoError(t, err) require.NoError(t, err)
testMetric = metric.New( testMetric = metric.New(
"test.metric", "test.metric",
@ -78,7 +78,7 @@ func TestParse(t *testing.T) {
) )
require.EqualValues(t, parsedMetrics[0], testMetric) require.EqualValues(t, parsedMetrics[0], testMetric)
parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234e04 1530939936 \"source\"=\"mysource\" tag2=value2"), "") parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234e04 1530939936 \"source\"=\"mysource\" tag2=value2"))
require.NoError(t, err) require.NoError(t, err)
testMetric = metric.New( testMetric = metric.New(
"test.metric", "test.metric",
@ -88,7 +88,7 @@ func TestParse(t *testing.T) {
) )
require.EqualValues(t, parsedMetrics[0], testMetric) require.EqualValues(t, parsedMetrics[0], testMetric)
parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234e-04 1530939936 \"source\"=\"mysource\" tag2=value2"), "") parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234e-04 1530939936 \"source\"=\"mysource\" tag2=value2"))
require.NoError(t, err) require.NoError(t, err)
testMetric = metric.New( testMetric = metric.New(
"test.metric", "test.metric",
@ -98,7 +98,7 @@ func TestParse(t *testing.T) {
) )
require.EqualValues(t, parsedMetrics[0], testMetric) require.EqualValues(t, parsedMetrics[0], testMetric)
parsedMetrics, err = parser.Parse([]byte("test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 "), "") parsedMetrics, err = parser.Parse([]byte("test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 "))
require.NoError(t, err) require.NoError(t, err)
testMetric = metric.New( testMetric = metric.New(
"test.metric", "test.metric",
@ -159,7 +159,7 @@ func TestParseMultiple(t *testing.T) {
parser := &Parser{} parser := &Parser{}
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
parsedMetrics, err := parser.Parse([]byte("test.metric 1\ntest.metric2 2 1530939936"), "") parsedMetrics, err := parser.Parse([]byte("test.metric 1\ntest.metric2 2 1530939936"))
require.NoError(t, err) require.NoError(t, err)
testMetric1 := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) testMetric1 := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0))
testMetric2 := metric.New("test.metric2", map[string]string{}, map[string]interface{}{"value": 2.}, time.Unix(1530939936, 0)) testMetric2 := metric.New("test.metric2", map[string]string{}, map[string]interface{}{"value": 2.}, time.Unix(1530939936, 0))
@ -168,7 +168,7 @@ func TestParseMultiple(t *testing.T) {
require.Equal(t, parsedMetrics[0].Fields(), testMetrics[0].Fields()) require.Equal(t, parsedMetrics[0].Fields(), testMetrics[0].Fields())
require.EqualValues(t, parsedMetrics[1], testMetrics[1]) require.EqualValues(t, parsedMetrics[1], testMetrics[1])
parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource\n\"test.metric\" 1.1234 1530939936 source=\"mysource\""), "") parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource\n\"test.metric\" 1.1234 1530939936 source=\"mysource\""))
require.NoError(t, err) require.NoError(t, err)
testMetric1 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) testMetric1 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
testMetric2 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) testMetric2 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
@ -177,10 +177,9 @@ func TestParseMultiple(t *testing.T) {
parsedMetrics, err = parser.Parse( parsedMetrics, err = parser.Parse(
[]byte( []byte(
"\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2\n"+ "\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2\n" +
"test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 ", "test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 ",
), ),
"",
) )
require.NoError(t, err) require.NoError(t, err)
testMetric1 = metric.New( testMetric1 = metric.New(
@ -200,7 +199,6 @@ func TestParseMultiple(t *testing.T) {
parsedMetrics, err = parser.Parse( parsedMetrics, err = parser.Parse(
[]byte("test.metric 1 1530939936 source=mysource\n\"test.metric\" 1.1234 1530939936 source=\"mysource\"\ntest.metric3 333 1530939936 tagit=valueit"), []byte("test.metric 1 1530939936 source=mysource\n\"test.metric\" 1.1234 1530939936 source=\"mysource\"\ntest.metric3 333 1530939936 tagit=valueit"),
"",
) )
require.NoError(t, err) require.NoError(t, err)
testMetric1 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) testMetric1 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
@ -229,31 +227,31 @@ func TestParseInvalid(t *testing.T) {
parser := &Parser{} parser := &Parser{}
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
_, err := parser.Parse([]byte("test.metric"), "") _, err := parser.Parse([]byte("test.metric"))
require.Error(t, err) require.Error(t, err)
_, err = parser.Parse([]byte("test.metric string"), "") _, err = parser.Parse([]byte("test.metric string"))
require.Error(t, err) require.Error(t, err)
_, err = parser.Parse([]byte("test.metric 1 string"), "") _, err = parser.Parse([]byte("test.metric 1 string"))
require.Error(t, err) require.Error(t, err)
_, err = parser.Parse([]byte("test.\u2206delta 1"), "") _, err = parser.Parse([]byte("test.\u2206delta 1"))
require.Error(t, err) require.Error(t, err)
_, err = parser.Parse([]byte("test.metric 1 1530939936 tag_no_pair"), "") _, err = parser.Parse([]byte("test.metric 1 1530939936 tag_no_pair"))
require.Error(t, err) require.Error(t, err)
_, err = parser.Parse([]byte("test.metric 1 1530939936 tag_broken_value=\""), "") _, err = parser.Parse([]byte("test.metric 1 1530939936 tag_broken_value=\""))
require.Error(t, err) require.Error(t, err)
_, err = parser.Parse([]byte("\"test.metric 1 1530939936"), "") _, err = parser.Parse([]byte("\"test.metric 1 1530939936"))
require.Error(t, err) require.Error(t, err)
_, err = parser.Parse([]byte("test.metric 1 1530939936 tag1=val\\\"ue1"), "") _, err = parser.Parse([]byte("test.metric 1 1530939936 tag1=val\\\"ue1"))
require.Error(t, err) require.Error(t, err)
_, err = parser.Parse([]byte("\"test.metric\" -1.12-34 1530939936 \"source\"=\"mysource\" tag2=value2"), "") _, err = parser.Parse([]byte("\"test.metric\" -1.12-34 1530939936 \"source\"=\"mysource\" tag2=value2"))
require.Error(t, err) require.Error(t, err)
} }
@ -262,7 +260,7 @@ func TestParseDefaultTags(t *testing.T) {
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
parser.SetDefaultTags(map[string]string{"myDefault": "value1", "another": "test2"}) parser.SetDefaultTags(map[string]string{"myDefault": "value1", "another": "test2"})
parsedMetrics, err := parser.Parse([]byte("test.metric 1 1530939936"), "") parsedMetrics, err := parser.Parse([]byte("test.metric 1 1530939936"))
require.NoError(t, err) require.NoError(t, err)
testMetric := metric.New( testMetric := metric.New(
"test.metric", "test.metric",
@ -272,7 +270,7 @@ func TestParseDefaultTags(t *testing.T) {
) )
require.EqualValues(t, parsedMetrics[0], testMetric) require.EqualValues(t, parsedMetrics[0], testMetric)
parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource"), "") parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource"))
require.NoError(t, err) require.NoError(t, err)
testMetric = metric.New( testMetric = metric.New(
"test.metric", "test.metric",
@ -282,7 +280,7 @@ func TestParseDefaultTags(t *testing.T) {
) )
require.EqualValues(t, parsedMetrics[0], testMetric) require.EqualValues(t, parsedMetrics[0], testMetric)
parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 another=\"test3\""), "") parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 another=\"test3\""))
require.NoError(t, err) require.NoError(t, err)
testMetric = metric.New( testMetric = metric.New(
"test.metric", "test.metric",
@ -328,7 +326,7 @@ func TestBenchmarkData(t *testing.T) {
), ),
} }
actual, err := plugin.Parse([]byte(benchmarkData), "") actual, err := plugin.Parse([]byte(benchmarkData))
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics())
} }
@ -339,6 +337,6 @@ func BenchmarkParsing(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse([]byte(benchmarkData), "") plugin.Parse([]byte(benchmarkData))
} }
} }

View File

@ -196,7 +196,7 @@ func (p *Parser) Init() error {
return nil return nil
} }
func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) { func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
t := time.Now() t := time.Now()
// Parse the XML // Parse the XML
@ -236,7 +236,7 @@ func (p *Parser) Parse(buf []byte, extra string) ([]telegraf.Metric, error) {
} }
func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
metrics, err := p.Parse([]byte(line), "") metrics, err := p.Parse([]byte(line))
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -997,7 +997,7 @@ func TestParseMultiNodes(t *testing.T) {
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
actual, err := parser.Parse([]byte(tt.input), "") actual, err := parser.Parse([]byte(tt.input))
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, tt.expected, actual) testutil.RequireMetricsEqual(t, tt.expected, actual)
@ -1183,7 +1183,7 @@ func TestEmptySelection(t *testing.T) {
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
_, err := parser.Parse([]byte(tt.input), "") _, err := parser.Parse([]byte(tt.input))
require.Error(t, err) require.Error(t, err)
require.Equal(t, "cannot parse with empty selection node", err.Error()) require.Equal(t, "cannot parse with empty selection node", err.Error())
}) })
@ -1257,7 +1257,7 @@ func TestEmptySelectionAllowed(t *testing.T) {
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
_, err := parser.Parse([]byte(tt.input), "") _, err := parser.Parse([]byte(tt.input))
require.NoError(t, err) require.NoError(t, err)
}) })
} }
@ -1365,7 +1365,7 @@ func TestTestCases(t *testing.T) {
Log: testutil.Logger{Name: "parsers.xml"}, Log: testutil.Logger{Name: "parsers.xml"},
} }
require.NoError(t, parser.Init()) require.NoError(t, parser.Init())
outputs, err := parser.Parse(content, "") outputs, err := parser.Parse(content)
if len(expectedErrors) == 0 { if len(expectedErrors) == 0 {
require.NoError(t, err) require.NoError(t, err)
} }
@ -1563,7 +1563,7 @@ func TestBenchmarkDataXML(t *testing.T) {
} }
require.NoError(t, plugin.Init()) require.NoError(t, plugin.Init())
actual, err := plugin.Parse([]byte(benchmarkDataXML), "") actual, err := plugin.Parse([]byte(benchmarkDataXML))
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, benchmarkExpectedMetrics, actual) testutil.RequireMetricsEqual(t, benchmarkExpectedMetrics, actual)
} }
@ -1579,7 +1579,7 @@ func BenchmarkParsingXML(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse([]byte(benchmarkDataXML), "") plugin.Parse([]byte(benchmarkDataXML))
} }
} }
@ -1626,7 +1626,7 @@ func TestBenchmarkDataJSON(t *testing.T) {
} }
require.NoError(t, plugin.Init()) require.NoError(t, plugin.Init())
actual, err := plugin.Parse([]byte(benchmarkDataJSON), "") actual, err := plugin.Parse([]byte(benchmarkDataJSON))
require.NoError(t, err) require.NoError(t, err)
testutil.RequireMetricsEqual(t, benchmarkExpectedMetrics, actual) testutil.RequireMetricsEqual(t, benchmarkExpectedMetrics, actual)
} }
@ -1642,7 +1642,7 @@ func BenchmarkParsingJSON(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse([]byte(benchmarkDataJSON), "") plugin.Parse([]byte(benchmarkDataJSON))
} }
} }
@ -1678,7 +1678,7 @@ func BenchmarkParsingProtobuf(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse(benchmarkData, "") plugin.Parse(benchmarkData)
} }
} }
@ -1751,7 +1751,7 @@ func TestBenchmarkDataMsgPack(t *testing.T) {
actual := make([]telegraf.Metric, 0, 2) actual := make([]telegraf.Metric, 0, 2)
for _, msg := range benchmarkDataMsgPack { for _, msg := range benchmarkDataMsgPack {
m, err := plugin.Parse(msg, "") m, err := plugin.Parse(msg)
require.NoError(t, err) require.NoError(t, err)
actual = append(actual, m...) actual = append(actual, m...)
} }
@ -1782,7 +1782,7 @@ func BenchmarkParsingMsgPack(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse(benchmarkDataMsgPack[n%2], "") plugin.Parse(benchmarkDataMsgPack[n%2])
} }
} }
@ -1815,6 +1815,6 @@ func BenchmarkParsingCBOR(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
//nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations //nolint:errcheck // Benchmarking so skip the error check to avoid the unnecessary operations
plugin.Parse(benchmarkData, "") plugin.Parse(benchmarkData)
} }
} }

View File

@ -143,7 +143,7 @@ func (d *Dedup) SetState(state interface{}) error {
if !ok { if !ok {
return fmt.Errorf("state has wrong type %T", state) return fmt.Errorf("state has wrong type %T", state)
} }
metrics, err := p.Parse(data, "") metrics, err := p.Parse(data)
if err == nil { if err == nil {
d.Apply(metrics...) d.Apply(metrics...)
} }

View File

@ -113,7 +113,7 @@ func (e *Execd) cmdReadOut(out io.Reader) {
scanner.Buffer(scanBuf, 262144) scanner.Buffer(scanBuf, 262144)
for scanner.Scan() { for scanner.Scan() {
metrics, err := e.parser.Parse(scanner.Bytes(), "") metrics, err := e.parser.Parse(scanner.Bytes())
if err != nil { if err != nil {
e.Log.Errorf("Parse error: %s", err) e.Log.Errorf("Parse error: %s", err)
} }

View File

@ -85,7 +85,7 @@ func (p *Parser) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
value = decoded[:n] value = decoded[:n]
} }
fromFieldMetric, err := p.parser.Parse(value, "") fromFieldMetric, err := p.parser.Parse(value)
if err != nil { if err != nil {
p.Log.Errorf("could not parse field %s: %v", field.Key, err) p.Log.Errorf("could not parse field %s: %v", field.Key, err)
continue continue
@ -178,7 +178,7 @@ func mergeWithTimestamp(base telegraf.Metric, metrics []telegraf.Metric) telegra
} }
func (p *Parser) parseValue(value string) ([]telegraf.Metric, error) { func (p *Parser) parseValue(value string) ([]telegraf.Metric, error) {
return p.parser.Parse([]byte(value), "") return p.parser.Parse([]byte(value))
} }
func toBytes(value interface{}) ([]byte, error) { func toBytes(value interface{}) ([]byte, error) {

View File

@ -103,7 +103,7 @@ func ParseMetricsFromFile(filename string, parser telegraf.Parser) ([]telegraf.M
continue continue
} }
nonutc, err := parser.Parse(line, "") nonutc, err := parser.Parse(line)
if err != nil { if err != nil {
return nil, fmt.Errorf("unable to parse metric in %q failed: %w", line, err) return nil, fmt.Errorf("unable to parse metric in %q failed: %w", line, err)
} }

View File

@ -150,7 +150,7 @@ func (p *Plugin) Gather(acc telegraf.Accumulator) error {
if err != nil { if err != nil {
return err return err
} }
metrics, err := p.Parser.Parse(data, "") metrics, err := p.Parser.Parse(data)
if err != nil { if err != nil {
return err return err
} }