fix: Linter fixes for plugins/parsers/[a-z]* (#10145)
This commit is contained in:
parent 7d3531a29b
commit 8e85a67ee1
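The hunks below apply two recurring linter-driven changes across the parser plugins: test assertions move from testify's assert to require (so a failed check stops the test immediately), and parsers log through their own telegraf.Logger field instead of the global log package. A minimal sketch of both patterns, using a hypothetical stand-in parser type (not code from this commit):

```go
package example

import (
	"strconv"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/testutil"
)

// Parser is a stand-in for the parser types touched in this commit; only the
// Log field matters for the logging pattern.
type Parser struct {
	Log telegraf.Logger `toml:"-"`
}

// parseInt reports problems through the plugin logger rather than log.Printf.
func (p *Parser) parseInt(v string) (int64, bool) {
	iv, err := strconv.ParseInt(v, 10, 64)
	if err != nil {
		p.Log.Errorf("Error parsing %s to int: %s", v, err)
		return 0, false
	}
	return iv, true
}

// TestParseInt uses require, so any failed assertion aborts the test at once.
func TestParseInt(t *testing.T) {
	p := &Parser{Log: testutil.Logger{}}
	v, ok := p.parseInt("42")
	require.True(t, ok)
	require.Equal(t, int64(42), v)
}
```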
@@ -10,13 +10,14 @@ import (
"testing"
"time"

+"github.com/stretchr/testify/require"
+
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/models"
"github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/parsers"
-"github.com/stretchr/testify/require"
)

func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) {
@@ -140,12 +141,17 @@ func TestConfig_LoadDirectory(t *testing.T) {
expectedConfigs[0].Tags = make(map[string]string)

expectedPlugins[1] = inputs.Inputs["exec"]().(*MockupInputPlugin)
-p, err := parsers.NewParser(&parsers.Config{
+parserConfig := &parsers.Config{
MetricName: "exec",
DataFormat: "json",
JSONStrict: true,
-})
+}
+p, err := parsers.NewParser(parserConfig)
require.NoError(t, err)
+
+// Inject logger to have proper struct for comparison
+models.SetLoggerOnPlugin(p, models.NewLogger("parsers", parserConfig.DataFormat, parserConfig.MetricName))
+
expectedPlugins[1].SetParser(p)
expectedPlugins[1].Command = "/usr/bin/myothercollector --foo=bar"
expectedConfigs[1] = &models.InputConfig{

@@ -3,7 +3,6 @@ package collectd
import (
"errors"
"fmt"
-"log"
"os"

"collectd.org/api"
@@ -24,6 +23,7 @@ type CollectdParser struct {
//whether or not to split multi value metric into multiple metrics
//default value is split
ParseMultiValue string
+Log telegraf.Logger `toml:"-"`
popts network.ParseOpts
}

@@ -81,7 +81,7 @@ func (p *CollectdParser) Parse(buf []byte) ([]telegraf.Metric, error) {

metrics := []telegraf.Metric{}
for _, valueList := range valueLists {
-metrics = append(metrics, UnmarshalValueList(valueList, p.ParseMultiValue)...)
+metrics = append(metrics, p.unmarshalValueList(valueList)...)
}

if len(p.DefaultTags) > 0 {
@@ -115,12 +115,13 @@ func (p *CollectdParser) SetDefaultTags(tags map[string]string) {
p.DefaultTags = tags
}

-// UnmarshalValueList translates a ValueList into a Telegraf metric.
-func UnmarshalValueList(vl *api.ValueList, multiValue string) []telegraf.Metric {
+// unmarshalValueList translates a ValueList into a Telegraf metric.
+func (p *CollectdParser) unmarshalValueList(vl *api.ValueList) []telegraf.Metric {
timestamp := vl.Time.UTC()

var metrics []telegraf.Metric

+var multiValue = p.ParseMultiValue
//set multiValue to default "split" if nothing is specified
if multiValue == "" {
multiValue = "split"
@@ -192,7 +193,7 @@ func UnmarshalValueList(vl *api.ValueList, multiValue string) []telegraf.Metric

metrics = append(metrics, m)
default:
-log.Printf("parse-multi-value config can only be 'split' or 'join'")
+p.Log.Info("parse-multi-value config can only be 'split' or 'join'")
}
return metrics
}

@@ -6,7 +6,6 @@ import (

"collectd.org/api"
"collectd.org/network"
-"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/influxdata/telegraf"
@@ -144,7 +143,7 @@ func TestParseMultiValueSplit(t *testing.T) {
metrics, err := parser.Parse(bytes)
require.NoError(t, err)

-assert.Equal(t, 2, len(metrics))
+require.Equal(t, 2, len(metrics))
}

func TestParse_DefaultTags(t *testing.T) {
@@ -215,7 +214,7 @@ func TestParse_SignSecurityLevel(t *testing.T) {
bytes, err = buf.Bytes()
require.NoError(t, err)

-metrics, err = parser.Parse(bytes)
+_, err = parser.Parse(bytes)
require.Error(t, err)
}

@@ -270,7 +269,7 @@ func TestParse_EncryptSecurityLevel(t *testing.T) {
bytes, err = buf.Bytes()
require.NoError(t, err)

-metrics, err = parser.Parse(bytes)
+_, err = parser.Parse(bytes)
require.Error(t, err)
}

@@ -6,10 +6,11 @@ import (
"testing"
"time"

+"github.com/stretchr/testify/require"
+
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
-"github.com/stretchr/testify/require"
)

var DefaultTime = func() time.Time {
@@ -100,6 +101,8 @@ func TestTimestamp(t *testing.T) {
TimeFunc: DefaultTime,
},
)
+require.NoError(t, err)
+
testCSV := `line1,line2,line3
23/05/09 04:05:06 PM,70,test_name
07/11/09 04:05:06 PM,80,test_name2`
@@ -121,6 +124,8 @@ func TestTimestampYYYYMMDDHHmm(t *testing.T) {
TimeFunc: DefaultTime,
},
)
+require.NoError(t, err)
+
testCSV := `line1,line2,line3
200905231605,70,test_name
200907111605,80,test_name2`

@@ -3,14 +3,14 @@ package dropwizard
import (
"encoding/json"
"fmt"
-"log"
"time"

+"github.com/tidwall/gjson"
+
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/templating"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/parsers/influx"
-"github.com/tidwall/gjson"
)

type TimeFunc func() time.Time
@@ -42,6 +42,8 @@ type parser struct {
// an optional map of default tags to use for metrics
DefaultTags map[string]string

+Log telegraf.Logger `toml:"-"`
+
separator string
templateEngine *templating.Engine

@@ -152,7 +154,7 @@ func (p *parser) readTags(buf []byte) map[string]string {
var tags map[string]string
err := json.Unmarshal(tagsBytes, &tags)
if err != nil {
-log.Printf("W! failed to parse tags from JSON path '%s': %s\n", p.TagsPath, err)
+p.Log.Warnf("Failed to parse tags from JSON path '%s': %s\n", p.TagsPath, err)
} else if len(tags) > 0 {
return tags
}

@@ -1,16 +1,15 @@
package dropwizard

import (
+"fmt"
+"github.com/influxdata/telegraf/testutil"
"testing"
+"time"

+"github.com/stretchr/testify/require"
+
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
-
-"fmt"
-"time"
-
-"github.com/stretchr/testify/assert"
-"github.com/stretchr/testify/require"
)

var testTimeFunc = func() time.Time {
@@ -34,8 +33,8 @@ func TestParseValidEmptyJSON(t *testing.T) {

// Most basic vanilla test
metrics, err := parser.Parse([]byte(validEmptyJSON))
-assert.NoError(t, err)
-assert.Len(t, metrics, 0)
+require.NoError(t, err)
+require.Len(t, metrics, 0)
}

// validCounterJSON is a valid dropwizard json document containing one counter
@@ -58,13 +57,13 @@ func TestParseValidCounterJSON(t *testing.T) {
parser := NewParser()

metrics, err := parser.Parse([]byte(validCounterJSON))
-assert.NoError(t, err)
-assert.Len(t, metrics, 1)
-assert.Equal(t, "measurement", metrics[0].Name())
-assert.Equal(t, map[string]interface{}{
+require.NoError(t, err)
+require.Len(t, metrics, 1)
+require.Equal(t, "measurement", metrics[0].Name())
+require.Equal(t, map[string]interface{}{
"count": float64(1),
}, metrics[0].Fields())
-assert.Equal(t, map[string]string{"metric_type": "counter"}, metrics[0].Tags())
+require.Equal(t, map[string]string{"metric_type": "counter"}, metrics[0].Tags())
}

// validEmbeddedCounterJSON is a valid json document containing separate fields for dropwizard metrics, tags and time override.
@@ -99,19 +98,19 @@ func TestParseValidEmbeddedCounterJSON(t *testing.T) {
parser.TimePath = "time"

metrics, err := parser.Parse([]byte(validEmbeddedCounterJSON))
-assert.NoError(t, err)
-assert.Len(t, metrics, 1)
-assert.Equal(t, "measurement", metrics[0].Name())
-assert.Equal(t, map[string]interface{}{
+require.NoError(t, err)
+require.Len(t, metrics, 1)
+require.Equal(t, "measurement", metrics[0].Name())
+require.Equal(t, map[string]interface{}{
"count": float64(1),
}, metrics[0].Fields())
-assert.Equal(t, map[string]string{
+require.Equal(t, map[string]string{
"metric_type": "counter",
"tag1": "green",
"tag2": "yellow",
"tag3 space,comma=equals": "red ,=",
}, metrics[0].Tags())
-assert.True(t, metricTime.Equal(metrics[0].Time()), fmt.Sprintf("%s should be equal to %s", metrics[0].Time(), metricTime))
+require.True(t, metricTime.Equal(metrics[0].Time()), fmt.Sprintf("%s should be equal to %s", metrics[0].Time(), metricTime))

// now test json tags through TagPathsMap
parser2 := NewParser()
@@ -119,8 +118,8 @@ func TestParseValidEmbeddedCounterJSON(t *testing.T) {
parser2.TagPathsMap = map[string]string{"tag1": "tags.tag1"}
parser2.TimePath = "time"
metrics2, err2 := parser2.Parse([]byte(validEmbeddedCounterJSON))
-assert.NoError(t, err2)
-assert.Equal(t, map[string]string{"metric_type": "counter", "tag1": "green"}, metrics2[0].Tags())
+require.NoError(t, err2)
+require.Equal(t, map[string]string{"metric_type": "counter", "tag1": "green"}, metrics2[0].Tags())
}

// validMeterJSON1 is a valid dropwizard json document containing one meter
@@ -148,10 +147,10 @@ func TestParseValidMeterJSON1(t *testing.T) {
parser := NewParser()

metrics, err := parser.Parse([]byte(validMeterJSON1))
-assert.NoError(t, err)
-assert.Len(t, metrics, 1)
-assert.Equal(t, "measurement1", metrics[0].Name())
-assert.Equal(t, map[string]interface{}{
+require.NoError(t, err)
+require.Len(t, metrics, 1)
+require.Equal(t, "measurement1", metrics[0].Name())
+require.Equal(t, map[string]interface{}{
"count": float64(1),
"m15_rate": float64(1),
"m1_rate": float64(1),
@@ -160,7 +159,7 @@ func TestParseValidMeterJSON1(t *testing.T) {
"units": "events/second",
}, metrics[0].Fields())

-assert.Equal(t, map[string]string{"metric_type": "meter"}, metrics[0].Tags())
+require.Equal(t, map[string]string{"metric_type": "meter"}, metrics[0].Tags())
}

// validMeterJSON2 is a valid dropwizard json document containing one meter with one tag
@@ -188,10 +187,10 @@ func TestParseValidMeterJSON2(t *testing.T) {
parser := NewParser()

metrics, err := parser.Parse([]byte(validMeterJSON2))
-assert.NoError(t, err)
-assert.Len(t, metrics, 1)
-assert.Equal(t, "measurement2", metrics[0].Name())
-assert.Equal(t, map[string]interface{}{
+require.NoError(t, err)
+require.Len(t, metrics, 1)
+require.Equal(t, "measurement2", metrics[0].Name())
+require.Equal(t, map[string]interface{}{
"count": float64(2),
"m15_rate": float64(2),
"m1_rate": float64(2),
@@ -199,7 +198,7 @@ func TestParseValidMeterJSON2(t *testing.T) {
"mean_rate": float64(2),
"units": "events/second",
}, metrics[0].Fields())
-assert.Equal(t, map[string]string{"metric_type": "meter", "key": "value"}, metrics[0].Tags())
+require.Equal(t, map[string]string{"metric_type": "meter", "key": "value"}, metrics[0].Tags())
}

// validGaugeJSON is a valid dropwizard json document containing one gauge
@@ -222,13 +221,13 @@ func TestParseValidGaugeJSON(t *testing.T) {
parser := NewParser()

metrics, err := parser.Parse([]byte(validGaugeJSON))
-assert.NoError(t, err)
-assert.Len(t, metrics, 1)
-assert.Equal(t, "measurement", metrics[0].Name())
-assert.Equal(t, map[string]interface{}{
+require.NoError(t, err)
+require.Len(t, metrics, 1)
+require.Equal(t, "measurement", metrics[0].Name())
+require.Equal(t, map[string]interface{}{
"value": true,
}, metrics[0].Fields())
-assert.Equal(t, map[string]string{"metric_type": "gauge"}, metrics[0].Tags())
+require.Equal(t, map[string]string{"metric_type": "gauge"}, metrics[0].Tags())
}

// validHistogramJSON is a valid dropwizard json document containing one histogram
@@ -261,10 +260,10 @@ func TestParseValidHistogramJSON(t *testing.T) {
parser := NewParser()

metrics, err := parser.Parse([]byte(validHistogramJSON))
-assert.NoError(t, err)
-assert.Len(t, metrics, 1)
-assert.Equal(t, "measurement", metrics[0].Name())
-assert.Equal(t, map[string]interface{}{
+require.NoError(t, err)
+require.Len(t, metrics, 1)
+require.Equal(t, "measurement", metrics[0].Name())
+require.Equal(t, map[string]interface{}{
"count": float64(1),
"max": float64(2),
"mean": float64(3),
@@ -277,7 +276,7 @@ func TestParseValidHistogramJSON(t *testing.T) {
"p999": float64(10),
"stddev": float64(11),
}, metrics[0].Fields())
-assert.Equal(t, map[string]string{"metric_type": "histogram"}, metrics[0].Tags())
+require.Equal(t, map[string]string{"metric_type": "histogram"}, metrics[0].Tags())
}

// validTimerJSON is a valid dropwizard json document containing one timer
@@ -316,10 +315,10 @@ func TestParseValidTimerJSON(t *testing.T) {
parser := NewParser()

metrics, err := parser.Parse([]byte(validTimerJSON))
-assert.NoError(t, err)
-assert.Len(t, metrics, 1)
-assert.Equal(t, "measurement", metrics[0].Name())
-assert.Equal(t, map[string]interface{}{
+require.NoError(t, err)
+require.Len(t, metrics, 1)
+require.Equal(t, "measurement", metrics[0].Name())
+require.Equal(t, map[string]interface{}{
"count": float64(1),
"max": float64(2),
"mean": float64(3),
@@ -338,7 +337,7 @@ func TestParseValidTimerJSON(t *testing.T) {
"duration_units": "seconds",
"rate_units": "calls/second",
}, metrics[0].Fields())
-assert.Equal(t, map[string]string{"metric_type": "timer"}, metrics[0].Tags())
+require.Equal(t, map[string]string{"metric_type": "timer"}, metrics[0].Tags())
}

// validAllJSON is a valid dropwizard json document containing one metric of each type
@@ -367,8 +366,8 @@ func TestParseValidAllJSON(t *testing.T) {
parser := NewParser()

metrics, err := parser.Parse([]byte(validAllJSON))
-assert.NoError(t, err)
-assert.Len(t, metrics, 5)
+require.NoError(t, err)
+require.Len(t, metrics, 5)
}

func TestTagParsingProblems(t *testing.T) {
@@ -376,20 +375,22 @@ func TestTagParsingProblems(t *testing.T) {
parser1 := NewParser()
parser1.MetricRegistryPath = "metrics"
parser1.TagsPath = "tags1"
+parser1.Log = testutil.Logger{}
metrics1, err1 := parser1.Parse([]byte(validEmbeddedCounterJSON))
-assert.NoError(t, err1)
-assert.Len(t, metrics1, 1)
-assert.Equal(t, map[string]string{"metric_type": "counter"}, metrics1[0].Tags())
+require.NoError(t, err1)
+require.Len(t, metrics1, 1)
+require.Equal(t, map[string]string{"metric_type": "counter"}, metrics1[0].Tags())

// giving a wrong TagsPath falls back to TagPathsMap
parser2 := NewParser()
parser2.MetricRegistryPath = "metrics"
parser2.TagsPath = "tags1"
parser2.TagPathsMap = map[string]string{"tag1": "tags.tag1"}
+parser2.Log = testutil.Logger{}
metrics2, err2 := parser2.Parse([]byte(validEmbeddedCounterJSON))
-assert.NoError(t, err2)
-assert.Len(t, metrics2, 1)
-assert.Equal(t, map[string]string{"metric_type": "counter", "tag1": "green"}, metrics2[0].Tags())
+require.NoError(t, err2)
+require.Len(t, metrics2, 1)
+require.Equal(t, map[string]string{"metric_type": "counter", "tag1": "green"}, metrics2[0].Tags())
}

// sampleTemplateJSON is a sample json document containing metrics to be tested against the templating engine.

@@ -20,7 +20,6 @@ var (
MaxDate = time.Date(2038, 1, 19, 0, 0, 0, 0, time.UTC)
)

-// Parser encapsulates a Graphite Parser.
type GraphiteParser struct {
Separator string
Templates []string
@@ -77,9 +76,9 @@ func (p *GraphiteParser) Parse(buf []byte) ([]telegraf.Metric, error) {
line = bytes.TrimSpace(buf) // last line
}
if len(line) != 0 {
-metric, err := p.ParseLine(string(line))
+m, err := p.ParseLine(string(line))
if err == nil {
-metrics = append(metrics, metric)
+metrics = append(metrics, m)
} else {
errs = append(errs, err.Error())
}
@@ -95,7 +94,7 @@ func (p *GraphiteParser) Parse(buf []byte) ([]telegraf.Metric, error) {
return metrics, nil
}

-// Parse performs Graphite parsing of a single line.
+// ParseLine performs Graphite parsing of a single line.
func (p *GraphiteParser) ParseLine(line string) (telegraf.Metric, error) {
// Break into 3 fields (name, value, timestamp).
fields := strings.Fields(line)

@@ -6,11 +6,11 @@ import (
"testing"
"time"

+"github.com/stretchr/testify/require"
+
"github.com/influxdata/telegraf/internal/templating"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
-"github.com/stretchr/testify/assert"
-"github.com/stretchr/testify/require"
)

func BenchmarkParse(b *testing.B) {
@@ -30,7 +30,8 @@ func BenchmarkParse(b *testing.B) {
}

for i := 0; i < b.N; i++ {
-p.Parse([]byte("servers.localhost.cpu.load 11 1435077219"))
+_, err := p.Parse([]byte("servers.localhost.cpu.load 11 1435077219"))
+require.NoError(b, err)
}
}

@@ -285,7 +286,7 @@ func TestParseLine(t *testing.T) {
t.Fatalf("unexpected error creating graphite parser: %v", err)
}

-metric, err := p.ParseLine(test.input)
+m, err := p.ParseLine(test.input)
if errstr(err) != test.err {
t.Fatalf("err does not match. expected %v, got %v", test.err, err)
}
@@ -293,22 +294,22 @@ func TestParseLine(t *testing.T) {
// If we erred out,it was intended and the following tests won't work
continue
}
-if metric.Name() != test.measurement {
+if m.Name() != test.measurement {
t.Fatalf("name parse failer. expected %v, got %v",
-test.measurement, metric.Name())
+test.measurement, m.Name())
}
-if len(metric.Tags()) != len(test.tags) {
+if len(m.Tags()) != len(test.tags) {
t.Fatalf("tags len mismatch. expected %d, got %d",
-len(test.tags), len(metric.Tags()))
+len(test.tags), len(m.Tags()))
}
-f := metric.Fields()["value"].(float64)
+f := m.Fields()["value"].(float64)
if f != test.value {
t.Fatalf("floatValue value mismatch. expected %v, got %v",
test.value, f)
}
-if metric.Time().UnixNano()/1000000 != test.time.UnixNano()/1000000 {
+if m.Time().UnixNano()/1000000 != test.time.UnixNano()/1000000 {
t.Fatalf("time value mismatch. expected %v, got %v",
-test.time.UnixNano(), metric.Time().UnixNano())
+test.time.UnixNano(), m.Time().UnixNano())
}
}
}
@@ -478,9 +479,9 @@ func TestFilterMatchDefault(t *testing.T) {
time.Unix(1435077219, 0))

m, err := p.ParseLine("miss.servers.localhost.cpu_load 11 1435077219")
-assert.NoError(t, err)
+require.NoError(t, err)

-assert.Equal(t, exp, m)
+require.Equal(t, exp, m)
}

func TestFilterMatchMultipleMeasurement(t *testing.T) {
@@ -495,9 +496,9 @@ func TestFilterMatchMultipleMeasurement(t *testing.T) {
time.Unix(1435077219, 0))

m, err := p.ParseLine("servers.localhost.cpu.cpu_load.10 11 1435077219")
-assert.NoError(t, err)
+require.NoError(t, err)

-assert.Equal(t, exp, m)
+require.Equal(t, exp, m)
}

func TestFilterMatchMultipleMeasurementSeparator(t *testing.T) {
@@ -505,7 +506,7 @@ func TestFilterMatchMultipleMeasurementSeparator(t *testing.T) {
[]string{"servers.localhost .host.measurement.measurement*"},
nil,
)
-assert.NoError(t, err)
+require.NoError(t, err)

exp := metric.New("cpu_cpu_load_10",
map[string]string{"host": "localhost"},
@@ -513,9 +514,9 @@ func TestFilterMatchMultipleMeasurementSeparator(t *testing.T) {
time.Unix(1435077219, 0))

m, err := p.ParseLine("servers.localhost.cpu.cpu_load.10 11 1435077219")
-assert.NoError(t, err)
+require.NoError(t, err)

-assert.Equal(t, exp, m)
+require.Equal(t, exp, m)
}

func TestFilterMatchSingle(t *testing.T) {
@@ -530,9 +531,9 @@ func TestFilterMatchSingle(t *testing.T) {
time.Unix(1435077219, 0))

m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219")
-assert.NoError(t, err)
+require.NoError(t, err)

-assert.Equal(t, exp, m)
+require.Equal(t, exp, m)
}

func TestParseNoMatch(t *testing.T) {
@@ -547,9 +548,9 @@ func TestParseNoMatch(t *testing.T) {
time.Unix(1435077219, 0))

m, err := p.ParseLine("servers.localhost.memory.VmallocChunk 11 1435077219")
-assert.NoError(t, err)
+require.NoError(t, err)

-assert.Equal(t, exp, m)
+require.Equal(t, exp, m)
}

func TestFilterMatchWildcard(t *testing.T) {
@@ -564,9 +565,9 @@ func TestFilterMatchWildcard(t *testing.T) {
time.Unix(1435077219, 0))

m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219")
-assert.NoError(t, err)
+require.NoError(t, err)

-assert.Equal(t, exp, m)
+require.Equal(t, exp, m)
}

func TestFilterMatchExactBeforeWildcard(t *testing.T) {
@@ -583,9 +584,9 @@ func TestFilterMatchExactBeforeWildcard(t *testing.T) {
time.Unix(1435077219, 0))

m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219")
-assert.NoError(t, err)
+require.NoError(t, err)

-assert.Equal(t, exp, m)
+require.Equal(t, exp, m)
}

func TestFilterMatchMostLongestFilter(t *testing.T) {
@@ -602,7 +603,7 @@ func TestFilterMatchMostLongestFilter(t *testing.T) {
}

m, err := p.ParseLine("servers.localhost.cpu.cpu_load 11 1435077219")
-assert.NoError(t, err)
+require.NoError(t, err)

value, ok := m.GetTag("host")
require.True(t, ok)
@@ -631,9 +632,9 @@ func TestFilterMatchMultipleWildcards(t *testing.T) {
time.Unix(1435077219, 0))

m, err := p.ParseLine("servers.server01.cpu_load 11 1435077219")
-assert.NoError(t, err)
+require.NoError(t, err)

-assert.Equal(t, exp, m)
+require.Equal(t, exp, m)
}

func TestParseDefaultTags(t *testing.T) {
@@ -647,7 +648,7 @@ func TestParseDefaultTags(t *testing.T) {
}

m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219")
-assert.NoError(t, err)
+require.NoError(t, err)

value, ok := m.GetTag("host")
require.True(t, ok)
@@ -672,7 +673,7 @@ func TestParseDefaultTemplateTags(t *testing.T) {
}

m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219")
-assert.NoError(t, err)
+require.NoError(t, err)

value, ok := m.GetTag("host")
require.True(t, ok)
@@ -698,7 +699,7 @@ func TestParseDefaultTemplateTagsOverridGlobal(t *testing.T) {

m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219")
_ = m
-assert.NoError(t, err)
+require.NoError(t, err)

value, ok := m.GetTag("host")
require.True(t, ok)
@@ -725,7 +726,7 @@ func TestParseTemplateWhitespace(t *testing.T) {
}

m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219")
-assert.NoError(t, err)
+require.NoError(t, err)

value, ok := m.GetTag("host")
require.True(t, ok)
@@ -745,10 +746,11 @@ func TestApplyTemplate(t *testing.T) {
p, err := NewGraphiteParser("_",
[]string{"current.* measurement.measurement"},
nil)
-assert.NoError(t, err)
+require.NoError(t, err)

-measurement, _, _, _ := p.ApplyTemplate("current.users")
-assert.Equal(t, "current_users", measurement)
+measurement, _, _, err := p.ApplyTemplate("current.users")
+require.NoError(t, err)
+require.Equal(t, "current_users", measurement)
}

// Test basic functionality of ApplyTemplate
@@ -756,10 +758,11 @@ func TestApplyTemplateNoMatch(t *testing.T) {
p, err := NewGraphiteParser(".",
[]string{"foo.bar measurement.measurement"},
nil)
-assert.NoError(t, err)
+require.NoError(t, err)

-measurement, _, _, _ := p.ApplyTemplate("current.users")
-assert.Equal(t, "current.users", measurement)
+measurement, _, _, err := p.ApplyTemplate("current.users")
+require.NoError(t, err)
+require.Equal(t, "current.users", measurement)
}

// Test that most specific template is chosen
@@ -769,10 +772,10 @@ func TestApplyTemplateSpecific(t *testing.T) {
"current.* measurement.measurement",
"current.*.* measurement.measurement.service",
}, nil)
-assert.NoError(t, err)
+require.NoError(t, err)

measurement, tags, _, _ := p.ApplyTemplate("current.users.facebook")
-assert.Equal(t, "current_users", measurement)
+require.Equal(t, "current_users", measurement)

service, ok := tags["service"]
if !ok {
@@ -786,10 +789,10 @@ func TestApplyTemplateSpecific(t *testing.T) {
func TestApplyTemplateTags(t *testing.T) {
p, err := NewGraphiteParser("_",
[]string{"current.* measurement.measurement region=us-west"}, nil)
-assert.NoError(t, err)
+require.NoError(t, err)

measurement, tags, _, _ := p.ApplyTemplate("current.users")
-assert.Equal(t, "current_users", measurement)
+require.Equal(t, "current_users", measurement)

region, ok := tags["region"]
if !ok {
@@ -803,11 +806,11 @@ func TestApplyTemplateTags(t *testing.T) {
func TestApplyTemplateField(t *testing.T) {
p, err := NewGraphiteParser("_",
[]string{"current.* measurement.measurement.field"}, nil)
-assert.NoError(t, err)
+require.NoError(t, err)

measurement, _, field, err := p.ApplyTemplate("current.users.logged_in")
-
-assert.Equal(t, "current_users", measurement)
+require.NoError(t, err)
+require.Equal(t, "current_users", measurement)

if field != "logged_in" {
t.Errorf("Parser.ApplyTemplate unexpected result. got %s, exp %s",
@@ -818,11 +821,11 @@ func TestApplyTemplateField(t *testing.T) {
func TestApplyTemplateMultipleFieldsTogether(t *testing.T) {
p, err := NewGraphiteParser("_",
[]string{"current.* measurement.measurement.field.field"}, nil)
-assert.NoError(t, err)
+require.NoError(t, err)

measurement, _, field, err := p.ApplyTemplate("current.users.logged_in.ssh")
-
-assert.Equal(t, "current_users", measurement)
+require.NoError(t, err)
+require.Equal(t, "current_users", measurement)

if field != "logged_in_ssh" {
t.Errorf("Parser.ApplyTemplate unexpected result. got %s, exp %s",
@@ -833,11 +836,11 @@ func TestApplyTemplateMultipleFieldsTogether(t *testing.T) {
func TestApplyTemplateMultipleFieldsApart(t *testing.T) {
p, err := NewGraphiteParser("_",
[]string{"current.* measurement.measurement.field.method.field"}, nil)
-assert.NoError(t, err)
+require.NoError(t, err)

measurement, _, field, err := p.ApplyTemplate("current.users.logged_in.ssh.total")
-
-assert.Equal(t, "current_users", measurement)
+require.NoError(t, err)
+require.Equal(t, "current_users", measurement)

if field != "logged_in_total" {
t.Errorf("Parser.ApplyTemplate unexpected result. got %s, exp %s",
@@ -848,11 +851,11 @@ func TestApplyTemplateMultipleFieldsApart(t *testing.T) {
func TestApplyTemplateGreedyField(t *testing.T) {
p, err := NewGraphiteParser("_",
[]string{"current.* measurement.measurement.field*"}, nil)
-assert.NoError(t, err)
+require.NoError(t, err)

measurement, _, field, err := p.ApplyTemplate("current.users.logged_in")
-
-assert.Equal(t, "current_users", measurement)
+require.NoError(t, err)
+require.Equal(t, "current_users", measurement)

if field != "logged_in" {
t.Errorf("Parser.ApplyTemplate unexpected result. got %s, exp %s",
@@ -868,11 +871,12 @@ func TestApplyTemplateOverSpecific(t *testing.T) {
},
nil,
)
-assert.NoError(t, err)
+require.NoError(t, err)

measurement, tags, _, err := p.ApplyTemplate("net.server001.a.b 2")
-assert.Equal(t, "net", measurement)
-assert.Equal(t,
+require.NoError(t, err)
+require.Equal(t, "net", measurement)
+require.Equal(t,
map[string]string{"host": "server001", "metric": "a.b"},
tags)
}
@@ -887,17 +891,19 @@ func TestApplyTemplateMostSpecificTemplate(t *testing.T) {
},
nil,
)
-assert.NoError(t, err)
+require.NoError(t, err)

measurement, tags, _, err := p.ApplyTemplate("net.server001.a.b.c 2")
-assert.Equal(t, "net", measurement)
-assert.Equal(t,
+require.NoError(t, err)
+require.Equal(t, "net", measurement)
+require.Equal(t,
map[string]string{"host": "server001", "metric": "a.b.c"},
tags)

measurement, tags, _, err = p.ApplyTemplate("net.server001.a.b 2")
-assert.Equal(t, "net", measurement)
-assert.Equal(t,
+require.NoError(t, err)
+require.Equal(t, "net", measurement)
+require.Equal(t,
map[string]string{"host": "server001", "metric": "a.b"},
tags)
}

@@ -4,16 +4,16 @@ import (
"bufio"
"bytes"
"fmt"
-"log"
"os"
"regexp"
"strconv"
"strings"
"time"

+"github.com/vjeantet/grok"
+
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
-"github.com/vjeantet/grok"
)

var timeLayouts = map[string]string{
@@ -76,6 +76,7 @@ type Parser struct {
CustomPatternFiles []string
Measurement string
DefaultTags map[string]string
+Log telegraf.Logger `toml:"-"`

// Timezone is an optional component to help render log dates to
// your chosen zone.
@@ -107,13 +108,13 @@ type Parser struct {
// }
// }
tsMap map[string]map[string]string
-// patterns is a map of all of the parsed patterns from CustomPatterns
+// patternsMap is a map of all of the parsed patterns from CustomPatterns
// and CustomPatternFiles.
// ie, {
// "DURATION": "%{NUMBER}[nuµm]?s"
// "RESPONSE_CODE": "%{NUMBER:rc:tag}"
// }
-patterns map[string]string
+patternsMap map[string]string
// foundTsLayouts is a slice of timestamp patterns that have been found
// in the log lines. This slice gets updated if the user uses the generic
// 'ts' modifier for timestamps. This slice is checked first for matches,
@@ -130,7 +131,7 @@ type Parser struct {
func (p *Parser) Compile() error {
p.typeMap = make(map[string]map[string]string)
p.tsMap = make(map[string]map[string]string)
-p.patterns = make(map[string]string)
+p.patternsMap = make(map[string]string)
p.tsModder = &tsModder{}
var err error
p.g, err = grok.NewWithConfig(&grok.Config{NamedCapturesOnly: true})
@@ -180,7 +181,7 @@ func (p *Parser) Compile() error {

p.loc, err = time.LoadLocation(p.Timezone)
if err != nil {
-log.Printf("W! improper timezone supplied (%s), setting loc to UTC", p.Timezone)
+p.Log.Warnf("Improper timezone supplied (%s), setting loc to UTC", p.Timezone)
p.loc, _ = time.LoadLocation("UTC")
}

@@ -209,7 +210,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
}

if len(values) == 0 {
-log.Printf("D! Grok no match found for: %q", line)
+p.Log.Debugf("Grok no match found for: %q", line)
return nil, nil
}

@@ -252,21 +253,21 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
case Int:
iv, err := strconv.ParseInt(v, 0, 64)
if err != nil {
-log.Printf("E! Error parsing %s to int: %s", v, err)
+p.Log.Errorf("Error parsing %s to int: %s", v, err)
} else {
fields[k] = iv
}
case Float:
fv, err := strconv.ParseFloat(v, 64)
if err != nil {
-log.Printf("E! Error parsing %s to float: %s", v, err)
+p.Log.Errorf("Error parsing %s to float: %s", v, err)
} else {
fields[k] = fv
}
case Duration:
d, err := time.ParseDuration(v)
if err != nil {
-log.Printf("E! Error parsing %s to duration: %s", v, err)
+p.Log.Errorf("Error parsing %s to duration: %s", v, err)
} else {
fields[k] = int64(d)
}
@@ -277,13 +278,13 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
case Epoch:
parts := strings.SplitN(v, ".", 2)
if len(parts) == 0 {
-log.Printf("E! Error parsing %s to timestamp: %s", v, err)
+p.Log.Errorf("Error parsing %s to timestamp: %s", v, err)
break
}

sec, err := strconv.ParseInt(parts[0], 10, 64)
if err != nil {
-log.Printf("E! Error parsing %s to timestamp: %s", v, err)
+p.Log.Errorf("Error parsing %s to timestamp: %s", v, err)
break
}
ts := time.Unix(sec, 0)
@@ -293,7 +294,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
nsString := strings.Replace(padded[:9], " ", "0", -1)
nanosec, err := strconv.ParseInt(nsString, 10, 64)
if err != nil {
-log.Printf("E! Error parsing %s to timestamp: %s", v, err)
+p.Log.Errorf("Error parsing %s to timestamp: %s", v, err)
break
}
ts = ts.Add(time.Duration(nanosec) * time.Nanosecond)
@@ -302,14 +303,14 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
case EpochMilli:
ms, err := strconv.ParseInt(v, 10, 64)
if err != nil {
-log.Printf("E! Error parsing %s to int: %s", v, err)
+p.Log.Errorf("Error parsing %s to int: %s", v, err)
} else {
timestamp = time.Unix(0, ms*int64(time.Millisecond))
}
case EpochNano:
iv, err := strconv.ParseInt(v, 10, 64)
if err != nil {
-log.Printf("E! Error parsing %s to int: %s", v, err)
+p.Log.Errorf("Error parsing %s to int: %s", v, err)
} else {
timestamp = time.Unix(0, iv)
}
@@ -321,7 +322,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
}
timestamp = ts
} else {
-log.Printf("E! Error parsing %s to time layout [%s]: %s", v, t, err)
+p.Log.Errorf("Error parsing %s to time layout [%s]: %s", v, t, err)
}
case GenericTimestamp:
var foundTs bool
@@ -350,7 +351,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
// if we still haven't found a timestamp layout, log it and we will
// just use time.Now()
if !foundTs {
-log.Printf("E! Error parsing timestamp [%s], could not find any "+
+p.Log.Errorf("Error parsing timestamp [%s], could not find any "+
"suitable time layouts.", v)
}
case Drop:
@@ -364,7 +365,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
}
timestamp = ts
} else {
-log.Printf("E! Error parsing %s to time layout [%s]: %s", v, t, err)
+p.Log.Errorf("Error parsing %s to time layout [%s]: %s", v, t, err)
}
}
}
@@ -405,7 +406,7 @@ func (p *Parser) addCustomPatterns(scanner *bufio.Scanner) {
line := strings.TrimSpace(scanner.Text())
if len(line) > 0 && line[0] != '#' {
names := strings.SplitN(line, " ", 2)
-p.patterns[names[0]] = names[1]
+p.patternsMap[names[0]] = names[1]
}
}
}
@@ -415,30 +416,30 @@ func (p *Parser) compileCustomPatterns() error {
// check if the pattern contains a subpattern that is already defined
// replace it with the subpattern for modifier inheritance.
for i := 0; i < 2; i++ {
-for name, pattern := range p.patterns {
+for name, pattern := range p.patternsMap {
subNames := patternOnlyRe.FindAllStringSubmatch(pattern, -1)
for _, subName := range subNames {
-if subPattern, ok := p.patterns[subName[1]]; ok {
+if subPattern, ok := p.patternsMap[subName[1]]; ok {
pattern = strings.Replace(pattern, subName[0], subPattern, 1)
}
}
-p.patterns[name] = pattern
+p.patternsMap[name] = pattern
}
}

// check if pattern contains modifiers. Parse them out if it does.
-for name, pattern := range p.patterns {
+for name, pattern := range p.patternsMap {
if modifierRe.MatchString(pattern) {
// this pattern has modifiers, so parse out the modifiers
pattern, err = p.parseTypedCaptures(name, pattern)
if err != nil {
return err
}
-p.patterns[name] = pattern
+p.patternsMap[name] = pattern
}
}

-return p.g.AddPatternsFromMap(p.patterns)
+return p.g.AddPatternsFromMap(p.patternsMap)
}

// parseTypedCaptures parses the capture modifiers, and then deletes the

@@ -5,9 +5,9 @@ import (
"testing"
"time"

-"github.com/influxdata/telegraf/testutil"
-"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+
+"github.com/influxdata/telegraf/testutil"
)

func TestGrokParse(t *testing.T) {
@@ -15,9 +15,11 @@ func TestGrokParse(t *testing.T) {
Measurement: "t_met",
Patterns: []string{"%{COMMON_LOG_FORMAT}"},
}
-parser.Compile()
-_, err := parser.Parse([]byte(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`))
-assert.NoError(t, err)
+err := parser.Compile()
+require.NoError(t, err)
+
+_, err = parser.Parse([]byte(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`))
+require.NoError(t, err)
}

// Verify that patterns with a regex lookahead fail at compile time.
@@ -29,23 +31,23 @@ func TestParsePatternsWithLookahead(t *testing.T) {
MYLOG %{NUMBER:num:int} %{NOBOT:client}
`,
}
-assert.NoError(t, p.Compile())
+require.NoError(t, p.Compile())

_, err := p.ParseLine(`1466004605359052000 bot`)
-assert.Error(t, err)
+require.Error(t, err)
}

func TestMeasurementName(t *testing.T) {
p := &Parser{
Patterns: []string{"%{COMMON_LOG_FORMAT}"},
}
-assert.NoError(t, p.Compile())
+require.NoError(t, p.Compile())

// Parse an influxdb POST request
m, err := p.ParseLine(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`)
require.NotNil(t, m)
-assert.NoError(t, err)
-assert.Equal(t,
+require.NoError(t, err)
+require.Equal(t,
map[string]interface{}{
"resp_bytes": int64(2326),
"auth": "frank",
@@ -55,19 +57,19 @@ func TestMeasurementName(t *testing.T) {
"request": "/apache_pb.gif",
},
m.Fields())
-assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
+require.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
}

func TestCLF_IPv6(t *testing.T) {
p := &Parser{
Patterns: []string{"%{COMMON_LOG_FORMAT}"},
}
-assert.NoError(t, p.Compile())
+require.NoError(t, p.Compile())

m, err := p.ParseLine(`2001:0db8:85a3:0000:0000:8a2e:0370:7334 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`)
require.NotNil(t, m)
-assert.NoError(t, err)
-assert.Equal(t,
+require.NoError(t, err)
+require.Equal(t,
map[string]interface{}{
"resp_bytes": int64(2326),
"auth": "frank",
@@ -77,12 +79,12 @@ func TestCLF_IPv6(t *testing.T) {
"request": "/apache_pb.gif",
},
m.Fields())
-assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
+require.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())

m, err = p.ParseLine(`::1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`)
require.NotNil(t, m)
-assert.NoError(t, err)
-assert.Equal(t,
+require.NoError(t, err)
+require.Equal(t,
map[string]interface{}{
"resp_bytes": int64(2326),
"auth": "frank",
@@ -92,20 +94,20 @@ func TestCLF_IPv6(t *testing.T) {
"request": "/apache_pb.gif",
},
m.Fields())
-assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
+require.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
}

func TestCustomInfluxdbHttpd(t *testing.T) {
p := &Parser{
Patterns: []string{`\[httpd\] %{COMBINED_LOG_FORMAT} %{UUID:uuid:drop} %{NUMBER:response_time_us:int}`},
}
-assert.NoError(t, p.Compile())
+require.NoError(t, p.Compile())

// Parse an influxdb POST request
m, err := p.ParseLine(`[httpd] ::1 - - [14/Jun/2016:11:33:29 +0100] "POST /write?consistency=any&db=telegraf&precision=ns&rp= HTTP/1.1" 204 0 "-" "InfluxDBClient" 6f61bc44-321b-11e6-8050-000000000000 2513`)
require.NotNil(t, m)
-assert.NoError(t, err)
-assert.Equal(t,
+require.NoError(t, err)
+require.Equal(t,
map[string]interface{}{
"resp_bytes": int64(0),
"auth": "-",
@@ -118,13 +120,13 @@ func TestCustomInfluxdbHttpd(t *testing.T) {
"agent": "InfluxDBClient",
},
m.Fields())
-assert.Equal(t, map[string]string{"verb": "POST", "resp_code": "204"}, m.Tags())
+require.Equal(t, map[string]string{"verb": "POST", "resp_code": "204"}, m.Tags())

// Parse an influxdb GET request
m, err = p.ParseLine(`[httpd] ::1 - - [14/Jun/2016:12:10:02 +0100] "GET /query?db=telegraf&q=SELECT+bytes%2Cresponse_time_us+FROM+logparser_grok+WHERE+http_method+%3D+%27GET%27+AND+response_time_us+%3E+0+AND+time+%3E+now%28%29+-+1h HTTP/1.1" 200 578 "http://localhost:8083/" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36" 8a3806f1-3220-11e6-8006-000000000000 988`)
require.NotNil(t, m)
-assert.NoError(t, err)
-assert.Equal(t,
+require.NoError(t, err)
+require.Equal(t,
map[string]interface{}{
"resp_bytes": int64(578),
"auth": "-",
@@ -137,7 +139,7 @@ func TestCustomInfluxdbHttpd(t *testing.T) {
"agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36",
},
m.Fields())
-assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
+require.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
}

// common log format
@@ -146,13 +148,13 @@ func TestBuiltinCommonLogFormat(t *testing.T) {
p := &Parser{
Patterns: []string{"%{COMMON_LOG_FORMAT}"},
}
-assert.NoError(t, p.Compile())
+require.NoError(t, p.Compile())

// Parse an influxdb POST request
m, err := p.ParseLine(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`)
require.NotNil(t, m)
-assert.NoError(t, err)
-assert.Equal(t,
+require.NoError(t, err)
+require.Equal(t,
map[string]interface{}{
"resp_bytes": int64(2326),
"auth": "frank",
@@ -162,7 +164,7 @@ func TestBuiltinCommonLogFormat(t *testing.T) {
"request": "/apache_pb.gif",
},
m.Fields())
-assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
+require.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
}

// common log format
@@ -171,13 +173,13 @@ func TestBuiltinCommonLogFormatWithNumbers(t *testing.T) {
p := &Parser{
Patterns: []string{"%{COMMON_LOG_FORMAT}"},
}
-assert.NoError(t, p.Compile())
+require.NoError(t, p.Compile())

// Parse an influxdb POST request
m, err := p.ParseLine(`127.0.0.1 user1234 frank1234 [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`)
require.NotNil(t, m)
-assert.NoError(t, err)
-assert.Equal(t,
+require.NoError(t, err)
+require.Equal(t,
map[string]interface{}{
"resp_bytes": int64(2326),
"auth": "frank1234",
@@ -187,7 +189,7 @@ func TestBuiltinCommonLogFormatWithNumbers(t *testing.T) {
"request": "/apache_pb.gif",
},
m.Fields())
-assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
+require.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
}

// combined log format
@@ -196,13 +198,13 @@ func TestBuiltinCombinedLogFormat(t *testing.T) {
p := &Parser{
Patterns: []string{"%{COMBINED_LOG_FORMAT}"},
}
-assert.NoError(t, p.Compile())
+require.NoError(t, p.Compile())

// Parse an influxdb POST request
m, err := p.ParseLine(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326 "-" "Mozilla"`)
require.NotNil(t, m)
-assert.NoError(t, err)
-assert.Equal(t,
+require.NoError(t, err)
+require.Equal(t,
map[string]interface{}{
"resp_bytes": int64(2326),
"auth": "frank",
@@ -214,7 +216,7 @@ func TestBuiltinCombinedLogFormat(t *testing.T) {
"agent": "Mozilla",
},
m.Fields())
-assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
+require.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
}

func TestCompileStringAndParse(t *testing.T) {
@@ -227,19 +229,19 @@ func TestCompileStringAndParse(t *testing.T) {
TEST_LOG_A %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME}
`,
}
-assert.NoError(t, p.Compile())
+require.NoError(t, p.Compile())

metricA, err := p.ParseLine(`1.25 200 192.168.1.1 5.432µs`)
require.NotNil(t, metricA)
-assert.NoError(t, err)
-assert.Equal(t,
+require.NoError(t, err)
+require.Equal(t,
map[string]interface{}{
"clientip": "192.168.1.1",
"myfloat": float64(1.25),
"response_time": int64(5432),
},
metricA.Fields())
-assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
+require.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
}

func TestCompileErrorsOnInvalidPattern(t *testing.T) {
@@ -252,7 +254,7 @@ func TestCompileErrorsOnInvalidPattern(t *testing.T) {
TEST_LOG_A %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME}
`,
}
-assert.Error(t, p.Compile())
+require.Error(t, p.Compile())

metricA, _ := p.ParseLine(`1.25 200 192.168.1.1 5.432µs`)
require.Nil(t, metricA)
@@ -262,19 +264,19 @@ func TestParsePatternsWithoutCustom(t *testing.T) {
p := &Parser{
Patterns: []string{"%{POSINT:ts:ts-epochnano} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}"},
}
-assert.NoError(t, p.Compile())
+require.NoError(t, p.Compile())

metricA, err := p.ParseLine(`1466004605359052000 response_time=20821 mymetric=10890.645`)
require.NotNil(t, metricA)
-assert.NoError(t, err)
-assert.Equal(t,
+require.NoError(t, err)
+require.Equal(t,
map[string]interface{}{
"response_time": int64(20821),
"metric": float64(10890.645),
},
metricA.Fields())
-assert.Equal(t, map[string]string{}, metricA.Tags())
-assert.Equal(t, time.Unix(0, 1466004605359052000), metricA.Time())
+require.Equal(t, map[string]string{}, metricA.Tags())
+require.Equal(t, time.Unix(0, 1466004605359052000), metricA.Time())
}

func TestParseEpochMilli(t *testing.T) {
@@ -284,19 +286,19 @@ func TestParseEpochMilli(t *testing.T) {
MYAPP %{POSINT:ts:ts-epochmilli} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}
`,
}
-assert.NoError(t, p.Compile())
+require.NoError(t, p.Compile())

metricA, err := p.ParseLine(`1568540909963 response_time=20821 mymetric=10890.645`)
require.NotNil(t, metricA)
-assert.NoError(t, err)
-assert.Equal(t,
+require.NoError(t, err)
+require.Equal(t,
map[string]interface{}{
"response_time": int64(20821),
"metric": float64(10890.645),
},
metricA.Fields())
-assert.Equal(t, map[string]string{}, metricA.Tags())
-assert.Equal(t, time.Unix(0, 1568540909963000000), metricA.Time())
+require.Equal(t, map[string]string{}, metricA.Tags())
+require.Equal(t, time.Unix(0, 1568540909963000000), metricA.Time())
}

func TestParseEpochNano(t *testing.T) {
@@ -306,19 +308,19 @@ func TestParseEpochNano(t *testing.T) {
MYAPP %{POSINT:ts:ts-epochnano} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}
`,
}
-assert.NoError(t, p.Compile())
+require.NoError(t, p.Compile())

metricA, err := p.ParseLine(`1466004605359052000 response_time=20821 mymetric=10890.645`)
require.NotNil(t, metricA)
-assert.NoError(t, err)
-assert.Equal(t,
+require.NoError(t, err)
+require.Equal(t,
map[string]interface{}{
"response_time": int64(20821),
"metric": float64(10890.645),
},
metricA.Fields())
-assert.Equal(t, map[string]string{}, metricA.Tags())
-assert.Equal(t, time.Unix(0, 1466004605359052000), metricA.Time())
+require.Equal(t, map[string]string{}, metricA.Tags())
+require.Equal(t, time.Unix(0, 1466004605359052000), metricA.Time())
}

func TestParseEpoch(t *testing.T) {
@@ -328,19 +330,19 @@ func TestParseEpoch(t *testing.T) {
MYAPP %{POSINT:ts:ts-epoch} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}
`,
}
-assert.NoError(t, p.Compile())
+require.NoError(t, p.Compile())

metricA, err := p.ParseLine(`1466004605 response_time=20821 mymetric=10890.645`)
require.NotNil(t, metricA)
-assert.NoError(t, err)
-assert.Equal(t,
+require.NoError(t, err)
+require.Equal(t,
map[string]interface{}{
"response_time": int64(20821),
"metric": float64(10890.645),
},
metricA.Fields())
-assert.Equal(t, map[string]string{}, metricA.Tags())
-assert.Equal(t, time.Unix(1466004605, 0), metricA.Time())
+require.Equal(t, map[string]string{}, metricA.Tags())
+require.Equal(t, time.Unix(1466004605, 0), metricA.Time())
}

func TestParseEpochDecimal(t *testing.T) {
@@ -395,7 +397,7 @@ func TestParseEpochDecimal(t *testing.T) {
parser := &Parser{
Patterns: []string{"%{NUMBER:ts:ts-epoch} value=%{NUMBER:value:int}"},
}
-assert.NoError(t, parser.Compile())
+require.NoError(t, parser.Compile())
m, err := parser.ParseLine(tt.line)

if tt.noMatch {
@@ -420,71 +422,74 @@ func TestParseEpochErrors(t *testing.T) {
CustomPatterns: `
MYAPP %{WORD:ts:ts-epoch} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}
`,
+Log: testutil.Logger{},
}
-assert.NoError(t, p.Compile())
+require.NoError(t, p.Compile())

_, err := p.ParseLine(`foobar response_time=20821 mymetric=10890.645`)
-assert.NoError(t, err)
+require.NoError(t, err)

p = &Parser{
Patterns: []string{"%{MYAPP}"},
CustomPatterns: `
MYAPP %{WORD:ts:ts-epochnano} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}
`,
+Log: testutil.Logger{},
}
-assert.NoError(t, p.Compile())
+require.NoError(t, p.Compile())

_, err = p.ParseLine(`foobar response_time=20821 mymetric=10890.645`)
-assert.NoError(t, err)
+require.NoError(t, err)
}

func TestParseGenericTimestamp(t *testing.T) {
p := &Parser{
Patterns: []string{`\[%{HTTPDATE:ts:ts}\] response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}`},
}
-assert.NoError(t, p.Compile())
+require.NoError(t, p.Compile())

metricA, err := p.ParseLine(`[09/Jun/2016:03:37:03 +0000] response_time=20821 mymetric=10890.645`)
require.NotNil(t, metricA)
-assert.NoError(t, err)
-assert.Equal(t,
+require.NoError(t, err)
+require.Equal(t,
map[string]interface{}{
"response_time": int64(20821),
"metric": float64(10890.645),
},
metricA.Fields())
-assert.Equal(t, map[string]string{}, metricA.Tags())
-assert.Equal(t, time.Unix(1465443423, 0).UTC(), metricA.Time().UTC())
+require.Equal(t, map[string]string{}, metricA.Tags())
+require.Equal(t, time.Unix(1465443423, 0).UTC(), metricA.Time().UTC())

metricB, err := p.ParseLine(`[09/Jun/2016:03:37:04 +0000] response_time=20821 mymetric=10890.645`)
require.NotNil(t, metricB)
-assert.NoError(t, err)
-assert.Equal(t,
+require.NoError(t, err)
+require.Equal(t,
map[string]interface{}{
"response_time": int64(20821),
"metric": float64(10890.645),
},
metricB.Fields())
-assert.Equal(t, map[string]string{}, metricB.Tags())
-assert.Equal(t, time.Unix(1465443424, 0).UTC(), metricB.Time().UTC())
+require.Equal(t, map[string]string{}, metricB.Tags())
+require.Equal(t, time.Unix(1465443424, 0).UTC(), metricB.Time().UTC())
}

func TestParseGenericTimestampNotFound(t *testing.T) {
p := &Parser{
Patterns: []string{`\[%{NOTSPACE:ts:ts}\] response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}`},
+Log: testutil.Logger{},
}
-assert.NoError(t, p.Compile())
+require.NoError(t, p.Compile())

metricA, err := p.ParseLine(`[foobar] response_time=20821 mymetric=10890.645`)
require.NotNil(t, metricA)
-assert.NoError(t, err)
-assert.Equal(t,
+require.NoError(t, err)
+require.Equal(t,
map[string]interface{}{
"response_time": int64(20821),
"metric": float64(10890.645),
},
metricA.Fields())
-assert.Equal(t, map[string]string{}, metricA.Tags())
+require.Equal(t, map[string]string{}, metricA.Tags())
}

func TestCompileFileAndParse(t *testing.T) {
@@ -492,12 +497,12 @@ func TestCompileFileAndParse(t *testing.T) {
Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"},
CustomPatternFiles: []string{"./testdata/test-patterns"},
}
-assert.NoError(t, p.Compile())
+require.NoError(t, p.Compile())

metricA, err := p.ParseLine(`[04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101`)
require.NotNil(t, metricA)
-assert.NoError(t, err)
-assert.Equal(t,
+require.NoError(t, err)
+require.Equal(t,
map[string]interface{}{
"clientip": "192.168.1.1",
"myfloat": float64(1.25),
@@ -505,23 +510,23 @@ func TestCompileFileAndParse(t *testing.T) {
"myint": int64(101),
},
metricA.Fields())
-assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
-assert.Equal(t,
+require.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
+require.Equal(t,
time.Date(2016, time.June, 4, 12, 41, 45, 0, time.FixedZone("foo", 60*60)).Nanosecond(),
metricA.Time().Nanosecond())

metricB, err := p.ParseLine(`[04/06/2016--12:41:46] 1.25 mystring dropme nomodifier`)
require.NotNil(t, metricB)
-assert.NoError(t, err)
-assert.Equal(t,
+require.NoError(t, err)
+require.Equal(t,
map[string]interface{}{
"myfloat": 1.25,
"mystring": "mystring",
"nomodifier": "nomodifier",
},
metricB.Fields())
-assert.Equal(t, map[string]string{}, metricB.Tags())
-assert.Equal(t,
+require.Equal(t, map[string]string{}, metricB.Tags())
+require.Equal(t,
time.Date(2016, time.June, 4, 12, 41, 46, 0, time.FixedZone("foo", 60*60)).Nanosecond(),
metricB.Time().Nanosecond())
}
@@ -534,19 +539,19 @@ func TestCompileNoModifiersAndParse(t *testing.T) {
TEST_LOG_C %{NUMBER:myfloat} %{NUMBER} %{IPORHOST:clientip} %{DURATION:rt}
`,
}
-assert.NoError(t, p.Compile())
+require.NoError(t, p.Compile())

metricA, err := p.ParseLine(`1.25 200 192.168.1.1 5.432µs`)
require.NotNil(t, metricA)
-assert.NoError(t, err)
-assert.Equal(t,
+require.NoError(t, err)
+require.Equal(t,
map[string]interface{}{
|
||||
"clientip": "192.168.1.1",
|
||||
"myfloat": "1.25",
|
||||
"rt": "5.432µs",
|
||||
},
|
||||
metricA.Fields())
|
||||
assert.Equal(t, map[string]string{}, metricA.Tags())
|
||||
require.Equal(t, map[string]string{}, metricA.Tags())
|
||||
}
|
||||
|
||||
func TestCompileNoNamesAndParse(t *testing.T) {
|
||||
|
|
@ -556,24 +561,26 @@ func TestCompileNoNamesAndParse(t *testing.T) {
|
|||
DURATION %{NUMBER}[nuµm]?s
|
||||
TEST_LOG_C %{NUMBER} %{NUMBER} %{IPORHOST} %{DURATION}
|
||||
`,
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
assert.NoError(t, p.Compile())
|
||||
require.NoError(t, p.Compile())
|
||||
|
||||
metricA, err := p.ParseLine(`1.25 200 192.168.1.1 5.432µs`)
|
||||
require.Nil(t, metricA)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestParseNoMatch(t *testing.T) {
|
||||
p := &Parser{
|
||||
Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"},
|
||||
CustomPatternFiles: []string{"./testdata/test-patterns"},
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
assert.NoError(t, p.Compile())
|
||||
require.NoError(t, p.Compile())
|
||||
|
||||
metricA, err := p.ParseLine(`[04/Jun/2016:12:41:45 +0100] notnumber 200 192.168.1.1 5.432µs 101`)
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, metricA)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, metricA)
|
||||
}
|
||||
|
||||
func TestCompileErrors(t *testing.T) {
|
||||
|
|
@ -584,14 +591,14 @@ func TestCompileErrors(t *testing.T) {
|
|||
TEST_LOG_A %{HTTPDATE:ts1:ts-httpd} %{HTTPDATE:ts2:ts-httpd} %{NUMBER:mynum:int}
|
||||
`,
|
||||
}
|
||||
assert.Error(t, p.Compile())
|
||||
require.Error(t, p.Compile())
|
||||
|
||||
// Compile fails because file doesn't exist:
|
||||
p = &Parser{
|
||||
Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"},
|
||||
CustomPatternFiles: []string{"/tmp/foo/bar/baz"},
|
||||
}
|
||||
assert.Error(t, p.Compile())
|
||||
require.Error(t, p.Compile())
|
||||
}
|
||||
|
||||
func TestParseErrors_MissingPattern(t *testing.T) {
|
||||
|
|
@ -614,6 +621,7 @@ func TestParseErrors_WrongIntegerType(t *testing.T) {
|
|||
CustomPatterns: `
|
||||
TEST_LOG_A %{NUMBER:ts:ts-epoch} %{WORD:myword:int}
|
||||
`,
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
require.NoError(t, p.Compile())
|
||||
m, err := p.ParseLine(`0 notnumber`)
|
||||
|
|
@ -630,6 +638,7 @@ func TestParseErrors_WrongFloatType(t *testing.T) {
|
|||
CustomPatterns: `
|
||||
TEST_LOG_A %{NUMBER:ts:ts-epoch} %{WORD:myword:float}
|
||||
`,
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
require.NoError(t, p.Compile())
|
||||
m, err := p.ParseLine(`0 notnumber`)
|
||||
|
|
@ -646,6 +655,7 @@ func TestParseErrors_WrongDurationType(t *testing.T) {
|
|||
CustomPatterns: `
|
||||
TEST_LOG_A %{NUMBER:ts:ts-epoch} %{WORD:myword:duration}
|
||||
`,
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
require.NoError(t, p.Compile())
|
||||
m, err := p.ParseLine(`0 notnumber`)
|
||||
|
|
@ -662,6 +672,7 @@ func TestParseErrors_WrongTimeLayout(t *testing.T) {
|
|||
CustomPatterns: `
|
||||
TEST_LOG_A %{NUMBER:ts:ts-epoch} %{WORD:myword:duration}
|
||||
`,
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
require.NoError(t, p.Compile())
|
||||
m, err := p.ParseLine(`0 notnumber`)
|
||||
|
|
@ -680,12 +691,12 @@ func TestParseInteger_Base16(t *testing.T) {
|
|||
TEST_LOG_C %{NUMBER:myfloat} %{BASE10OR16NUM:response_code:int} %{IPORHOST:clientip} %{DURATION:rt}
|
||||
`,
|
||||
}
|
||||
assert.NoError(t, p.Compile())
|
||||
require.NoError(t, p.Compile())
|
||||
|
||||
metricA, err := p.ParseLine(`1.25 0xc8 192.168.1.1 5.432µs`)
|
||||
require.NotNil(t, metricA)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t,
|
||||
require.NoError(t, err)
|
||||
require.Equal(t,
|
||||
map[string]interface{}{
|
||||
"clientip": "192.168.1.1",
|
||||
"response_code": int64(200),
|
||||
|
|
@ -693,7 +704,7 @@ func TestParseInteger_Base16(t *testing.T) {
|
|||
"rt": "5.432µs",
|
||||
},
|
||||
metricA.Fields())
|
||||
assert.Equal(t, map[string]string{}, metricA.Tags())
|
||||
require.Equal(t, map[string]string{}, metricA.Tags())
|
||||
}
|
||||
|
||||
func TestTsModder(t *testing.T) {
|
||||
|
|
@ -701,47 +712,47 @@ func TestTsModder(t *testing.T) {
|
|||
|
||||
reftime := time.Date(2006, time.December, 1, 1, 1, 1, int(time.Millisecond), time.UTC)
|
||||
modt := tsm.tsMod(reftime)
|
||||
assert.Equal(t, reftime, modt)
|
||||
require.Equal(t, reftime, modt)
|
||||
modt = tsm.tsMod(reftime)
|
||||
assert.Equal(t, reftime.Add(time.Microsecond*1), modt)
|
||||
require.Equal(t, reftime.Add(time.Microsecond*1), modt)
|
||||
modt = tsm.tsMod(reftime)
|
||||
assert.Equal(t, reftime.Add(time.Microsecond*2), modt)
|
||||
require.Equal(t, reftime.Add(time.Microsecond*2), modt)
|
||||
modt = tsm.tsMod(reftime)
|
||||
assert.Equal(t, reftime.Add(time.Microsecond*3), modt)
|
||||
require.Equal(t, reftime.Add(time.Microsecond*3), modt)
|
||||
|
||||
reftime = time.Date(2006, time.December, 1, 1, 1, 1, int(time.Microsecond), time.UTC)
|
||||
modt = tsm.tsMod(reftime)
|
||||
assert.Equal(t, reftime, modt)
|
||||
require.Equal(t, reftime, modt)
|
||||
modt = tsm.tsMod(reftime)
|
||||
assert.Equal(t, reftime.Add(time.Nanosecond*1), modt)
|
||||
require.Equal(t, reftime.Add(time.Nanosecond*1), modt)
|
||||
modt = tsm.tsMod(reftime)
|
||||
assert.Equal(t, reftime.Add(time.Nanosecond*2), modt)
|
||||
require.Equal(t, reftime.Add(time.Nanosecond*2), modt)
|
||||
modt = tsm.tsMod(reftime)
|
||||
assert.Equal(t, reftime.Add(time.Nanosecond*3), modt)
|
||||
require.Equal(t, reftime.Add(time.Nanosecond*3), modt)
|
||||
|
||||
reftime = time.Date(2006, time.December, 1, 1, 1, 1, int(time.Microsecond)*999, time.UTC)
|
||||
modt = tsm.tsMod(reftime)
|
||||
assert.Equal(t, reftime, modt)
|
||||
require.Equal(t, reftime, modt)
|
||||
modt = tsm.tsMod(reftime)
|
||||
assert.Equal(t, reftime.Add(time.Nanosecond*1), modt)
|
||||
require.Equal(t, reftime.Add(time.Nanosecond*1), modt)
|
||||
modt = tsm.tsMod(reftime)
|
||||
assert.Equal(t, reftime.Add(time.Nanosecond*2), modt)
|
||||
require.Equal(t, reftime.Add(time.Nanosecond*2), modt)
|
||||
modt = tsm.tsMod(reftime)
|
||||
assert.Equal(t, reftime.Add(time.Nanosecond*3), modt)
|
||||
require.Equal(t, reftime.Add(time.Nanosecond*3), modt)
|
||||
|
||||
reftime = time.Date(2006, time.December, 1, 1, 1, 1, 0, time.UTC)
|
||||
modt = tsm.tsMod(reftime)
|
||||
assert.Equal(t, reftime, modt)
|
||||
require.Equal(t, reftime, modt)
|
||||
modt = tsm.tsMod(reftime)
|
||||
assert.Equal(t, reftime.Add(time.Millisecond*1), modt)
|
||||
require.Equal(t, reftime.Add(time.Millisecond*1), modt)
|
||||
modt = tsm.tsMod(reftime)
|
||||
assert.Equal(t, reftime.Add(time.Millisecond*2), modt)
|
||||
require.Equal(t, reftime.Add(time.Millisecond*2), modt)
|
||||
modt = tsm.tsMod(reftime)
|
||||
assert.Equal(t, reftime.Add(time.Millisecond*3), modt)
|
||||
require.Equal(t, reftime.Add(time.Millisecond*3), modt)
|
||||
|
||||
reftime = time.Time{}
|
||||
modt = tsm.tsMod(reftime)
|
||||
assert.Equal(t, reftime, modt)
|
||||
require.Equal(t, reftime, modt)
|
||||
}
|
||||
|
||||
func TestTsModder_Rollover(t *testing.T) {
|
||||
|
|
@ -752,14 +763,14 @@ func TestTsModder_Rollover(t *testing.T) {
|
|||
for i := 1; i < 1000; i++ {
|
||||
modt = tsm.tsMod(reftime)
|
||||
}
|
||||
assert.Equal(t, reftime.Add(time.Microsecond*999+time.Nanosecond), modt)
|
||||
require.Equal(t, reftime.Add(time.Microsecond*999+time.Nanosecond), modt)
|
||||
|
||||
reftime = time.Date(2006, time.December, 1, 1, 1, 1, int(time.Microsecond), time.UTC)
|
||||
modt = tsm.tsMod(reftime)
|
||||
for i := 1; i < 1001; i++ {
|
||||
modt = tsm.tsMod(reftime)
|
||||
}
|
||||
assert.Equal(t, reftime.Add(time.Nanosecond*1000), modt)
|
||||
require.Equal(t, reftime.Add(time.Nanosecond*1000), modt)
|
||||
}
|
||||
|
||||
func TestShortPatternRegression(t *testing.T) {
|
||||
|
|
@ -788,12 +799,12 @@ func TestTimezoneEmptyCompileFileAndParse(t *testing.T) {
|
|||
CustomPatternFiles: []string{"./testdata/test-patterns"},
|
||||
Timezone: "",
|
||||
}
|
||||
assert.NoError(t, p.Compile())
|
||||
require.NoError(t, p.Compile())
|
||||
|
||||
metricA, err := p.ParseLine(`[04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101`)
|
||||
require.NotNil(t, metricA)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t,
|
||||
require.NoError(t, err)
|
||||
require.Equal(t,
|
||||
map[string]interface{}{
|
||||
"clientip": "192.168.1.1",
|
||||
"myfloat": float64(1.25),
|
||||
|
|
@ -801,21 +812,21 @@ func TestTimezoneEmptyCompileFileAndParse(t *testing.T) {
|
|||
"myint": int64(101),
|
||||
},
|
||||
metricA.Fields())
|
||||
assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
|
||||
assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())
|
||||
require.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
|
||||
require.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())
|
||||
|
||||
metricB, err := p.ParseLine(`[04/06/2016--12:41:46] 1.25 mystring dropme nomodifier`)
|
||||
require.NotNil(t, metricB)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t,
|
||||
require.NoError(t, err)
|
||||
require.Equal(t,
|
||||
map[string]interface{}{
|
||||
"myfloat": 1.25,
|
||||
"mystring": "mystring",
|
||||
"nomodifier": "nomodifier",
|
||||
},
|
||||
metricB.Fields())
|
||||
assert.Equal(t, map[string]string{}, metricB.Tags())
|
||||
assert.Equal(t, int64(1465044106000000000), metricB.Time().UnixNano())
|
||||
require.Equal(t, map[string]string{}, metricB.Tags())
|
||||
require.Equal(t, int64(1465044106000000000), metricB.Time().UnixNano())
|
||||
}
|
||||
|
||||
func TestTimezoneMalformedCompileFileAndParse(t *testing.T) {
|
||||
|
|
@ -823,13 +834,14 @@ func TestTimezoneMalformedCompileFileAndParse(t *testing.T) {
|
|||
Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"},
|
||||
CustomPatternFiles: []string{"./testdata/test-patterns"},
|
||||
Timezone: "Something/Weird",
|
||||
Log: testutil.Logger{},
|
||||
}
|
||||
assert.NoError(t, p.Compile())
|
||||
require.NoError(t, p.Compile())
|
||||
|
||||
metricA, err := p.ParseLine(`[04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101`)
|
||||
require.NotNil(t, metricA)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t,
|
||||
require.NoError(t, err)
|
||||
require.Equal(t,
|
||||
map[string]interface{}{
|
||||
"clientip": "192.168.1.1",
|
||||
"myfloat": float64(1.25),
|
||||
|
|
@ -837,21 +849,21 @@ func TestTimezoneMalformedCompileFileAndParse(t *testing.T) {
|
|||
"myint": int64(101),
|
||||
},
|
||||
metricA.Fields())
|
||||
assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
|
||||
assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())
|
||||
require.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
|
||||
require.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())
|
||||
|
||||
metricB, err := p.ParseLine(`[04/06/2016--12:41:46] 1.25 mystring dropme nomodifier`)
|
||||
require.NotNil(t, metricB)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t,
|
||||
require.NoError(t, err)
|
||||
require.Equal(t,
|
||||
map[string]interface{}{
|
||||
"myfloat": 1.25,
|
||||
"mystring": "mystring",
|
||||
"nomodifier": "nomodifier",
|
||||
},
|
||||
metricB.Fields())
|
||||
assert.Equal(t, map[string]string{}, metricB.Tags())
|
||||
assert.Equal(t, int64(1465044106000000000), metricB.Time().UnixNano())
|
||||
require.Equal(t, map[string]string{}, metricB.Tags())
|
||||
require.Equal(t, int64(1465044106000000000), metricB.Time().UnixNano())
|
||||
}
|
||||
|
||||
func TestTimezoneEuropeCompileFileAndParse(t *testing.T) {
|
||||
|
|
@ -860,12 +872,12 @@ func TestTimezoneEuropeCompileFileAndParse(t *testing.T) {
|
|||
CustomPatternFiles: []string{"./testdata/test-patterns"},
|
||||
Timezone: "Europe/Berlin",
|
||||
}
|
||||
assert.NoError(t, p.Compile())
|
||||
require.NoError(t, p.Compile())
|
||||
|
||||
metricA, err := p.ParseLine(`[04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101`)
|
||||
require.NotNil(t, metricA)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t,
|
||||
require.NoError(t, err)
|
||||
require.Equal(t,
|
||||
map[string]interface{}{
|
||||
"clientip": "192.168.1.1",
|
||||
"myfloat": float64(1.25),
|
||||
|
|
@ -873,21 +885,21 @@ func TestTimezoneEuropeCompileFileAndParse(t *testing.T) {
|
|||
"myint": int64(101),
|
||||
},
|
||||
metricA.Fields())
|
||||
assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
|
||||
assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())
|
||||
require.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
|
||||
require.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())
|
||||
|
||||
metricB, err := p.ParseLine(`[04/06/2016--12:41:46] 1.25 mystring dropme nomodifier`)
|
||||
require.NotNil(t, metricB)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t,
|
||||
require.NoError(t, err)
|
||||
require.Equal(t,
|
||||
map[string]interface{}{
|
||||
"myfloat": 1.25,
|
||||
"mystring": "mystring",
|
||||
"nomodifier": "nomodifier",
|
||||
},
|
||||
metricB.Fields())
|
||||
assert.Equal(t, map[string]string{}, metricB.Tags())
|
||||
assert.Equal(t, int64(1465036906000000000), metricB.Time().UnixNano())
|
||||
require.Equal(t, map[string]string{}, metricB.Tags())
|
||||
require.Equal(t, int64(1465036906000000000), metricB.Time().UnixNano())
|
||||
}
|
||||
|
||||
func TestTimezoneAmericasCompileFileAndParse(t *testing.T) {
|
||||
|
|
@ -896,12 +908,12 @@ func TestTimezoneAmericasCompileFileAndParse(t *testing.T) {
|
|||
CustomPatternFiles: []string{"./testdata/test-patterns"},
|
||||
Timezone: "Canada/Eastern",
|
||||
}
|
||||
assert.NoError(t, p.Compile())
|
||||
require.NoError(t, p.Compile())
|
||||
|
||||
metricA, err := p.ParseLine(`[04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101`)
|
||||
require.NotNil(t, metricA)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t,
|
||||
require.NoError(t, err)
|
||||
require.Equal(t,
|
||||
map[string]interface{}{
|
||||
"clientip": "192.168.1.1",
|
||||
"myfloat": float64(1.25),
|
||||
|
|
@ -909,21 +921,21 @@ func TestTimezoneAmericasCompileFileAndParse(t *testing.T) {
|
|||
"myint": int64(101),
|
||||
},
|
||||
metricA.Fields())
|
||||
assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
|
||||
assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())
|
||||
require.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
|
||||
require.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())
|
||||
|
||||
metricB, err := p.ParseLine(`[04/06/2016--12:41:46] 1.25 mystring dropme nomodifier`)
|
||||
require.NotNil(t, metricB)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t,
|
||||
require.NoError(t, err)
|
||||
require.Equal(t,
|
||||
map[string]interface{}{
|
||||
"myfloat": 1.25,
|
||||
"mystring": "mystring",
|
||||
"nomodifier": "nomodifier",
|
||||
},
|
||||
metricB.Fields())
|
||||
assert.Equal(t, map[string]string{}, metricB.Tags())
|
||||
assert.Equal(t, int64(1465058506000000000), metricB.Time().UnixNano())
|
||||
require.Equal(t, map[string]string{}, metricB.Tags())
|
||||
require.Equal(t, int64(1465058506000000000), metricB.Time().UnixNano())
|
||||
}
|
||||
|
||||
func TestTimezoneLocalCompileFileAndParse(t *testing.T) {
|
||||
|
|
@ -932,12 +944,12 @@ func TestTimezoneLocalCompileFileAndParse(t *testing.T) {
|
|||
CustomPatternFiles: []string{"./testdata/test-patterns"},
|
||||
Timezone: "Local",
|
||||
}
|
||||
assert.NoError(t, p.Compile())
|
||||
require.NoError(t, p.Compile())
|
||||
|
||||
metricA, err := p.ParseLine(`[04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101`)
|
||||
require.NotNil(t, metricA)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t,
|
||||
require.NoError(t, err)
|
||||
require.Equal(t,
|
||||
map[string]interface{}{
|
||||
"clientip": "192.168.1.1",
|
||||
"myfloat": float64(1.25),
|
||||
|
|
@ -945,21 +957,21 @@ func TestTimezoneLocalCompileFileAndParse(t *testing.T) {
|
|||
"myint": int64(101),
|
||||
},
|
||||
metricA.Fields())
|
||||
assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
|
||||
assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())
|
||||
require.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
|
||||
require.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())
|
||||
|
||||
metricB, err := p.ParseLine(`[04/06/2016--12:41:46] 1.25 mystring dropme nomodifier`)
|
||||
require.NotNil(t, metricB)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t,
|
||||
require.NoError(t, err)
|
||||
require.Equal(t,
|
||||
map[string]interface{}{
|
||||
"myfloat": 1.25,
|
||||
"mystring": "mystring",
|
||||
"nomodifier": "nomodifier",
|
||||
},
|
||||
metricB.Fields())
|
||||
assert.Equal(t, map[string]string{}, metricB.Tags())
|
||||
assert.Equal(t, time.Date(2016, time.June, 4, 12, 41, 46, 0, time.Local).UnixNano(), metricB.Time().UnixNano())
|
||||
require.Equal(t, map[string]string{}, metricB.Tags())
|
||||
require.Equal(t, time.Date(2016, time.June, 4, 12, 41, 46, 0, time.Local).UnixNano(), metricB.Time().UnixNano())
|
||||
}
|
||||
|
||||
func TestNewlineInPatterns(t *testing.T) {
|
||||
|
|
@ -1087,7 +1099,8 @@ func TestEmptyYearInTimestamp(t *testing.T) {
|
|||
`,
|
||||
}
|
||||
require.NoError(t, p.Compile())
|
||||
p.ParseLine("Nov 6 13:57:03 generic iTunes[6504]: info> Scale factor of main display = 2.0")
|
||||
_, err := p.ParseLine("Nov 6 13:57:03 generic iTunes[6504]: info> Scale factor of main display = 2.0")
|
||||
require.NoError(t, err)
|
||||
m, err := p.ParseLine("Nov 6 13:57:03 generic iTunes[6504]: objc[6504]: Object descriptor was null.")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, m)
|
||||
@ -7,8 +7,9 @@ import (
"io"
"testing"

"github.com/influxdata/telegraf/plugins/parsers/influx"
"github.com/stretchr/testify/require"

"github.com/influxdata/telegraf/plugins/parsers/influx"
)

type TestingHandler struct {
@ -1950,7 +1951,10 @@ type MockHandler struct {
}

func (h *MockHandler) SetMeasurement(name []byte) error {
h.TestingHandler.SetMeasurement(name)
err := h.TestingHandler.SetMeasurement(name)
if err != nil {
return err
}
return h.SetMeasurementF(name)
}

@ -1963,8 +1967,7 @@ func (h *MockHandler) AddInt(name, value []byte) error {
if err != nil {
return err
}
h.TestingHandler.AddInt(name, value)
return nil
return h.TestingHandler.AddInt(name, value)
}

func (h *MockHandler) AddUint(name, value []byte) error {
@ -1972,8 +1975,7 @@ func (h *MockHandler) AddUint(name, value []byte) error {
if err != nil {
return err
}
h.TestingHandler.AddUint(name, value)
return nil
return h.TestingHandler.AddUint(name, value)
}

func (h *MockHandler) AddFloat(name, value []byte) error {
@ -9,10 +9,11 @@ import (
"testing"
"time"

"github.com/stretchr/testify/require"

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)

var DefaultTime = func() time.Time {
@ -849,7 +850,10 @@ func TestStreamParserProducesAllAvailableMetrics(t *testing.T) {
parser := NewStreamParser(r)
parser.SetTimeFunc(DefaultTime)

go w.Write([]byte("metric value=1\nmetric2 value=1\n"))
go func() {
_, err := w.Write([]byte("metric value=1\nmetric2 value=1\n"))
require.NoError(t, err)
}()

_, err := parser.Next()
require.NoError(t, err)
@ -5,15 +5,15 @@ import (
"encoding/json"
"errors"
"fmt"
"log"
"strconv"
"time"

"github.com/tidwall/gjson"

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/metric"
"github.com/tidwall/gjson"
)

var (
@ -45,6 +45,8 @@ type Parser struct {
timezone string
defaultTags map[string]string
strict bool

Log telegraf.Logger `toml:"-"`
}

func New(config *Config) (*Parser, error) {
@ -110,8 +112,7 @@ func (p *Parser) parseObject(data map[string]interface{}, timestamp time.Time) (

// checks if json_name_key is set
if p.nameKey != "" {
switch field := f.Fields[p.nameKey].(type) {
case string:
if field, ok := f.Fields[p.nameKey].(string); ok {
name = field
}
}
@ -172,7 +173,7 @@ func (p *Parser) switchFieldToTag(tags map[string]string, fields map[string]inte
tags[name] = strconv.FormatFloat(t, 'f', -1, 64)
delete(fields, name)
default:
log.Printf("E! [parsers.json] Unrecognized type %T", value)
p.Log.Errorf("Unrecognized type %T", value)
}
}

@ -194,7 +195,7 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
result := gjson.GetBytes(buf, p.query)
buf = []byte(result.Raw)
if !result.IsArray() && !result.IsObject() && result.Type != gjson.Null {
err := fmt.Errorf("E! Query path must lead to a JSON object, array of objects or null, but lead to: %v", result.Type)
err := fmt.Errorf("query path must lead to a JSON object, array of objects or null, but lead to: %v", result.Type)
return nil, err
}
if result.Type == gjson.Null {
@ -292,23 +293,21 @@ func (f *JSONFlattener) FullFlattenJSON(
}
err := f.FullFlattenJSON(fieldkey, v, convertString, convertBool)
if err != nil {
return nil
return err
}
}
case float64:
f.Fields[fieldname] = t
case string:
if convertString {
f.Fields[fieldname] = v.(string)
} else {
if !convertString {
return nil
}
f.Fields[fieldname] = v.(string)
case bool:
if convertBool {
f.Fields[fieldname] = v.(bool)
} else {
if !convertBool {
return nil
}
f.Fields[fieldname] = v.(bool)
case nil:
return nil
default:
@ -5,7 +5,6 @@ import (
"bytes"
"errors"
"fmt"
"log"
"os/exec"
"regexp"
"strconv"
@ -74,6 +73,7 @@ func TryAddState(runErr error, metrics []telegraf.Metric) ([]telegraf.Metric, er
type NagiosParser struct {
MetricName string
DefaultTags map[string]string
Log telegraf.Logger `toml:"-"`
}

// Got from Alignak
@ -111,12 +111,12 @@ func (p *NagiosParser) Parse(buf []byte) ([]telegraf.Metric, error) {
case 2:
ms, err := parsePerfData(string(parts[1]), ts)
if err != nil {
log.Printf("E! [parser.nagios] failed to parse performance data: %s\n", err.Error())
p.Log.Errorf("Failed to parse performance data: %s\n", err.Error())
}
metrics = append(metrics, ms...)
fallthrough
case 1:
msg.Write(bytes.TrimSpace(parts[0]))
msg.Write(bytes.TrimSpace(parts[0])) //nolint:revive // from buffer.go: "err is always nil"
default:
return nil, errors.New("illegal output format")
}
@ -126,34 +126,34 @@ func (p *NagiosParser) Parse(buf []byte) ([]telegraf.Metric, error) {
if bytes.Contains(s.Bytes(), []byte{'|'}) {
parts := bytes.Split(s.Bytes(), []byte{'|'})
if longmsg.Len() != 0 {
longmsg.WriteByte('\n')
longmsg.WriteByte('\n') //nolint:revive // from buffer.go: "err is always nil"
}
longmsg.Write(bytes.TrimSpace(parts[0]))
longmsg.Write(bytes.TrimSpace(parts[0])) //nolint:revive // from buffer.go: "err is always nil"

ms, err := parsePerfData(string(parts[1]), ts)
if err != nil {
log.Printf("E! [parser.nagios] failed to parse performance data: %s\n", err.Error())
p.Log.Errorf("Failed to parse performance data: %s\n", err.Error())
}
metrics = append(metrics, ms...)
break
}
if longmsg.Len() != 0 {
longmsg.WriteByte('\n')
longmsg.WriteByte('\n') //nolint:revive // from buffer.go: "err is always nil"
}
longmsg.Write(bytes.TrimSpace((s.Bytes())))
longmsg.Write(bytes.TrimSpace(s.Bytes())) //nolint:revive // from buffer.go: "err is always nil"
}

// Parse extra performance data.
for s.Scan() {
ms, err := parsePerfData(s.Text(), ts)
if err != nil {
log.Printf("E! [parser.nagios] failed to parse performance data: %s\n", err.Error())
p.Log.Errorf("Failed to parse performance data: %s\n", err.Error())
}
metrics = append(metrics, ms...)
}

if s.Err() != nil {
log.Printf("D! [parser.nagios] unexpected io error: %s\n", s.Err())
p.Log.Debugf("Unexpected io error: %s\n", s.Err())
}

// Create nagios state.
@ -291,5 +291,5 @@ func parseThreshold(threshold string) (min float64, max float64, err error) {
return 0, 0, ErrBadThresholdFormat
}

return
return min, max, err
}
@ -5,7 +5,6 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
|
|
@ -195,8 +194,8 @@ func TestTryAddState(t *testing.T) {
|
|||
}
|
||||
|
||||
func assertNagiosState(t *testing.T, m telegraf.Metric, f map[string]interface{}) {
|
||||
assert.Equal(t, map[string]string{}, m.Tags())
|
||||
assert.Equal(t, f, m.Fields())
|
||||
require.Equal(t, map[string]string{}, m.Tags())
|
||||
require.Equal(t, f, m.Fields())
|
||||
}
|
||||
|
||||
func TestParse(t *testing.T) {
|
||||
|
|
@ -219,11 +218,11 @@ with three lines
|
|||
require.NoError(t, err)
|
||||
require.Len(t, metrics, 3)
|
||||
// rta
|
||||
assert.Equal(t, map[string]string{
|
||||
require.Equal(t, map[string]string{
|
||||
"unit": "ms",
|
||||
"perfdata": "rta",
|
||||
}, metrics[0].Tags())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": float64(0.298),
|
||||
"warning_lt": float64(0),
|
||||
"warning_gt": float64(4000),
|
||||
|
|
@ -233,11 +232,11 @@ with three lines
|
|||
}, metrics[0].Fields())
|
||||
|
||||
// pl
|
||||
assert.Equal(t, map[string]string{
|
||||
require.Equal(t, map[string]string{
|
||||
"unit": "%",
|
||||
"perfdata": "pl",
|
||||
}, metrics[1].Tags())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": float64(0),
|
||||
"warning_lt": float64(0),
|
||||
"warning_gt": float64(80),
|
||||
|
|
@ -260,11 +259,11 @@ with three lines
|
|||
require.NoError(t, err)
|
||||
require.Len(t, metrics, 2)
|
||||
// time
|
||||
assert.Equal(t, map[string]string{
|
||||
require.Equal(t, map[string]string{
|
||||
"unit": "s",
|
||||
"perfdata": "time",
|
||||
}, metrics[0].Tags())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": float64(0.008457),
|
||||
"min": float64(0),
|
||||
"max": float64(10),
|
||||
|
|
@ -282,10 +281,10 @@ with three lines
|
|||
require.NoError(t, err)
|
||||
require.Len(t, metrics, 2)
|
||||
// time
|
||||
assert.Equal(t, map[string]string{
|
||||
require.Equal(t, map[string]string{
|
||||
"perfdata": "time",
|
||||
}, metrics[0].Tags())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": float64(0.008457),
|
||||
}, metrics[0].Fields())
|
||||
|
||||
|
|
@ -301,10 +300,10 @@ with three lines
|
|||
require.NoError(t, err)
|
||||
require.Len(t, metrics, 4)
|
||||
// load1
|
||||
assert.Equal(t, map[string]string{
|
||||
require.Equal(t, map[string]string{
|
||||
"perfdata": "load1",
|
||||
}, metrics[0].Tags())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": float64(0.00),
|
||||
"warning_lt": MinFloat64,
|
||||
"warning_gt": float64(4),
|
||||
|
|
@ -314,10 +313,10 @@ with three lines
|
|||
}, metrics[0].Fields())
|
||||
|
||||
// load5
|
||||
assert.Equal(t, map[string]string{
|
||||
require.Equal(t, map[string]string{
|
||||
"perfdata": "load5",
|
||||
}, metrics[1].Tags())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": float64(0.01),
|
||||
"warning_gt": float64(3),
|
||||
"warning_lt": float64(0),
|
||||
|
|
@ -327,10 +326,10 @@ with three lines
|
|||
}, metrics[1].Fields())
|
||||
|
||||
// load15
|
||||
assert.Equal(t, map[string]string{
|
||||
require.Equal(t, map[string]string{
|
||||
"perfdata": "load15",
|
||||
}, metrics[2].Tags())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": float64(0.05),
|
||||
"warning_lt": float64(0),
|
||||
"warning_gt": float64(2),
|
||||
|
|
@ -382,11 +381,11 @@ with three lines
|
|||
require.NoError(t, err)
|
||||
require.Len(t, metrics, 5)
|
||||
// /=2643MB;5948;5958;0;5968
|
||||
assert.Equal(t, map[string]string{
|
||||
require.Equal(t, map[string]string{
|
||||
"unit": "MB",
|
||||
"perfdata": "/",
|
||||
}, metrics[0].Tags())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": float64(2643),
|
||||
"warning_lt": float64(0),
|
||||
"warning_gt": float64(5948),
|
||||
|
|
@ -397,11 +396,11 @@ with three lines
|
|||
}, metrics[0].Fields())
|
||||
|
||||
// /boot=68MB;88;93;0;98
|
||||
assert.Equal(t, map[string]string{
|
||||
require.Equal(t, map[string]string{
|
||||
"unit": "MB",
|
||||
"perfdata": "/boot",
|
||||
}, metrics[1].Tags())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": float64(68),
|
||||
"warning_lt": float64(0),
|
||||
"warning_gt": float64(88),
|
||||
|
|
@ -412,11 +411,11 @@ with three lines
|
|||
}, metrics[1].Fields())
|
||||
|
||||
// /home=69357MB;253404;253409;0;253414
|
||||
assert.Equal(t, map[string]string{
|
||||
require.Equal(t, map[string]string{
|
||||
"unit": "MB",
|
||||
"perfdata": "/home",
|
||||
}, metrics[2].Tags())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": float64(69357),
|
||||
"warning_lt": float64(0),
|
||||
"warning_gt": float64(253404),
|
||||
|
|
@ -427,11 +426,11 @@ with three lines
|
|||
}, metrics[2].Fields())
|
||||
|
||||
// /var/log=818MB;970;975;0;980
|
||||
assert.Equal(t, map[string]string{
|
||||
require.Equal(t, map[string]string{
|
||||
"unit": "MB",
|
||||
"perfdata": "/var/log",
|
||||
}, metrics[3].Tags())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": float64(818),
|
||||
"warning_lt": float64(0),
|
||||
"warning_gt": float64(970),
|
||||
@ -11,13 +11,12 @@ import (
"time"

"github.com/matttproud/golang_protobuf_extensions/pbutil"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/parsers/prometheus/common"

dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"
)

type Parser struct {
@ -119,7 +118,7 @@ func makeQuantiles(m *dto.Metric, tags map[string]string, metricName string, met
fields := make(map[string]interface{})

fields[metricName+"_count"] = float64(m.GetSummary().GetSampleCount())
fields[metricName+"_sum"] = float64(m.GetSummary().GetSampleSum())
fields[metricName+"_sum"] = m.GetSummary().GetSampleSum()
met := metric.New("prometheus", tags, fields, t, common.ValueType(metricType))
metrics = append(metrics, met)

@ -128,7 +127,7 @@ func makeQuantiles(m *dto.Metric, tags map[string]string, metricName string, met
fields = make(map[string]interface{})

newTags["quantile"] = fmt.Sprint(q.GetQuantile())
fields[metricName] = float64(q.GetValue())
fields[metricName] = q.GetValue()

quantileMetric := metric.New("prometheus", newTags, fields, t, common.ValueType(metricType))
metrics = append(metrics, quantileMetric)
@ -142,7 +141,7 @@ func makeBuckets(m *dto.Metric, tags map[string]string, metricName string, metri
fields := make(map[string]interface{})

fields[metricName+"_count"] = float64(m.GetHistogram().GetSampleCount())
fields[metricName+"_sum"] = float64(m.GetHistogram().GetSampleSum())
fields[metricName+"_sum"] = m.GetHistogram().GetSampleSum()

met := metric.New("prometheus", tags, fields, t, common.ValueType(metricType))
metrics = append(metrics, met)
@ -164,15 +163,15 @@ func getNameAndValue(m *dto.Metric, metricName string) map[string]interface{} {
fields := make(map[string]interface{})
if m.Gauge != nil {
if !math.IsNaN(m.GetGauge().GetValue()) {
fields[metricName] = float64(m.GetGauge().GetValue())
fields[metricName] = m.GetGauge().GetValue()
}
} else if m.Counter != nil {
if !math.IsNaN(m.GetCounter().GetValue()) {
fields[metricName] = float64(m.GetCounter().GetValue())
fields[metricName] = m.GetCounter().GetValue()
}
} else if m.Untyped != nil {
if !math.IsNaN(m.GetUntyped().GetValue()) {
fields[metricName] = float64(m.GetUntyped().GetValue())
fields[metricName] = m.GetUntyped().GetValue()
}
}
return fields
@ -8,10 +8,10 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
@ -69,8 +69,8 @@ func TestParsingValidGauge(t *testing.T) {
|
|||
|
||||
metrics, err := parse([]byte(validUniqueGauge))
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 1)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, metrics, 1)
|
||||
testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics())
|
||||
}
|
||||
|
||||
|
|
@ -89,8 +89,8 @@ func TestParsingValidCounter(t *testing.T) {
|
|||
|
||||
metrics, err := parse([]byte(validUniqueCounter))
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 1)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, metrics, 1)
|
||||
testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics())
|
||||
}
|
||||
|
||||
|
|
@ -148,8 +148,8 @@ func TestParsingValidSummary(t *testing.T) {
|
|||
|
||||
metrics, err := parse([]byte(validUniqueSummary))
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 4)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, metrics, 4)
|
||||
testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics())
|
||||
}
|
||||
|
||||
|
|
@ -276,8 +276,8 @@ func TestParsingValidHistogram(t *testing.T) {
|
|||
|
||||
metrics, err := parse([]byte(validUniqueHistogram))
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 9)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, metrics, 9)
|
||||
testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics())
|
||||
}
|
||||
|
||||
|
|
@ -309,8 +309,8 @@ func TestDefautTags(t *testing.T) {
|
|||
}
|
||||
metrics, err := parser.Parse([]byte(validUniqueGauge))
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 1)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, metrics, 1)
|
||||
testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics())
|
||||
}
|
||||
|
||||
|
|
@ -363,7 +363,7 @@ test_counter{label="test"} 1 %d
|
|||
metric, _ := parser.ParseLine(metricsWithTimestamps)
|
||||
|
||||
testutil.RequireMetricEqual(t, expected, metric, testutil.IgnoreTime(), testutil.SortMetrics())
|
||||
assert.WithinDuration(t, time.Now(), metric.Time(), 5*time.Second)
|
||||
require.WithinDuration(t, time.Now(), metric.Time(), 5*time.Second)
|
||||
}
|
||||
|
||||
func parse(buf []byte) ([]telegraf.Metric, error) {
|
||||
|
|
@ -448,7 +448,8 @@ func TestParserProtobufHeader(t *testing.T) {
|
|||
sampleProtoBufData := []uint8{67, 10, 9, 115, 119, 97, 112, 95, 102, 114, 101, 101, 18, 25, 84, 101, 108, 101, 103, 114, 97, 102, 32, 99, 111, 108, 108, 101, 99, 116, 101, 100, 32, 109, 101, 116, 114, 105, 99, 24, 1, 34, 25, 10, 12, 10, 4, 104, 111, 115, 116, 18, 4, 111, 109, 115, 107, 18, 9, 9, 0, 0, 0, 0, 224, 36, 205, 65, 65, 10, 7, 115, 119, 97, 112, 95, 105, 110, 18, 25, 84, 101, 108, 101, 103, 114, 97, 102, 32, 99, 111, 108, 108, 101, 99, 116, 101, 100, 32, 109, 101, 116, 114, 105, 99, 24, 0, 34, 25, 10, 12, 10, 4, 104, 111, 115, 116, 18, 4, 111, 109, 115, 107, 26, 9, 9, 0, 0, 0, 0, 0, 0, 63, 65, 66, 10, 8, 115, 119, 97, 112, 95, 111, 117, 116, 18, 25, 84, 101, 108, 101, 103, 114, 97, 102, 32, 99, 111, 108, 108, 101, 99, 116, 101, 100, 32, 109, 101, 116, 114, 105, 99, 24, 0, 34, 25, 10, 12, 10, 4, 104, 111, 115, 116, 18, 4, 111, 109, 115, 107, 26, 9, 9, 0, 0, 0, 0, 0, 30, 110, 65, 68, 10, 10, 115, 119, 97, 112, 95, 116, 111, 116, 97, 108, 18, 25, 84, 101, 108, 101, 103, 114, 97, 102, 32, 99, 111, 108, 108, 101, 99, 116, 101, 100, 32, 109, 101, 116, 114, 105, 99, 24, 1, 34, 25, 10, 12, 10, 4, 104, 111, 115, 116, 18, 4, 111, 109, 115, 107, 18, 9, 9, 0, 0, 0, 0, 104, 153, 205, 65, 67, 10, 9, 115, 119, 97, 112, 95, 117, 115, 101, 100, 18, 25, 84, 101, 108, 101, 103, 114, 97, 102, 32, 99, 111, 108, 108, 101, 99, 116, 101, 100, 32, 109, 101, 116, 114, 105, 99, 24, 1, 34, 25, 10, 12, 10, 4, 104, 111, 115, 116, 18, 4, 111, 109, 115, 107, 18, 9, 9, 0, 0, 0, 0, 0, 34, 109, 65, 75, 10, 17, 115, 119, 97, 112, 95, 117, 115, 101, 100, 95, 112, 101, 114, 99, 101, 110, 116, 18, 25, 84, 101, 108, 101, 103, 114, 97, 102, 32, 99, 111, 108, 108, 101, 99, 116, 101, 100, 32, 109, 101, 116, 114, 105, 99, 24, 1, 34, 25, 10, 12, 10, 4, 104, 111, 115, 116, 18, 4, 111, 109, 115, 107, 18, 9, 9, 109, 234, 180, 197, 37, 155, 248, 63}
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited")
|
||||
w.Write(sampleProtoBufData)
|
||||
_, err := w.Write(sampleProtoBufData)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
defer ts.Close()
|
||||
req, err := http.NewRequest("GET", ts.URL, nil)
|
||||
|
|
|
|||
|
|
@ -4,10 +4,11 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/prometheus/prompb"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/prometheus/prometheus/prompb"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestParse(t *testing.T) {
|
||||
|
|
@ -35,7 +36,7 @@ func TestParse(t *testing.T) {
|
|||
}
|
||||
|
||||
inoutBytes, err := prompbInput.Marshal()
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := []telegraf.Metric{
|
||||
testutil.MustMetric(
|
||||
|
|
@ -65,8 +66,8 @@ func TestParse(t *testing.T) {
|
|||
}
|
||||
|
||||
metrics, err := parser.Parse(inoutBytes)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 2)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, metrics, 2)
|
||||
testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics())
|
||||
}
|
||||
|
||||
|
|
@ -86,7 +87,7 @@ func TestDefaultTags(t *testing.T) {
|
|||
}
|
||||
|
||||
inoutBytes, err := prompbInput.Marshal()
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := []telegraf.Metric{
|
||||
testutil.MustMetric(
|
||||
|
|
@ -109,8 +110,8 @@ func TestDefaultTags(t *testing.T) {
|
|||
}
|
||||
|
||||
metrics, err := parser.Parse(inoutBytes)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 1)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, metrics, 1)
|
||||
testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics())
|
||||
}
|
||||
|
||||
|
|
@ -132,7 +133,7 @@ func TestMetricsWithTimestamp(t *testing.T) {
|
|||
}
|
||||
|
||||
inoutBytes, err := prompbInput.Marshal()
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := []telegraf.Metric{
|
||||
testutil.MustMetric(
|
||||
|
|
@ -151,7 +152,7 @@ func TestMetricsWithTimestamp(t *testing.T) {
|
|||
}
|
||||
|
||||
metrics, err := parser.Parse(inoutBytes)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 1)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, metrics, 1)
|
||||
testutil.RequireMetricsEqual(t, expected, metrics, testutil.SortMetrics())
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3,49 +3,49 @@ package value
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestParseValidValues(t *testing.T) {
|
||||
parser := NewValueParser("value_test", "integer", "", nil)
|
||||
metrics, err := parser.Parse([]byte("55"))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 1)
|
||||
assert.Equal(t, "value_test", metrics[0].Name())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.NoError(t, err)
|
||||
require.Len(t, metrics, 1)
|
||||
require.Equal(t, "value_test", metrics[0].Name())
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": int64(55),
|
||||
}, metrics[0].Fields())
|
||||
assert.Equal(t, map[string]string{}, metrics[0].Tags())
|
||||
require.Equal(t, map[string]string{}, metrics[0].Tags())
|
||||
|
||||
parser = NewValueParser("value_test", "float", "", nil)
|
||||
metrics, err = parser.Parse([]byte("64"))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 1)
|
||||
assert.Equal(t, "value_test", metrics[0].Name())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.NoError(t, err)
|
||||
require.Len(t, metrics, 1)
|
||||
require.Equal(t, "value_test", metrics[0].Name())
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": float64(64),
|
||||
}, metrics[0].Fields())
|
||||
assert.Equal(t, map[string]string{}, metrics[0].Tags())
|
||||
require.Equal(t, map[string]string{}, metrics[0].Tags())
|
||||
|
||||
parser = NewValueParser("value_test", "string", "", nil)
|
||||
metrics, err = parser.Parse([]byte("foobar"))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 1)
|
||||
assert.Equal(t, "value_test", metrics[0].Name())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.NoError(t, err)
|
||||
require.Len(t, metrics, 1)
|
||||
require.Equal(t, "value_test", metrics[0].Name())
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": "foobar",
|
||||
}, metrics[0].Fields())
|
||||
assert.Equal(t, map[string]string{}, metrics[0].Tags())
|
||||
require.Equal(t, map[string]string{}, metrics[0].Tags())
|
||||
|
||||
parser = NewValueParser("value_test", "boolean", "", nil)
|
||||
metrics, err = parser.Parse([]byte("true"))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 1)
|
||||
assert.Equal(t, "value_test", metrics[0].Name())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.NoError(t, err)
|
||||
require.Len(t, metrics, 1)
|
||||
require.Equal(t, "value_test", metrics[0].Name())
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": true,
|
||||
}, metrics[0].Fields())
|
||||
assert.Equal(t, map[string]string{}, metrics[0].Tags())
|
||||
require.Equal(t, map[string]string{}, metrics[0].Tags())
|
||||
}
|
||||
|
||||
func TestParseMultipleValues(t *testing.T) {
|
||||
|
|
@ -56,13 +56,13 @@ func TestParseMultipleValues(t *testing.T) {
|
|||
12
|
||||
999
|
||||
`))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 1)
|
||||
assert.Equal(t, "value_test", metrics[0].Name())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.NoError(t, err)
|
||||
require.Len(t, metrics, 1)
|
||||
require.Equal(t, "value_test", metrics[0].Name())
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": int64(999),
|
||||
}, metrics[0].Fields())
|
||||
assert.Equal(t, map[string]string{}, metrics[0].Tags())
|
||||
require.Equal(t, map[string]string{}, metrics[0].Tags())
|
||||
}
|
||||
|
||||
func TestParseCustomFieldName(t *testing.T) {
|
||||
|
|
@ -70,8 +70,8 @@ func TestParseCustomFieldName(t *testing.T) {
|
|||
parser.FieldName = "penguin"
|
||||
metrics, err := parser.Parse([]byte(`55`))
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"penguin": int64(55),
|
||||
}, metrics[0].Fields())
|
||||
}
|
||||
|
|
@ -79,126 +79,126 @@ func TestParseCustomFieldName(t *testing.T) {
|
|||
func TestParseLineValidValues(t *testing.T) {
|
||||
parser := NewValueParser("value_test", "integer", "", nil)
|
||||
metric, err := parser.ParseLine("55")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "value_test", metric.Name())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "value_test", metric.Name())
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": int64(55),
|
||||
}, metric.Fields())
|
||||
assert.Equal(t, map[string]string{}, metric.Tags())
|
||||
require.Equal(t, map[string]string{}, metric.Tags())
|
||||
|
||||
parser = NewValueParser("value_test", "float", "", nil)
|
||||
metric, err = parser.ParseLine("64")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "value_test", metric.Name())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "value_test", metric.Name())
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": float64(64),
|
||||
}, metric.Fields())
|
||||
assert.Equal(t, map[string]string{}, metric.Tags())
|
||||
require.Equal(t, map[string]string{}, metric.Tags())
|
||||
|
||||
parser = NewValueParser("value_test", "string", "", nil)
|
||||
metric, err = parser.ParseLine("foobar")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "value_test", metric.Name())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "value_test", metric.Name())
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": "foobar",
|
||||
}, metric.Fields())
|
||||
assert.Equal(t, map[string]string{}, metric.Tags())
|
||||
require.Equal(t, map[string]string{}, metric.Tags())
|
||||
|
||||
parser = NewValueParser("value_test", "boolean", "", nil)
|
||||
metric, err = parser.ParseLine("true")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "value_test", metric.Name())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "value_test", metric.Name())
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": true,
|
||||
}, metric.Fields())
|
||||
assert.Equal(t, map[string]string{}, metric.Tags())
|
||||
require.Equal(t, map[string]string{}, metric.Tags())
|
||||
}
|
||||
|
||||
func TestParseInvalidValues(t *testing.T) {
|
||||
parser := NewValueParser("value_test", "integer", "", nil)
|
||||
metrics, err := parser.Parse([]byte("55.0"))
|
||||
assert.Error(t, err)
|
||||
assert.Len(t, metrics, 0)
|
||||
require.Error(t, err)
|
||||
require.Len(t, metrics, 0)
|
||||
|
||||
parser = NewValueParser("value_test", "float", "", nil)
|
||||
metrics, err = parser.Parse([]byte("foobar"))
|
||||
assert.Error(t, err)
|
||||
assert.Len(t, metrics, 0)
|
||||
require.Error(t, err)
|
||||
require.Len(t, metrics, 0)
|
||||
|
||||
parser = NewValueParser("value_test", "boolean", "", nil)
|
||||
metrics, err = parser.Parse([]byte("213"))
|
||||
assert.Error(t, err)
|
||||
assert.Len(t, metrics, 0)
|
||||
require.Error(t, err)
|
||||
require.Len(t, metrics, 0)
|
||||
}
|
||||
|
||||
func TestParseLineInvalidValues(t *testing.T) {
|
||||
parser := NewValueParser("value_test", "integer", "", nil)
|
||||
_, err := parser.ParseLine("55.0")
|
||||
assert.Error(t, err)
|
||||
require.Error(t, err)
|
||||
|
||||
parser = NewValueParser("value_test", "float", "", nil)
|
||||
_, err = parser.ParseLine("foobar")
|
||||
assert.Error(t, err)
|
||||
require.Error(t, err)
|
||||
|
||||
parser = NewValueParser("value_test", "boolean", "", nil)
|
||||
_, err = parser.ParseLine("213")
|
||||
assert.Error(t, err)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestParseValidValuesDefaultTags(t *testing.T) {
|
||||
parser := NewValueParser("value_test", "integer", "", nil)
|
||||
parser.SetDefaultTags(map[string]string{"test": "tag"})
|
||||
metrics, err := parser.Parse([]byte("55"))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 1)
|
||||
assert.Equal(t, "value_test", metrics[0].Name())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.NoError(t, err)
|
||||
require.Len(t, metrics, 1)
|
||||
require.Equal(t, "value_test", metrics[0].Name())
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": int64(55),
|
||||
}, metrics[0].Fields())
|
||||
assert.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags())
|
||||
require.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags())
|
||||
|
||||
parser = NewValueParser("value_test", "float", "", nil)
|
||||
parser.SetDefaultTags(map[string]string{"test": "tag"})
|
||||
metrics, err = parser.Parse([]byte("64"))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 1)
|
||||
assert.Equal(t, "value_test", metrics[0].Name())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.NoError(t, err)
|
||||
require.Len(t, metrics, 1)
|
||||
require.Equal(t, "value_test", metrics[0].Name())
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": float64(64),
|
||||
}, metrics[0].Fields())
|
||||
assert.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags())
|
||||
require.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags())
|
||||
|
||||
parser = NewValueParser("value_test", "string", "", nil)
|
||||
parser.SetDefaultTags(map[string]string{"test": "tag"})
|
||||
metrics, err = parser.Parse([]byte("foobar"))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 1)
|
||||
assert.Equal(t, "value_test", metrics[0].Name())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.NoError(t, err)
|
||||
require.Len(t, metrics, 1)
|
||||
require.Equal(t, "value_test", metrics[0].Name())
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": "foobar",
|
||||
}, metrics[0].Fields())
|
||||
assert.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags())
|
||||
require.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags())
|
||||
|
||||
parser = NewValueParser("value_test", "boolean", "", nil)
|
||||
parser.SetDefaultTags(map[string]string{"test": "tag"})
|
||||
metrics, err = parser.Parse([]byte("true"))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 1)
|
||||
assert.Equal(t, "value_test", metrics[0].Name())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.NoError(t, err)
|
||||
require.Len(t, metrics, 1)
|
||||
require.Equal(t, "value_test", metrics[0].Name())
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": true,
|
||||
}, metrics[0].Fields())
|
||||
assert.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags())
|
||||
require.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags())
|
||||
}
|
||||
|
||||
func TestParseValuesWithNullCharacter(t *testing.T) {
|
||||
parser := NewValueParser("value_test", "integer", "", nil)
|
||||
metrics, err := parser.Parse([]byte("55\x00"))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 1)
|
||||
assert.Equal(t, "value_test", metrics[0].Name())
|
||||
assert.Equal(t, map[string]interface{}{
|
||||
require.NoError(t, err)
|
||||
require.Len(t, metrics, 1)
|
||||
require.Equal(t, "value_test", metrics[0].Name())
|
||||
require.Equal(t, map[string]interface{}{
|
||||
"value": int64(55),
|
||||
}, metrics[0].Fields())
|
||||
assert.Equal(t, map[string]string{}, metrics[0].Tags())
|
||||
require.Equal(t, map[string]string{}, metrics[0].Tags())
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -9,10 +9,9 @@ import (

var (
	ErrEOF              = errors.New("EOF")
	ErrInvalidTimestamp = errors.New("Invalid timestamp")
	ErrInvalidTimestamp = errors.New("invalid timestamp")
)

// Interface for parsing line elements.
type ElementParser interface {
	parse(p *PointParser, pt *Point) error
}

@@ -116,11 +115,10 @@ func setTimestamp(pt *Point, ts int64, numDigits int) error {
		ts = ts / 1e3
	} else if numDigits != 10 {
		// must be in seconds, return error if not 0
		if ts == 0 {
			ts = getCurrentTime()
		} else {
		if ts != 0 {
			return ErrInvalidTimestamp
		}
		ts = getCurrentTime()
	}
	pt.Timestamp = ts
	return nil
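Condensed, the setTimestamp hunk above replaces the nested if/else with an early return on the invalid-timestamp path. A sketch of the resulting flow; Point, ErrInvalidTimestamp and getCurrentTime come from the hunk, while the 13-digit (millisecond) branch and any other digit counts handled earlier in the function are assumptions, not shown in the diff:

// Sketch of the control flow after the change above; not the full function.
func setTimestampSketch(pt *Point, ts int64, numDigits int) error {
	if numDigits == 13 {
		ts = ts / 1e3 // assumed: milliseconds -> seconds
	} else if numDigits != 10 {
		// unexpected digit count: only the "no timestamp given" case (0) is valid
		if ts != 0 {
			return ErrInvalidTimestamp
		}
		ts = getCurrentTime()
	}
	pt.Timestamp = ts
	return nil
}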
@@ -4,7 +4,6 @@ import (
	"bufio"
	"bytes"
	"io"
	"log"
	"strconv"
	"sync"
	"time"

@@ -26,6 +25,7 @@ type Point struct {
type WavefrontParser struct {
	parsers     *sync.Pool
	defaultTags map[string]string
	Log         telegraf.Logger `toml:"-"`
}

// PointParser is a thread-unsafe parser and must be kept in a pool.

@@ -42,7 +42,7 @@ type PointParser struct {
	parent *WavefrontParser
}

// Returns a slice of ElementParser's for the Graphite format
// NewWavefrontElements returns a slice of ElementParser's for the Graphite format
func NewWavefrontElements() []ElementParser {
	var elements []ElementParser
	wsParser := WhiteSpaceParser{}

@@ -200,7 +200,7 @@ func (p *PointParser) unscan() {
func (p *PointParser) unscanTokens(n int) {
	if n > MaxBufferSize {
		// just log for now
		log.Printf("cannot unscan more than %d tokens", MaxBufferSize)
		p.parent.Log.Infof("Cannot unscan more than %d tokens", MaxBufferSize)
	}
	p.buf.n += n
}

@@ -208,7 +208,7 @@ func (p *PointParser) unscanTokens(n int) {
func (p *PointParser) reset(buf []byte) {
	// reset the scan buffer and write new byte
	p.scanBuf.Reset()
	p.scanBuf.Write(buf)
	p.scanBuf.Write(buf) //nolint:revive // from buffer.go: "err is always nil"

	if p.s == nil {
		p.s = NewScanner(&p.scanBuf)
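With the Log field added to WavefrontParser, unscanTokens logs through the injected telegraf.Logger instead of the global log package, so a logger has to be provided before parsing. A hedged sketch of doing that in a test; it assumes NewWavefrontParser returns a *WavefrontParser (as the tests below suggest) and that testutil.Logger satisfies telegraf.Logger:

package wavefront

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/influxdata/telegraf/testutil"
)

// Sketch only: wire a test logger into the parser so p.parent.Log.Infof has
// a usable receiver if the unscan buffer limit is ever exceeded.
func TestParseWithInjectedLogger(t *testing.T) {
	parser := NewWavefrontParser(nil)
	parser.Log = testutil.Logger{Name: "parsers.wavefront"} // assumed test logger

	metrics, err := parser.Parse([]byte("test.metric 1 1530939936"))
	require.NoError(t, err)
	require.Len(t, metrics, 1)
}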
@@ -4,208 +4,209 @@ import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
	"github.com/stretchr/testify/assert"
)

func TestParse(t *testing.T) {
	parser := NewWavefrontParser(nil)

	parsedMetrics, err := parser.Parse([]byte("test.metric 1"))
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0))
	assert.Equal(t, parsedMetrics[0].Name(), testMetric.Name())
	assert.Equal(t, parsedMetrics[0].Fields(), testMetric.Fields())
	require.Equal(t, parsedMetrics[0].Name(), testMetric.Name())
	require.Equal(t, parsedMetrics[0].Fields(), testMetric.Fields())

	parsedMetrics, err = parser.Parse([]byte("\u2206test.delta 1 1530939936"))
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric = metric.New("\u2206test.delta", map[string]string{},
		map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
	assert.EqualValues(t, parsedMetrics[0], testMetric)
	require.EqualValues(t, parsedMetrics[0], testMetric)

	parsedMetrics, err = parser.Parse([]byte("\u0394test.delta 1 1530939936"))
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric = metric.New("\u0394test.delta", map[string]string{},
		map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
	assert.EqualValues(t, parsedMetrics[0], testMetric)
	require.EqualValues(t, parsedMetrics[0], testMetric)

	parsedMetrics, err = parser.Parse([]byte("\u0394test.delta 1.234 1530939936 source=\"mysource\" tag2=value2"))
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric = metric.New("\u0394test.delta", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.234}, time.Unix(1530939936, 0))
	assert.EqualValues(t, parsedMetrics[0], testMetric)
	require.EqualValues(t, parsedMetrics[0], testMetric)

	parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936"))
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric = metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
	assert.EqualValues(t, parsedMetrics[0], testMetric)
	require.EqualValues(t, parsedMetrics[0], testMetric)

	parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource"))
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
	assert.EqualValues(t, parsedMetrics[0], testMetric)
	require.EqualValues(t, parsedMetrics[0], testMetric)

	parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 source=\"mysource\""))
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
	assert.EqualValues(t, parsedMetrics[0], testMetric)
	require.EqualValues(t, parsedMetrics[0], testMetric)

	parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2"))
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
	assert.EqualValues(t, parsedMetrics[0], testMetric)
	require.EqualValues(t, parsedMetrics[0], testMetric)

	parsedMetrics, err = parser.Parse([]byte("\"test.metric\" -1.1234 1530939936 \"source\"=\"mysource\" tag2=value2"))
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": -1.1234}, time.Unix(1530939936, 0))
	assert.EqualValues(t, parsedMetrics[0], testMetric)
	require.EqualValues(t, parsedMetrics[0], testMetric)

	parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234e04 1530939936 \"source\"=\"mysource\" tag2=value2"))
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234e04}, time.Unix(1530939936, 0))
	assert.EqualValues(t, parsedMetrics[0], testMetric)
	require.EqualValues(t, parsedMetrics[0], testMetric)

	parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234e-04 1530939936 \"source\"=\"mysource\" tag2=value2"))
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234e-04}, time.Unix(1530939936, 0))
	assert.EqualValues(t, parsedMetrics[0], testMetric)
	require.EqualValues(t, parsedMetrics[0], testMetric)

	parsedMetrics, err = parser.Parse([]byte("test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 "))
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
	assert.EqualValues(t, parsedMetrics[0], testMetric)
	require.EqualValues(t, parsedMetrics[0], testMetric)
}

func TestParseLine(t *testing.T) {
	parser := NewWavefrontParser(nil)

	parsedMetric, err := parser.ParseLine("test.metric 1")
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0))
	assert.Equal(t, parsedMetric.Name(), testMetric.Name())
	assert.Equal(t, parsedMetric.Fields(), testMetric.Fields())
	require.Equal(t, parsedMetric.Name(), testMetric.Name())
	require.Equal(t, parsedMetric.Fields(), testMetric.Fields())

	parsedMetric, err = parser.ParseLine("test.metric 1 1530939936")
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric = metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
	assert.EqualValues(t, parsedMetric, testMetric)
	require.EqualValues(t, parsedMetric, testMetric)

	parsedMetric, err = parser.ParseLine("test.metric 1 1530939936 source=mysource")
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
	assert.EqualValues(t, parsedMetric, testMetric)
	require.EqualValues(t, parsedMetric, testMetric)

	parsedMetric, err = parser.ParseLine("\"test.metric\" 1.1234 1530939936 source=\"mysource\"")
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
	assert.EqualValues(t, parsedMetric, testMetric)
	require.EqualValues(t, parsedMetric, testMetric)

	parsedMetric, err = parser.ParseLine("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2")
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
	assert.EqualValues(t, parsedMetric, testMetric)
	require.EqualValues(t, parsedMetric, testMetric)

	parsedMetric, err = parser.ParseLine("test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 ")
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
	assert.EqualValues(t, parsedMetric, testMetric)
	require.EqualValues(t, parsedMetric, testMetric)
}

func TestParseMultiple(t *testing.T) {
	parser := NewWavefrontParser(nil)

	parsedMetrics, err := parser.Parse([]byte("test.metric 1\ntest.metric2 2 1530939936"))
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric1 := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0))
	testMetric2 := metric.New("test.metric2", map[string]string{}, map[string]interface{}{"value": 2.}, time.Unix(1530939936, 0))
	testMetrics := []telegraf.Metric{testMetric1, testMetric2}
	assert.Equal(t, parsedMetrics[0].Name(), testMetrics[0].Name())
	assert.Equal(t, parsedMetrics[0].Fields(), testMetrics[0].Fields())
	assert.EqualValues(t, parsedMetrics[1], testMetrics[1])
	require.Equal(t, parsedMetrics[0].Name(), testMetrics[0].Name())
	require.Equal(t, parsedMetrics[0].Fields(), testMetrics[0].Fields())
	require.EqualValues(t, parsedMetrics[1], testMetrics[1])

	parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource\n\"test.metric\" 1.1234 1530939936 source=\"mysource\""))
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric1 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
	testMetric2 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
	testMetrics = []telegraf.Metric{testMetric1, testMetric2}
	assert.EqualValues(t, parsedMetrics, testMetrics)
	require.EqualValues(t, parsedMetrics, testMetrics)

	parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2\ntest.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 "))
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric1 = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
	testMetric2 = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
	testMetrics = []telegraf.Metric{testMetric1, testMetric2}
	assert.EqualValues(t, parsedMetrics, testMetrics)
	require.EqualValues(t, parsedMetrics, testMetrics)

	parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource\n\"test.metric\" 1.1234 1530939936 source=\"mysource\"\ntest.metric3 333 1530939936 tagit=valueit"))
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric1 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
	testMetric2 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
	testMetric3 := metric.New("test.metric3", map[string]string{"tagit": "valueit"}, map[string]interface{}{"value": 333.}, time.Unix(1530939936, 0))
	testMetrics = []telegraf.Metric{testMetric1, testMetric2, testMetric3}
	assert.EqualValues(t, parsedMetrics, testMetrics)
	require.EqualValues(t, parsedMetrics, testMetrics)
}

func TestParseSpecial(t *testing.T) {
	parser := NewWavefrontParser(nil)

	parsedMetric, err := parser.ParseLine("\"test.metric\" 1 1530939936")
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
	assert.EqualValues(t, parsedMetric, testMetric)
	require.EqualValues(t, parsedMetric, testMetric)

	parsedMetric, err = parser.ParseLine("test.metric 1 1530939936 tag1=\"val\\\"ue1\"")
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric = metric.New("test.metric", map[string]string{"tag1": "val\\\"ue1"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
	assert.EqualValues(t, parsedMetric, testMetric)
	require.EqualValues(t, parsedMetric, testMetric)
}

func TestParseInvalid(t *testing.T) {
	parser := NewWavefrontParser(nil)

	_, err := parser.Parse([]byte("test.metric"))
	assert.Error(t, err)
	require.Error(t, err)

	_, err = parser.Parse([]byte("test.metric string"))
	assert.Error(t, err)
	require.Error(t, err)

	_, err = parser.Parse([]byte("test.metric 1 string"))
	assert.Error(t, err)
	require.Error(t, err)

	_, err = parser.Parse([]byte("test.\u2206delta 1"))
	assert.Error(t, err)
	require.Error(t, err)

	_, err = parser.Parse([]byte("test.metric 1 1530939936 tag_no_pair"))
	assert.Error(t, err)
	require.Error(t, err)

	_, err = parser.Parse([]byte("test.metric 1 1530939936 tag_broken_value=\""))
	assert.Error(t, err)
	require.Error(t, err)

	_, err = parser.Parse([]byte("\"test.metric 1 1530939936"))
	assert.Error(t, err)
	require.Error(t, err)

	_, err = parser.Parse([]byte("test.metric 1 1530939936 tag1=val\\\"ue1"))
	assert.Error(t, err)
	require.Error(t, err)

	_, err = parser.Parse([]byte("\"test.metric\" -1.12-34 1530939936 \"source\"=\"mysource\" tag2=value2"))
	assert.Error(t, err)
	require.Error(t, err)
}

func TestParseDefaultTags(t *testing.T) {
	parser := NewWavefrontParser(map[string]string{"myDefault": "value1", "another": "test2"})

	parsedMetrics, err := parser.Parse([]byte("test.metric 1 1530939936"))
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric := metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
	assert.EqualValues(t, parsedMetrics[0], testMetric)
	require.EqualValues(t, parsedMetrics[0], testMetric)

	parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource"))
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric = metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2", "source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
	assert.EqualValues(t, parsedMetrics[0], testMetric)
	require.EqualValues(t, parsedMetrics[0], testMetric)

	parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 another=\"test3\""))
	assert.NoError(t, err)
	require.NoError(t, err)
	testMetric = metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
	assert.EqualValues(t, parsedMetrics[0], testMetric)
	require.EqualValues(t, parsedMetrics[0], testMetric)
}
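Not part of this commit: the repeated Parse / require.NoError / metric.New / require.EqualValues sequence above could also be written table-driven, reusing the imports already present in this test file. A sketch of that shape, nothing more:

// Sketch only: same assertions as above, expressed as a table-driven test.
func TestParseTableDriven(t *testing.T) {
	parser := NewWavefrontParser(nil)

	tests := []struct {
		input string
		name  string
		tags  map[string]string
		value float64
	}{
		{`test.metric 1 1530939936`, "test.metric", map[string]string{}, 1},
		{`"test.metric" 1.1234 1530939936 source="mysource"`, "test.metric", map[string]string{"source": "mysource"}, 1.1234},
	}
	for _, tt := range tests {
		parsed, err := parser.ParseLine(tt.input)
		require.NoError(t, err)
		expected := metric.New(tt.name, tt.tags, map[string]interface{}{"value": tt.value}, time.Unix(1530939936, 0))
		require.EqualValues(t, expected, parsed)
	}
}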
@@ -315,26 +315,26 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected dataNode, config
			if err != nil {
				return nil, fmt.Errorf("failed to query field value for '%s': %v", name, err)
			}
			path := name

			if config.FieldNameExpand {
				p := p.document.GetNodePath(selectedfield, selected, "_")
				if len(p) > 0 {
					path = p + "_" + name
					name = p + "_" + name
				}
			}

			// Check if field name already exists and if so, append an index number.
			if _, ok := fields[path]; ok {
			if _, ok := fields[name]; ok {
				for i := 1; ; i++ {
					p := path + "_" + strconv.Itoa(i)
					p := name + "_" + strconv.Itoa(i)
					if _, ok := fields[p]; !ok {
						path = p
						name = p
						break
					}
				}
			}

			fields[path] = v
			fields[name] = v
		}
	} else {
		p.debugEmptyQuery("field selection", selected, config.FieldSelection)