chore: Fix typos throughout codebase (#15338)

parent 536b3d04ce
commit 31a1d34d99
@@ -622,7 +622,7 @@

 - [#5261](https://github.com/influxdata/telegraf/pull/5261): Fix arithmetic overflow in sqlserver input.
 - [#5194](https://github.com/influxdata/telegraf/issues/5194): Fix latest metrics not sent first when output fails.
-- [#5285](https://github.com/influxdata/telegraf/issues/5285): Fix amqp_consumer stops consuming when it receives unparseable messages.
+- [#5285](https://github.com/influxdata/telegraf/issues/5285): Fix amqp_consumer stops consuming when it receives unparsable messages.
 - [#5281](https://github.com/influxdata/telegraf/issues/5281): Fix prometheus input not detecting added and removed pods.
 - [#5215](https://github.com/influxdata/telegraf/issues/5215): Remove userinfo from cluster tag in couchbase.
 - [#5298](https://github.com/influxdata/telegraf/issues/5298): Fix internal_write buffer_size not reset on timed writes.

@@ -917,7 +917,7 @@

 - [#4498](https://github.com/influxdata/telegraf/pull/4498): Keep leading whitespace for messages in syslog input.
 - [#4470](https://github.com/influxdata/telegraf/issues/4470): Skip bad entries on interrupt input.
 - [#4501](https://github.com/influxdata/telegraf/issues/4501): Preserve metric type when using filters in output plugins.
-- [#3794](https://github.com/influxdata/telegraf/issues/3794): Fix error message if URL is unparseable in influxdb output.
+- [#3794](https://github.com/influxdata/telegraf/issues/3794): Fix error message if URL is unparsable in influxdb output.
 - [#4059](https://github.com/influxdata/telegraf/issues/4059): Use explicit zpool properties to fix parse error on FreeBSD 11.2.
 - [#4514](https://github.com/influxdata/telegraf/pull/4514): Lock buffer when adding metrics.

@@ -471,7 +471,7 @@ func TestConfig_InlineTables(t *testing.T) {
 require.NoError(t, c.LoadConfig("./testdata/inline_table.toml"))
 require.Len(t, c.Outputs, 2)

-output, ok := c.Outputs[1].Output.(*MockupOuputPlugin)
+output, ok := c.Outputs[1].Output.(*MockupOutputPlugin)
 require.True(t, ok)
 require.Equal(t, map[string]string{"Authorization": "Token test", "Content-Type": "application/json"}, output.Headers)
 require.Equal(t, []string{"org_id"}, c.Outputs[0].Config.Filter.TagInclude)

@@ -484,7 +484,7 @@ func TestConfig_SliceComment(t *testing.T) {
 require.NoError(t, c.LoadConfig("./testdata/slice_comment.toml"))
 require.Len(t, c.Outputs, 1)

-output, ok := c.Outputs[0].Output.(*MockupOuputPlugin)
+output, ok := c.Outputs[0].Output.(*MockupOutputPlugin)
 require.True(t, ok)
 require.Equal(t, []string{"test"}, output.Scopes)
 }

@@ -510,7 +510,7 @@ func TestConfig_AzureMonitorNamespacePrefix(t *testing.T) {

 expectedPrefix := []string{"Telegraf/", ""}
 for i, plugin := range c.Outputs {
-output, ok := plugin.Output.(*MockupOuputPlugin)
+output, ok := plugin.Output.(*MockupOutputPlugin)
 require.True(t, ok)
 require.Equal(t, expectedPrefix[i], output.NamespacePrefix)
 }

@@ -1453,7 +1453,7 @@ func (m *MockupProcessorPluginParserFunc) SetParserFunc(pf telegraf.ParserFunc)
 }

 /*** Mockup OUTPUT plugin for testing to avoid cyclic dependencies ***/
-type MockupOuputPlugin struct {
+type MockupOutputPlugin struct {
 URL string `toml:"url"`
 Headers map[string]string `toml:"headers"`
 Scopes []string `toml:"scopes"`

@@ -1462,16 +1462,16 @@ type MockupOuputPlugin struct {
 tls.ClientConfig
 }

-func (m *MockupOuputPlugin) Connect() error {
+func (m *MockupOutputPlugin) Connect() error {
 return nil
 }
-func (m *MockupOuputPlugin) Close() error {
+func (m *MockupOutputPlugin) Close() error {
 return nil
 }
-func (m *MockupOuputPlugin) SampleConfig() string {
+func (m *MockupOutputPlugin) SampleConfig() string {
 return "Mockup test output plugin"
 }
-func (m *MockupOuputPlugin) Write(_ []telegraf.Metric) error {
+func (m *MockupOutputPlugin) Write(_ []telegraf.Metric) error {
 return nil
 }

@@ -1624,10 +1624,10 @@ func init() {

 // Register the mockup output plugin for the required names
 outputs.Add("azure_monitor", func() telegraf.Output {
-return &MockupOuputPlugin{NamespacePrefix: "Telegraf/"}
+return &MockupOutputPlugin{NamespacePrefix: "Telegraf/"}
 })
 outputs.Add("http", func() telegraf.Output {
-return &MockupOuputPlugin{}
+return &MockupOutputPlugin{}
 })
 outputs.Add("serializer_test_new", func() telegraf.Output {
 return &MockupOutputPluginSerializerNew{}
@@ -32,19 +32,19 @@ func processTable(parent string, table *ast.Table) ([]keyValuePair, error) {
 })
 case *ast.Table:
 key := prefix + k
-childs, err := processTable(key, v)
+children, err := processTable(key, v)
 if err != nil {
 return nil, fmt.Errorf("parsing table for %q failed: %w", key, err)
 }
-options = append(options, childs...)
+options = append(options, children...)
 case []*ast.Table:
 for i, t := range v {
 key := fmt.Sprintf("%s#%d.%s", prefix, i, k)
-childs, err := processTable(key, t)
+children, err := processTable(key, t)
 if err != nil {
 return nil, fmt.Errorf("parsing table for %q #%d failed: %w", key, i, err)
 }
-options = append(options, childs...)
+options = append(options, children...)
 }
 default:
 return nil, fmt.Errorf("unknown node type %T in key %q", value, prefix+k)
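The recursion in the hunk above flattens nested TOML tables into dotted option keys, tagging table-array entries with `#<index>` (`fmt.Sprintf("%s#%d.%s", prefix, i, k)`). A minimal, self-contained sketch of that key-naming idea on plain maps; the helper name, types, and sample data here are illustrative, not the plugin's own:

```go
package main

import "fmt"

// flatten walks a nested map and emits "prefix.key" option names, using
// "#index" for slice elements, loosely mirroring the key scheme used by
// processTable in the hunk above.
func flatten(prefix string, node map[string]interface{}) []string {
	var keys []string
	for k, v := range node {
		key := prefix + k
		switch val := v.(type) {
		case map[string]interface{}:
			keys = append(keys, flatten(key+".", val)...)
		case []map[string]interface{}:
			for i, t := range val {
				keys = append(keys, flatten(fmt.Sprintf("%s#%d.", key, i), t)...)
			}
		default:
			keys = append(keys, key)
		}
	}
	return keys
}

func main() {
	cfg := map[string]interface{}{
		"agent": map[string]interface{}{"interval": "10s"},
		"inputs": []map[string]interface{}{
			{"cpu": true},
		},
	}
	// Prints keys such as "agent.interval" and "inputs#0.cpu".
	fmt.Println(flatten("", cfg))
}
```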
@@ -70,7 +70,7 @@ In case you still want to continue with the PR, feel free to reopen it.

 ## Linting

-Each pull request will have the appropriate linters checking the files for any common mistakes. The github action Super Linter is used: [super-pinter](https://github.com/github/super-linter). If it is failing you can click on the action and read the logs to figure out the issue. You can also run the github action locally by following these instructions: [run-linter-locally.md](https://github.com/github/super-linter/blob/main/docs/run-linter-locally.md). You can find more information on each of the linters in the super linter readme.
+Each pull request will have the appropriate linters checking the files for any common mistakes. The github action Super Linter is used: [super-linter](https://github.com/github/super-linter). If it is failing you can click on the action and read the logs to figure out the issue. You can also run the github action locally by following these instructions: [run-linter-locally.md](https://github.com/github/super-linter/blob/main/docs/run-linter-locally.md). You can find more information on each of the linters in the super linter readme.

 ## Testing

@@ -11,7 +11,7 @@ import (
 type SASLAuth struct {
 SASLUsername config.Secret `toml:"sasl_username"`
 SASLPassword config.Secret `toml:"sasl_password"`
-SASLExtentions map[string]string `toml:"sasl_extensions"`
+SASLExtensions map[string]string `toml:"sasl_extensions"`
 SASLMechanism string `toml:"sasl_mechanism"`
 SASLVersion *int `toml:"sasl_version"`

@@ -92,7 +92,7 @@ func (k *SASLAuth) Token() (*sarama.AccessToken, error) {
 defer token.Destroy()
 return &sarama.AccessToken{
 Token: token.String(),
-Extensions: k.SASLExtentions,
+Extensions: k.SASLExtensions,
 }, nil
 }

@@ -374,8 +374,8 @@ func parseTag(tagSpec string, data interface{}) (tagKey string, tagValue string,

 func (s *AliyunCMS) prepareTagsAndDimensions(metric *Metric) {
 var (
-newData bool
-defaulTags = []string{"RegionId:RegionId"}
+newData bool
+defaultTags = []string{"RegionId:RegionId"}
 )

 if s.dt == nil { //Discovery is not activated

@@ -411,7 +411,7 @@ L:
 //Start filing tags
 //Remove old value if exist
 delete(metric.discoveryTags, instanceID)
-metric.discoveryTags[instanceID] = make(map[string]string, len(metric.TagsQueryPath)+len(defaulTags))
+metric.discoveryTags[instanceID] = make(map[string]string, len(metric.TagsQueryPath)+len(defaultTags))

 for _, tagQueryPath := range metric.TagsQueryPath {
 tagKey, tagValue, err := parseTag(tagQueryPath, elem)

@@ -428,7 +428,7 @@ L:
 }

 //Adding default tags if not already there
-for _, defaultTagQP := range defaulTags {
+for _, defaultTagQP := range defaultTags {
 tagKey, tagValue, err := parseTag(defaultTagQP, elem)

 if err != nil {
@@ -391,10 +391,10 @@ func TestGather(t *testing.T) {

 //test table:
 tests := []struct {
-name string
-hasMeasurment bool
-metricNames []string
-expected []telegraf.Metric
+name string
+hasMeasurement bool
+metricNames []string
+expected []telegraf.Metric
 }{
 {
 name: "Empty data point",

@@ -408,9 +408,9 @@ func TestGather(t *testing.T) {
 },
 },
 {
-name: "Data point with fields & tags",
-hasMeasurment: true,
-metricNames: []string{"InstanceActiveConnection"},
+name: "Data point with fields & tags",
+hasMeasurement: true,
+metricNames: []string{"InstanceActiveConnection"},
 expected: []telegraf.Metric{
 testutil.MustMetric(
 "aliyuncms_acs_slb_dashboard",

@@ -434,8 +434,8 @@ func TestGather(t *testing.T) {
 var acc testutil.Accumulator
 plugin.Metrics[0].MetricNames = tt.metricNames
 require.Empty(t, acc.GatherError(plugin.Gather))
-require.Equal(t, acc.HasMeasurement("aliyuncms_acs_slb_dashboard"), tt.hasMeasurment)
-if tt.hasMeasurment {
+require.Equal(t, acc.HasMeasurement("aliyuncms_acs_slb_dashboard"), tt.hasMeasurement)
+if tt.hasMeasurement {
 acc.AssertContainsTaggedFields(t, "aliyuncms_acs_slb_dashboard", tt.expected[0].Fields(), tt.expected[0].Tags())
 }
 })
@@ -90,6 +90,6 @@ of versions and small set of GPUs. Currently the latest ROCm version tested is
 information provided by `rocm-smi` can vary so that some fields would start/stop
 appearing in the metrics upon updates. The `rocm-smi` JSON output is not
 perfectly homogeneous and is possibly changing in the future, hence parsing and
-unmarshaling can start failing upon updating ROCm.
+unmarshalling can start failing upon updating ROCm.

 Inspired by the current state of the art of the `nvidia-smi` plugin.
@@ -122,11 +122,11 @@ func TestIntegration(t *testing.T) {
 "test,source=B value=1i 1712780301000000100",
 "test,source=C value=2i 1712780301000000200",
 }
-expexted := make([]telegraf.Metric, 0, len(metrics))
+expected := make([]telegraf.Metric, 0, len(metrics))
 for _, x := range metrics {
 m, err := parser.Parse([]byte(x))
 require.NoError(t, err)
-expexted = append(expexted, m...)
+expected = append(expected, m...)
 }

 // Start the plugin

@@ -141,12 +141,12 @@ func TestIntegration(t *testing.T) {

 // Verify that the metrics were actually written
 require.Eventually(t, func() bool {
-return acc.NMetrics() >= uint64(len(expexted))
+return acc.NMetrics() >= uint64(len(expected))
 }, 3*time.Second, 100*time.Millisecond)

 client.close()
 plugin.Stop()
-testutil.RequireMetricsEqual(t, expexted, acc.GetTelegrafMetrics())
+testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
 }

 func TestStartupErrorBehaviorError(t *testing.T) {

@@ -341,11 +341,11 @@ func TestStartupErrorBehaviorRetry(t *testing.T) {
 "test,source=B value=1i 1712780301000000100",
 "test,source=C value=2i 1712780301000000200",
 }
-expexted := make([]telegraf.Metric, 0, len(metrics))
+expected := make([]telegraf.Metric, 0, len(metrics))
 for _, x := range metrics {
 m, err := parser.Parse([]byte(x))
 require.NoError(t, err)
-expexted = append(expexted, m...)
+expected = append(expected, m...)
 }

 // Starting the plugin should succeed as we will retry to startup later

@@ -374,12 +374,12 @@ func TestStartupErrorBehaviorRetry(t *testing.T) {

 // Verify that the metrics were actually collected
 require.Eventually(t, func() bool {
-return acc.NMetrics() >= uint64(len(expexted))
+return acc.NMetrics() >= uint64(len(expected))
 }, 3*time.Second, 100*time.Millisecond)

 client.close()
 plugin.Stop()
-testutil.RequireMetricsEqual(t, expexted, acc.GetTelegrafMetrics())
+testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
 }

 type producer struct {
@@ -116,7 +116,7 @@ func TestAurora(t *testing.T) {
 },
 },
 {
-name: "float64 unparseable",
+name: "float64 unparsable",
 leaderhealth: func(_ *testing.T, w http.ResponseWriter, _ *http.Request) {
 w.WriteHeader(http.StatusOK)
 },

@@ -136,7 +136,7 @@ func TestAurora(t *testing.T) {
 },
 },
 {
-name: "int64 unparseable",
+name: "int64 unparsable",
 leaderhealth: func(_ *testing.T, w http.ResponseWriter, _ *http.Request) {
 w.WriteHeader(http.StatusOK)
 },
@@ -617,7 +617,7 @@ func TestBindXmlStatsV3(t *testing.T) {
 })
 }

-func TestBindUnparseableURL(t *testing.T) {
+func TestBindUnparsableURL(t *testing.T) {
 b := Bind{
 Urls: []string{"://example.com"},
 }
@@ -108,7 +108,7 @@ Partner Churned Count: 0
 Slave Interface: eth1
 MII Status: down
 Speed: Unknown
-Duplex: Unkown
+Duplex: Unknown
 Link Failure Count: 1
 Permanent HW addr: 3c:ec:ef:5e:71:59
 Slave queue ID: 0
@@ -96,7 +96,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.

 ## The interval that defines the fastest rate at which the node values should be sampled and values captured. (default: 1s)
 ## The sampling frequency should be adjusted to the dynamics of the signal to be sampled.
-## Higher sampling frequence increases load on ctrlX Data Layer.
+## Higher sampling frequencies increases load on ctrlX Data Layer.
 ## The sampling frequency can be higher, than the publish interval. Captured samples are put in a queue and sent in publish interval.
 ## Note: The minimum sampling interval can be overruled by a global setting in the ctrlX Data Layer configuration ('datalayer/subscriptions/settings').
 # sampling_interval = "1s"

@@ -74,7 +74,7 @@

 ## The interval that defines the fastest rate at which the node values should be sampled and values captured. (default: 1s)
 ## The sampling frequency should be adjusted to the dynamics of the signal to be sampled.
-## Higher sampling frequence increases load on ctrlX Data Layer.
+## Higher sampling frequencies increases load on ctrlX Data Layer.
 ## The sampling frequency can be higher, than the publish interval. Captured samples are put in a queue and sent in publish interval.
 ## Note: The minimum sampling interval can be overruled by a global setting in the ctrlX Data Layer configuration ('datalayer/subscriptions/settings').
 # sampling_interval = "1s"
@@ -94,7 +94,7 @@ func (gcs *GCS) Gather(acc telegraf.Accumulator) error {

 name = attrs.Name

-if !gcs.shoudIgnore(name) {
+if !gcs.shouldIgnore(name) {
 if err := gcs.processMeasurementsInObject(name, bucket, acc); err != nil {
 gcs.Log.Errorf("Could not process object %q in bucket %q: %v", name, bucketName, err)
 acc.AddError(fmt.Errorf("COULD NOT PROCESS OBJECT %q IN BUCKET %q: %w", name, bucketName, err))

@@ -119,7 +119,7 @@ func (gcs *GCS) createQuery() storage.Query {
 return storage.Query{Prefix: gcs.Prefix}
 }

-func (gcs *GCS) shoudIgnore(name string) bool {
+func (gcs *GCS) shouldIgnore(name string) bool {
 return gcs.offSet.OffSet == name || gcs.OffsetKey == name
 }

@@ -159,7 +159,7 @@ func (gcs *GCS) reachedThreshlod(processed int) bool {
 }

 func (gcs *GCS) updateOffset(bucket *storage.BucketHandle, name string) error {
-if gcs.shoudIgnore(name) {
+if gcs.shouldIgnore(name) {
 return nil
 }

@@ -150,7 +150,7 @@ func TestRunGatherIteratiosnWithLimit(t *testing.T) {
 }

 func TestRunGatherIterationWithPages(t *testing.T) {
-srv := stateFulGCSServer(t)
+srv := stateFullGCSServer(t)
 defer srv.Close()

 emulatorSetEnv(t, srv)

@@ -280,7 +280,7 @@ func startMultipleItemGCSServer(t *testing.T) *httptest.Server {
 return srv
 }

-func stateFulGCSServer(t *testing.T) *httptest.Server {
+func stateFullGCSServer(t *testing.T) *httptest.Server {
 srv := httptest.NewServer(http.NotFoundHandler())

 firstElement := parseJSONFromFile(t, "testdata/first_file_listing.json")
@@ -251,7 +251,7 @@ type TransactionStats struct {
 TransCheckpoints int64 `bson:"transaction checkpoints"`
 }

-// WTConnectionStats stores statistices on wiredTiger connections
+// WTConnectionStats stores statistics on wiredTiger connections
 type WTConnectionStats struct {
 FilesCurrentlyOpen int64 `bson:"files currently open"`
 }
@@ -47,7 +47,7 @@ func TestConvertGlobalStatus(t *testing.T) {
 }
 }

-func TestCovertGlobalVariables(t *testing.T) {
+func TestConvertGlobalVariables(t *testing.T) {
 tests := []struct {
 name string
 key string
@@ -188,7 +188,7 @@ func TestPhpFpmTimeout_From_Fcgi(t *testing.T) {
 }

 // TestPhpFpmCrashWithTimeout_From_Fcgi show issue #15175: when timeout is enabled
-// and nothing is listenning on specified port, a nil pointer was dereferenced.
+// and nothing is listening on specified port, a nil pointer was dereferenced.
 func TestPhpFpmCrashWithTimeout_From_Fcgi(t *testing.T) {
 tcp, err := net.Listen("tcp", "127.0.0.1:0")
 require.NoError(t, err, "Cannot initialize test server")
@@ -99,10 +99,10 @@ func (p *Ping) args(url string) []string {
 // It returns (<transmitted packets>, <received reply>, <received packet>, <average response>, <min response>, <max response>)
 func processPingOutput(out string) (statistics, error) {
 // So find a line contain 3 numbers except reply lines
-var statsLine, aproxs []string = nil, nil
+var statsLine, approxs []string = nil, nil
 err := errors.New("fatal error processing ping output")
 stat := regexp.MustCompile(`=\W*(\d+)\D*=\W*(\d+)\D*=\W*(\d+)`)
-aprox := regexp.MustCompile(`=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms`)
+approx := regexp.MustCompile(`=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms`)
 tttLine := regexp.MustCompile(`TTL=\d+`)
 lines := strings.Split(out, "\n")
 var replyReceived = 0

@@ -113,8 +113,8 @@ func processPingOutput(out string) (statistics, error) {
 if statsLine == nil {
 statsLine = stat.FindStringSubmatch(line)
 }
-if statsLine != nil && aproxs == nil {
-aproxs = aprox.FindStringSubmatch(line)
+if statsLine != nil && approxs == nil {
+approxs = approx.FindStringSubmatch(line)
 }
 }
 }

@@ -147,19 +147,19 @@ func processPingOutput(out string) (statistics, error) {
 stats.replyReceived = replyReceived
 stats.packetsReceived = packetsReceived

-// aproxs data should contain 4 members: entireExpression + ( min, max, avg )
-if len(aproxs) != 4 {
+// approxs data should contain 4 members: entireExpression + ( min, max, avg )
+if len(approxs) != 4 {
 return stats, err
 }
-min, err := strconv.Atoi(aproxs[1])
+min, err := strconv.Atoi(approxs[1])
 if err != nil {
 return stats, err
 }
-max, err := strconv.Atoi(aproxs[2])
+max, err := strconv.Atoi(approxs[2])
 if err != nil {
 return stats, err
 }
-avg, err := strconv.Atoi(aproxs[3])
+avg, err := strconv.Atoi(approxs[3])
 if err != nil {
 return statistics{}, err
 }
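The renamed `approx` regexp in the hunks above extracts the min/max/average round-trip times from the Windows `ping` summary, which is why the code checks for exactly four submatches (full match plus three groups). A minimal standalone sketch of the same pattern at work; the sample summary line is illustrative, not taken from the source:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Final line of a Windows ping summary (illustrative sample).
	line := "    Minimum = 1ms, Maximum = 4ms, Average = 2ms"

	// Same pattern as the renamed `approx` regexp above: three integer
	// groups, each followed by "ms".
	approx := regexp.MustCompile(`=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms`)

	// FindStringSubmatch returns the full match plus the three groups,
	// hence the `len(approxs) != 4` check in the plugin code.
	approxs := approx.FindStringSubmatch(line)
	fmt.Println(approxs[1:]) // [1 4 2] -> min, max, avg
}
```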
@@ -97,7 +97,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 # # win_service = []
 #
 # ## Process filters, multiple are allowed
-# ## Regular expressions to use for matching againt the full command
+# ## Regular expressions to use for matching against the full command
 # # patterns = ['.*']
 # ## List of users owning the process (wildcards are supported)
 # # users = ['*']

@@ -56,9 +56,9 @@ func TestChildPattern(t *testing.T) {
 parent, err := finder.Pattern(parentName)
 require.NoError(t, err)
 require.Len(t, parent, 1)
-childs, err := finder.Children(parent[0])
+children, err := finder.Children(parent[0])
 require.NoError(t, err)
-require.ElementsMatch(t, expected, childs)
+require.ElementsMatch(t, expected, children)
 }

 func TestGather_RealPatternIntegration(t *testing.T) {

@@ -68,7 +68,7 @@
 # # win_service = []
 #
 # ## Process filters, multiple are allowed
-# ## Regular expressions to use for matching againt the full command
+# ## Regular expressions to use for matching against the full command
 # # patterns = ['.*']
 # ## List of users owning the process (wildcards are supported)
 # # users = ['*']
@@ -30,7 +30,7 @@ func (s *Statsd) parseEventMessage(now time.Time, message string, defaultHostnam
 // |p:priority
 // |h:hostname
 // |t:alert_type
-// |s:source_type_nam
+// |s:source_type_name
 // |#tag1,tag2
 // ]
 //
@@ -311,7 +311,7 @@ func (e *Endpoint) startDiscovery(ctx context.Context) {
 }()
 }

-func (e *Endpoint) initalDiscovery(ctx context.Context) {
+func (e *Endpoint) initialDiscovery(ctx context.Context) {
 err := e.discover(ctx)
 if err != nil && !errors.Is(err, context.Canceled) {
 e.log.Errorf("Discovery for %s: %s", e.URL.Host, err.Error())

@@ -347,7 +347,7 @@ func (e *Endpoint) init(ctx context.Context) error {

 if time.Duration(e.Parent.ObjectDiscoveryInterval) > 0 {
 e.Parent.Log.Debug("Running initial discovery")
-e.initalDiscovery(ctx)
+e.initialDiscovery(ctx)
 }
 e.initialized = true
 return nil
@@ -24,8 +24,8 @@ See [webhook doc](https://rollbar.com/docs/webhooks/)
 * 'event' = `event.event_name` string
 * 'environment' = `event.data.item.environment` string
 * 'project_id = `event.data.item.project_id` int
-* 'language' = `event.data.item.last_occurence.language` string
-* 'level' = `event.data.item.last_occurence.level` string
+* 'language' = `event.data.item.last_occurrence.language` string
+* 'level' = `event.data.item.last_occurrence.level` string

 **Fields:**

@@ -11,16 +11,16 @@ type DummyEvent struct {
 EventName string `json:"event_name"`
 }

-type NewItemDataItemLastOccurence struct {
+type NewItemDataItemLastOccurrence struct {
 Language string `json:"language"`
 Level string `json:"level"`
 }

 type NewItemDataItem struct {
-ID int `json:"id"`
-Environment string `json:"environment"`
-ProjectID int `json:"project_id"`
-LastOccurence NewItemDataItemLastOccurence `json:"last_occurrence"`
+ID int `json:"id"`
+Environment string `json:"environment"`
+ProjectID int `json:"project_id"`
+LastOccurrence NewItemDataItemLastOccurrence `json:"last_occurrence"`
 }

 type NewItemData struct {

@@ -37,8 +37,8 @@ func (ni *NewItem) Tags() map[string]string {
 "event": ni.EventName,
 "environment": ni.Data.Item.Environment,
 "project_id": strconv.Itoa(ni.Data.Item.ProjectID),
-"language": ni.Data.Item.LastOccurence.Language,
-"level": ni.Data.Item.LastOccurence.Level,
+"language": ni.Data.Item.LastOccurrence.Language,
+"level": ni.Data.Item.LastOccurrence.Level,
 }
 }

@@ -157,7 +157,7 @@ In case you see a `Collection took longer than expected` warning, there might
 be a burst of events logged and the API is not able to deliver them fast enough
 to complete processing within the specified interval. Tweaking the
 `event_batch_size` setting might help to mitigate the issue.
-The said warning does not indicate data-loss, but you should investige the
+The said warning does not indicate data-loss, but you should investigate the
 amount of events you log.

 ## Metrics
@@ -126,7 +126,7 @@ Refer the query below to check if streaming is enabled
 .show database <DB-Name> policy streamingingestion
 ```

-## Authentiation
+## Authentication

 ### Supported Authentication Methods

@@ -482,12 +482,12 @@ func TestSanitizeLabelName(t *testing.T) {
 expected: "foobar",
 },
 {
-name: "replace invalid first charachter",
+name: "replace invalid first character",
 input: "3foobar",
 expected: "_foobar",
 },
 {
-name: "replace invalid later charachter",
+name: "replace invalid later character",
 input: "foobar.foobar",
 expected: "foobar_foobar",
 },
@@ -84,7 +84,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 ##
 ## Check specification
 ## The check name is the name to give the Sensu check associated with the event
-## created. This maps to check.metatadata.name in the event.
+## created. This maps to check.metadata.name in the event.
 [outputs.sensu.check]
 name = "telegraf"

@@ -67,7 +67,7 @@
 ##
 ## Check specification
 ## The check name is the name to give the Sensu check associated with the event
-## created. This maps to check.metatadata.name in the event.
+## created. This maps to check.metadata.name in the event.
 [outputs.sensu.check]
 name = "telegraf"

@@ -49,7 +49,7 @@ type Stackdriver struct {
 counterCache *counterCache
 filterCounter filter.Filter
 filterGauge filter.Filter
-fitlerHistogram filter.Filter
+filterHistogram filter.Filter
 }

 const (

@@ -100,7 +100,7 @@ func (s *Stackdriver) Init() error {
 if err != nil {
 return fmt.Errorf("creating gauge filter failed: %w", err)
 }
-s.fitlerHistogram, err = filter.Compile(s.MetricHistogram)
+s.filterHistogram, err = filter.Compile(s.MetricHistogram)
 if err != nil {
 return fmt.Errorf("creating histogram filter failed: %w", err)
 }

@@ -227,7 +227,7 @@ func (s *Stackdriver) sendBatch(batch []telegraf.Metric) error {
 if s.filterGauge != nil && s.filterGauge.Match(m.Name()) {
 metricType = telegraf.Gauge
 }
-if s.fitlerHistogram != nil && s.fitlerHistogram.Match(m.Name()) {
+if s.filterHistogram != nil && s.filterHistogram.Match(m.Name()) {
 metricType = telegraf.Histogram
 }

@@ -45,9 +45,9 @@ const (
 )

 type SumoLogic struct {
-URL string `toml:"url"`
-Timeout config.Duration `toml:"timeout"`
-MaxRequstBodySize config.Size `toml:"max_request_body_size"`
+URL string `toml:"url"`
+Timeout config.Duration `toml:"timeout"`
+MaxRequestBodySize config.Size `toml:"max_request_body_size"`

 SourceName string `toml:"source_name"`
 SourceHost string `toml:"source_host"`

@@ -126,7 +126,7 @@ func (s *SumoLogic) Write(metrics []telegraf.Metric) error {
 return err
 }

-if l := len(reqBody); l > int(s.MaxRequstBodySize) {
+if l := len(reqBody); l > int(s.MaxRequestBodySize) {
 chunks, err := s.splitIntoChunks(metrics)
 if err != nil {
 return err

@@ -194,10 +194,10 @@ func (s *SumoLogic) writeRequestChunk(reqBody []byte) error {
 }

 // splitIntoChunks splits metrics to be sent into chunks so that every request
-// is smaller than s.MaxRequstBodySize unless it was configured so small so that
+// is smaller than s.MaxRequestBodySize unless it was configured so small so that
 // even a single metric cannot fit.
 // In such a situation metrics will be sent one by one with a warning being logged
-// for every request sent even though they don't fit in s.MaxRequstBodySize bytes.
+// for every request sent even though they don't fit in s.MaxRequestBodySize bytes.
 func (s *SumoLogic) splitIntoChunks(metrics []telegraf.Metric) ([][]byte, error) {
 var (
 numMetrics = len(metrics)

@@ -215,7 +215,7 @@ func (s *SumoLogic) splitIntoChunks(metrics []telegraf.Metric) ([][]byte, error)
 la := len(toAppend)
 if la != 0 {
 // We already have something to append ...
-if la+len(chunkBody) > int(s.MaxRequstBodySize) {
+if la+len(chunkBody) > int(s.MaxRequestBodySize) {
 // ... and it's just the right size, without currently processed chunk.
 break
 }

@@ -229,10 +229,10 @@ func (s *SumoLogic) splitIntoChunks(metrics []telegraf.Metric) ([][]byte, error)
 i++
 toAppend = chunkBody

-if len(chunkBody) > int(s.MaxRequstBodySize) {
+if len(chunkBody) > int(s.MaxRequestBodySize) {
 s.Log.Warnf(
 "max_request_body_size set to %d which is too small even for a single metric (len: %d), sending without split",
-s.MaxRequstBodySize, len(chunkBody),
+s.MaxRequestBodySize, len(chunkBody),
 )

 // The serialized metric is too big, but we have no choice

@@ -263,9 +263,9 @@ func setHeaderIfSetInConfig(r *http.Request, h header, value string) {

 func Default() *SumoLogic {
 return &SumoLogic{
-Timeout: config.Duration(defaultClientTimeout),
-MaxRequstBodySize: defaultMaxRequestBodySize,
-headers: make(map[string]string),
+Timeout: config.Duration(defaultClientTimeout),
+MaxRequestBodySize: defaultMaxRequestBodySize,
+headers: make(map[string]string),
 }
 }

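The `MaxRequstBodySize` → `MaxRequestBodySize` renames above all touch the field that caps request size in `splitIntoChunks`: serialized metrics are packed greedily until adding the next chunk would exceed the cap, and an oversized single metric is sent on its own with a warning. A minimal standalone sketch of that greedy, size-capped batching idea; the function name and sample sizes are illustrative, not the plugin's API:

```go
package main

import "fmt"

// splitBySize greedily packs already-serialized chunks into requests no
// larger than maxSize; a single chunk that is itself too large is still
// emitted on its own, mirroring the "sending without split" warning path.
func splitBySize(chunks [][]byte, maxSize int) [][]byte {
	var requests [][]byte
	var current []byte
	for _, c := range chunks {
		if len(current) > 0 && len(current)+len(c) > maxSize {
			requests = append(requests, current)
			current = nil
		}
		current = append(current, c...)
	}
	if len(current) > 0 {
		requests = append(requests, current)
	}
	return requests
}

func main() {
	chunks := [][]byte{[]byte("aaaa"), []byte("bbbb"), []byte("cccccccccc")}
	for i, r := range splitBySize(chunks, 8) {
		// Prints: request 0: 8 bytes, then request 1: 10 bytes.
		fmt.Printf("request %d: %d bytes\n", i, len(r))
	}
}
```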
@@ -357,8 +357,8 @@ func TestDefaultUserAgent(t *testing.T) {
 })

 plugin := &SumoLogic{
-URL: u.String(),
-MaxRequstBodySize: Default().MaxRequstBodySize,
+URL: u.String(),
+MaxRequestBodySize: Default().MaxRequestBodySize,
 }

 serializer := &carbon2.Serializer{

@@ -508,7 +508,7 @@ func TestMaxRequestBodySize(t *testing.T) {
 s.URL = u.String()
 // getMetrics returns metrics that serialized (using carbon2),
 // uncompressed size is 43750B
-s.MaxRequstBodySize = 43_749
+s.MaxRequestBodySize = 43_749
 return s
 },
 metrics: getMetrics(),

@@ -521,7 +521,7 @@ func TestMaxRequestBodySize(t *testing.T) {
 plugin: func() *SumoLogic {
 s := Default()
 s.URL = u.String()
-s.MaxRequstBodySize = 10_000
+s.MaxRequestBodySize = 10_000
 return s
 },
 metrics: getMetrics(),

@@ -534,7 +534,7 @@ func TestMaxRequestBodySize(t *testing.T) {
 plugin: func() *SumoLogic {
 s := Default()
 s.URL = u.String()
-s.MaxRequstBodySize = 5_000
+s.MaxRequestBodySize = 5_000
 return s
 },
 metrics: getMetrics(),

@@ -547,7 +547,7 @@ func TestMaxRequestBodySize(t *testing.T) {
 plugin: func() *SumoLogic {
 s := Default()
 s.URL = u.String()
-s.MaxRequstBodySize = 2_500
+s.MaxRequestBodySize = 2_500
 return s
 },
 metrics: getMetrics(),

@@ -560,7 +560,7 @@ func TestMaxRequestBodySize(t *testing.T) {
 plugin: func() *SumoLogic {
 s := Default()
 s.URL = u.String()
-s.MaxRequstBodySize = 1_000
+s.MaxRequestBodySize = 1_000
 return s
 },
 metrics: getMetrics(),

@@ -573,7 +573,7 @@ func TestMaxRequestBodySize(t *testing.T) {
 plugin: func() *SumoLogic {
 s := Default()
 s.URL = u.String()
-s.MaxRequstBodySize = 500
+s.MaxRequestBodySize = 500
 return s
 },
 metrics: getMetrics(),

@@ -586,7 +586,7 @@ func TestMaxRequestBodySize(t *testing.T) {
 plugin: func() *SumoLogic {
 s := Default()
 s.URL = u.String()
-s.MaxRequstBodySize = 300
+s.MaxRequestBodySize = 300
 return s
 },
 metrics: getMetrics(),
@@ -48,7 +48,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.

 ## The framing technique with which it is expected that messages are
 ## transported (default = "octet-counting"). Whether the messages come
-## using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
+## using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
 ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must
 ## be one of "octet-counting", "non-transparent".
 # framing = "octet-counting"

@@ -25,7 +25,7 @@

 ## The framing technique with which it is expected that messages are
 ## transported (default = "octet-counting"). Whether the messages come
-## using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
+## using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
 ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must
 ## be one of "octet-counting", "non-transparent".
 # framing = "octet-counting"

@@ -14,7 +14,7 @@ import (
 "github.com/leodido/go-syslog/v4/nontransparent"
 )

-func TestGetSyslogMessageWithFramingOctectCounting(t *testing.T) {
+func TestGetSyslogMessageWithFramingOctetCounting(t *testing.T) {
 // Init plugin
 s := newSyslog()
 require.NoError(t, s.Init())

@@ -35,7 +35,7 @@ func TestGetSyslogMessageWithFramingOctectCounting(t *testing.T) {
 messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage)
 require.NoError(t, err)

-require.Equal(t, "59 <13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -", string(messageBytesWithFraming), "Incorrect Octect counting framing")
+require.Equal(t, "59 <13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -", string(messageBytesWithFraming), "Incorrect Octet counting framing")
 }

 func TestGetSyslogMessageWithFramingNonTransparent(t *testing.T) {

@@ -60,7 +60,7 @@ func TestGetSyslogMessageWithFramingNonTransparent(t *testing.T) {
 messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage)
 require.NoError(t, err)

-require.Equal(t, "<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -\n", string(messageBytesWithFraming), "Incorrect Octect counting framing")
+require.Equal(t, "<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -\n", string(messageBytesWithFraming), "Incorrect Octet counting framing")
 }

 func TestGetSyslogMessageWithFramingNonTransparentNul(t *testing.T) {

@@ -86,7 +86,7 @@ func TestGetSyslogMessageWithFramingNonTransparentNul(t *testing.T) {
 messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage)
 require.NoError(t, err)

-require.Equal(t, "<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -\x00", string(messageBytesWithFraming), "Incorrect Octect counting framing")
+require.Equal(t, "<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -\x00", string(messageBytesWithFraming), "Incorrect Octet counting framing")
 }

 func TestSyslogWriteWithTcp(t *testing.T) {
@@ -219,7 +219,7 @@ not know in advance what are we going to send, for example, the name of a
 container to send its cpu and memory consumption.

 For this case Zabbix provides [low-level discovery][lld] that allow to create
-new items dinamically based on the parameters sent by the trap.
+new items dynamically based on the parameters sent by the trap.

 As explained previously, this output plugin will format the Zabbix key using
 the tags seen in the Telegraf metric following the LLD format.
@@ -255,7 +255,7 @@ func (p *Parser) createMetric(data map[string]interface{}, schema string) (teleg
 }
 var schemaObj map[string]interface{}
 if err := json.Unmarshal([]byte(schema), &schemaObj); err != nil {
-return nil, fmt.Errorf("unmarshaling schema failed: %w", err)
+return nil, fmt.Errorf("unmarshalling schema failed: %w", err)
 }
 if len(fields) == 0 {
 // A telegraf metric needs at least one field.
@@ -111,7 +111,7 @@ func (h *MetricHandler) AddBool(key []byte, value []byte) error {
 fk := unescape(key)
 fv, err := parseBoolBytes(value)
 if err != nil {
-return errors.New("unparseable bool")
+return errors.New("unparsable bool")
 }
 h.metric.AddField(fk, fv)
 return nil
@@ -104,8 +104,8 @@ func convertToParseError(input []byte, rawErr error) error {
 // Parser is an InfluxDB Line Protocol parser that implements the
 // parsers.Parser interface.
 type Parser struct {
-InfluxTimestampPrecsion config.Duration `toml:"influx_timestamp_precision"`
-DefaultTags map[string]string `toml:"-"`
+InfluxTimestampPrecision config.Duration `toml:"influx_timestamp_precision"`
+DefaultTags map[string]string `toml:"-"`
 // If set to "series" a series machine will be initialized, defaults to regular machine
 Type string `toml:"-"`

@@ -189,7 +189,7 @@ func (p *Parser) applyDefaultTagsSingle(m telegraf.Metric) {
 }

 func (p *Parser) Init() error {
-if err := p.SetTimePrecision(time.Duration(p.InfluxTimestampPrecsion)); err != nil {
+if err := p.SetTimePrecision(time.Duration(p.InfluxTimestampPrecision)); err != nil {
 return err
 }

@@ -854,7 +854,7 @@ func TestParserTimestampPrecision(t *testing.T) {
 t.Run(tt.name, func(t *testing.T) {
 d := config.Duration(0)
 require.NoError(t, d.UnmarshalText([]byte(tt.precision)))
-parser := Parser{InfluxTimestampPrecsion: d}
+parser := Parser{InfluxTimestampPrecision: d}
 require.NoError(t, parser.Init())

 metrics, err := parser.Parse(tt.input)

@@ -869,7 +869,7 @@ func TestParserInvalidTimestampPrecision(t *testing.T) {
 d := config.Duration(0)
 for _, precision := range []string{"1h", "1d", "2s", "1m", "2ns"} {
 require.NoError(t, d.UnmarshalText([]byte(precision)))
-parser := Parser{InfluxTimestampPrecsion: d}
+parser := Parser{InfluxTimestampPrecision: d}
 require.ErrorContains(t, parser.Init(), "invalid time precision")
 }
 }
@@ -61,8 +61,8 @@ func (e *ParseError) Error() string {
 // Parser is an InfluxDB Line Protocol parser that implements the
 // parsers.Parser interface.
 type Parser struct {
-InfluxTimestampPrecsion config.Duration `toml:"influx_timestamp_precision"`
-DefaultTags map[string]string `toml:"-"`
+InfluxTimestampPrecision config.Duration `toml:"influx_timestamp_precision"`
+DefaultTags map[string]string `toml:"-"`
 // If set to "series" a series machine will be initialized, defaults to regular machine
 Type string `toml:"-"`

@@ -157,13 +157,13 @@ func (p *Parser) Init() error {
 p.machine = NewMachine(p.handler)
 }

-timeDuration := time.Duration(p.InfluxTimestampPrecsion)
+timeDuration := time.Duration(p.InfluxTimestampPrecision)
 switch timeDuration {
 case 0:
 case time.Nanosecond, time.Microsecond, time.Millisecond, time.Second:
 p.SetTimePrecision(timeDuration)
 default:
-return fmt.Errorf("invalid time precision: %d", p.InfluxTimestampPrecsion)
+return fmt.Errorf("invalid time precision: %d", p.InfluxTimestampPrecision)
 }

 return nil

@@ -690,7 +690,7 @@ func TestParserTimestampPrecision(t *testing.T) {
 t.Run(tt.name, func(t *testing.T) {
 d := config.Duration(0)
 require.NoError(t, d.UnmarshalText([]byte(tt.precision)))
-parser := Parser{InfluxTimestampPrecsion: d}
+parser := Parser{InfluxTimestampPrecision: d}
 require.NoError(t, parser.Init())

 metrics, err := parser.Parse(tt.input)

@@ -705,7 +705,7 @@ func TestParserInvalidTimestampPrecision(t *testing.T) {
 d := config.Duration(0)
 for _, precision := range []string{"1h", "1d", "2s", "1m", "2ns"} {
 require.NoError(t, d.UnmarshalText([]byte(precision)))
-parser := Parser{InfluxTimestampPrecsion: d}
+parser := Parser{InfluxTimestampPrecision: d}
 require.ErrorContains(t, parser.Init(), "invalid time precision")
 }
 }
@@ -193,8 +193,8 @@ func parsePerfData(perfdatas string, timestamp time.Time) ([]telegraf.Metric, er
 metrics := make([]telegraf.Metric, 0)

 for _, unParsedPerf := range perfSplitRegExp.FindAllString(perfdatas, -1) {
-trimedPerf := strings.TrimSpace(unParsedPerf)
-perf := nagiosRegExp.FindStringSubmatch(trimedPerf)
+trimmedPerf := strings.TrimSpace(unParsedPerf)
+perf := nagiosRegExp.FindStringSubmatch(trimmedPerf)

 // verify at least `'label'=value[UOM];` existed
 if len(perf) < 3 {
@@ -560,18 +560,18 @@ func splitLastPathElement(query string) []string {
 return []string{}
 }

-seperatorIdx := strings.LastIndex(query, "/")
-if seperatorIdx < 0 {
+separatorIdx := strings.LastIndex(query, "/")
+if separatorIdx < 0 {
 query = "./" + query
-seperatorIdx = 1
+separatorIdx = 1
 }

 // For double slash we want to split at the first slash
-if seperatorIdx > 0 && query[seperatorIdx-1] == byte('/') {
-seperatorIdx--
+if separatorIdx > 0 && query[separatorIdx-1] == byte('/') {
+separatorIdx--
 }

-base := query[:seperatorIdx]
+base := query[:separatorIdx]
 if base == "" {
 base = "/"
 }

@@ -579,7 +579,7 @@ func splitLastPathElement(query string) []string {
 elements := make([]string, 0, 3)
 elements = append(elements, base)

-offset := seperatorIdx
+offset := separatorIdx
 if i := strings.Index(query[offset:], "::"); i >= 0 {
 // Check for axis operator
 offset += i
@@ -1,7 +1,7 @@
 # Noise Processor Plugin

 The _Noise_ processor is used to add noise to numerical field values. For each
-field a noise is generated using a defined probability densitiy function and
+field a noise is generated using a defined probability density function and
 added to the value. The function type can be configured as _Laplace_, _Gaussian_
 or _Uniform_. Depending on the function, various parameters need to be
 configured:
@@ -39,7 +39,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 # source_timestamp_timezone = ""

 ## Target timestamp format
-## This defines the destination timestmap format. It also can accept either
+## This defines the destination timestamp format. It also can accept either
 ## `unix`, `unix_ms`, `unix_us`, `unix_ns`, or a time in Go "reference time".
 destination_timestamp_format = ""

@@ -22,7 +22,7 @@
 # source_timestamp_timezone = ""

 ## Target timestamp format
-## This defines the destination timestmap format. It also can accept either
+## This defines the destination timestamp format. It also can accept either
 ## `unix`, `unix_ms`, `unix_us`, `unix_ns`, or a time in Go "reference time".
 destination_timestamp_format = ""

@@ -17,7 +17,7 @@ func TestInitFail(t *testing.T) {
 require.ErrorContains(t, plugin.Init(), "id missing")
 }

-func TestPathNonExistant(t *testing.T) {
+func TestPathNonExistent(t *testing.T) {
 plugin := &Docker{
 ID: "non_existent_path_test",
 Path: "non/existent/path",

@@ -127,7 +127,7 @@ func TestResolverInvalid(t *testing.T) {
 require.ErrorContains(t, err, "cannot read the secret's value under the directory:")
 }

-func TestGetNonExistant(t *testing.T) {
+func TestGetNonExistent(t *testing.T) {
 testdir, err := filepath.Abs("testdata")
 require.NoError(t, err, "testdata cannot be found")

@@ -158,7 +158,7 @@ func TestResolverInvalid(t *testing.T) {
 require.Error(t, err)
 }

-func TestGetNonExistant(t *testing.T) {
+func TestGetNonExistent(t *testing.T) {
 secretKey := "a secret"
 secretVal := "I won't tell"

@@ -160,7 +160,7 @@ func TestResolverInvalid(t *testing.T) {
 require.ErrorContains(t, err, "cannot read the secret's value:")
 }

-func TestGetNonExistant(t *testing.T) {
+func TestGetNonExistent(t *testing.T) {
 getSystemdVersion = getSystemdVersionMin
 t.Setenv("CREDENTIALS_DIRECTORY", "testdata")

@@ -10,9 +10,9 @@ import (

 func TestFindHash(t *testing.T) {
 tests := []struct {
-testFile string
-version string
-expectedHases map[string]string
+testFile string
+version string
+expectedHashes map[string]string
 }{
 {
 "testdata/godev_patch.html",

@@ -41,6 +41,6 @@ func TestFindHash(t *testing.T) {
 hashes, err := findHashes(bytes.NewReader(b), test.version)
 require.NoError(t, err)

-require.Equal(t, test.expectedHases, hashes)
+require.Equal(t, test.expectedHashes, hashes)
 }
 }