chore: Fix typos throughout codebase (#15338)

Viktor Szépe 2024-05-31 10:26:37 +02:00 committed by GitHub
parent 536b3d04ce
commit 31a1d34d99
54 changed files with 150 additions and 150 deletions

@@ -622,7 +622,7 @@
 - [#5261](https://github.com/influxdata/telegraf/pull/5261): Fix arithmetic overflow in sqlserver input.
 - [#5194](https://github.com/influxdata/telegraf/issues/5194): Fix latest metrics not sent first when output fails.
-- [#5285](https://github.com/influxdata/telegraf/issues/5285): Fix amqp_consumer stops consuming when it receives unparseable messages.
+- [#5285](https://github.com/influxdata/telegraf/issues/5285): Fix amqp_consumer stops consuming when it receives unparsable messages.
 - [#5281](https://github.com/influxdata/telegraf/issues/5281): Fix prometheus input not detecting added and removed pods.
 - [#5215](https://github.com/influxdata/telegraf/issues/5215): Remove userinfo from cluster tag in couchbase.
 - [#5298](https://github.com/influxdata/telegraf/issues/5298): Fix internal_write buffer_size not reset on timed writes.
@@ -917,7 +917,7 @@
 - [#4498](https://github.com/influxdata/telegraf/pull/4498): Keep leading whitespace for messages in syslog input.
 - [#4470](https://github.com/influxdata/telegraf/issues/4470): Skip bad entries on interrupt input.
 - [#4501](https://github.com/influxdata/telegraf/issues/4501): Preserve metric type when using filters in output plugins.
-- [#3794](https://github.com/influxdata/telegraf/issues/3794): Fix error message if URL is unparseable in influxdb output.
+- [#3794](https://github.com/influxdata/telegraf/issues/3794): Fix error message if URL is unparsable in influxdb output.
 - [#4059](https://github.com/influxdata/telegraf/issues/4059): Use explicit zpool properties to fix parse error on FreeBSD 11.2.
 - [#4514](https://github.com/influxdata/telegraf/pull/4514): Lock buffer when adding metrics.

@@ -471,7 +471,7 @@ func TestConfig_InlineTables(t *testing.T) {
     require.NoError(t, c.LoadConfig("./testdata/inline_table.toml"))
     require.Len(t, c.Outputs, 2)
-    output, ok := c.Outputs[1].Output.(*MockupOuputPlugin)
+    output, ok := c.Outputs[1].Output.(*MockupOutputPlugin)
     require.True(t, ok)
     require.Equal(t, map[string]string{"Authorization": "Token test", "Content-Type": "application/json"}, output.Headers)
     require.Equal(t, []string{"org_id"}, c.Outputs[0].Config.Filter.TagInclude)
@@ -484,7 +484,7 @@ func TestConfig_SliceComment(t *testing.T) {
     require.NoError(t, c.LoadConfig("./testdata/slice_comment.toml"))
     require.Len(t, c.Outputs, 1)
-    output, ok := c.Outputs[0].Output.(*MockupOuputPlugin)
+    output, ok := c.Outputs[0].Output.(*MockupOutputPlugin)
     require.True(t, ok)
     require.Equal(t, []string{"test"}, output.Scopes)
 }
@@ -510,7 +510,7 @@ func TestConfig_AzureMonitorNamespacePrefix(t *testing.T) {
     expectedPrefix := []string{"Telegraf/", ""}
     for i, plugin := range c.Outputs {
-        output, ok := plugin.Output.(*MockupOuputPlugin)
+        output, ok := plugin.Output.(*MockupOutputPlugin)
         require.True(t, ok)
         require.Equal(t, expectedPrefix[i], output.NamespacePrefix)
     }
@@ -1453,7 +1453,7 @@ func (m *MockupProcessorPluginParserFunc) SetParserFunc(pf telegraf.ParserFunc)
 }
 /*** Mockup OUTPUT plugin for testing to avoid cyclic dependencies ***/
-type MockupOuputPlugin struct {
+type MockupOutputPlugin struct {
     URL string `toml:"url"`
     Headers map[string]string `toml:"headers"`
     Scopes []string `toml:"scopes"`
@@ -1462,16 +1462,16 @@ type MockupOuputPlugin struct {
     tls.ClientConfig
 }
-func (m *MockupOuputPlugin) Connect() error {
+func (m *MockupOutputPlugin) Connect() error {
     return nil
 }
-func (m *MockupOuputPlugin) Close() error {
+func (m *MockupOutputPlugin) Close() error {
     return nil
 }
-func (m *MockupOuputPlugin) SampleConfig() string {
+func (m *MockupOutputPlugin) SampleConfig() string {
     return "Mockup test output plugin"
 }
-func (m *MockupOuputPlugin) Write(_ []telegraf.Metric) error {
+func (m *MockupOutputPlugin) Write(_ []telegraf.Metric) error {
     return nil
 }
@@ -1624,10 +1624,10 @@ func init() {
     // Register the mockup output plugin for the required names
     outputs.Add("azure_monitor", func() telegraf.Output {
-        return &MockupOuputPlugin{NamespacePrefix: "Telegraf/"}
+        return &MockupOutputPlugin{NamespacePrefix: "Telegraf/"}
     })
     outputs.Add("http", func() telegraf.Output {
-        return &MockupOuputPlugin{}
+        return &MockupOutputPlugin{}
     })
     outputs.Add("serializer_test_new", func() telegraf.Output {
         return &MockupOutputPluginSerializerNew{}

@@ -32,19 +32,19 @@ func processTable(parent string, table *ast.Table) ([]keyValuePair, error) {
         })
     case *ast.Table:
         key := prefix + k
-        childs, err := processTable(key, v)
+        children, err := processTable(key, v)
         if err != nil {
             return nil, fmt.Errorf("parsing table for %q failed: %w", key, err)
         }
-        options = append(options, childs...)
+        options = append(options, children...)
     case []*ast.Table:
         for i, t := range v {
             key := fmt.Sprintf("%s#%d.%s", prefix, i, k)
-            childs, err := processTable(key, t)
+            children, err := processTable(key, t)
             if err != nil {
                 return nil, fmt.Errorf("parsing table for %q #%d failed: %w", key, i, err)
             }
-            options = append(options, childs...)
+            options = append(options, children...)
         }
     default:
         return nil, fmt.Errorf("unknown node type %T in key %q", value, prefix+k)

@@ -70,7 +70,7 @@ In case you still want to continue with the PR, feel free to reopen it.
 ## Linting
-Each pull request will have the appropriate linters checking the files for any common mistakes. The github action Super Linter is used: [super-pinter](https://github.com/github/super-linter). If it is failing you can click on the action and read the logs to figure out the issue. You can also run the github action locally by following these instructions: [run-linter-locally.md](https://github.com/github/super-linter/blob/main/docs/run-linter-locally.md). You can find more information on each of the linters in the super linter readme.
+Each pull request will have the appropriate linters checking the files for any common mistakes. The github action Super Linter is used: [super-linter](https://github.com/github/super-linter). If it is failing you can click on the action and read the logs to figure out the issue. You can also run the github action locally by following these instructions: [run-linter-locally.md](https://github.com/github/super-linter/blob/main/docs/run-linter-locally.md). You can find more information on each of the linters in the super linter readme.
 ## Testing

@@ -11,7 +11,7 @@ import (
 type SASLAuth struct {
     SASLUsername config.Secret `toml:"sasl_username"`
     SASLPassword config.Secret `toml:"sasl_password"`
-    SASLExtentions map[string]string `toml:"sasl_extensions"`
+    SASLExtensions map[string]string `toml:"sasl_extensions"`
     SASLMechanism string `toml:"sasl_mechanism"`
     SASLVersion *int `toml:"sasl_version"`
@@ -92,7 +92,7 @@ func (k *SASLAuth) Token() (*sarama.AccessToken, error) {
     defer token.Destroy()
     return &sarama.AccessToken{
         Token: token.String(),
-        Extensions: k.SASLExtentions,
+        Extensions: k.SASLExtensions,
     }, nil
 }

@@ -375,7 +375,7 @@ func parseTag(tagSpec string, data interface{}) (tagKey string, tagValue string,
 func (s *AliyunCMS) prepareTagsAndDimensions(metric *Metric) {
     var (
         newData bool
-        defaulTags = []string{"RegionId:RegionId"}
+        defaultTags = []string{"RegionId:RegionId"}
     )
     if s.dt == nil { //Discovery is not activated
@@ -411,7 +411,7 @@ L:
     //Start filing tags
     //Remove old value if exist
     delete(metric.discoveryTags, instanceID)
-    metric.discoveryTags[instanceID] = make(map[string]string, len(metric.TagsQueryPath)+len(defaulTags))
+    metric.discoveryTags[instanceID] = make(map[string]string, len(metric.TagsQueryPath)+len(defaultTags))
     for _, tagQueryPath := range metric.TagsQueryPath {
         tagKey, tagValue, err := parseTag(tagQueryPath, elem)
@@ -428,7 +428,7 @@ L:
     }
     //Adding default tags if not already there
-    for _, defaultTagQP := range defaulTags {
+    for _, defaultTagQP := range defaultTags {
         tagKey, tagValue, err := parseTag(defaultTagQP, elem)
         if err != nil {

@@ -392,7 +392,7 @@ func TestGather(t *testing.T) {
     //test table:
     tests := []struct {
         name string
-        hasMeasurment bool
+        hasMeasurement bool
         metricNames []string
         expected []telegraf.Metric
     }{
@@ -409,7 +409,7 @@ func TestGather(t *testing.T) {
         },
         {
             name: "Data point with fields & tags",
-            hasMeasurment: true,
+            hasMeasurement: true,
             metricNames: []string{"InstanceActiveConnection"},
             expected: []telegraf.Metric{
                 testutil.MustMetric(
@@ -434,8 +434,8 @@ func TestGather(t *testing.T) {
             var acc testutil.Accumulator
             plugin.Metrics[0].MetricNames = tt.metricNames
             require.Empty(t, acc.GatherError(plugin.Gather))
-            require.Equal(t, acc.HasMeasurement("aliyuncms_acs_slb_dashboard"), tt.hasMeasurment)
-            if tt.hasMeasurment {
+            require.Equal(t, acc.HasMeasurement("aliyuncms_acs_slb_dashboard"), tt.hasMeasurement)
+            if tt.hasMeasurement {
                 acc.AssertContainsTaggedFields(t, "aliyuncms_acs_slb_dashboard", tt.expected[0].Fields(), tt.expected[0].Tags())
             }
         })

@@ -90,6 +90,6 @@ of versions and small set of GPUs. Currently the latest ROCm version tested is
 information provided by `rocm-smi` can vary so that some fields would start/stop
 appearing in the metrics upon updates. The `rocm-smi` JSON output is not
 perfectly homogeneous and is possibly changing in the future, hence parsing and
-unmarshaling can start failing upon updating ROCm.
+unmarshalling can start failing upon updating ROCm.
 Inspired by the current state of the art of the `nvidia-smi` plugin.
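A common defensive pattern for such schema drift is to decode into a generic map and extract only the keys that are actually present. The sketch below is an illustration under that assumption, not the plugin's actual code, and the key name is made up:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// parseCard pulls selected fields out of one rocm-smi card object while
// tolerating keys that appear or disappear between ROCm versions.
func parseCard(raw []byte, wanted []string) (map[string]string, error) {
	var card map[string]json.RawMessage
	if err := json.Unmarshal(raw, &card); err != nil {
		return nil, fmt.Errorf("unmarshalling card failed: %w", err)
	}
	fields := make(map[string]string)
	for _, key := range wanted {
		if v, ok := card[key]; ok {
			var s string
			// Skip values that are not strings instead of failing hard.
			if err := json.Unmarshal(v, &s); err == nil {
				fields[key] = s
			}
		}
	}
	return fields, nil
}

func main() {
	out, err := parseCard([]byte(`{"GPU use (%)": "12"}`), []string{"GPU use (%)"})
	fmt.Println(out, err) // map[GPU use (%):12] <nil>
}
```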

@@ -122,11 +122,11 @@ func TestIntegration(t *testing.T) {
         "test,source=B value=1i 1712780301000000100",
         "test,source=C value=2i 1712780301000000200",
     }
-    expexted := make([]telegraf.Metric, 0, len(metrics))
+    expected := make([]telegraf.Metric, 0, len(metrics))
     for _, x := range metrics {
         m, err := parser.Parse([]byte(x))
         require.NoError(t, err)
-        expexted = append(expexted, m...)
+        expected = append(expected, m...)
     }
     // Start the plugin
@@ -141,12 +141,12 @@ func TestIntegration(t *testing.T) {
     // Verify that the metrics were actually written
     require.Eventually(t, func() bool {
-        return acc.NMetrics() >= uint64(len(expexted))
+        return acc.NMetrics() >= uint64(len(expected))
     }, 3*time.Second, 100*time.Millisecond)
     client.close()
     plugin.Stop()
-    testutil.RequireMetricsEqual(t, expexted, acc.GetTelegrafMetrics())
+    testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
 }
 func TestStartupErrorBehaviorError(t *testing.T) {
@@ -341,11 +341,11 @@ func TestStartupErrorBehaviorRetry(t *testing.T) {
         "test,source=B value=1i 1712780301000000100",
         "test,source=C value=2i 1712780301000000200",
     }
-    expexted := make([]telegraf.Metric, 0, len(metrics))
+    expected := make([]telegraf.Metric, 0, len(metrics))
     for _, x := range metrics {
         m, err := parser.Parse([]byte(x))
         require.NoError(t, err)
-        expexted = append(expexted, m...)
+        expected = append(expected, m...)
     }
     // Starting the plugin should succeed as we will retry to startup later
@@ -374,12 +374,12 @@ func TestStartupErrorBehaviorRetry(t *testing.T) {
     // Verify that the metrics were actually collected
     require.Eventually(t, func() bool {
-        return acc.NMetrics() >= uint64(len(expexted))
+        return acc.NMetrics() >= uint64(len(expected))
     }, 3*time.Second, 100*time.Millisecond)
     client.close()
     plugin.Stop()
-    testutil.RequireMetricsEqual(t, expexted, acc.GetTelegrafMetrics())
+    testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics())
 }
 type producer struct {

@@ -116,7 +116,7 @@ func TestAurora(t *testing.T) {
             },
         },
         {
-            name: "float64 unparseable",
+            name: "float64 unparsable",
             leaderhealth: func(_ *testing.T, w http.ResponseWriter, _ *http.Request) {
                 w.WriteHeader(http.StatusOK)
             },
@@ -136,7 +136,7 @@
             },
         },
         {
-            name: "int64 unparseable",
+            name: "int64 unparsable",
             leaderhealth: func(_ *testing.T, w http.ResponseWriter, _ *http.Request) {
                 w.WriteHeader(http.StatusOK)
             },

@@ -617,7 +617,7 @@ func TestBindXmlStatsV3(t *testing.T) {
     })
 }
-func TestBindUnparseableURL(t *testing.T) {
+func TestBindUnparsableURL(t *testing.T) {
     b := Bind{
         Urls: []string{"://example.com"},
     }

@@ -108,7 +108,7 @@ Partner Churned Count: 0
 Slave Interface: eth1
 MII Status: down
 Speed: Unknown
-Duplex: Unkown
+Duplex: Unknown
 Link Failure Count: 1
 Permanent HW addr: 3c:ec:ef:5e:71:59
 Slave queue ID: 0

@@ -96,7 +96,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 ## The interval that defines the fastest rate at which the node values should be sampled and values captured. (default: 1s)
 ## The sampling frequency should be adjusted to the dynamics of the signal to be sampled.
-## Higher sampling frequence increases load on ctrlX Data Layer.
+## Higher sampling frequencies increases load on ctrlX Data Layer.
 ## The sampling frequency can be higher, than the publish interval. Captured samples are put in a queue and sent in publish interval.
 ## Note: The minimum sampling interval can be overruled by a global setting in the ctrlX Data Layer configuration ('datalayer/subscriptions/settings').
 # sampling_interval = "1s"

@@ -74,7 +74,7 @@
 ## The interval that defines the fastest rate at which the node values should be sampled and values captured. (default: 1s)
 ## The sampling frequency should be adjusted to the dynamics of the signal to be sampled.
-## Higher sampling frequence increases load on ctrlX Data Layer.
+## Higher sampling frequencies increases load on ctrlX Data Layer.
 ## The sampling frequency can be higher, than the publish interval. Captured samples are put in a queue and sent in publish interval.
 ## Note: The minimum sampling interval can be overruled by a global setting in the ctrlX Data Layer configuration ('datalayer/subscriptions/settings').
 # sampling_interval = "1s"

@@ -94,7 +94,7 @@ func (gcs *GCS) Gather(acc telegraf.Accumulator) error {
         name = attrs.Name
-        if !gcs.shoudIgnore(name) {
+        if !gcs.shouldIgnore(name) {
             if err := gcs.processMeasurementsInObject(name, bucket, acc); err != nil {
                 gcs.Log.Errorf("Could not process object %q in bucket %q: %v", name, bucketName, err)
                 acc.AddError(fmt.Errorf("COULD NOT PROCESS OBJECT %q IN BUCKET %q: %w", name, bucketName, err))
@@ -119,7 +119,7 @@ func (gcs *GCS) createQuery() storage.Query {
     return storage.Query{Prefix: gcs.Prefix}
 }
-func (gcs *GCS) shoudIgnore(name string) bool {
+func (gcs *GCS) shouldIgnore(name string) bool {
     return gcs.offSet.OffSet == name || gcs.OffsetKey == name
 }
@@ -159,7 +159,7 @@ func (gcs *GCS) reachedThreshlod(processed int) bool {
 }
 func (gcs *GCS) updateOffset(bucket *storage.BucketHandle, name string) error {
-    if gcs.shoudIgnore(name) {
+    if gcs.shouldIgnore(name) {
         return nil
     }

@@ -150,7 +150,7 @@ func TestRunGatherIteratiosnWithLimit(t *testing.T) {
 }
 func TestRunGatherIterationWithPages(t *testing.T) {
-    srv := stateFulGCSServer(t)
+    srv := stateFullGCSServer(t)
     defer srv.Close()
     emulatorSetEnv(t, srv)
@@ -280,7 +280,7 @@ func startMultipleItemGCSServer(t *testing.T) *httptest.Server {
     return srv
 }
-func stateFulGCSServer(t *testing.T) *httptest.Server {
+func stateFullGCSServer(t *testing.T) *httptest.Server {
     srv := httptest.NewServer(http.NotFoundHandler())
     firstElement := parseJSONFromFile(t, "testdata/first_file_listing.json")

@@ -251,7 +251,7 @@ type TransactionStats struct {
     TransCheckpoints int64 `bson:"transaction checkpoints"`
 }
-// WTConnectionStats stores statistices on wiredTiger connections
+// WTConnectionStats stores statistics on wiredTiger connections
 type WTConnectionStats struct {
     FilesCurrentlyOpen int64 `bson:"files currently open"`
 }

@@ -47,7 +47,7 @@ func TestConvertGlobalStatus(t *testing.T) {
     }
 }
-func TestCovertGlobalVariables(t *testing.T) {
+func TestConvertGlobalVariables(t *testing.T) {
     tests := []struct {
         name string
         key string

@@ -188,7 +188,7 @@ func TestPhpFpmTimeout_From_Fcgi(t *testing.T) {
 }
 // TestPhpFpmCrashWithTimeout_From_Fcgi show issue #15175: when timeout is enabled
-// and nothing is listenning on specified port, a nil pointer was dereferenced.
+// and nothing is listening on specified port, a nil pointer was dereferenced.
 func TestPhpFpmCrashWithTimeout_From_Fcgi(t *testing.T) {
     tcp, err := net.Listen("tcp", "127.0.0.1:0")
     require.NoError(t, err, "Cannot initialize test server")

@@ -99,10 +99,10 @@ func (p *Ping) args(url string) []string {
 // It returns (<transmitted packets>, <received reply>, <received packet>, <average response>, <min response>, <max response>)
 func processPingOutput(out string) (statistics, error) {
     // So find a line contain 3 numbers except reply lines
-    var statsLine, aproxs []string = nil, nil
+    var statsLine, approxs []string = nil, nil
     err := errors.New("fatal error processing ping output")
     stat := regexp.MustCompile(`=\W*(\d+)\D*=\W*(\d+)\D*=\W*(\d+)`)
-    aprox := regexp.MustCompile(`=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms`)
+    approx := regexp.MustCompile(`=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms`)
     tttLine := regexp.MustCompile(`TTL=\d+`)
     lines := strings.Split(out, "\n")
     var replyReceived = 0
@@ -113,8 +113,8 @@ func processPingOutput(out string) (statistics, error) {
             if statsLine == nil {
                 statsLine = stat.FindStringSubmatch(line)
             }
-            if statsLine != nil && aproxs == nil {
-                aproxs = aprox.FindStringSubmatch(line)
+            if statsLine != nil && approxs == nil {
+                approxs = approx.FindStringSubmatch(line)
             }
         }
     }
@@ -147,19 +147,19 @@ func processPingOutput(out string) (statistics, error) {
     stats.replyReceived = replyReceived
     stats.packetsReceived = packetsReceived
-    // aproxs data should contain 4 members: entireExpression + ( min, max, avg )
-    if len(aproxs) != 4 {
+    // approxs data should contain 4 members: entireExpression + ( min, max, avg )
+    if len(approxs) != 4 {
         return stats, err
     }
-    min, err := strconv.Atoi(aproxs[1])
+    min, err := strconv.Atoi(approxs[1])
     if err != nil {
         return stats, err
     }
-    max, err := strconv.Atoi(aproxs[2])
+    max, err := strconv.Atoi(approxs[2])
     if err != nil {
         return stats, err
     }
-    avg, err := strconv.Atoi(aproxs[3])
+    avg, err := strconv.Atoi(approxs[3])
     if err != nil {
         return statistics{}, err
     }

@@ -97,7 +97,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 # # win_service = []
 # #
 # ## Process filters, multiple are allowed
-# ## Regular expressions to use for matching againt the full command
+# ## Regular expressions to use for matching against the full command
 # # patterns = ['.*']
 # ## List of users owning the process (wildcards are supported)
 # # users = ['*']

@@ -56,9 +56,9 @@ func TestChildPattern(t *testing.T) {
     parent, err := finder.Pattern(parentName)
     require.NoError(t, err)
     require.Len(t, parent, 1)
-    childs, err := finder.Children(parent[0])
+    children, err := finder.Children(parent[0])
     require.NoError(t, err)
-    require.ElementsMatch(t, expected, childs)
+    require.ElementsMatch(t, expected, children)
 }
 func TestGather_RealPatternIntegration(t *testing.T) {

@@ -68,7 +68,7 @@
 # # win_service = []
 # #
 # ## Process filters, multiple are allowed
-# ## Regular expressions to use for matching againt the full command
+# ## Regular expressions to use for matching against the full command
 # # patterns = ['.*']
 # ## List of users owning the process (wildcards are supported)
 # # users = ['*']

@@ -30,7 +30,7 @@ func (s *Statsd) parseEventMessage(now time.Time, message string, defaultHostnam
     // |p:priority
     // |h:hostname
     // |t:alert_type
-    // |s:source_type_nam
+    // |s:source_type_name
     // |#tag1,tag2
     // ]
     //

@@ -311,7 +311,7 @@ func (e *Endpoint) startDiscovery(ctx context.Context) {
     }()
 }
-func (e *Endpoint) initalDiscovery(ctx context.Context) {
+func (e *Endpoint) initialDiscovery(ctx context.Context) {
     err := e.discover(ctx)
     if err != nil && !errors.Is(err, context.Canceled) {
         e.log.Errorf("Discovery for %s: %s", e.URL.Host, err.Error())
@@ -347,7 +347,7 @@ func (e *Endpoint) init(ctx context.Context) error {
     if time.Duration(e.Parent.ObjectDiscoveryInterval) > 0 {
         e.Parent.Log.Debug("Running initial discovery")
-        e.initalDiscovery(ctx)
+        e.initialDiscovery(ctx)
     }
     e.initialized = true
     return nil

@@ -24,8 +24,8 @@ See [webhook doc](https://rollbar.com/docs/webhooks/)
 * 'event' = `event.event_name` string
 * 'environment' = `event.data.item.environment` string
 * 'project_id = `event.data.item.project_id` int
-* 'language' = `event.data.item.last_occurence.language` string
-* 'level' = `event.data.item.last_occurence.level` string
+* 'language' = `event.data.item.last_occurrence.language` string
+* 'level' = `event.data.item.last_occurrence.level` string
 **Fields:**

@@ -11,7 +11,7 @@ type DummyEvent struct {
     EventName string `json:"event_name"`
 }
-type NewItemDataItemLastOccurence struct {
+type NewItemDataItemLastOccurrence struct {
     Language string `json:"language"`
     Level string `json:"level"`
 }
@@ -20,7 +20,7 @@ type NewItemDataItem struct {
     ID int `json:"id"`
     Environment string `json:"environment"`
     ProjectID int `json:"project_id"`
-    LastOccurence NewItemDataItemLastOccurence `json:"last_occurrence"`
+    LastOccurrence NewItemDataItemLastOccurrence `json:"last_occurrence"`
 }
 type NewItemData struct {
@@ -37,8 +37,8 @@ func (ni *NewItem) Tags() map[string]string {
         "event": ni.EventName,
         "environment": ni.Data.Item.Environment,
         "project_id": strconv.Itoa(ni.Data.Item.ProjectID),
-        "language": ni.Data.Item.LastOccurence.Language,
-        "level": ni.Data.Item.LastOccurence.Level,
+        "language": ni.Data.Item.LastOccurrence.Language,
+        "level": ni.Data.Item.LastOccurrence.Level,
     }
 }

@@ -157,7 +157,7 @@ In case you see a `Collection took longer than expected` warning, there might
 be a burst of events logged and the API is not able to deliver them fast enough
 to complete processing within the specified interval. Tweaking the
 `event_batch_size` setting might help to mitigate the issue.
-The said warning does not indicate data-loss, but you should investige the
+The said warning does not indicate data-loss, but you should investigate the
 amount of events you log.
 ## Metrics

@@ -126,7 +126,7 @@ Refer the query below to check if streaming is enabled
 .show database <DB-Name> policy streamingingestion
 ```
-## Authentiation
+## Authentication
 ### Supported Authentication Methods

@@ -482,12 +482,12 @@ func TestSanitizeLabelName(t *testing.T) {
             expected: "foobar",
         },
         {
-            name: "replace invalid first charachter",
+            name: "replace invalid first character",
             input: "3foobar",
             expected: "_foobar",
         },
         {
-            name: "replace invalid later charachter",
+            name: "replace invalid later character",
             input: "foobar.foobar",
             expected: "foobar_foobar",
         },

@@ -84,7 +84,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 ##
 ## Check specification
 ## The check name is the name to give the Sensu check associated with the event
-## created. This maps to check.metatadata.name in the event.
+## created. This maps to check.metadata.name in the event.
 [outputs.sensu.check]
 name = "telegraf"

@@ -67,7 +67,7 @@
 ##
 ## Check specification
 ## The check name is the name to give the Sensu check associated with the event
-## created. This maps to check.metatadata.name in the event.
+## created. This maps to check.metadata.name in the event.
 [outputs.sensu.check]
 name = "telegraf"

@@ -49,7 +49,7 @@ type Stackdriver struct {
     counterCache *counterCache
     filterCounter filter.Filter
     filterGauge filter.Filter
-    fitlerHistogram filter.Filter
+    filterHistogram filter.Filter
 }
 const (
@@ -100,7 +100,7 @@ func (s *Stackdriver) Init() error {
     if err != nil {
         return fmt.Errorf("creating gauge filter failed: %w", err)
     }
-    s.fitlerHistogram, err = filter.Compile(s.MetricHistogram)
+    s.filterHistogram, err = filter.Compile(s.MetricHistogram)
     if err != nil {
         return fmt.Errorf("creating histogram filter failed: %w", err)
     }
@@ -227,7 +227,7 @@ func (s *Stackdriver) sendBatch(batch []telegraf.Metric) error {
     if s.filterGauge != nil && s.filterGauge.Match(m.Name()) {
         metricType = telegraf.Gauge
     }
-    if s.fitlerHistogram != nil && s.fitlerHistogram.Match(m.Name()) {
+    if s.filterHistogram != nil && s.filterHistogram.Match(m.Name()) {
         metricType = telegraf.Histogram
     }

@@ -47,7 +47,7 @@ const (
 type SumoLogic struct {
     URL string `toml:"url"`
     Timeout config.Duration `toml:"timeout"`
-    MaxRequstBodySize config.Size `toml:"max_request_body_size"`
+    MaxRequestBodySize config.Size `toml:"max_request_body_size"`
     SourceName string `toml:"source_name"`
     SourceHost string `toml:"source_host"`
@@ -126,7 +126,7 @@ func (s *SumoLogic) Write(metrics []telegraf.Metric) error {
         return err
     }
-    if l := len(reqBody); l > int(s.MaxRequstBodySize) {
+    if l := len(reqBody); l > int(s.MaxRequestBodySize) {
         chunks, err := s.splitIntoChunks(metrics)
         if err != nil {
             return err
@@ -194,10 +194,10 @@ func (s *SumoLogic) writeRequestChunk(reqBody []byte) error {
 }
 // splitIntoChunks splits metrics to be sent into chunks so that every request
-// is smaller than s.MaxRequstBodySize unless it was configured so small so that
+// is smaller than s.MaxRequestBodySize unless it was configured so small so that
 // even a single metric cannot fit.
 // In such a situation metrics will be sent one by one with a warning being logged
-// for every request sent even though they don't fit in s.MaxRequstBodySize bytes.
+// for every request sent even though they don't fit in s.MaxRequestBodySize bytes.
 func (s *SumoLogic) splitIntoChunks(metrics []telegraf.Metric) ([][]byte, error) {
     var (
         numMetrics = len(metrics)
@@ -215,7 +215,7 @@ func (s *SumoLogic) splitIntoChunks(metrics []telegraf.Metric) ([][]byte, error)
         la := len(toAppend)
         if la != 0 {
             // We already have something to append ...
-            if la+len(chunkBody) > int(s.MaxRequstBodySize) {
+            if la+len(chunkBody) > int(s.MaxRequestBodySize) {
                 // ... and it's just the right size, without currently processed chunk.
                 break
             }
@@ -229,10 +229,10 @@ func (s *SumoLogic) splitIntoChunks(metrics []telegraf.Metric) ([][]byte, error)
         i++
         toAppend = chunkBody
-        if len(chunkBody) > int(s.MaxRequstBodySize) {
+        if len(chunkBody) > int(s.MaxRequestBodySize) {
            s.Log.Warnf(
                 "max_request_body_size set to %d which is too small even for a single metric (len: %d), sending without split",
-                s.MaxRequstBodySize, len(chunkBody),
+                s.MaxRequestBodySize, len(chunkBody),
             )
             // The serialized metric is too big, but we have no choice
@@ -264,7 +264,7 @@ func setHeaderIfSetInConfig(r *http.Request, h header, value string) {
 func Default() *SumoLogic {
     return &SumoLogic{
         Timeout: config.Duration(defaultClientTimeout),
-        MaxRequstBodySize: defaultMaxRequestBodySize,
+        MaxRequestBodySize: defaultMaxRequestBodySize,
         headers: make(map[string]string),
     }
 }
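The behavior documented for splitIntoChunks can be sketched generically as follows; this is an illustration of the same greedy size-capped packing, not the plugin's exact code:

```go
package main

import "fmt"

// splitBySize greedily packs pre-serialized items into chunks of at most
// maxSize bytes. An item that alone exceeds maxSize still becomes its own
// chunk, mirroring the warn-and-send-anyway behavior described above.
func splitBySize(items [][]byte, maxSize int) [][]byte {
	var chunks [][]byte
	var current []byte
	for _, item := range items {
		if len(current) > 0 && len(current)+len(item) > maxSize {
			chunks = append(chunks, current)
			current = nil
		}
		current = append(current, item...)
	}
	if len(current) > 0 {
		chunks = append(chunks, current)
	}
	return chunks
}

func main() {
	chunks := splitBySize([][]byte{[]byte("aaaa"), []byte("bb"), []byte("cccccc")}, 5)
	fmt.Println(len(chunks)) // 3: "aaaa", "bb", and the oversized "cccccc"
}
```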

@@ -358,7 +358,7 @@ func TestDefaultUserAgent(t *testing.T) {
     plugin := &SumoLogic{
         URL: u.String(),
-        MaxRequstBodySize: Default().MaxRequstBodySize,
+        MaxRequestBodySize: Default().MaxRequestBodySize,
     }
     serializer := &carbon2.Serializer{
@@ -508,7 +508,7 @@ func TestMaxRequestBodySize(t *testing.T) {
                 s.URL = u.String()
                 // getMetrics returns metrics that serialized (using carbon2),
                 // uncompressed size is 43750B
-                s.MaxRequstBodySize = 43_749
+                s.MaxRequestBodySize = 43_749
                 return s
             },
             metrics: getMetrics(),
@@ -521,7 +521,7 @@
             plugin: func() *SumoLogic {
                 s := Default()
                 s.URL = u.String()
-                s.MaxRequstBodySize = 10_000
+                s.MaxRequestBodySize = 10_000
                 return s
             },
             metrics: getMetrics(),
@@ -534,7 +534,7 @@
             plugin: func() *SumoLogic {
                 s := Default()
                 s.URL = u.String()
-                s.MaxRequstBodySize = 5_000
+                s.MaxRequestBodySize = 5_000
                 return s
             },
             metrics: getMetrics(),
@@ -547,7 +547,7 @@
             plugin: func() *SumoLogic {
                 s := Default()
                 s.URL = u.String()
-                s.MaxRequstBodySize = 2_500
+                s.MaxRequestBodySize = 2_500
                 return s
             },
             metrics: getMetrics(),
@@ -560,7 +560,7 @@
             plugin: func() *SumoLogic {
                 s := Default()
                 s.URL = u.String()
-                s.MaxRequstBodySize = 1_000
+                s.MaxRequestBodySize = 1_000
                 return s
             },
             metrics: getMetrics(),
@@ -573,7 +573,7 @@
             plugin: func() *SumoLogic {
                 s := Default()
                 s.URL = u.String()
-                s.MaxRequstBodySize = 500
+                s.MaxRequestBodySize = 500
                 return s
             },
             metrics: getMetrics(),
@@ -586,7 +586,7 @@
             plugin: func() *SumoLogic {
                 s := Default()
                 s.URL = u.String()
-                s.MaxRequstBodySize = 300
+                s.MaxRequestBodySize = 300
                 return s
             },
             metrics: getMetrics(),

@@ -48,7 +48,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 ## The framing technique with which it is expected that messages are
 ## transported (default = "octet-counting"). Whether the messages come
-## using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
+## using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
 ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must
 ## be one of "octet-counting", "non-transparent".
 # framing = "octet-counting"
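For reference, octet-counting framing prefixes each message with its length in bytes followed by a space (RFC 5425, section 4.3.1). A minimal sketch of the framing step, not the plugin's actual code:

```go
package main

import "fmt"

// frameOctetCounting frames a syslog message as MSG-LEN SP SYSLOG-MSG,
// where MSG-LEN is the length of the message in bytes.
func frameOctetCounting(msg []byte) []byte {
	return append([]byte(fmt.Sprintf("%d ", len(msg))), msg...)
}

func main() {
	m := []byte("<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -")
	// The message is 59 bytes long, so the framed form starts with "59 ",
	// matching the expectation in the framing test further below.
	fmt.Printf("%s\n", frameOctetCounting(m))
}
```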

@@ -25,7 +25,7 @@
 ## The framing technique with which it is expected that messages are
 ## transported (default = "octet-counting"). Whether the messages come
-## using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
+## using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
 ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must
 ## be one of "octet-counting", "non-transparent".
 # framing = "octet-counting"

@@ -14,7 +14,7 @@ import (
     "github.com/leodido/go-syslog/v4/nontransparent"
 )
-func TestGetSyslogMessageWithFramingOctectCounting(t *testing.T) {
+func TestGetSyslogMessageWithFramingOctetCounting(t *testing.T) {
     // Init plugin
     s := newSyslog()
     require.NoError(t, s.Init())
@@ -35,7 +35,7 @@ func TestGetSyslogMessageWithFramingOctectCounting(t *testing.T) {
     messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage)
     require.NoError(t, err)
-    require.Equal(t, "59 <13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -", string(messageBytesWithFraming), "Incorrect Octect counting framing")
+    require.Equal(t, "59 <13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -", string(messageBytesWithFraming), "Incorrect Octet counting framing")
 }
 func TestGetSyslogMessageWithFramingNonTransparent(t *testing.T) {
@@ -60,7 +60,7 @@ func TestGetSyslogMessageWithFramingNonTransparent(t *testing.T) {
     messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage)
     require.NoError(t, err)
-    require.Equal(t, "<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -\n", string(messageBytesWithFraming), "Incorrect Octect counting framing")
+    require.Equal(t, "<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -\n", string(messageBytesWithFraming), "Incorrect Octet counting framing")
 }
 func TestGetSyslogMessageWithFramingNonTransparentNul(t *testing.T) {
@@ -86,7 +86,7 @@ func TestGetSyslogMessageWithFramingNonTransparentNul(t *testing.T) {
     messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage)
     require.NoError(t, err)
-    require.Equal(t, "<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -\x00", string(messageBytesWithFraming), "Incorrect Octect counting framing")
+    require.Equal(t, "<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -\x00", string(messageBytesWithFraming), "Incorrect Octet counting framing")
 }
 func TestSyslogWriteWithTcp(t *testing.T) {

@@ -219,7 +219,7 @@ not know in advance what are we going to send, for example, the name of a
 container to send its cpu and memory consumption.
 For this case Zabbix provides [low-level discovery][lld] that allow to create
-new items dinamically based on the parameters sent by the trap.
+new items dynamically based on the parameters sent by the trap.
 As explained previously, this output plugin will format the Zabbix key using
 the tags seen in the Telegraf metric following the LLD format.
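For orientation, a Zabbix LLD discovery value is a JSON list of macro-to-value objects, one per discovered entity, e.g. `{"data":[{"{#CONTAINER}":"web-1"},{"{#CONTAINER}":"db-1"}]}`; the `{#CONTAINER}` macro name here is illustrative only, not necessarily the key this plugin emits.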

@@ -255,7 +255,7 @@ func (p *Parser) createMetric(data map[string]interface{}, schema string) (teleg
     }
     var schemaObj map[string]interface{}
     if err := json.Unmarshal([]byte(schema), &schemaObj); err != nil {
-        return nil, fmt.Errorf("unmarshaling schema failed: %w", err)
+        return nil, fmt.Errorf("unmarshalling schema failed: %w", err)
     }
     if len(fields) == 0 {
         // A telegraf metric needs at least one field.

@@ -111,7 +111,7 @@ func (h *MetricHandler) AddBool(key []byte, value []byte) error {
     fk := unescape(key)
     fv, err := parseBoolBytes(value)
     if err != nil {
-        return errors.New("unparseable bool")
+        return errors.New("unparsable bool")
     }
     h.metric.AddField(fk, fv)
     return nil

@@ -104,7 +104,7 @@ func convertToParseError(input []byte, rawErr error) error {
 // Parser is an InfluxDB Line Protocol parser that implements the
 // parsers.Parser interface.
 type Parser struct {
-    InfluxTimestampPrecsion config.Duration `toml:"influx_timestamp_precision"`
+    InfluxTimestampPrecision config.Duration `toml:"influx_timestamp_precision"`
     DefaultTags map[string]string `toml:"-"`
     // If set to "series" a series machine will be initialized, defaults to regular machine
     Type string `toml:"-"`
@@ -189,7 +189,7 @@ func (p *Parser) applyDefaultTagsSingle(m telegraf.Metric) {
 }
 func (p *Parser) Init() error {
-    if err := p.SetTimePrecision(time.Duration(p.InfluxTimestampPrecsion)); err != nil {
+    if err := p.SetTimePrecision(time.Duration(p.InfluxTimestampPrecision)); err != nil {
         return err
     }

@@ -854,7 +854,7 @@ func TestParserTimestampPrecision(t *testing.T) {
     t.Run(tt.name, func(t *testing.T) {
         d := config.Duration(0)
         require.NoError(t, d.UnmarshalText([]byte(tt.precision)))
-        parser := Parser{InfluxTimestampPrecsion: d}
+        parser := Parser{InfluxTimestampPrecision: d}
         require.NoError(t, parser.Init())
         metrics, err := parser.Parse(tt.input)
@@ -869,7 +869,7 @@ func TestParserInvalidTimestampPrecision(t *testing.T) {
     d := config.Duration(0)
     for _, precision := range []string{"1h", "1d", "2s", "1m", "2ns"} {
         require.NoError(t, d.UnmarshalText([]byte(precision)))
-        parser := Parser{InfluxTimestampPrecsion: d}
+        parser := Parser{InfluxTimestampPrecision: d}
         require.ErrorContains(t, parser.Init(), "invalid time precision")
     }
 }

@@ -61,7 +61,7 @@ func (e *ParseError) Error() string {
 // Parser is an InfluxDB Line Protocol parser that implements the
 // parsers.Parser interface.
 type Parser struct {
-    InfluxTimestampPrecsion config.Duration `toml:"influx_timestamp_precision"`
+    InfluxTimestampPrecision config.Duration `toml:"influx_timestamp_precision"`
     DefaultTags map[string]string `toml:"-"`
     // If set to "series" a series machine will be initialized, defaults to regular machine
     Type string `toml:"-"`
@@ -157,13 +157,13 @@ func (p *Parser) Init() error {
         p.machine = NewMachine(p.handler)
     }
-    timeDuration := time.Duration(p.InfluxTimestampPrecsion)
+    timeDuration := time.Duration(p.InfluxTimestampPrecision)
     switch timeDuration {
     case 0:
     case time.Nanosecond, time.Microsecond, time.Millisecond, time.Second:
         p.SetTimePrecision(timeDuration)
     default:
-        return fmt.Errorf("invalid time precision: %d", p.InfluxTimestampPrecsion)
+        return fmt.Errorf("invalid time precision: %d", p.InfluxTimestampPrecision)
     }
     return nil

@@ -690,7 +690,7 @@ func TestParserTimestampPrecision(t *testing.T) {
     t.Run(tt.name, func(t *testing.T) {
         d := config.Duration(0)
         require.NoError(t, d.UnmarshalText([]byte(tt.precision)))
-        parser := Parser{InfluxTimestampPrecsion: d}
+        parser := Parser{InfluxTimestampPrecision: d}
         require.NoError(t, parser.Init())
         metrics, err := parser.Parse(tt.input)
@@ -705,7 +705,7 @@ func TestParserInvalidTimestampPrecision(t *testing.T) {
     d := config.Duration(0)
     for _, precision := range []string{"1h", "1d", "2s", "1m", "2ns"} {
         require.NoError(t, d.UnmarshalText([]byte(precision)))
-        parser := Parser{InfluxTimestampPrecsion: d}
+        parser := Parser{InfluxTimestampPrecision: d}
         require.ErrorContains(t, parser.Init(), "invalid time precision")
     }
 }

@@ -193,8 +193,8 @@ func parsePerfData(perfdatas string, timestamp time.Time) ([]telegraf.Metric, er
     metrics := make([]telegraf.Metric, 0)
     for _, unParsedPerf := range perfSplitRegExp.FindAllString(perfdatas, -1) {
-        trimedPerf := strings.TrimSpace(unParsedPerf)
-        perf := nagiosRegExp.FindStringSubmatch(trimedPerf)
+        trimmedPerf := strings.TrimSpace(unParsedPerf)
+        perf := nagiosRegExp.FindStringSubmatch(trimmedPerf)
         // verify at least `'label'=value[UOM];` existed
         if len(perf) < 3 {

@@ -560,18 +560,18 @@ func splitLastPathElement(query string) []string {
         return []string{}
     }
-    seperatorIdx := strings.LastIndex(query, "/")
-    if seperatorIdx < 0 {
+    separatorIdx := strings.LastIndex(query, "/")
+    if separatorIdx < 0 {
         query = "./" + query
-        seperatorIdx = 1
+        separatorIdx = 1
     }
     // For double slash we want to split at the first slash
-    if seperatorIdx > 0 && query[seperatorIdx-1] == byte('/') {
-        seperatorIdx--
+    if separatorIdx > 0 && query[separatorIdx-1] == byte('/') {
+        separatorIdx--
     }
-    base := query[:seperatorIdx]
+    base := query[:separatorIdx]
     if base == "" {
         base = "/"
     }
@@ -579,7 +579,7 @@ func splitLastPathElement(query string) []string {
     elements := make([]string, 0, 3)
     elements = append(elements, base)
-    offset := seperatorIdx
+    offset := separatorIdx
     if i := strings.Index(query[offset:], "::"); i >= 0 {
         // Check for axis operator
         offset += i

@@ -1,7 +1,7 @@
 # Noise Processor Plugin
 The _Noise_ processor is used to add noise to numerical field values. For each
-field a noise is generated using a defined probability densitiy function and
+field a noise is generated using a defined probability density function and
 added to the value. The function type can be configured as _Laplace_, _Gaussian_
 or _Uniform_. Depending on the function, various parameters need to be
 configured:
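To make the Laplace case concrete, noise can be drawn with inverse transform sampling and added to the field value. This is a hypothetical sketch, not the plugin's implementation, and the mu and b parameter names are assumptions:

```go
package main

import (
	"fmt"
	"math"
	"math/rand"
)

// laplaceNoise draws one sample from a Laplace distribution with location
// mu and scale b via inverse transform sampling.
// (For u exactly -0.5 this returns -Inf; a production version would resample.)
func laplaceNoise(mu, b float64) float64 {
	u := rand.Float64() - 0.5 // uniform on [-0.5, 0.5)
	if u < 0 {
		return mu + b*math.Log(1+2*u)
	}
	return mu - b*math.Log(1-2*u)
}

func main() {
	value := 42.0
	fmt.Println(value + laplaceNoise(0, 1)) // the noisy field value
}
```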

@@ -39,7 +39,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 # source_timestamp_timezone = ""
 ## Target timestamp format
-## This defines the destination timestmap format. It also can accept either
+## This defines the destination timestamp format. It also can accept either
 ## `unix`, `unix_ms`, `unix_us`, `unix_ns`, or a time in Go "reference time".
 destination_timestamp_format = ""

@@ -22,7 +22,7 @@
 # source_timestamp_timezone = ""
 ## Target timestamp format
-## This defines the destination timestmap format. It also can accept either
+## This defines the destination timestamp format. It also can accept either
 ## `unix`, `unix_ms`, `unix_us`, `unix_ns`, or a time in Go "reference time".
 destination_timestamp_format = ""

@@ -17,7 +17,7 @@ func TestInitFail(t *testing.T) {
     require.ErrorContains(t, plugin.Init(), "id missing")
 }
-func TestPathNonExistant(t *testing.T) {
+func TestPathNonExistent(t *testing.T) {
     plugin := &Docker{
         ID: "non_existent_path_test",
         Path: "non/existent/path",
@@ -127,7 +127,7 @@ func TestResolverInvalid(t *testing.T) {
     require.ErrorContains(t, err, "cannot read the secret's value under the directory:")
 }
-func TestGetNonExistant(t *testing.T) {
+func TestGetNonExistent(t *testing.T) {
     testdir, err := filepath.Abs("testdata")
     require.NoError(t, err, "testdata cannot be found")

@@ -158,7 +158,7 @@ func TestResolverInvalid(t *testing.T) {
     require.Error(t, err)
 }
-func TestGetNonExistant(t *testing.T) {
+func TestGetNonExistent(t *testing.T) {
     secretKey := "a secret"
     secretVal := "I won't tell"

@@ -160,7 +160,7 @@ func TestResolverInvalid(t *testing.T) {
     require.ErrorContains(t, err, "cannot read the secret's value:")
 }
-func TestGetNonExistant(t *testing.T) {
+func TestGetNonExistent(t *testing.T) {
     getSystemdVersion = getSystemdVersionMin
     t.Setenv("CREDENTIALS_DIRECTORY", "testdata")

@@ -12,7 +12,7 @@ func TestFindHash(t *testing.T) {
     tests := []struct {
         testFile string
         version string
-        expectedHases map[string]string
+        expectedHashes map[string]string
     }{
         {
             "testdata/godev_patch.html",
@@ -41,6 +41,6 @@ func TestFindHash(t *testing.T) {
         hashes, err := findHashes(bytes.NewReader(b), test.version)
         require.NoError(t, err)
-        require.Equal(t, test.expectedHases, hashes)
+        require.Equal(t, test.expectedHashes, hashes)
     }
 }