diff --git a/CHANGELOG-1.13.md b/CHANGELOG-1.13.md index 03627717d..a55e02ed9 100644 --- a/CHANGELOG-1.13.md +++ b/CHANGELOG-1.13.md @@ -622,7 +622,7 @@ - [#5261](https://github.com/influxdata/telegraf/pull/5261): Fix arithmetic overflow in sqlserver input. - [#5194](https://github.com/influxdata/telegraf/issues/5194): Fix latest metrics not sent first when output fails. -- [#5285](https://github.com/influxdata/telegraf/issues/5285): Fix amqp_consumer stops consuming when it receives unparseable messages. +- [#5285](https://github.com/influxdata/telegraf/issues/5285): Fix amqp_consumer stops consuming when it receives unparsable messages. - [#5281](https://github.com/influxdata/telegraf/issues/5281): Fix prometheus input not detecting added and removed pods. - [#5215](https://github.com/influxdata/telegraf/issues/5215): Remove userinfo from cluster tag in couchbase. - [#5298](https://github.com/influxdata/telegraf/issues/5298): Fix internal_write buffer_size not reset on timed writes. @@ -917,7 +917,7 @@ - [#4498](https://github.com/influxdata/telegraf/pull/4498): Keep leading whitespace for messages in syslog input. - [#4470](https://github.com/influxdata/telegraf/issues/4470): Skip bad entries on interrupt input. - [#4501](https://github.com/influxdata/telegraf/issues/4501): Preserve metric type when using filters in output plugins. -- [#3794](https://github.com/influxdata/telegraf/issues/3794): Fix error message if URL is unparseable in influxdb output. +- [#3794](https://github.com/influxdata/telegraf/issues/3794): Fix error message if URL is unparsable in influxdb output. - [#4059](https://github.com/influxdata/telegraf/issues/4059): Use explicit zpool properties to fix parse error on FreeBSD 11.2. - [#4514](https://github.com/influxdata/telegraf/pull/4514): Lock buffer when adding metrics. 
diff --git a/config/config_test.go b/config/config_test.go index 5ed0b5b37..4f22ef1e6 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -471,7 +471,7 @@ func TestConfig_InlineTables(t *testing.T) { require.NoError(t, c.LoadConfig("./testdata/inline_table.toml")) require.Len(t, c.Outputs, 2) - output, ok := c.Outputs[1].Output.(*MockupOuputPlugin) + output, ok := c.Outputs[1].Output.(*MockupOutputPlugin) require.True(t, ok) require.Equal(t, map[string]string{"Authorization": "Token test", "Content-Type": "application/json"}, output.Headers) require.Equal(t, []string{"org_id"}, c.Outputs[0].Config.Filter.TagInclude) @@ -484,7 +484,7 @@ func TestConfig_SliceComment(t *testing.T) { require.NoError(t, c.LoadConfig("./testdata/slice_comment.toml")) require.Len(t, c.Outputs, 1) - output, ok := c.Outputs[0].Output.(*MockupOuputPlugin) + output, ok := c.Outputs[0].Output.(*MockupOutputPlugin) require.True(t, ok) require.Equal(t, []string{"test"}, output.Scopes) } @@ -510,7 +510,7 @@ func TestConfig_AzureMonitorNamespacePrefix(t *testing.T) { expectedPrefix := []string{"Telegraf/", ""} for i, plugin := range c.Outputs { - output, ok := plugin.Output.(*MockupOuputPlugin) + output, ok := plugin.Output.(*MockupOutputPlugin) require.True(t, ok) require.Equal(t, expectedPrefix[i], output.NamespacePrefix) } @@ -1453,7 +1453,7 @@ func (m *MockupProcessorPluginParserFunc) SetParserFunc(pf telegraf.ParserFunc) } /*** Mockup OUTPUT plugin for testing to avoid cyclic dependencies ***/ -type MockupOuputPlugin struct { +type MockupOutputPlugin struct { URL string `toml:"url"` Headers map[string]string `toml:"headers"` Scopes []string `toml:"scopes"` @@ -1462,16 +1462,16 @@ type MockupOuputPlugin struct { tls.ClientConfig } -func (m *MockupOuputPlugin) Connect() error { +func (m *MockupOutputPlugin) Connect() error { return nil } -func (m *MockupOuputPlugin) Close() error { +func (m *MockupOutputPlugin) Close() error { return nil } -func (m *MockupOuputPlugin) SampleConfig() string { +func (m *MockupOutputPlugin) SampleConfig() string { return "Mockup test output plugin" } -func (m *MockupOuputPlugin) Write(_ []telegraf.Metric) error { +func (m *MockupOutputPlugin) Write(_ []telegraf.Metric) error { return nil } @@ -1624,10 +1624,10 @@ func init() { // Register the mockup output plugin for the required names outputs.Add("azure_monitor", func() telegraf.Output { - return &MockupOuputPlugin{NamespacePrefix: "Telegraf/"} + return &MockupOutputPlugin{NamespacePrefix: "Telegraf/"} }) outputs.Add("http", func() telegraf.Output { - return &MockupOuputPlugin{} + return &MockupOutputPlugin{} }) outputs.Add("serializer_test_new", func() telegraf.Output { return &MockupOutputPluginSerializerNew{} diff --git a/config/plugin_id.go b/config/plugin_id.go index 37fdc5a75..51f5c2139 100644 --- a/config/plugin_id.go +++ b/config/plugin_id.go @@ -32,19 +32,19 @@ func processTable(parent string, table *ast.Table) ([]keyValuePair, error) { }) case *ast.Table: key := prefix + k - childs, err := processTable(key, v) + children, err := processTable(key, v) if err != nil { return nil, fmt.Errorf("parsing table for %q failed: %w", key, err) } - options = append(options, childs...) + options = append(options, children...) case []*ast.Table: for i, t := range v { key := fmt.Sprintf("%s#%d.%s", prefix, i, k) - childs, err := processTable(key, t) + children, err := processTable(key, t) if err != nil { return nil, fmt.Errorf("parsing table for %q #%d failed: %w", key, i, err) } - options = append(options, childs...) 
+ options = append(options, children...) } default: return nil, fmt.Errorf("unknown node type %T in key %q", value, prefix+k) diff --git a/docs/developers/REVIEWS.md b/docs/developers/REVIEWS.md index d06ace16b..1cffe186e 100644 --- a/docs/developers/REVIEWS.md +++ b/docs/developers/REVIEWS.md @@ -70,7 +70,7 @@ In case you still want to continue with the PR, feel free to reopen it. ## Linting -Each pull request will have the appropriate linters checking the files for any common mistakes. The github action Super Linter is used: [super-pinter](https://github.com/github/super-linter). If it is failing you can click on the action and read the logs to figure out the issue. You can also run the github action locally by following these instructions: [run-linter-locally.md](https://github.com/github/super-linter/blob/main/docs/run-linter-locally.md). You can find more information on each of the linters in the super linter readme. +Each pull request will have the appropriate linters checking the files for any common mistakes. The GitHub Action Super Linter is used: [super-linter](https://github.com/github/super-linter). If it is failing, you can click on the action and read the logs to figure out the issue. You can also run the GitHub Action locally by following these instructions: [run-linter-locally.md](https://github.com/github/super-linter/blob/main/docs/run-linter-locally.md). You can find more information on each of the linters in the Super Linter readme. ## Testing diff --git a/plugins/common/kafka/sasl.go b/plugins/common/kafka/sasl.go index fc5f1f8e9..f82a34940 100644 --- a/plugins/common/kafka/sasl.go +++ b/plugins/common/kafka/sasl.go @@ -11,7 +11,7 @@ import ( type SASLAuth struct { SASLUsername config.Secret `toml:"sasl_username"` SASLPassword config.Secret `toml:"sasl_password"` - SASLExtentions map[string]string `toml:"sasl_extensions"` + SASLExtensions map[string]string `toml:"sasl_extensions"` SASLMechanism string `toml:"sasl_mechanism"` SASLVersion *int `toml:"sasl_version"` @@ -92,7 +92,7 @@ func (k *SASLAuth) Token() (*sarama.AccessToken, error) { defer token.Destroy() return &sarama.AccessToken{ Token: token.String(), - Extensions: k.SASLExtentions, + Extensions: k.SASLExtensions, }, nil } diff --git a/plugins/inputs/aliyuncms/aliyuncms.go b/plugins/inputs/aliyuncms/aliyuncms.go index 4d9846427..dfc3cdfac 100644 --- a/plugins/inputs/aliyuncms/aliyuncms.go +++ b/plugins/inputs/aliyuncms/aliyuncms.go @@ -374,8 +374,8 @@ func parseTag(tagSpec string, data interface{}) (tagKey string, tagValue string, func (s *AliyunCMS) prepareTagsAndDimensions(metric *Metric) { var ( - newData bool - defaulTags = []string{"RegionId:RegionId"} + newData bool + defaultTags = []string{"RegionId:RegionId"} ) if s.dt == nil { //Discovery is not activated @@ -411,7 +411,7 @@ L: //Start filing tags //Remove old value if exist delete(metric.discoveryTags, instanceID) - metric.discoveryTags[instanceID] = make(map[string]string, len(metric.TagsQueryPath)+len(defaulTags)) + metric.discoveryTags[instanceID] = make(map[string]string, len(metric.TagsQueryPath)+len(defaultTags)) for _, tagQueryPath := range metric.TagsQueryPath { tagKey, tagValue, err := parseTag(tagQueryPath, elem) @@ -428,7 +428,7 @@ L: } //Adding default tags if not already there - for _, defaultTagQP := range defaulTags { + for _, defaultTagQP := range defaultTags { tagKey, tagValue, err := parseTag(defaultTagQP, elem) if err != nil { diff --git a/plugins/inputs/aliyuncms/aliyuncms_test.go b/plugins/inputs/aliyuncms/aliyuncms_test.go index
cfa5b7821..34115ec47 100644 --- a/plugins/inputs/aliyuncms/aliyuncms_test.go +++ b/plugins/inputs/aliyuncms/aliyuncms_test.go @@ -391,10 +391,10 @@ func TestGather(t *testing.T) { //test table: tests := []struct { - name string - hasMeasurment bool - metricNames []string - expected []telegraf.Metric + name string + hasMeasurement bool + metricNames []string + expected []telegraf.Metric }{ { name: "Empty data point", @@ -408,9 +408,9 @@ func TestGather(t *testing.T) { }, }, { - name: "Data point with fields & tags", - hasMeasurment: true, - metricNames: []string{"InstanceActiveConnection"}, + name: "Data point with fields & tags", + hasMeasurement: true, + metricNames: []string{"InstanceActiveConnection"}, expected: []telegraf.Metric{ testutil.MustMetric( "aliyuncms_acs_slb_dashboard", @@ -434,8 +434,8 @@ func TestGather(t *testing.T) { var acc testutil.Accumulator plugin.Metrics[0].MetricNames = tt.metricNames require.Empty(t, acc.GatherError(plugin.Gather)) - require.Equal(t, acc.HasMeasurement("aliyuncms_acs_slb_dashboard"), tt.hasMeasurment) - if tt.hasMeasurment { + require.Equal(t, acc.HasMeasurement("aliyuncms_acs_slb_dashboard"), tt.hasMeasurement) + if tt.hasMeasurement { acc.AssertContainsTaggedFields(t, "aliyuncms_acs_slb_dashboard", tt.expected[0].Fields(), tt.expected[0].Tags()) } }) diff --git a/plugins/inputs/amd_rocm_smi/README.md b/plugins/inputs/amd_rocm_smi/README.md index 2e3c6f436..14493bd4f 100644 --- a/plugins/inputs/amd_rocm_smi/README.md +++ b/plugins/inputs/amd_rocm_smi/README.md @@ -90,6 +90,6 @@ of versions and small set of GPUs. Currently the latest ROCm version tested is information provided by `rocm-smi` can vary so that some fields would start/stop appearing in the metrics upon updates. The `rocm-smi` JSON output is not perfectly homogeneous and is possibly changing in the future, hence parsing and -unmarshaling can start failing upon updating ROCm. +unmarshalling can start failing upon updating ROCm. Inspired by the current state of the art of the `nvidia-smi` plugin. diff --git a/plugins/inputs/amqp_consumer/amqp_consumer_test.go b/plugins/inputs/amqp_consumer/amqp_consumer_test.go index 957addaab..481d9fa3e 100644 --- a/plugins/inputs/amqp_consumer/amqp_consumer_test.go +++ b/plugins/inputs/amqp_consumer/amqp_consumer_test.go @@ -122,11 +122,11 @@ func TestIntegration(t *testing.T) { "test,source=B value=1i 1712780301000000100", "test,source=C value=2i 1712780301000000200", } - expexted := make([]telegraf.Metric, 0, len(metrics)) + expected := make([]telegraf.Metric, 0, len(metrics)) for _, x := range metrics { m, err := parser.Parse([]byte(x)) require.NoError(t, err) - expexted = append(expexted, m...) + expected = append(expected, m...) 
} // Start the plugin @@ -141,12 +141,12 @@ func TestIntegration(t *testing.T) { // Verify that the metrics were actually written require.Eventually(t, func() bool { - return acc.NMetrics() >= uint64(len(expexted)) + return acc.NMetrics() >= uint64(len(expected)) }, 3*time.Second, 100*time.Millisecond) client.close() plugin.Stop() - testutil.RequireMetricsEqual(t, expexted, acc.GetTelegrafMetrics()) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) } func TestStartupErrorBehaviorError(t *testing.T) { @@ -341,11 +341,11 @@ func TestStartupErrorBehaviorRetry(t *testing.T) { "test,source=B value=1i 1712780301000000100", "test,source=C value=2i 1712780301000000200", } - expexted := make([]telegraf.Metric, 0, len(metrics)) + expected := make([]telegraf.Metric, 0, len(metrics)) for _, x := range metrics { m, err := parser.Parse([]byte(x)) require.NoError(t, err) - expexted = append(expexted, m...) + expected = append(expected, m...) } // Starting the plugin should succeed as we will retry to startup later @@ -374,12 +374,12 @@ func TestStartupErrorBehaviorRetry(t *testing.T) { // Verify that the metrics were actually collected require.Eventually(t, func() bool { - return acc.NMetrics() >= uint64(len(expexted)) + return acc.NMetrics() >= uint64(len(expected)) }, 3*time.Second, 100*time.Millisecond) client.close() plugin.Stop() - testutil.RequireMetricsEqual(t, expexted, acc.GetTelegrafMetrics()) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) } type producer struct { diff --git a/plugins/inputs/aurora/aurora_test.go b/plugins/inputs/aurora/aurora_test.go index 7d297067a..6eeaa93f4 100644 --- a/plugins/inputs/aurora/aurora_test.go +++ b/plugins/inputs/aurora/aurora_test.go @@ -116,7 +116,7 @@ func TestAurora(t *testing.T) { }, }, { - name: "float64 unparseable", + name: "float64 unparsable", leaderhealth: func(_ *testing.T, w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) }, @@ -136,7 +136,7 @@ func TestAurora(t *testing.T) { }, }, { - name: "int64 unparseable", + name: "int64 unparsable", leaderhealth: func(_ *testing.T, w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) }, diff --git a/plugins/inputs/bind/bind_test.go b/plugins/inputs/bind/bind_test.go index db2358239..1598d04d4 100644 --- a/plugins/inputs/bind/bind_test.go +++ b/plugins/inputs/bind/bind_test.go @@ -617,7 +617,7 @@ func TestBindXmlStatsV3(t *testing.T) { }) } -func TestBindUnparseableURL(t *testing.T) { +func TestBindUnparsableURL(t *testing.T) { b := Bind{ Urls: []string{"://example.com"}, } diff --git a/plugins/inputs/bond/bond_test.go b/plugins/inputs/bond/bond_test.go index 38428deb4..18d5c71ac 100644 --- a/plugins/inputs/bond/bond_test.go +++ b/plugins/inputs/bond/bond_test.go @@ -108,7 +108,7 @@ Partner Churned Count: 0 Slave Interface: eth1 MII Status: down Speed: Unknown -Duplex: Unkown +Duplex: Unknown Link Failure Count: 1 Permanent HW addr: 3c:ec:ef:5e:71:59 Slave queue ID: 0 diff --git a/plugins/inputs/ctrlx_datalayer/README.md b/plugins/inputs/ctrlx_datalayer/README.md index 2f5ea35c1..30ae8d72c 100644 --- a/plugins/inputs/ctrlx_datalayer/README.md +++ b/plugins/inputs/ctrlx_datalayer/README.md @@ -96,7 +96,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## The interval that defines the fastest rate at which the node values should be sampled and values captured. (default: 1s) ## The sampling frequency should be adjusted to the dynamics of the signal to be sampled. 
- ## Higher sampling frequence increases load on ctrlX Data Layer. + ## Higher sampling frequencies increase load on ctrlX Data Layer. ## The sampling frequency can be higher, than the publish interval. Captured samples are put in a queue and sent in publish interval. ## Note: The minimum sampling interval can be overruled by a global setting in the ctrlX Data Layer configuration ('datalayer/subscriptions/settings'). # sampling_interval = "1s" diff --git a/plugins/inputs/ctrlx_datalayer/sample.conf b/plugins/inputs/ctrlx_datalayer/sample.conf index aeccc3b67..df872b119 100644 --- a/plugins/inputs/ctrlx_datalayer/sample.conf +++ b/plugins/inputs/ctrlx_datalayer/sample.conf @@ -74,7 +74,7 @@ ## The interval that defines the fastest rate at which the node values should be sampled and values captured. (default: 1s) ## The sampling frequency should be adjusted to the dynamics of the signal to be sampled. - ## Higher sampling frequence increases load on ctrlX Data Layer. + ## Higher sampling frequencies increase load on ctrlX Data Layer. ## The sampling frequency can be higher, than the publish interval. Captured samples are put in a queue and sent in publish interval. ## Note: The minimum sampling interval can be overruled by a global setting in the ctrlX Data Layer configuration ('datalayer/subscriptions/settings'). # sampling_interval = "1s" diff --git a/plugins/inputs/google_cloud_storage/google_cloud_storage.go b/plugins/inputs/google_cloud_storage/google_cloud_storage.go index e1539fd76..2090715c0 100644 --- a/plugins/inputs/google_cloud_storage/google_cloud_storage.go +++ b/plugins/inputs/google_cloud_storage/google_cloud_storage.go @@ -94,7 +94,7 @@ func (gcs *GCS) Gather(acc telegraf.Accumulator) error { name = attrs.Name - if !gcs.shoudIgnore(name) { + if !gcs.shouldIgnore(name) { if err := gcs.processMeasurementsInObject(name, bucket, acc); err != nil { gcs.Log.Errorf("Could not process object %q in bucket %q: %v", name, bucketName, err) acc.AddError(fmt.Errorf("COULD NOT PROCESS OBJECT %q IN BUCKET %q: %w", name, bucketName, err)) @@ -119,7 +119,7 @@ func (gcs *GCS) createQuery() storage.Query { return storage.Query{Prefix: gcs.Prefix} } -func (gcs *GCS) shoudIgnore(name string) bool { +func (gcs *GCS) shouldIgnore(name string) bool { return gcs.offSet.OffSet == name || gcs.OffsetKey == name } @@ -159,7 +159,7 @@ func (gcs *GCS) reachedThreshlod(processed int) bool { } func (gcs *GCS) updateOffset(bucket *storage.BucketHandle, name string) error { - if gcs.shoudIgnore(name) { + if gcs.shouldIgnore(name) { return nil } diff --git a/plugins/inputs/google_cloud_storage/google_cloud_storage_test.go b/plugins/inputs/google_cloud_storage/google_cloud_storage_test.go index 8d9fead51..1e85e5366 100644 --- a/plugins/inputs/google_cloud_storage/google_cloud_storage_test.go +++ b/plugins/inputs/google_cloud_storage/google_cloud_storage_test.go @@ -150,7 +150,7 @@ func TestRunGatherIteratiosnWithLimit(t *testing.T) { } func TestRunGatherIterationWithPages(t *testing.T) { - srv := stateFulGCSServer(t) + srv := stateFullGCSServer(t) defer srv.Close() emulatorSetEnv(t, srv) @@ -280,7 +280,7 @@ func startMultipleItemGCSServer(t *testing.T) *httptest.Server { return srv } -func stateFulGCSServer(t *testing.T) *httptest.Server { +func stateFullGCSServer(t *testing.T) *httptest.Server { srv := httptest.NewServer(http.NotFoundHandler()) firstElement := parseJSONFromFile(t, "testdata/first_file_listing.json") diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index
8fd26a19f..13fd0328a 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -251,7 +251,7 @@ type TransactionStats struct { TransCheckpoints int64 `bson:"transaction checkpoints"` } -// WTConnectionStats stores statistices on wiredTiger connections +// WTConnectionStats stores statistics on wiredTiger connections type WTConnectionStats struct { FilesCurrentlyOpen int64 `bson:"files currently open"` } diff --git a/plugins/inputs/mysql/v2/convert_test.go b/plugins/inputs/mysql/v2/convert_test.go index 8276c9a61..467a386c9 100644 --- a/plugins/inputs/mysql/v2/convert_test.go +++ b/plugins/inputs/mysql/v2/convert_test.go @@ -47,7 +47,7 @@ func TestConvertGlobalStatus(t *testing.T) { } } -func TestCovertGlobalVariables(t *testing.T) { +func TestConvertGlobalVariables(t *testing.T) { tests := []struct { name string key string diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index 882208a4d..e617bcfe7 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -188,7 +188,7 @@ func TestPhpFpmTimeout_From_Fcgi(t *testing.T) { } // TestPhpFpmCrashWithTimeout_From_Fcgi show issue #15175: when timeout is enabled -// and nothing is listenning on specified port, a nil pointer was dereferenced. +// and nothing is listening on the specified port, a nil pointer was dereferenced. func TestPhpFpmCrashWithTimeout_From_Fcgi(t *testing.T) { tcp, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err, "Cannot initialize test server") diff --git a/plugins/inputs/ping/ping_windows.go b/plugins/inputs/ping/ping_windows.go index 2e9d595df..09c019a3e 100644 --- a/plugins/inputs/ping/ping_windows.go +++ b/plugins/inputs/ping/ping_windows.go @@ -99,10 +99,10 @@ func (p *Ping) args(url string) []string { // It returns (, , , , , ) func processPingOutput(out string) (statistics, error) { // So find a line contain 3 numbers except reply lines - var statsLine, aproxs []string = nil, nil + var statsLine, approxs []string = nil, nil err := errors.New("fatal error processing ping output") stat := regexp.MustCompile(`=\W*(\d+)\D*=\W*(\d+)\D*=\W*(\d+)`) - aprox := regexp.MustCompile(`=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms`) + approx := regexp.MustCompile(`=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms`) tttLine := regexp.MustCompile(`TTL=\d+`) lines := strings.Split(out, "\n") var replyReceived = 0 @@ -113,8 +113,8 @@ func processPingOutput(out string) (statistics, error) { if statsLine == nil { statsLine = stat.FindStringSubmatch(line) } - if statsLine != nil && aproxs == nil { - aproxs = aprox.FindStringSubmatch(line) + if statsLine != nil && approxs == nil { + approxs = approx.FindStringSubmatch(line) } } } @@ -147,19 +147,19 @@ func processPingOutput(out string) (statistics, error) { stats.replyReceived = replyReceived stats.packetsReceived = packetsReceived - // aproxs data should contain 4 members: entireExpression + ( min, max, avg ) - if len(aproxs) != 4 { + // approxs data should contain 4 members: entireExpression + ( min, max, avg ) + if len(approxs) != 4 { return stats, err } - min, err := strconv.Atoi(aproxs[1]) + min, err := strconv.Atoi(approxs[1]) if err != nil { return stats, err } - max, err := strconv.Atoi(aproxs[2]) + max, err := strconv.Atoi(approxs[2]) if err != nil { return stats, err } - avg, err := strconv.Atoi(aproxs[3]) + avg, err := strconv.Atoi(approxs[3]) if err != nil { return statistics{}, err } diff --git a/plugins/inputs/procstat/README.md
b/plugins/inputs/procstat/README.md index 5f91f4004..f234c5836 100644 --- a/plugins/inputs/procstat/README.md +++ b/plugins/inputs/procstat/README.md @@ -97,7 +97,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. # # win_service = [] # # ## Process filters, multiple are allowed - # ## Regular expressions to use for matching againt the full command + # ## Regular expressions to use for matching against the full command # # patterns = ['.*'] # ## List of users owning the process (wildcards are supported) # # users = ['*'] diff --git a/plugins/inputs/procstat/native_finder_test.go b/plugins/inputs/procstat/native_finder_test.go index eaccd4631..1e6c6d84a 100644 --- a/plugins/inputs/procstat/native_finder_test.go +++ b/plugins/inputs/procstat/native_finder_test.go @@ -56,9 +56,9 @@ func TestChildPattern(t *testing.T) { parent, err := finder.Pattern(parentName) require.NoError(t, err) require.Len(t, parent, 1) - childs, err := finder.Children(parent[0]) + children, err := finder.Children(parent[0]) require.NoError(t, err) - require.ElementsMatch(t, expected, childs) + require.ElementsMatch(t, expected, children) } func TestGather_RealPatternIntegration(t *testing.T) { diff --git a/plugins/inputs/procstat/sample.conf b/plugins/inputs/procstat/sample.conf index 20e003d5b..0977412a8 100644 --- a/plugins/inputs/procstat/sample.conf +++ b/plugins/inputs/procstat/sample.conf @@ -68,7 +68,7 @@ # # win_service = [] # # ## Process filters, multiple are allowed - # ## Regular expressions to use for matching againt the full command + # ## Regular expressions to use for matching against the full command # # patterns = ['.*'] # ## List of users owning the process (wildcards are supported) # # users = ['*'] diff --git a/plugins/inputs/statsd/datadog.go b/plugins/inputs/statsd/datadog.go index dc4be4f99..88612649e 100644 --- a/plugins/inputs/statsd/datadog.go +++ b/plugins/inputs/statsd/datadog.go @@ -30,7 +30,7 @@ func (s *Statsd) parseEventMessage(now time.Time, message string, defaultHostnam // |p:priority // |h:hostname // |t:alert_type - // |s:source_type_nam + // |s:source_type_name // |#tag1,tag2 // ] // diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index a99c4c277..4694fc9d9 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -311,7 +311,7 @@ func (e *Endpoint) startDiscovery(ctx context.Context) { }() } -func (e *Endpoint) initalDiscovery(ctx context.Context) { +func (e *Endpoint) initialDiscovery(ctx context.Context) { err := e.discover(ctx) if err != nil && !errors.Is(err, context.Canceled) { e.log.Errorf("Discovery for %s: %s", e.URL.Host, err.Error()) @@ -347,7 +347,7 @@ func (e *Endpoint) init(ctx context.Context) error { if time.Duration(e.Parent.ObjectDiscoveryInterval) > 0 { e.Parent.Log.Debug("Running initial discovery") - e.initalDiscovery(ctx) + e.initialDiscovery(ctx) } e.initialized = true return nil diff --git a/plugins/inputs/webhooks/rollbar/README.md b/plugins/inputs/webhooks/rollbar/README.md index 136516c8e..b759dd0bd 100644 --- a/plugins/inputs/webhooks/rollbar/README.md +++ b/plugins/inputs/webhooks/rollbar/README.md @@ -24,8 +24,8 @@ See [webhook doc](https://rollbar.com/docs/webhooks/) * 'event' = `event.event_name` string * 'environment' = `event.data.item.environment` string * 'project_id = `event.data.item.project_id` int -* 'language' = `event.data.item.last_occurence.language` string -* 'level' = `event.data.item.last_occurence.level` string +* 'language' = 
`event.data.item.last_occurrence.language` string +* 'level' = `event.data.item.last_occurrence.level` string **Fields:** diff --git a/plugins/inputs/webhooks/rollbar/rollbar_webhooks_events.go b/plugins/inputs/webhooks/rollbar/rollbar_webhooks_events.go index ad5c54a03..36677759a 100644 --- a/plugins/inputs/webhooks/rollbar/rollbar_webhooks_events.go +++ b/plugins/inputs/webhooks/rollbar/rollbar_webhooks_events.go @@ -11,16 +11,16 @@ type DummyEvent struct { EventName string `json:"event_name"` } -type NewItemDataItemLastOccurence struct { +type NewItemDataItemLastOccurrence struct { Language string `json:"language"` Level string `json:"level"` } type NewItemDataItem struct { - ID int `json:"id"` - Environment string `json:"environment"` - ProjectID int `json:"project_id"` - LastOccurence NewItemDataItemLastOccurence `json:"last_occurrence"` + ID int `json:"id"` + Environment string `json:"environment"` + ProjectID int `json:"project_id"` + LastOccurrence NewItemDataItemLastOccurrence `json:"last_occurrence"` } type NewItemData struct { @@ -37,8 +37,8 @@ func (ni *NewItem) Tags() map[string]string { "event": ni.EventName, "environment": ni.Data.Item.Environment, "project_id": strconv.Itoa(ni.Data.Item.ProjectID), - "language": ni.Data.Item.LastOccurence.Language, - "level": ni.Data.Item.LastOccurence.Level, + "language": ni.Data.Item.LastOccurrence.Language, + "level": ni.Data.Item.LastOccurrence.Level, } } diff --git a/plugins/inputs/win_eventlog/README.md b/plugins/inputs/win_eventlog/README.md index e5aff002b..7594f6467 100644 --- a/plugins/inputs/win_eventlog/README.md +++ b/plugins/inputs/win_eventlog/README.md @@ -157,7 +157,7 @@ In case you see a `Collection took longer than expected` warning, there might be a burst of events logged and the API is not able to deliver them fast enough to complete processing within the specified interval. Tweaking the `event_batch_size` setting might help to mitigate the issue. -The said warning does not indicate data-loss, but you should investige the +This warning does not indicate data loss, but you should investigate the amount of events you log. ## Metrics diff --git a/plugins/outputs/azure_data_explorer/README.md b/plugins/outputs/azure_data_explorer/README.md index afe6d2858..994f6fad1 100644 --- a/plugins/outputs/azure_data_explorer/README.md +++ b/plugins/outputs/azure_data_explorer/README.md @@ -126,7 +126,7 @@ Refer the query below to check if streaming is enabled .show database policy streamingingestion ``` -## Authentiation +## Authentication ### Supported Authentication Methods diff --git a/plugins/outputs/loki/loki_test.go b/plugins/outputs/loki/loki_test.go index 08c2acd7d..49a96914e 100644 --- a/plugins/outputs/loki/loki_test.go +++ b/plugins/outputs/loki/loki_test.go @@ -482,12 +482,12 @@ func TestSanitizeLabelName(t *testing.T) { expected: "foobar", }, { - name: "replace invalid first charachter", + name: "replace invalid first character", input: "3foobar", expected: "_foobar", }, { - name: "replace invalid later charachter", + name: "replace invalid later character", input: "foobar.foobar", expected: "foobar_foobar", }, diff --git a/plugins/outputs/sensu/README.md b/plugins/outputs/sensu/README.md index c3bae7a76..13ffa157e 100644 --- a/plugins/outputs/sensu/README.md +++ b/plugins/outputs/sensu/README.md @@ -84,7 +84,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## ## Check specification ## The check name is the name to give the Sensu check associated with the event - ## created.
This maps to check.metatadata.name in the event. + ## created. This maps to check.metadata.name in the event. [outputs.sensu.check] name = "telegraf" diff --git a/plugins/outputs/sensu/sample.conf b/plugins/outputs/sensu/sample.conf index ac5580cd7..3c994da6f 100644 --- a/plugins/outputs/sensu/sample.conf +++ b/plugins/outputs/sensu/sample.conf @@ -67,7 +67,7 @@ ## ## Check specification ## The check name is the name to give the Sensu check associated with the event - ## created. This maps to check.metatadata.name in the event. + ## created. This maps to check.metadata.name in the event. [outputs.sensu.check] name = "telegraf" diff --git a/plugins/outputs/stackdriver/stackdriver.go b/plugins/outputs/stackdriver/stackdriver.go index 24b849016..6a6588b6b 100644 --- a/plugins/outputs/stackdriver/stackdriver.go +++ b/plugins/outputs/stackdriver/stackdriver.go @@ -49,7 +49,7 @@ type Stackdriver struct { counterCache *counterCache filterCounter filter.Filter filterGauge filter.Filter - fitlerHistogram filter.Filter + filterHistogram filter.Filter } const ( @@ -100,7 +100,7 @@ func (s *Stackdriver) Init() error { if err != nil { return fmt.Errorf("creating gauge filter failed: %w", err) } - s.fitlerHistogram, err = filter.Compile(s.MetricHistogram) + s.filterHistogram, err = filter.Compile(s.MetricHistogram) if err != nil { return fmt.Errorf("creating histogram filter failed: %w", err) } @@ -227,7 +227,7 @@ func (s *Stackdriver) sendBatch(batch []telegraf.Metric) error { if s.filterGauge != nil && s.filterGauge.Match(m.Name()) { metricType = telegraf.Gauge } - if s.fitlerHistogram != nil && s.fitlerHistogram.Match(m.Name()) { + if s.filterHistogram != nil && s.filterHistogram.Match(m.Name()) { metricType = telegraf.Histogram } diff --git a/plugins/outputs/sumologic/sumologic.go b/plugins/outputs/sumologic/sumologic.go index 2dc5abf4a..37e4798da 100644 --- a/plugins/outputs/sumologic/sumologic.go +++ b/plugins/outputs/sumologic/sumologic.go @@ -45,9 +45,9 @@ const ( ) type SumoLogic struct { - URL string `toml:"url"` - Timeout config.Duration `toml:"timeout"` - MaxRequstBodySize config.Size `toml:"max_request_body_size"` + URL string `toml:"url"` + Timeout config.Duration `toml:"timeout"` + MaxRequestBodySize config.Size `toml:"max_request_body_size"` SourceName string `toml:"source_name"` SourceHost string `toml:"source_host"` @@ -126,7 +126,7 @@ func (s *SumoLogic) Write(metrics []telegraf.Metric) error { return err } - if l := len(reqBody); l > int(s.MaxRequstBodySize) { + if l := len(reqBody); l > int(s.MaxRequestBodySize) { chunks, err := s.splitIntoChunks(metrics) if err != nil { return err } @@ -194,10 +194,10 @@ func (s *SumoLogic) writeRequestChunk(reqBody []byte) error { } // splitIntoChunks splits metrics to be sent into chunks so that every request -// is smaller than s.MaxRequstBodySize unless it was configured so small so that +// is smaller than s.MaxRequestBodySize unless it was configured so small that // even a single metric cannot fit. // In such a situation metrics will be sent one by one with a warning being logged -// for every request sent even though they don't fit in s.MaxRequstBodySize bytes. +// for every request sent even though they don't fit in s.MaxRequestBodySize bytes. func (s *SumoLogic) splitIntoChunks(metrics []telegraf.Metric) ([][]byte, error) { var ( numMetrics = len(metrics) @@ -215,7 +215,7 @@ func (s *SumoLogic) splitIntoChunks(metrics []telegraf.Metric) ([][]byte, error) la := len(toAppend) if la != 0 { // We already have something to append ...
- if la+len(chunkBody) > int(s.MaxRequstBodySize) { + if la+len(chunkBody) > int(s.MaxRequestBodySize) { // ... and it's just the right size, without currently processed chunk. break } @@ -229,10 +229,10 @@ func (s *SumoLogic) splitIntoChunks(metrics []telegraf.Metric) ([][]byte, error) i++ toAppend = chunkBody - if len(chunkBody) > int(s.MaxRequstBodySize) { + if len(chunkBody) > int(s.MaxRequestBodySize) { s.Log.Warnf( "max_request_body_size set to %d which is too small even for a single metric (len: %d), sending without split", - s.MaxRequstBodySize, len(chunkBody), + s.MaxRequestBodySize, len(chunkBody), ) // The serialized metric is too big, but we have no choice @@ -263,9 +263,9 @@ func setHeaderIfSetInConfig(r *http.Request, h header, value string) { func Default() *SumoLogic { return &SumoLogic{ - Timeout: config.Duration(defaultClientTimeout), - MaxRequstBodySize: defaultMaxRequestBodySize, - headers: make(map[string]string), + Timeout: config.Duration(defaultClientTimeout), + MaxRequestBodySize: defaultMaxRequestBodySize, + headers: make(map[string]string), } } diff --git a/plugins/outputs/sumologic/sumologic_test.go b/plugins/outputs/sumologic/sumologic_test.go index 0587d1bbd..7ce7d723e 100644 --- a/plugins/outputs/sumologic/sumologic_test.go +++ b/plugins/outputs/sumologic/sumologic_test.go @@ -357,8 +357,8 @@ func TestDefaultUserAgent(t *testing.T) { }) plugin := &SumoLogic{ - URL: u.String(), - MaxRequstBodySize: Default().MaxRequstBodySize, + URL: u.String(), + MaxRequestBodySize: Default().MaxRequestBodySize, } serializer := &carbon2.Serializer{ @@ -508,7 +508,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.URL = u.String() // getMetrics returns metrics that serialized (using carbon2), // uncompressed size is 43750B - s.MaxRequstBodySize = 43_749 + s.MaxRequestBodySize = 43_749 return s }, metrics: getMetrics(), @@ -521,7 +521,7 @@ func TestMaxRequestBodySize(t *testing.T) { plugin: func() *SumoLogic { s := Default() s.URL = u.String() - s.MaxRequstBodySize = 10_000 + s.MaxRequestBodySize = 10_000 return s }, metrics: getMetrics(), @@ -534,7 +534,7 @@ func TestMaxRequestBodySize(t *testing.T) { plugin: func() *SumoLogic { s := Default() s.URL = u.String() - s.MaxRequstBodySize = 5_000 + s.MaxRequestBodySize = 5_000 return s }, metrics: getMetrics(), @@ -547,7 +547,7 @@ func TestMaxRequestBodySize(t *testing.T) { plugin: func() *SumoLogic { s := Default() s.URL = u.String() - s.MaxRequstBodySize = 2_500 + s.MaxRequestBodySize = 2_500 return s }, metrics: getMetrics(), @@ -560,7 +560,7 @@ func TestMaxRequestBodySize(t *testing.T) { plugin: func() *SumoLogic { s := Default() s.URL = u.String() - s.MaxRequstBodySize = 1_000 + s.MaxRequestBodySize = 1_000 return s }, metrics: getMetrics(), @@ -573,7 +573,7 @@ func TestMaxRequestBodySize(t *testing.T) { plugin: func() *SumoLogic { s := Default() s.URL = u.String() - s.MaxRequstBodySize = 500 + s.MaxRequestBodySize = 500 return s }, metrics: getMetrics(), @@ -586,7 +586,7 @@ func TestMaxRequestBodySize(t *testing.T) { plugin: func() *SumoLogic { s := Default() s.URL = u.String() - s.MaxRequstBodySize = 300 + s.MaxRequestBodySize = 300 return s }, metrics: getMetrics(), diff --git a/plugins/outputs/syslog/README.md b/plugins/outputs/syslog/README.md index 7ab8a0951..cbab2d1f2 100644 --- a/plugins/outputs/syslog/README.md +++ b/plugins/outputs/syslog/README.md @@ -48,7 +48,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. 
## The framing technique with which it is expected that messages are ## transported (default = "octet-counting"). Whether the messages come - ## using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), + ## using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must ## be one of "octet-counting", "non-transparent". # framing = "octet-counting" diff --git a/plugins/outputs/syslog/sample.conf b/plugins/outputs/syslog/sample.conf index 1ee3e54fd..c9168db3e 100644 --- a/plugins/outputs/syslog/sample.conf +++ b/plugins/outputs/syslog/sample.conf @@ -25,7 +25,7 @@ ## The framing technique with which it is expected that messages are ## transported (default = "octet-counting"). Whether the messages come - ## using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), + ## using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must ## be one of "octet-counting", "non-transparent". # framing = "octet-counting" diff --git a/plugins/outputs/syslog/syslog_test.go b/plugins/outputs/syslog/syslog_test.go index 9bbb4199d..4aa0e8694 100644 --- a/plugins/outputs/syslog/syslog_test.go +++ b/plugins/outputs/syslog/syslog_test.go @@ -14,7 +14,7 @@ import ( "github.com/leodido/go-syslog/v4/nontransparent" ) -func TestGetSyslogMessageWithFramingOctectCounting(t *testing.T) { +func TestGetSyslogMessageWithFramingOctetCounting(t *testing.T) { // Init plugin s := newSyslog() require.NoError(t, s.Init()) @@ -35,7 +35,7 @@ func TestGetSyslogMessageWithFramingOctectCounting(t *testing.T) { messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage) require.NoError(t, err) - require.Equal(t, "59 <13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -", string(messageBytesWithFraming), "Incorrect Octect counting framing") + require.Equal(t, "59 <13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -", string(messageBytesWithFraming), "Incorrect Octet counting framing") } func TestGetSyslogMessageWithFramingNonTransparent(t *testing.T) { @@ -60,7 +60,7 @@ func TestGetSyslogMessageWithFramingNonTransparent(t *testing.T) { messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage) require.NoError(t, err) - require.Equal(t, "<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -\n", string(messageBytesWithFraming), "Incorrect Octect counting framing") + require.Equal(t, "<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -\n", string(messageBytesWithFraming), "Incorrect non-transparent framing") } func TestGetSyslogMessageWithFramingNonTransparentNul(t *testing.T) { @@ -86,7 +86,7 @@ func TestGetSyslogMessageWithFramingNonTransparentNul(t *testing.T) { messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage) require.NoError(t, err) - require.Equal(t, "<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -\x00", string(messageBytesWithFraming), "Incorrect Octect counting framing") + require.Equal(t, "<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -\x00", string(messageBytesWithFraming), "Incorrect non-transparent framing") } func TestSyslogWriteWithTcp(t *testing.T) { diff --git a/plugins/outputs/zabbix/README.md b/plugins/outputs/zabbix/README.md index 42918c3fb..5945f45c3 100644 --- a/plugins/outputs/zabbix/README.md +++ b/plugins/outputs/zabbix/README.md @@ -219,7 +219,7 @@ not know in advance what are we going to
send, for example, the name of a container to send its cpu and memory consumption. For this case Zabbix provides [low-level discovery][lld] that allow to create -new items dinamically based on the parameters sent by the trap. +new items dynamically based on the parameters sent by the trap. As explained previously, this output plugin will format the Zabbix key using the tags seen in the Telegraf metric following the LLD format. diff --git a/plugins/parsers/avro/parser.go b/plugins/parsers/avro/parser.go index 2fbbd4888..1f9a911f8 100644 --- a/plugins/parsers/avro/parser.go +++ b/plugins/parsers/avro/parser.go @@ -255,7 +255,7 @@ func (p *Parser) createMetric(data map[string]interface{}, schema string) (teleg } var schemaObj map[string]interface{} if err := json.Unmarshal([]byte(schema), &schemaObj); err != nil { - return nil, fmt.Errorf("unmarshaling schema failed: %w", err) + return nil, fmt.Errorf("unmarshalling schema failed: %w", err) } if len(fields) == 0 { // A telegraf metric needs at least one field. diff --git a/plugins/parsers/influx/handler.go b/plugins/parsers/influx/handler.go index c861edf2d..62b84ea80 100644 --- a/plugins/parsers/influx/handler.go +++ b/plugins/parsers/influx/handler.go @@ -111,7 +111,7 @@ func (h *MetricHandler) AddBool(key []byte, value []byte) error { fk := unescape(key) fv, err := parseBoolBytes(value) if err != nil { - return errors.New("unparseable bool") + return errors.New("unparsable bool") } h.metric.AddField(fk, fv) return nil diff --git a/plugins/parsers/influx/influx_upstream/parser.go b/plugins/parsers/influx/influx_upstream/parser.go index 290db825d..6924b4b8b 100644 --- a/plugins/parsers/influx/influx_upstream/parser.go +++ b/plugins/parsers/influx/influx_upstream/parser.go @@ -104,8 +104,8 @@ func convertToParseError(input []byte, rawErr error) error { // Parser is an InfluxDB Line Protocol parser that implements the // parsers.Parser interface. 
type Parser struct { - InfluxTimestampPrecsion config.Duration `toml:"influx_timestamp_precision"` - DefaultTags map[string]string `toml:"-"` + InfluxTimestampPrecision config.Duration `toml:"influx_timestamp_precision"` + DefaultTags map[string]string `toml:"-"` // If set to "series" a series machine will be initialized, defaults to regular machine Type string `toml:"-"` @@ -189,7 +189,7 @@ func (p *Parser) applyDefaultTagsSingle(m telegraf.Metric) { } func (p *Parser) Init() error { - if err := p.SetTimePrecision(time.Duration(p.InfluxTimestampPrecsion)); err != nil { + if err := p.SetTimePrecision(time.Duration(p.InfluxTimestampPrecision)); err != nil { return err } diff --git a/plugins/parsers/influx/influx_upstream/parser_test.go b/plugins/parsers/influx/influx_upstream/parser_test.go index c5cef72a1..9731586c9 100644 --- a/plugins/parsers/influx/influx_upstream/parser_test.go +++ b/plugins/parsers/influx/influx_upstream/parser_test.go @@ -854,7 +854,7 @@ func TestParserTimestampPrecision(t *testing.T) { t.Run(tt.name, func(t *testing.T) { d := config.Duration(0) require.NoError(t, d.UnmarshalText([]byte(tt.precision))) - parser := Parser{InfluxTimestampPrecsion: d} + parser := Parser{InfluxTimestampPrecision: d} require.NoError(t, parser.Init()) metrics, err := parser.Parse(tt.input) @@ -869,7 +869,7 @@ func TestParserInvalidTimestampPrecision(t *testing.T) { d := config.Duration(0) for _, precision := range []string{"1h", "1d", "2s", "1m", "2ns"} { require.NoError(t, d.UnmarshalText([]byte(precision))) - parser := Parser{InfluxTimestampPrecsion: d} + parser := Parser{InfluxTimestampPrecision: d} require.ErrorContains(t, parser.Init(), "invalid time precision") } } diff --git a/plugins/parsers/influx/parser.go b/plugins/parsers/influx/parser.go index 4fcd20bcc..a4adf330b 100644 --- a/plugins/parsers/influx/parser.go +++ b/plugins/parsers/influx/parser.go @@ -61,8 +61,8 @@ func (e *ParseError) Error() string { // Parser is an InfluxDB Line Protocol parser that implements the // parsers.Parser interface. 
type Parser struct { - InfluxTimestampPrecsion config.Duration `toml:"influx_timestamp_precision"` - DefaultTags map[string]string `toml:"-"` + InfluxTimestampPrecision config.Duration `toml:"influx_timestamp_precision"` + DefaultTags map[string]string `toml:"-"` // If set to "series" a series machine will be initialized, defaults to regular machine Type string `toml:"-"` @@ -157,13 +157,13 @@ func (p *Parser) Init() error { p.machine = NewMachine(p.handler) } - timeDuration := time.Duration(p.InfluxTimestampPrecsion) + timeDuration := time.Duration(p.InfluxTimestampPrecision) switch timeDuration { case 0: case time.Nanosecond, time.Microsecond, time.Millisecond, time.Second: p.SetTimePrecision(timeDuration) default: - return fmt.Errorf("invalid time precision: %d", p.InfluxTimestampPrecsion) + return fmt.Errorf("invalid time precision: %d", p.InfluxTimestampPrecision) } return nil diff --git a/plugins/parsers/influx/parser_test.go b/plugins/parsers/influx/parser_test.go index dc21a5ff7..8907325be 100644 --- a/plugins/parsers/influx/parser_test.go +++ b/plugins/parsers/influx/parser_test.go @@ -690,7 +690,7 @@ func TestParserTimestampPrecision(t *testing.T) { t.Run(tt.name, func(t *testing.T) { d := config.Duration(0) require.NoError(t, d.UnmarshalText([]byte(tt.precision))) - parser := Parser{InfluxTimestampPrecsion: d} + parser := Parser{InfluxTimestampPrecision: d} require.NoError(t, parser.Init()) metrics, err := parser.Parse(tt.input) @@ -705,7 +705,7 @@ func TestParserInvalidTimestampPrecision(t *testing.T) { d := config.Duration(0) for _, precision := range []string{"1h", "1d", "2s", "1m", "2ns"} { require.NoError(t, d.UnmarshalText([]byte(precision))) - parser := Parser{InfluxTimestampPrecsion: d} + parser := Parser{InfluxTimestampPrecision: d} require.ErrorContains(t, parser.Init(), "invalid time precision") } } diff --git a/plugins/parsers/nagios/parser.go b/plugins/parsers/nagios/parser.go index 645586ad2..6a5847d3d 100644 --- a/plugins/parsers/nagios/parser.go +++ b/plugins/parsers/nagios/parser.go @@ -193,8 +193,8 @@ func parsePerfData(perfdatas string, timestamp time.Time) ([]telegraf.Metric, er metrics := make([]telegraf.Metric, 0) for _, unParsedPerf := range perfSplitRegExp.FindAllString(perfdatas, -1) { - trimedPerf := strings.TrimSpace(unParsedPerf) - perf := nagiosRegExp.FindStringSubmatch(trimedPerf) + trimmedPerf := strings.TrimSpace(unParsedPerf) + perf := nagiosRegExp.FindStringSubmatch(trimmedPerf) // verify at least `'label'=value[UOM];` existed if len(perf) < 3 { diff --git a/plugins/parsers/xpath/parser.go b/plugins/parsers/xpath/parser.go index 59d6d0ee0..9e94e754b 100644 --- a/plugins/parsers/xpath/parser.go +++ b/plugins/parsers/xpath/parser.go @@ -560,18 +560,18 @@ func splitLastPathElement(query string) []string { return []string{} } - seperatorIdx := strings.LastIndex(query, "/") - if seperatorIdx < 0 { + separatorIdx := strings.LastIndex(query, "/") + if separatorIdx < 0 { query = "./" + query - seperatorIdx = 1 + separatorIdx = 1 } // For double slash we want to split at the first slash - if seperatorIdx > 0 && query[seperatorIdx-1] == byte('/') { - seperatorIdx-- + if separatorIdx > 0 && query[separatorIdx-1] == byte('/') { + separatorIdx-- } - base := query[:seperatorIdx] + base := query[:separatorIdx] if base == "" { base = "/" } @@ -579,7 +579,7 @@ func splitLastPathElement(query string) []string { elements := make([]string, 0, 3) elements = append(elements, base) - offset := seperatorIdx + offset := separatorIdx if i := strings.Index(query[offset:], 
"::"); i >= 0 { // Check for axis operator offset += i diff --git a/plugins/processors/noise/README.md b/plugins/processors/noise/README.md index 3d97c3cbb..ccbf482e3 100644 --- a/plugins/processors/noise/README.md +++ b/plugins/processors/noise/README.md @@ -1,7 +1,7 @@ # Noise Processor Plugin The _Noise_ processor is used to add noise to numerical field values. For each -field a noise is generated using a defined probability densitiy function and +field a noise is generated using a defined probability density function and added to the value. The function type can be configured as _Laplace_, _Gaussian_ or _Uniform_. Depending on the function, various parameters need to be configured: diff --git a/plugins/processors/timestamp/README.md b/plugins/processors/timestamp/README.md index e2cdfa713..129144282 100644 --- a/plugins/processors/timestamp/README.md +++ b/plugins/processors/timestamp/README.md @@ -39,7 +39,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. # source_timestamp_timezone = "" ## Target timestamp format - ## This defines the destination timestmap format. It also can accept either + ## This defines the destination timestamp format. It also can accept either ## `unix`, `unix_ms`, `unix_us`, `unix_ns`, or a time in Go "reference time". destination_timestamp_format = "" diff --git a/plugins/processors/timestamp/sample.conf b/plugins/processors/timestamp/sample.conf index edf55b637..e8321d9b5 100644 --- a/plugins/processors/timestamp/sample.conf +++ b/plugins/processors/timestamp/sample.conf @@ -22,7 +22,7 @@ # source_timestamp_timezone = "" ## Target timestamp format - ## This defines the destination timestmap format. It also can accept either + ## This defines the destination timestamp format. It also can accept either ## `unix`, `unix_ms`, `unix_us`, `unix_ns`, or a time in Go "reference time". 
destination_timestamp_format = "" diff --git a/plugins/secretstores/docker/docker_test.go b/plugins/secretstores/docker/docker_test.go index fef9449f2..3f1cfbf44 100644 --- a/plugins/secretstores/docker/docker_test.go +++ b/plugins/secretstores/docker/docker_test.go @@ -17,7 +17,7 @@ func TestInitFail(t *testing.T) { require.ErrorContains(t, plugin.Init(), "id missing") } -func TestPathNonExistant(t *testing.T) { +func TestPathNonExistent(t *testing.T) { plugin := &Docker{ ID: "non_existent_path_test", Path: "non/existent/path", @@ -127,7 +127,7 @@ func TestResolverInvalid(t *testing.T) { require.ErrorContains(t, err, "cannot read the secret's value under the directory:") } -func TestGetNonExistant(t *testing.T) { +func TestGetNonExistent(t *testing.T) { testdir, err := filepath.Abs("testdata") require.NoError(t, err, "testdata cannot be found") diff --git a/plugins/secretstores/jose/jose_test.go b/plugins/secretstores/jose/jose_test.go index 25fbc13d7..290b4028d 100644 --- a/plugins/secretstores/jose/jose_test.go +++ b/plugins/secretstores/jose/jose_test.go @@ -158,7 +158,7 @@ func TestResolverInvalid(t *testing.T) { require.Error(t, err) } -func TestGetNonExistant(t *testing.T) { +func TestGetNonExistent(t *testing.T) { secretKey := "a secret" secretVal := "I won't tell" diff --git a/plugins/secretstores/systemd/systemd_test.go b/plugins/secretstores/systemd/systemd_test.go index 88f9a6bad..a8ccc9487 100644 --- a/plugins/secretstores/systemd/systemd_test.go +++ b/plugins/secretstores/systemd/systemd_test.go @@ -160,7 +160,7 @@ func TestResolverInvalid(t *testing.T) { require.ErrorContains(t, err, "cannot read the secret's value:") } -func TestGetNonExistant(t *testing.T) { +func TestGetNonExistent(t *testing.T) { getSystemdVersion = getSystemdVersionMin t.Setenv("CREDENTIALS_DIRECTORY", "testdata") diff --git a/tools/update_goversion/main_test.go b/tools/update_goversion/main_test.go index e34ff764e..9193e518b 100644 --- a/tools/update_goversion/main_test.go +++ b/tools/update_goversion/main_test.go @@ -10,9 +10,9 @@ import ( func TestFindHash(t *testing.T) { tests := []struct { - testFile string - version string - expectedHases map[string]string + testFile string + version string + expectedHashes map[string]string }{ { "testdata/godev_patch.html", @@ -41,6 +41,6 @@ func TestFindHash(t *testing.T) { hashes, err := findHashes(bytes.NewReader(b), test.version) require.NoError(t, err) - require.Equal(t, test.expectedHases, hashes) + require.Equal(t, test.expectedHashes, hashes) } }