From 6814d7af8a4134d8e05bee47f597df4e930eba69 Mon Sep 17 00:00:00 2001 From: Andreas Deininger Date: Wed, 29 Nov 2023 14:47:39 +0100 Subject: [PATCH] docs: Fix typos (#14359) Co-authored-by: Josh Powers --- config/config_test.go | 6 +-- config/deprecation.go | 2 +- plugins/common/mqtt/mqtt.go | 2 +- plugins/inputs/azure_monitor/README.md | 2 +- plugins/inputs/azure_monitor/sample.conf | 2 +- plugins/inputs/ctrlx_datalayer/README.md | 2 +- .../inputs/ctrlx_datalayer/ctrlx_datalayer.go | 2 +- plugins/inputs/dns_query/README.md | 2 +- plugins/inputs/example/example.go | 4 +- plugins/inputs/example/example_test.go | 10 ++-- plugins/inputs/fireboard/README.md | 4 +- plugins/inputs/gnmi/README.md | 2 +- plugins/inputs/gnmi/sample.conf | 2 +- plugins/inputs/intel_powerstat/msr.go | 2 +- plugins/inputs/jenkins/README.md | 4 +- plugins/inputs/jenkins/sample.conf | 4 +- plugins/inputs/kernel/README.md | 2 +- plugins/inputs/modbus/request.go | 2 +- plugins/inputs/nfsclient/README.md | 8 +-- plugins/inputs/nfsclient/sample.conf | 2 +- plugins/inputs/opensearch_query/README.md | 2 +- plugins/inputs/opensearch_query/sample.conf | 2 +- plugins/inputs/openweathermap/README.md | 2 +- plugins/inputs/openweathermap/sample.conf | 2 +- plugins/inputs/prometheus/consul.go | 6 +-- plugins/inputs/snmp/README.md | 2 +- plugins/inputs/snmp/sample.conf | 2 +- plugins/inputs/socketstat/socketstat.go | 2 +- plugins/inputs/sqlserver/README.md | 2 +- plugins/inputs/sqlserver/sample.conf | 2 +- plugins/inputs/system/system_test.go | 2 +- plugins/inputs/tacacs/README.md | 2 +- plugins/inputs/webhooks/artifactory/README.md | 53 ++++++++++++------- .../artifactory/artifactory_webhook_models.go | 2 +- plugins/inputs/zfs/README.md | 4 +- plugins/outputs/opensearch/opensearch.go | 2 +- plugins/parsers/avro/schema_registry.go | 4 +- plugins/parsers/csv/README.md | 2 +- plugins/parsers/json_v2/README.md | 4 +- plugins/parsers/json_v2/parser.go | 2 +- plugins/processors/scale/scale_test.go | 2 +- plugins/processors/starlark/README.md | 2 +- plugins/processors/starlark/starlark_test.go | 2 +- plugins/secretstores/docker/README.md | 2 +- scripts/windows-gen-syso.sh | 2 +- 45 files changed, 97 insertions(+), 80 deletions(-) diff --git a/config/config_test.go b/config/config_test.go index 3a58a912d..870e7a2b8 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -621,7 +621,7 @@ func TestConfig_SerializerInterfaceNewFormat(t *testing.T) { options = append(options, cmpopts.IgnoreFields(stype, settings.mask...)) } - // Do a manual comparision as require.EqualValues will also work on unexported fields + // Do a manual comparison as require.EqualValues will also work on unexported fields // that cannot be cleared or ignored. diff := cmp.Diff(expected[i], actual[i], options...) require.Emptyf(t, diff, "Difference in SetSerializer() for %q", format) @@ -820,7 +820,7 @@ func TestConfig_ParserInterface(t *testing.T) { options = append(options, cmpopts.IgnoreFields(stype, settings.mask...)) } - // Do a manual comparision as require.EqualValues will also work on unexported fields + // Do a manual comparison as require.EqualValues will also work on unexported fields // that cannot be cleared or ignored. diff := cmp.Diff(expected[i], actual[i], options...) 
require.Emptyf(t, diff, "Difference in SetParser() for %q", format) @@ -1039,7 +1039,7 @@ func TestConfig_ProcessorsWithParsers(t *testing.T) { options = append(options, cmpopts.IgnoreFields(stype, settings.mask...)) } - // Do a manual comparision as require.EqualValues will also work on unexported fields + // Do a manual comparison as require.EqualValues will also work on unexported fields // that cannot be cleared or ignored. diff := cmp.Diff(expected[i], actual[i], options...) require.Emptyf(t, diff, "Difference in SetParser() for %q", format) diff --git a/config/deprecation.go b/config/deprecation.go index c0f7ac682..52c315c38 100644 --- a/config/deprecation.go +++ b/config/deprecation.go @@ -297,7 +297,7 @@ func walkPluginStruct(value reflect.Value, fn func(f reflect.StructField, fv ref } // Walk over the struct fields and call the given function. If we encounter more complex embedded - // elements (stucts, slices/arrays, maps) we need to descend into those elements as they might + // elements (structs, slices/arrays, maps) we need to descend into those elements as they might // contain structures nested in the current structure. for i := 0; i < t.NumField(); i++ { field := t.Field(i) diff --git a/plugins/common/mqtt/mqtt.go b/plugins/common/mqtt/mqtt.go index 892eec39c..3534ad093 100644 --- a/plugins/common/mqtt/mqtt.go +++ b/plugins/common/mqtt/mqtt.go @@ -72,7 +72,7 @@ func NewClient(cfg *MqttConfig) (Client, error) { case "5": return NewMQTTv5Client(cfg) } - return nil, fmt.Errorf("unsuported protocol %q: must be \"3.1.1\" or \"5\"", cfg.Protocol) + return nil, fmt.Errorf("unsupported protocol %q: must be \"3.1.1\" or \"5\"", cfg.Protocol) } func parseServers(servers []string) ([]*url.URL, error) { diff --git a/plugins/inputs/azure_monitor/README.md b/plugins/inputs/azure_monitor/README.md index 135bf87b8..b03d1a461 100644 --- a/plugins/inputs/azure_monitor/README.md +++ b/plugins/inputs/azure_monitor/README.md @@ -74,7 +74,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. # resource target #1 to collect metrics from [[inputs.azure_monitor.resource_target]] - # can be found undet Overview->Essentials->JSON View in the Azure portal for your application/service + # can be found under Overview->Essentials->JSON View in the Azure portal for your application/service # must start with 'resourceGroups/...' ('/subscriptions/xxxxxxxx-xxxx-xxxx-xxx-xxxxxxxxxxxx' # must be removed from the beginning of Resource ID property value) resource_id = "<>" diff --git a/plugins/inputs/azure_monitor/sample.conf b/plugins/inputs/azure_monitor/sample.conf index bedb0bc99..9f7bf3084 100644 --- a/plugins/inputs/azure_monitor/sample.conf +++ b/plugins/inputs/azure_monitor/sample.conf @@ -11,7 +11,7 @@ # resource target #1 to collect metrics from [[inputs.azure_monitor.resource_target]] - # can be found undet Overview->Essentials->JSON View in the Azure portal for your application/service + # can be found under Overview->Essentials->JSON View in the Azure portal for your application/service # must start with 'resourceGroups/...' 
('/subscriptions/xxxxxxxx-xxxx-xxxx-xxx-xxxxxxxxxxxx' # must be removed from the beginning of Resource ID property value) resource_id = "<>" diff --git a/plugins/inputs/ctrlx_datalayer/README.md b/plugins/inputs/ctrlx_datalayer/README.md index fcd6c3866..2f5ea35c1 100644 --- a/plugins/inputs/ctrlx_datalayer/README.md +++ b/plugins/inputs/ctrlx_datalayer/README.md @@ -1,7 +1,7 @@ # ctrlX Data Layer Input Plugin The `ctrlx_datalayer` plugin gathers data from the ctrlX Data Layer, -a communication middleware runnning on +a communication middleware running on [ctrlX CORE devices](https://ctrlx-core.com) from [Bosch Rexroth](https://boschrexroth.com). The platform is used for professional automation applications like industrial automation, building diff --git a/plugins/inputs/ctrlx_datalayer/ctrlx_datalayer.go b/plugins/inputs/ctrlx_datalayer/ctrlx_datalayer.go index 6f9bde66e..525a5c4bb 100644 --- a/plugins/inputs/ctrlx_datalayer/ctrlx_datalayer.go +++ b/plugins/inputs/ctrlx_datalayer/ctrlx_datalayer.go @@ -60,7 +60,7 @@ type CtrlXDataLayer struct { // convertTimestamp2UnixTime converts the given Data Layer timestamp of the payload to UnixTime. func convertTimestamp2UnixTime(t int64) time.Time { - // 1 sec=1000 milisec=1000000 microsec=1000000000 nanosec. + // 1 sec=1000 millisec=1000000 microsec=1000000000 nanosec. // Convert from FILETIME (100-nanosecond intervals since January 1, 1601 UTC) to // seconds and nanoseconds since January 1, 1970 UTC. // Between Jan 1, 1601 and Jan 1, 1970 there are 11644473600 seconds. diff --git a/plugins/inputs/dns_query/README.md b/plugins/inputs/dns_query/README.md index fec9c4023..1841306f9 100644 --- a/plugins/inputs/dns_query/README.md +++ b/plugins/inputs/dns_query/README.md @@ -1,6 +1,6 @@ # DNS Query Input Plugin -The DNS plugin gathers dns query times in miliseconds - like +The DNS plugin gathers dns query times in milliseconds - like [Dig](https://en.wikipedia.org/wiki/Dig_\(command\)) ## Global configuration options diff --git a/plugins/inputs/example/example.go b/plugins/inputs/example/example.go index 48df55ec5..996d426d3 100644 --- a/plugins/inputs/example/example.go +++ b/plugins/inputs/example/example.go @@ -55,7 +55,7 @@ func (m *Example) Init() error { } // Set your defaults. - // Please note: In golang all fields are initialzed to their nil value, so you should not + // Please note: In golang all fields are initialized to their nil value, so you should not // set these fields if the nil value is what you want (e.g. for booleans). if m.NumberFields < 1 { m.Log.Debugf("Setting number of fields to default from invalid value %d", m.NumberFields) @@ -75,7 +75,7 @@ func (m *Example) Init() error { } defer password.Destroy() - // Initialze your internal states + // Initialize your internal states m.count = 1 return nil diff --git a/plugins/inputs/example/example_test.go b/plugins/inputs/example/example_test.go index d744449ba..f4ea9bb82 100644 --- a/plugins/inputs/example/example_test.go +++ b/plugins/inputs/example/example_test.go @@ -20,7 +20,7 @@ func TestInitDefault(t *testing.T) { // This test should succeed with the default initialization. // Use whatever you use in the init() function plus the mandatory options. - // ATTENTION: Always initialze the "Log" as you will get SIGSEGV otherwise. + // ATTENTION: Always initialize the "Log" as you will get SIGSEGV otherwise. 
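For reference, the FILETIME-to-Unix conversion described in the `convertTimestamp2UnixTime` comment above works out as follows. This is a minimal, self-contained sketch and not part of the patch itself; the helper name `filetimeToUnix` and the example value are illustrative, while the 100-nanosecond tick size and the 11644473600-second epoch offset come straight from the comment:

```go
package main

import (
	"fmt"
	"time"
)

const (
	ticksPerSecond = 10_000_000  // FILETIME counts 100 ns ticks, so 1e7 ticks per second
	epochDeltaSecs = 11644473600 // seconds between 1601-01-01 and 1970-01-01 UTC
)

// filetimeToUnix converts 100-nanosecond ticks since January 1, 1601 UTC
// into a Go time.Time on the Unix epoch.
func filetimeToUnix(t int64) time.Time {
	secs := t/ticksPerSecond - epochDeltaSecs
	nsecs := (t % ticksPerSecond) * 100
	return time.Unix(secs, nsecs).UTC()
}

func main() {
	// 133170048000000000 ticks is 2023-01-01T00:00:00Z.
	fmt.Println(filetimeToUnix(133170048000000000))
}
```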
plugin := &Example{ DeviceName: "test", Timeout: config.Duration(100 * time.Millisecond), @@ -42,7 +42,7 @@ func TestInitFail(t *testing.T) { // and check if you reach them // We setup a table-test here to specify "setting" - "expected error" values. - // Eventhough it seems overkill here for the example plugin, we reuse this structure + // Even though it seems overkill here for the example plugin, we reuse this structure // later for checking the metrics tests := []struct { name string @@ -58,7 +58,7 @@ func TestInitFail(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - // Always initialze the logger to avoid SIGSEGV. This is done automatically by + // Always initialize the logger to avoid SIGSEGV. This is done automatically by // telegraf during normal operation. tt.plugin.Log = testutil.Logger{} err := tt.plugin.Init() @@ -225,8 +225,8 @@ func TestFixedValue(t *testing.T) { acc.Wait(len(tt.expected)) // Compare the metrics in a convenient way. Here we ignore - // the metric time during comparision as we cannot inject the time - // during test. For more comparision options check testutil package. + // the metric time during comparison as we cannot inject the time + // during test. For more comparison options check testutil package. testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) } diff --git a/plugins/inputs/fireboard/README.md b/plugins/inputs/fireboard/README.md index 7677b7982..848f74eb3 100644 --- a/plugins/inputs/fireboard/README.md +++ b/plugins/inputs/fireboard/README.md @@ -53,7 +53,7 @@ values are included if they are less than a minute old. - fireboard - tags: - channel - - scale (Celcius; Farenheit) + - scale (Celsius; Fahrenheit) - title (name of the Fireboard) - uuid (UUID of the Fireboard) - fields: @@ -66,5 +66,5 @@ This section shows example output in Line Protocol format. You can often use this information. ```text -fireboard,channel=2,host=patas-mbp,scale=Farenheit,title=telegraf-FireBoard,uuid=b55e766c-b308-49b5-93a4-df89fe31efd0 temperature=78.2 1561690040000000000 +fireboard,channel=2,host=patas-mbp,scale=Fahrenheit,title=telegraf-FireBoard,uuid=b55e766c-b308-49b5-93a4-df89fe31efd0 temperature=78.2 1561690040000000000 ``` diff --git a/plugins/inputs/gnmi/README.md b/plugins/inputs/gnmi/README.md index 6802f1d11..f7da3f9a0 100644 --- a/plugins/inputs/gnmi/README.md +++ b/plugins/inputs/gnmi/README.md @@ -88,7 +88,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## * Juniper Header Extension (juniper_header): some sensors are directly managed by ## Linecard, which adds the Juniper GNMI Header Extension. Enabling this ## allows the decoding of the Extension header if present. Currently this knob - ## adds component, component_id & sub_component_id as additionnal tags + ## adds component, component_id & sub_component_id as additional tags # vendor_specific = [] ## Define additional aliases to map encoding paths to measurement names diff --git a/plugins/inputs/gnmi/sample.conf b/plugins/inputs/gnmi/sample.conf index 7e330f79d..f2c5989f0 100644 --- a/plugins/inputs/gnmi/sample.conf +++ b/plugins/inputs/gnmi/sample.conf @@ -49,7 +49,7 @@ ## * Juniper Header Extension (juniper_header): some sensors are directly managed by ## Linecard, which adds the Juniper GNMI Header Extension. Enabling this ## allows the decoding of the Extension header if present. 
Currently this knob - ## adds component, component_id & sub_component_id as additionnal tags + ## adds component, component_id & sub_component_id as additional tags # vendor_specific = [] ## Define additional aliases to map encoding paths to measurement names diff --git a/plugins/inputs/intel_powerstat/msr.go b/plugins/inputs/intel_powerstat/msr.go index 52690c4a1..8d30f954e 100644 --- a/plugins/inputs/intel_powerstat/msr.go +++ b/plugins/inputs/intel_powerstat/msr.go @@ -184,7 +184,7 @@ func (m *msrServiceImpl) readSingleMsr(core string, msr string) (uint64, error) case msrFSBFreqString: msrAddress = fsbFreq default: - return 0, fmt.Errorf("incorect name of MSR %s", msr) + return 0, fmt.Errorf("incorrect name of MSR %s", msr) } value, err := m.fs.readFileAtOffsetToUint64(msrFile, msrAddress) diff --git a/plugins/inputs/jenkins/README.md b/plugins/inputs/jenkins/README.md index d39d15c8b..8266754ea 100644 --- a/plugins/inputs/jenkins/README.md +++ b/plugins/inputs/jenkins/README.md @@ -66,9 +66,9 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## Empty this field will use default value 5 # max_connections = 5 - ## When set to true will add node labels as a comma-seperated tag. If none, + ## When set to true will add node labels as a comma-separated tag. If none ## are found, then a tag with the value of 'none' is used. Finally, if a - ## lable contains a comma it is replaced with an underscore. + ## label contains a comma it is replaced with an underscore. # node_labels_as_tag = false ``` diff --git a/plugins/inputs/jenkins/sample.conf b/plugins/inputs/jenkins/sample.conf index f8a0cb717..9aa4bd709 100644 --- a/plugins/inputs/jenkins/sample.conf +++ b/plugins/inputs/jenkins/sample.conf @@ -46,7 +46,7 @@ ## Empty this field will use default value 5 # max_connections = 5 - ## When set to true will add node labels as a comma-seperated tag. If none, + ## When set to true will add node labels as a comma-separated tag. If none ## are found, then a tag with the value of 'none' is used. Finally, if a - ## lable contains a comma it is replaced with an underscore. + ## label contains a comma it is replaced with an underscore. # node_labels_as_tag = false diff --git a/plugins/inputs/kernel/README.md b/plugins/inputs/kernel/README.md index a64905407..2ea010bc2 100644 --- a/plugins/inputs/kernel/README.md +++ b/plugins/inputs/kernel/README.md @@ -39,7 +39,7 @@ processes 86031 Number of forks since boot.
``` -Kernel Samepage Merging is generally documented in [kernel documenation][1] and +Kernel Samepage Merging is generally documented in [kernel documentation][1] and the available metrics exposed via sysfs are documented in [admin guide][2] [1]: https://www.kernel.org/doc/html/latest/mm/ksm.html diff --git a/plugins/inputs/modbus/request.go b/plugins/inputs/modbus/request.go index 3da7e257c..fb6c248cf 100644 --- a/plugins/inputs/modbus/request.go +++ b/plugins/inputs/modbus/request.go @@ -275,7 +275,7 @@ func groupFieldsToRequests(fields []field, params groupingParams) []request { } requests = optimizeGroup(total, params.MaxBatchSize) case "max_insert": - // Similar to aggressive but keeps the number of touched registers bellow a threshold + // Similar to aggressive but keeps the number of touched registers below a threshold var total request for _, g := range groups { if len(g.fields) > 0 { diff --git a/plugins/inputs/nfsclient/README.md b/plugins/inputs/nfsclient/README.md index 149900bb7..f144c3e50 100644 --- a/plugins/inputs/nfsclient/README.md +++ b/plugins/inputs/nfsclient/README.md @@ -45,7 +45,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## NFSv3 and NFSv4 have different lists. While it is not possible to ## have different include/exclude lists for NFSv3/4, unused elements ## in the list should be okay. It is possible to have different lists - ## for different mountpoints: use mulitple [[input.nfsclient]] stanzas, + ## for different mountpoints: use multiple [[input.nfsclient]] stanzas, ## with their own lists. See "include_mounts" above, and be careful of ## duplicate metrics. # include_operations = [] @@ -89,9 +89,9 @@ MOUNT_PROC: /host/proc/self/mountstats - bytes (integer, bytes) - The total number of bytes exchanged doing this operation. This is bytes sent _and_ received, including overhead _and_ payload. (bytes = OP_bytes_sent + OP_bytes_recv. See nfs_ops below) - ops (integer, count) - The number of operations of this type executed. - retrans (integer, count) - The number of times an operation had to be retried (retrans = OP_trans - OP_ops. See nfs_ops below) - - exe (integer, miliseconds) - The number of miliseconds it took to process the operations. - - rtt (integer, miliseconds) - The total round-trip time for all operations. - - rtt_per_op (float, miliseconds) - The average round-trip time per operation. + - exe (integer, milliseconds) - The number of milliseconds it took to process the operations. + - rtt (integer, milliseconds) - The total round-trip time for all operations. + - rtt_per_op (float, milliseconds) - The average round-trip time per operation. In addition enabling `fullstat` will make many more metrics available. diff --git a/plugins/inputs/nfsclient/sample.conf b/plugins/inputs/nfsclient/sample.conf index fbd1371c5..d84451374 100644 --- a/plugins/inputs/nfsclient/sample.conf +++ b/plugins/inputs/nfsclient/sample.conf @@ -20,7 +20,7 @@ ## NFSv3 and NFSv4 have different lists. While it is not possible to ## have different include/exclude lists for NFSv3/4, unused elements ## in the list should be okay. It is possible to have different lists - ## for different mountpoints: use mulitple [[input.nfsclient]] stanzas, + ## for different mountpoints: use multiple [[input.nfsclient]] stanzas, ## with their own lists. See "include_mounts" above, and be careful of ## duplicate metrics. 
# include_operations = [] diff --git a/plugins/inputs/opensearch_query/README.md b/plugins/inputs/opensearch_query/README.md index 77dba3970..00a47fc23 100755 --- a/plugins/inputs/opensearch_query/README.md +++ b/plugins/inputs/opensearch_query/README.md @@ -33,7 +33,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. # Derive metrics from aggregating OpenSearch query results [[inputs.opensearch_query]] ## OpenSearch cluster endpoint(s). Multiple urls can be specified as part - ## of the same cluster. Only one succesful call will be made per interval. + ## of the same cluster. Only one successful call will be made per interval. urls = [ "https://node1.os.example.com:9200" ] # required. ## OpenSearch client timeout, defaults to "5s". diff --git a/plugins/inputs/opensearch_query/sample.conf b/plugins/inputs/opensearch_query/sample.conf index 30b672e23..7d1dae43c 100644 --- a/plugins/inputs/opensearch_query/sample.conf +++ b/plugins/inputs/opensearch_query/sample.conf @@ -1,7 +1,7 @@ # Derive metrics from aggregating OpenSearch query results [[inputs.opensearch_query]] ## OpenSearch cluster endpoint(s). Multiple urls can be specified as part - ## of the same cluster. Only one succesful call will be made per interval. + ## of the same cluster. Only one successful call will be made per interval. urls = [ "https://node1.os.example.com:9200" ] # required. ## OpenSearch client timeout, defaults to "5s". diff --git a/plugins/inputs/openweathermap/README.md b/plugins/inputs/openweathermap/README.md index 4c8afe46c..402d8a2df 100644 --- a/plugins/inputs/openweathermap/README.md +++ b/plugins/inputs/openweathermap/README.md @@ -57,7 +57,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. # query_style = "batch" ## Query interval to fetch data. - ## By default the gloabl 'interval' setting is used. You should override the + ## By default the global 'interval' setting is used. You should override the ## interval here if the global setting is shorter than 10 minutes as ## OpenWeatherMap weather data is only updated every 10 minutes. # interval = "10m" diff --git a/plugins/inputs/openweathermap/sample.conf b/plugins/inputs/openweathermap/sample.conf index d44bc0640..b8134899b 100644 --- a/plugins/inputs/openweathermap/sample.conf +++ b/plugins/inputs/openweathermap/sample.conf @@ -33,7 +33,7 @@ # query_style = "batch" ## Query interval to fetch data. - ## By default the gloabl 'interval' setting is used. You should override the + ## By default the global 'interval' setting is used. You should override the ## interval here if the global setting is shorter than 10 minutes as ## OpenWeatherMap weather data is only updated every 10 minutes. 
# interval = "10m" diff --git a/plugins/inputs/prometheus/consul.go b/plugins/inputs/prometheus/consul.go index 7cd026dbe..b0914e314 100644 --- a/plugins/inputs/prometheus/consul.go +++ b/plugins/inputs/prometheus/consul.go @@ -94,13 +94,13 @@ func (p *Prometheus) startConsul(ctx context.Context) error { p.wg.Add(1) go func() { - // Store last error status and change log level depending on repeated occurence + // Store last error status and change log level depending on repeated occurrence var refreshFailed = false defer p.wg.Done() err := p.refreshConsulServices(catalog) if err != nil { refreshFailed = true - p.Log.Errorf("Unable to refreh Consul services: %v", err) + p.Log.Errorf("Unable to refresh Consul services: %v", err) } for { select { @@ -109,7 +109,7 @@ func (p *Prometheus) startConsul(ctx context.Context) error { case <-time.After(time.Duration(p.ConsulConfig.QueryInterval)): err := p.refreshConsulServices(catalog) if err != nil { - message := fmt.Sprintf("Unable to refreh Consul services: %v", err) + message := fmt.Sprintf("Unable to refresh Consul services: %v", err) if refreshFailed { p.Log.Debug(message) } else { diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md index 8f2b9519b..d5397e667 100644 --- a/plugins/inputs/snmp/README.md +++ b/plugins/inputs/snmp/README.md @@ -54,7 +54,7 @@ details. # version = 2 ## Unconnected UDP socket - ## When true, SNMP reponses are accepted from any address not just + ## When true, SNMP responses are accepted from any address not just ## the requested address. This can be useful when gathering from ## redundant/failover systems. # unconnected_udp_socket = false diff --git a/plugins/inputs/snmp/sample.conf b/plugins/inputs/snmp/sample.conf index 62e0a9d93..0b30d41c7 100644 --- a/plugins/inputs/snmp/sample.conf +++ b/plugins/inputs/snmp/sample.conf @@ -17,7 +17,7 @@ # version = 2 ## Unconnected UDP socket - ## When true, SNMP reponses are accepted from any address not just + ## When true, SNMP responses are accepted from any address not just ## the requested address. This can be useful when gathering from ## redundant/failover systems. # unconnected_udp_socket = false diff --git a/plugins/inputs/socketstat/socketstat.go b/plugins/inputs/socketstat/socketstat.go index 5c7c5cb9b..400bd512d 100644 --- a/plugins/inputs/socketstat/socketstat.go +++ b/plugins/inputs/socketstat/socketstat.go @@ -132,7 +132,7 @@ func (ss *Socketstat) parseAndGather(acc telegraf.Accumulator, data *bytes.Buffe // formats depending on the protocol. tags, fields = getTagsAndState(proto, words, ss.Log) - // This line containted metrics, so record that. + // This line contained metrics, so record that. flushData = true } if flushData { diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index 41ac7f192..6b0369af7 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -203,7 +203,7 @@ to use them. ## This setting/metric is optional and is disabled by default. 
# health_metric = false - ## Possible queries accross different versions of the collectors + ## Possible queries across different versions of the collectors ## Queries enabled by default for specific Database Type ## database_type = AzureSQLDB by default collects the following queries diff --git a/plugins/inputs/sqlserver/sample.conf b/plugins/inputs/sqlserver/sample.conf index 16da591b0..315d59abd 100644 --- a/plugins/inputs/sqlserver/sample.conf +++ b/plugins/inputs/sqlserver/sample.conf @@ -71,7 +71,7 @@ ## This setting/metric is optional and is disabled by default. # health_metric = false - ## Possible queries accross different versions of the collectors + ## Possible queries across different versions of the collectors ## Queries enabled by default for specific Database Type ## database_type = AzureSQLDB by default collects the following queries diff --git a/plugins/inputs/system/system_test.go b/plugins/inputs/system/system_test.go index a94870e3f..22a2ab66d 100644 --- a/plugins/inputs/system/system_test.go +++ b/plugins/inputs/system/system_test.go @@ -21,7 +21,7 @@ func TestUniqueUsers(t *testing.T) { }, }, { - name: "emptry entry", + name: "empty entry", expected: 0, data: []host.UserStat{}, }, diff --git a/plugins/inputs/tacacs/README.md b/plugins/inputs/tacacs/README.md index c8276584c..d6a719d19 100644 --- a/plugins/inputs/tacacs/README.md +++ b/plugins/inputs/tacacs/README.md @@ -64,7 +64,7 @@ by the tacacs server, or filled by telegraf in case of a timeout. ### field `responsetime_ms` The field responsetime_ms is response time of the tacacs server -in miliseconds of the furthest achieved stage of auth. +in milliseconds of the furthest achieved stage of auth. In case of timeout, its filled by telegraf to be the value of the configured response_timeout. diff --git a/plugins/inputs/webhooks/artifactory/README.md b/plugins/inputs/webhooks/artifactory/README.md index 4dabfacac..a79001610 100644 --- a/plugins/inputs/webhooks/artifactory/README.md +++ b/plugins/inputs/webhooks/artifactory/README.md @@ -1,12 +1,20 @@ -# artifactory webhook +# Artifactory Webhook -You need to configure to orginizations artifactory instance/s as detailed via the artifactory webhook documentation: . Multiple webhooks may need be needed to configure different domains. +You need to configure the organization's Artifactory instance(s) as detailed +via the Artifactory [webhook documentation][webhook docs]. Multiple webhooks +may be needed to configure different domains. -You can also add a secret that will be used by telegraf to verify the authenticity of the requests. +You can also add a secret that will be used by telegraf to verify the +authenticity of the requests. + +[webhook docs]: https://www.jfrog.com/confluence/display/JFROG/Webhooks ## Events -The different events type can be found found in the webhook documentation: . Events are identified by their `domain` and `event`. The following sections break down each event by domain. +The different event types can be found in the webhook documentation: +. +Events are identified by their `domain` and `event`. +The following sections break down each event by domain. ### Artifact Domain @@ -84,7 +92,8 @@ The Webhook is triggered when an artifact is copied from a repository. #### Properties Added Event -The Webhook is triggered when a property is added to an artifact/folder in a repository, or the repository itself. +The Webhook is triggered when a property is added to an artifact/folder +in a repository, or the repository itself.
**Tags:** @@ -95,11 +104,12 @@ The Webhook is triggered when a property is added to an artifact/folder in a rep * 'name' string **Fields** * 'property_key' string -* 'property_values' string (joined comma seperated list) +* 'property_values' string (joined comma-separated list) #### Properties Deleted Event -The Webhook is triggered when a property is deleted from an artifact/folder in a repository, or the repository itself. +The Webhook is triggered when a property is deleted from an artifact/folder in a +repository, or the repository itself. **Tags:** @@ -112,13 +122,14 @@ The Webhook is triggered when a property is deleted from an artifact/folder in a **Fields:** * 'property_key' string -* 'property_values' string (joined comma seperated list) +* 'property_values' string (joined comma-separated list) ### Docker Domain #### Docker Pushed Event -The Webhook is triggered when a new tag of a Docker image is pushed to a Docker repository. +The Webhook is triggered when a new tag of a Docker image is pushed to a Docker +repository. **Tags:** @@ -135,12 +146,13 @@ The Webhook is triggered when a new tag of a Docker image is pushed to a Docker * 'sha256' string * 'tag' string * 'platforms' []object - * 'achitecture' string + * 'architecture' string * 'os' string #### Docker Deleted Event -The Webhook is triggered when a tag of a Docker image is deleted from a Docker repository. +The Webhook is triggered when a tag of a Docker image is deleted from a Docker +repository. **Tags:** @@ -157,7 +169,7 @@ The Webhook is triggered when a tag of a Docker image is deleted from a Docker r * 'sha256' string * 'tag' string * 'platforms' []object - * 'achitecture' string + * 'architecture' string * 'os' string #### Docker Promoted Event @@ -179,7 +191,7 @@ The Webhook is triggered when a tag of a Docker image is promoted. * 'sha256' string * 'tag' string * 'platforms' []object - * 'achitecture' string + * 'architecture' string * 'os' string ### Build Domain @@ -376,7 +388,8 @@ The Webhook is triggered when Release Bundle distribution has failed. #### Release Bundle Version Deletion Started EVent -The Webhook is triggered when a Release Bundle version deletion has started on one or more Edge nodes. +The Webhook is triggered when a Release Bundle version deletion has started on +one or more Edge nodes. **Tags:** @@ -398,7 +411,8 @@ The Webhook is triggered when a Release Bundle version deletion has started on o #### Release Bundle Version Deletion Completed Event -The Webhook is triggered when a Release Bundle version deletion has completed from one or more Edge nodes. +The Webhook is triggered when a Release Bundle version deletion has completed +from one or more Edge nodes. **Tags:** @@ -420,7 +434,8 @@ The Webhook is triggered when a Release Bundle version deletion has completed fr #### Release Bundle Version Deletion Failed Event -The Webhook is triggered when a Release Bundle version deletion has failed on one or more Edge nodes. +The Webhook is triggered when a Release Bundle version deletion has failed on +one or more Edge nodes. **Tags:** @@ -461,7 +476,8 @@ The Webhook is triggered when a Release Bundle was received on an Edge Node. ### Release Bundle Delete Started Event -The Webhook is triggered when a Release Bundle deletion from an Edge Node completed. +The Webhook is triggered when a Release Bundle deletion from an Edge Node +has started.
**Tags:** @@ -478,7 +494,8 @@ The Webhook is triggered when a Release Bundle deletion from an Edge Node comple #### Release Bundle Delete Completed Event -The Webhook is triggered when a Release Bundle deletion from an Edge Node completed. +The Webhook is triggered when a Release Bundle deletion from an Edge Node +completed. **Tags:** diff --git a/plugins/inputs/webhooks/artifactory/artifactory_webhook_models.go b/plugins/inputs/webhooks/artifactory/artifactory_webhook_models.go index 7186794f5..33b21be72 100644 --- a/plugins/inputs/webhooks/artifactory/artifactory_webhook_models.go +++ b/plugins/inputs/webhooks/artifactory/artifactory_webhook_models.go @@ -119,7 +119,7 @@ type DockerEvent struct { ImageName string `json:"image_name"` Tag string `json:"tag"` Platforms []struct { - Architecture string `json:"achitecture"` + Architecture string `json:"architecture"` Os string `json:"os"` } `json:"platforms"` } `json:"data"` diff --git a/plugins/inputs/zfs/README.md b/plugins/inputs/zfs/README.md index 28faca361..63e33985f 100644 --- a/plugins/inputs/zfs/README.md +++ b/plugins/inputs/zfs/README.md @@ -43,7 +43,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. By default this plugin collects metrics about ZFS internals pool and dataset. These metrics are either counters or measure sizes in bytes. These metrics will be in the `zfs` measurement with the field -names listed bellow. +names listed below. If `poolMetrics` is enabled then additional metrics will be gathered for each pool. @@ -52,7 +52,7 @@ If `datasetMetrics` is enabled then additional metrics will be gathered for each dataset. - zfs - With fields listed bellow. + With fields listed below. ### ARC Stats (FreeBSD and Linux) diff --git a/plugins/outputs/opensearch/opensearch.go b/plugins/outputs/opensearch/opensearch.go index c6f227d05..9d9047787 100644 --- a/plugins/outputs/opensearch/opensearch.go +++ b/plugins/outputs/opensearch/opensearch.go @@ -304,7 +304,7 @@ func (o *Opensearch) Write(metrics []telegraf.Metric) error { return nil } -// BulkIndexer supports pipeline at config level so seperate indexer instance for each unique pipeline +// BulkIndexer supports pipeline at config level so separate indexer instance for each unique pipeline func getTargetIndexers(metrics []telegraf.Metric, osInst *Opensearch) map[string]opensearchutil.BulkIndexer { var indexers = make(map[string]opensearchutil.BulkIndexer) diff --git a/plugins/parsers/avro/schema_registry.go b/plugins/parsers/avro/schema_registry.go index bbb467dd7..5ca53987d 100644 --- a/plugins/parsers/avro/schema_registry.go +++ b/plugins/parsers/avro/schema_registry.go @@ -99,12 +99,12 @@ func (sr *schemaRegistry) getSchemaAndCodec(id int) (*schemaAndCodec, error) { schema, ok := jsonResponse["schema"] if !ok { - return nil, fmt.Errorf("malformed respose from schema registry: no 'schema' key") + return nil, fmt.Errorf("malformed response from schema registry: no 'schema' key") } schemaValue, ok := schema.(string) if !ok { - return nil, fmt.Errorf("malformed respose from schema registry: %v cannot be cast to string", schema) + return nil, fmt.Errorf("malformed response from schema registry: %v cannot be cast to string", schema) } codec, err := goavro.NewCodec(schemaValue) if err != nil { diff --git a/plugins/parsers/csv/README.md b/plugins/parsers/csv/README.md index b6936010b..a9520fb0c 100644 --- a/plugins/parsers/csv/README.md +++ b/plugins/parsers/csv/README.md @@ -45,7 +45,7 @@ values. ## A list of metadata separators. 
If csv_metadata_rows is set, ## csv_metadata_separators must contain at least one separator. - ## Please note that separators are case sensitive and the sequence of the seperators are respected. + ## Please note that separators are case sensitive and the sequence of the separators is respected. csv_metadata_separators = [":", "="] ## A set of metadata trim characters. diff --git a/plugins/parsers/json_v2/README.md b/plugins/parsers/json_v2/README.md index f7d243cf4..8fefdc788 100644 --- a/plugins/parsers/json_v2/README.md +++ b/plugins/parsers/json_v2/README.md @@ -180,7 +180,7 @@ such as `America/New_York`, to `Local` to utilize the system timezone, or to `UT *Configuration to modify the resutling line protocol:* * **disable_prepend_keys (OPTIONAL)**: Set to true to prevent resulting nested data to contain the parent key prepended to its key **NOTE**: duplicate names can overwrite each other when this is enabled -* **renames (OPTIONAL, defined in TOML as a table using single bracket)**: A table matching the json key with the desired name (oppossed to defaulting to using the key), use names that include the prepended keys of its parent keys for nested results +* **renames (OPTIONAL, defined in TOML as a table using single bracket)**: A table matching the json key with the desired name (as opposed to defaulting to using the key), use names that include the prepended keys of its parent keys for nested results * **fields (OPTIONAL, defined in TOML as a table using single bracket)**: A table matching the json key with the desired type (int,string,bool,float), if you define a key that is an array or object then all nested values will become that type ## Arrays and Objects @@ -194,7 +194,7 @@ The following describes the high-level approach when parsing arrays and objects: When handling nested arrays and objects, these above rules continue to apply as the parser creates line protocol. When an object has multiple array's as values, the array's will become separate line protocol containing only non-array values -from the obejct. Below you can see an example of this behavior, with an input +from the object. Below you can see an example of this behavior, with an input json containing an array of book objects that has a nested array of characters. Example JSON: diff --git a/plugins/parsers/json_v2/parser.go b/plugins/parsers/json_v2/parser.go index 84334d2e4..1b339a7a0 100644 --- a/plugins/parsers/json_v2/parser.go +++ b/plugins/parsers/json_v2/parser.go @@ -24,7 +24,7 @@ type Parser struct { DefaultTags map[string]string `toml:"-"` Log telegraf.Logger `toml:"-"` - // **** The struct fields bellow this comment are used for processing individual configs **** + // **** The struct fields below this comment are used for processing individual configs **** // measurementName is the name of the current config used in each line protocol measurementName string diff --git a/plugins/processors/scale/scale_test.go b/plugins/processors/scale/scale_test.go index a003c39dd..4683591bb 100644 --- a/plugins/processors/scale/scale_test.go +++ b/plugins/processors/scale/scale_test.go @@ -149,7 +149,7 @@ func TestMinMax(t *testing.T) { }, }, { - name: "Missing field Fileds", + name: "Missing field Fields", scale: []scalingValuesMinMax{ { InMin: -1, diff --git a/plugins/processors/starlark/README.md b/plugins/processors/starlark/README.md index c7c361424..dcd04b476 100644 --- a/plugins/processors/starlark/README.md +++ b/plugins/processors/starlark/README.md @@ -193,7 +193,7 @@ with an error.
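The `TestMinMax` hunk above exercises the scale processor's min-max mode; min-max rescaling linearly maps a value from an input range onto an output range. A minimal sketch under that assumption, not part of the patch (the free-standing `scaleMinMax` helper is illustrative and not the processor's actual API):

```go
package main

import "fmt"

// scaleMinMax linearly maps v from the input range [inMin, inMax]
// onto the output range [outMin, outMax].
func scaleMinMax(v, inMin, inMax, outMin, outMax float64) float64 {
	return (v-inMin)/(inMax-inMin)*(outMax-outMin) + outMin
}

func main() {
	// With an input range of [-1, 1] (InMin: -1 as in the test case above)
	// and an output range of [0, 100], the midpoint 0 maps to 50.
	fmt.Println(scaleMinMax(0, -1, 1, 0, 100))
}
```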
In case you need to call some code that may return an error, you can delegate the call to the built-in function `catch` which takes as argument a `Callable` -and returns the error that occured if any, `None` otherwise. +and returns the error that occurred if any, `None` otherwise. So for example: diff --git a/plugins/processors/starlark/starlark_test.go b/plugins/processors/starlark/starlark_test.go index 310da2d6e..049519e2a 100644 --- a/plugins/processors/starlark/starlark_test.go +++ b/plugins/processors/starlark/starlark_test.go @@ -3436,7 +3436,7 @@ def apply(metric): mu.Lock() defer mu.Unlock() return len(delivered) == 1 - }, 1*time.Second, 100*time.Millisecond, "orignal metric not delivered") + }, 1*time.Second, 100*time.Millisecond, "original metric not delivered") }) } } diff --git a/plugins/secretstores/docker/README.md b/plugins/secretstores/docker/README.md index 8afce0cb6..7ec236dce 100644 --- a/plugins/secretstores/docker/README.md +++ b/plugins/secretstores/docker/README.md @@ -82,7 +82,7 @@ Referencing the secret within a plugin occurs by: password = "@{docker_secretstore:secret_for_plugin}" ``` -## Additonal Information +## Additional Information [Docker Secrets in Swarm][2] diff --git a/scripts/windows-gen-syso.sh b/scripts/windows-gen-syso.sh index fddb900a7..5b9d4535f 100755 --- a/scripts/windows-gen-syso.sh +++ b/scripts/windows-gen-syso.sh @@ -6,7 +6,7 @@ NAME="Telegraf" VERSION=$(cd ../../ && make version) FLAGS=() -# If building for arm64, then incude the extra flags required. +# If building for arm64, then include the extra flags required. if [ -n "${1+x}" ] && [ "$1" = "arm64" ]; then FLAGS=(-arm -64) fi