docs: Fix typos (#14359)

Co-authored-by: Josh Powers <powersj@fastmail.com>
Author: Andreas Deininger, 2023-11-29 14:47:39 +01:00 (committed by GitHub)
parent 827f1cc249
commit 6814d7af8a
45 changed files with 97 additions and 80 deletions


@@ -621,7 +621,7 @@ func TestConfig_SerializerInterfaceNewFormat(t *testing.T) {
 options = append(options, cmpopts.IgnoreFields(stype, settings.mask...))
 }
-// Do a manual comparision as require.EqualValues will also work on unexported fields
+// Do a manual comparison as require.EqualValues will also work on unexported fields
 // that cannot be cleared or ignored.
 diff := cmp.Diff(expected[i], actual[i], options...)
 require.Emptyf(t, diff, "Difference in SetSerializer() for %q", format)
@@ -820,7 +820,7 @@ func TestConfig_ParserInterface(t *testing.T) {
 options = append(options, cmpopts.IgnoreFields(stype, settings.mask...))
 }
-// Do a manual comparision as require.EqualValues will also work on unexported fields
+// Do a manual comparison as require.EqualValues will also work on unexported fields
 // that cannot be cleared or ignored.
 diff := cmp.Diff(expected[i], actual[i], options...)
 require.Emptyf(t, diff, "Difference in SetParser() for %q", format)
@@ -1039,7 +1039,7 @@ func TestConfig_ProcessorsWithParsers(t *testing.T) {
 options = append(options, cmpopts.IgnoreFields(stype, settings.mask...))
 }
-// Do a manual comparision as require.EqualValues will also work on unexported fields
+// Do a manual comparison as require.EqualValues will also work on unexported fields
 // that cannot be cleared or ignored.
 diff := cmp.Diff(expected[i], actual[i], options...)
 require.Emptyf(t, diff, "Difference in SetParser() for %q", format)
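For readers unfamiliar with the go-cmp pattern these hunks touch, here is a minimal, self-contained sketch of comparing structs while skipping unexported fields that cannot be cleared or ignored; the `serializer` type and its fields are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

// serializer stands in for the plugin structs compared in the tests above.
type serializer struct {
	TimestampUnits string
	internal       int // unexported; cmp.Diff panics on it without an option
}

func main() {
	expected := serializer{TimestampUnits: "1s"}
	actual := serializer{TimestampUnits: "1s", internal: 42}

	// Ignore the unexported fields instead of trying to clear them,
	// mirroring the manual comparison in the tests.
	opts := []cmp.Option{cmpopts.IgnoreUnexported(serializer{})}
	diff := cmp.Diff(expected, actual, opts...)
	fmt.Printf("diff: %q\n", diff) // empty string means the values match
}
```

An empty diff string is exactly what `require.Emptyf` asserts on in the tests.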


@@ -297,7 +297,7 @@ func walkPluginStruct(value reflect.Value, fn func(f reflect.StructField, fv ref
 }
 // Walk over the struct fields and call the given function. If we encounter more complex embedded
-// elements (stucts, slices/arrays, maps) we need to descend into those elements as they might
+// elements (structs, slices/arrays, maps) we need to descend into those elements as they might
 // contain structures nested in the current structure.
 for i := 0; i < t.NumField(); i++ {
 field := t.Field(i)
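As a rough illustration of the descent this comment describes, the following sketch (not Telegraf's actual implementation, which also handles slices, arrays, and maps) walks a struct recursively with `reflect`:

```go
package main

import (
	"fmt"
	"reflect"
)

// walkStruct calls fn for every field and descends into embedded structs,
// since nested structures may contain further fields of interest.
func walkStruct(v reflect.Value, fn func(f reflect.StructField, fv reflect.Value)) {
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		field, value := t.Field(i), v.Field(i)
		fn(field, value)
		if value.Kind() == reflect.Struct {
			walkStruct(value, fn)
		}
	}
}

type conn struct{ Port int }

type plugin struct {
	Name string
	Conn conn
}

func main() {
	walkStruct(reflect.ValueOf(plugin{Name: "demo", Conn: conn{Port: 8080}}),
		func(f reflect.StructField, fv reflect.Value) {
			fmt.Println(f.Name, "=", fv.Interface())
		})
}
```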


@@ -72,7 +72,7 @@ func NewClient(cfg *MqttConfig) (Client, error) {
 case "5":
 return NewMQTTv5Client(cfg)
 }
-return nil, fmt.Errorf("unsuported protocol %q: must be \"3.1.1\" or \"5\"", cfg.Protocol)
+return nil, fmt.Errorf("unsupported protocol %q: must be \"3.1.1\" or \"5\"", cfg.Protocol)
 }
 func parseServers(servers []string) ([]*url.URL, error) {


@@ -74,7 +74,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 # resource target #1 to collect metrics from
 [[inputs.azure_monitor.resource_target]]
-# can be found undet Overview->Essentials->JSON View in the Azure portal for your application/service
+# can be found under Overview->Essentials->JSON View in the Azure portal for your application/service
 # must start with 'resourceGroups/...' ('/subscriptions/xxxxxxxx-xxxx-xxxx-xxx-xxxxxxxxxxxx'
 # must be removed from the beginning of Resource ID property value)
 resource_id = "<<RESOURCE_ID>>"


@@ -11,7 +11,7 @@
 # resource target #1 to collect metrics from
 [[inputs.azure_monitor.resource_target]]
-# can be found undet Overview->Essentials->JSON View in the Azure portal for your application/service
+# can be found under Overview->Essentials->JSON View in the Azure portal for your application/service
 # must start with 'resourceGroups/...' ('/subscriptions/xxxxxxxx-xxxx-xxxx-xxx-xxxxxxxxxxxx'
 # must be removed from the beginning of Resource ID property value)
 resource_id = "<<RESOURCE_ID>>"


@@ -1,7 +1,7 @@
 # ctrlX Data Layer Input Plugin
 The `ctrlx_datalayer` plugin gathers data from the ctrlX Data Layer,
-a communication middleware runnning on
+a communication middleware running on
 [ctrlX CORE devices](https://ctrlx-core.com) from
 [Bosch Rexroth](https://boschrexroth.com). The platform is used for
 professional automation applications like industrial automation, building


@@ -60,7 +60,7 @@ type CtrlXDataLayer struct {
 // convertTimestamp2UnixTime converts the given Data Layer timestamp of the payload to UnixTime.
 func convertTimestamp2UnixTime(t int64) time.Time {
-// 1 sec=1000 milisec=1000000 microsec=1000000000 nanosec.
+// 1 sec=1000 millisec=1000000 microsec=1000000000 nanosec.
 // Convert from FILETIME (100-nanosecond intervals since January 1, 1601 UTC) to
 // seconds and nanoseconds since January 1, 1970 UTC.
 // Between Jan 1, 1601 and Jan 1, 1970 there are 11644473600 seconds.
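The arithmetic in that comment is easy to make concrete. A hedged sketch of the conversion (the function name is invented; the plugin's real helper may differ in details):

```go
package main

import (
	"fmt"
	"time"
)

// filetimeToUnix converts a FILETIME-style value (100-nanosecond intervals
// since January 1, 1601 UTC) into a time.Time by subtracting the
// 11644473600-second offset between the 1601 and 1970 epochs.
func filetimeToUnix(t int64) time.Time {
	const epochOffsetSeconds = 11644473600
	seconds := t/10_000_000 - epochOffsetSeconds // 1e7 100ns intervals per second
	nanos := (t % 10_000_000) * 100              // remainder, in nanoseconds
	return time.Unix(seconds, nanos).UTC()
}

func main() {
	// 116444736000000000 is exactly the Unix epoch expressed in FILETIME units.
	fmt.Println(filetimeToUnix(116444736000000000)) // 1970-01-01 00:00:00 +0000 UTC
}
```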


@@ -1,6 +1,6 @@
 # DNS Query Input Plugin
-The DNS plugin gathers dns query times in miliseconds - like
+The DNS plugin gathers dns query times in milliseconds - like
 [Dig](https://en.wikipedia.org/wiki/Dig_\(command\))
 ## Global configuration options <!-- @/docs/includes/plugin_config.md -->


@@ -55,7 +55,7 @@ func (m *Example) Init() error {
 }
 // Set your defaults.
-// Please note: In golang all fields are initialzed to their nil value, so you should not
+// Please note: In golang all fields are initialized to their nil value, so you should not
 // set these fields if the nil value is what you want (e.g. for booleans).
 if m.NumberFields < 1 {
 m.Log.Debugf("Setting number of fields to default from invalid value %d", m.NumberFields)
@@ -75,7 +75,7 @@ func (m *Example) Init() error {
 }
 defer password.Destroy()
-// Initialze your internal states
+// Initialize your internal states
 m.count = 1
 return nil
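The convention in that hunk, overriding a field only when its zero value is invalid, can be sketched like this (the `Example` struct and the default of 2 are placeholders, not the plugin's real values):

```go
package main

import "fmt"

// Example mimics a plugin struct whose fields start at their zero values.
type Example struct {
	NumberFields int32
	count        int64 // internal state
}

// Init sets defaults only where the zero value is not acceptable, so an
// intentional zero (e.g. a false boolean) is never clobbered.
func (m *Example) Init() error {
	if m.NumberFields < 1 {
		m.NumberFields = 2 // placeholder default
	}
	// Initialize internal state.
	m.count = 1
	return nil
}

func main() {
	m := &Example{}
	if err := m.Init(); err != nil {
		panic(err)
	}
	fmt.Println(m.NumberFields, m.count) // 2 1
}
```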


@@ -20,7 +20,7 @@ func TestInitDefault(t *testing.T) {
 // This test should succeed with the default initialization.
 // Use whatever you use in the init() function plus the mandatory options.
-// ATTENTION: Always initialze the "Log" as you will get SIGSEGV otherwise.
+// ATTENTION: Always initialize the "Log" as you will get SIGSEGV otherwise.
 plugin := &Example{
 DeviceName: "test",
 Timeout: config.Duration(100 * time.Millisecond),
@@ -42,7 +42,7 @@ func TestInitFail(t *testing.T) {
 // and check if you reach them
 // We setup a table-test here to specify "setting" - "expected error" values.
-// Eventhough it seems overkill here for the example plugin, we reuse this structure
+// Even though it seems overkill here for the example plugin, we reuse this structure
 // later for checking the metrics
 tests := []struct {
 name string
@@ -58,7 +58,7 @@ func TestInitFail(t *testing.T) {
 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
-// Always initialze the logger to avoid SIGSEGV. This is done automatically by
+// Always initialize the logger to avoid SIGSEGV. This is done automatically by
 // telegraf during normal operation.
 tt.plugin.Log = testutil.Logger{}
 err := tt.plugin.Init()
@@ -225,8 +225,8 @@ func TestFixedValue(t *testing.T) {
 acc.Wait(len(tt.expected))
 // Compare the metrics in a convenient way. Here we ignore
-// the metric time during comparision as we cannot inject the time
-// during test. For more comparision options check testutil package.
+// the metric time during comparison as we cannot inject the time
+// during test. For more comparison options check testutil package.
 testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
 })
 }
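The comparison idiom in the last hunk can be shown end to end. This is a hedged sketch against Telegraf's `testutil` helpers as they appear in the hunk; the measurement name, tags, and fields are invented:

```go
package example_test

import (
	"testing"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/testutil"
)

func TestCompareIgnoringTime(t *testing.T) {
	expected := []telegraf.Metric{
		testutil.MustMetric("example",
			map[string]string{"device": "test"},
			map[string]interface{}{"count": int64(1)},
			time.Unix(0, 0), // placeholder; IgnoreTime() makes it irrelevant
		),
	}

	var acc testutil.Accumulator
	acc.AddFields("example",
		map[string]interface{}{"count": int64(1)},
		map[string]string{"device": "test"})

	// Ignore the metric time during comparison since the gather time
	// cannot be injected in the test.
	testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(),
		testutil.IgnoreTime())
}
```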


@@ -53,7 +53,7 @@ values are included if they are less than a minute old.
 - fireboard
 - tags:
 - channel
-- scale (Celcius; Farenheit)
+- scale (Celsius; Fahrenheit)
 - title (name of the Fireboard)
 - uuid (UUID of the Fireboard)
 - fields:
@@ -66,5 +66,5 @@ This section shows example output in Line Protocol format. You can often use
 this information.
 ```text
-fireboard,channel=2,host=patas-mbp,scale=Farenheit,title=telegraf-FireBoard,uuid=b55e766c-b308-49b5-93a4-df89fe31efd0 temperature=78.2 1561690040000000000
+fireboard,channel=2,host=patas-mbp,scale=Fahrenheit,title=telegraf-FireBoard,uuid=b55e766c-b308-49b5-93a4-df89fe31efd0 temperature=78.2 1561690040000000000
 ```


@@ -88,7 +88,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 ## * Juniper Header Extension (juniper_header): some sensors are directly managed by
 ## Linecard, which adds the Juniper GNMI Header Extension. Enabling this
 ## allows the decoding of the Extension header if present. Currently this knob
-## adds component, component_id & sub_component_id as additionnal tags
+## adds component, component_id & sub_component_id as additional tags
 # vendor_specific = []
 ## Define additional aliases to map encoding paths to measurement names


@@ -49,7 +49,7 @@
 ## * Juniper Header Extension (juniper_header): some sensors are directly managed by
 ## Linecard, which adds the Juniper GNMI Header Extension. Enabling this
 ## allows the decoding of the Extension header if present. Currently this knob
-## adds component, component_id & sub_component_id as additionnal tags
+## adds component, component_id & sub_component_id as additional tags
 # vendor_specific = []
 ## Define additional aliases to map encoding paths to measurement names


@@ -184,7 +184,7 @@ func (m *msrServiceImpl) readSingleMsr(core string, msr string) (uint64, error)
 case msrFSBFreqString:
 msrAddress = fsbFreq
 default:
-return 0, fmt.Errorf("incorect name of MSR %s", msr)
+return 0, fmt.Errorf("incorrect name of MSR %s", msr)
 }
 value, err := m.fs.readFileAtOffsetToUint64(msrFile, msrAddress)


@@ -66,9 +66,9 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 ## Empty this field will use default value 5
 # max_connections = 5
-## When set to true will add node labels as a comma-seperated tag. If none,
+## When set to true will add node labels as a comma-separated tag. If none,
 ## are found, then a tag with the value of 'none' is used. Finally, if a
-## lable contains a comma it is replaced with an underscore.
+## label contains a comma it is replaced with an underscore.
 # node_labels_as_tag = false
 ```


@@ -46,7 +46,7 @@
 ## Empty this field will use default value 5
 # max_connections = 5
-## When set to true will add node labels as a comma-seperated tag. If none,
+## When set to true will add node labels as a comma-separated tag. If none,
 ## are found, then a tag with the value of 'none' is used. Finally, if a
-## lable contains a comma it is replaced with an underscore.
+## label contains a comma it is replaced with an underscore.
 # node_labels_as_tag = false


@@ -39,7 +39,7 @@ processes 86031
 Number of forks since boot.
 ```
-Kernel Samepage Merging is generally documented in [kernel documenation][1] and
+Kernel Samepage Merging is generally documented in [kernel documentation][1] and
 the available metrics exposed via sysfs are documented in [admin guide][2]
 [1]: https://www.kernel.org/doc/html/latest/mm/ksm.html


@@ -275,7 +275,7 @@ func groupFieldsToRequests(fields []field, params groupingParams) []request {
 }
 requests = optimizeGroup(total, params.MaxBatchSize)
 case "max_insert":
-// Similar to aggressive but keeps the number of touched registers bellow a threshold
+// Similar to aggressive but keeps the number of touched registers below a threshold
 var total request
 for _, g := range groups {
 if len(g.fields) > 0 {


@@ -45,7 +45,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 ## NFSv3 and NFSv4 have different lists. While it is not possible to
 ## have different include/exclude lists for NFSv3/4, unused elements
 ## in the list should be okay. It is possible to have different lists
-## for different mountpoints: use mulitple [[input.nfsclient]] stanzas,
+## for different mountpoints: use multiple [[input.nfsclient]] stanzas,
 ## with their own lists. See "include_mounts" above, and be careful of
 ## duplicate metrics.
 # include_operations = []
@@ -89,9 +89,9 @@ MOUNT_PROC: /host/proc/self/mountstats
 - bytes (integer, bytes) - The total number of bytes exchanged doing this operation. This is bytes sent _and_ received, including overhead _and_ payload. (bytes = OP_bytes_sent + OP_bytes_recv. See nfs_ops below)
 - ops (integer, count) - The number of operations of this type executed.
 - retrans (integer, count) - The number of times an operation had to be retried (retrans = OP_trans - OP_ops. See nfs_ops below)
-- exe (integer, miliseconds) - The number of miliseconds it took to process the operations.
-- rtt (integer, miliseconds) - The total round-trip time for all operations.
-- rtt_per_op (float, miliseconds) - The average round-trip time per operation.
+- exe (integer, milliseconds) - The number of milliseconds it took to process the operations.
+- rtt (integer, milliseconds) - The total round-trip time for all operations.
+- rtt_per_op (float, milliseconds) - The average round-trip time per operation.
 In addition enabling `fullstat` will make many more metrics available.


@@ -20,7 +20,7 @@
 ## NFSv3 and NFSv4 have different lists. While it is not possible to
 ## have different include/exclude lists for NFSv3/4, unused elements
 ## in the list should be okay. It is possible to have different lists
-## for different mountpoints: use mulitple [[input.nfsclient]] stanzas,
+## for different mountpoints: use multiple [[input.nfsclient]] stanzas,
 ## with their own lists. See "include_mounts" above, and be careful of
 ## duplicate metrics.
 # include_operations = []


@@ -33,7 +33,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 # Derive metrics from aggregating OpenSearch query results
 [[inputs.opensearch_query]]
 ## OpenSearch cluster endpoint(s). Multiple urls can be specified as part
-## of the same cluster. Only one succesful call will be made per interval.
+## of the same cluster. Only one successful call will be made per interval.
 urls = [ "https://node1.os.example.com:9200" ] # required.
 ## OpenSearch client timeout, defaults to "5s".


@@ -1,7 +1,7 @@
 # Derive metrics from aggregating OpenSearch query results
 [[inputs.opensearch_query]]
 ## OpenSearch cluster endpoint(s). Multiple urls can be specified as part
-## of the same cluster. Only one succesful call will be made per interval.
+## of the same cluster. Only one successful call will be made per interval.
 urls = [ "https://node1.os.example.com:9200" ] # required.
 ## OpenSearch client timeout, defaults to "5s".


@@ -57,7 +57,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 # query_style = "batch"
 ## Query interval to fetch data.
-## By default the gloabl 'interval' setting is used. You should override the
+## By default the global 'interval' setting is used. You should override the
 ## interval here if the global setting is shorter than 10 minutes as
 ## OpenWeatherMap weather data is only updated every 10 minutes.
 # interval = "10m"


@@ -33,7 +33,7 @@
 # query_style = "batch"
 ## Query interval to fetch data.
-## By default the gloabl 'interval' setting is used. You should override the
+## By default the global 'interval' setting is used. You should override the
 ## interval here if the global setting is shorter than 10 minutes as
 ## OpenWeatherMap weather data is only updated every 10 minutes.
 # interval = "10m"


@@ -94,13 +94,13 @@ func (p *Prometheus) startConsul(ctx context.Context) error {
 p.wg.Add(1)
 go func() {
-// Store last error status and change log level depending on repeated occurence
+// Store last error status and change log level depending on repeated occurrence
 var refreshFailed = false
 defer p.wg.Done()
 err := p.refreshConsulServices(catalog)
 if err != nil {
 refreshFailed = true
-p.Log.Errorf("Unable to refreh Consul services: %v", err)
+p.Log.Errorf("Unable to refresh Consul services: %v", err)
 }
 for {
 select {
@@ -109,7 +109,7 @@ func (p *Prometheus) startConsul(ctx context.Context) error {
 case <-time.After(time.Duration(p.ConsulConfig.QueryInterval)):
 err := p.refreshConsulServices(catalog)
 if err != nil {
-message := fmt.Sprintf("Unable to refreh Consul services: %v", err)
+message := fmt.Sprintf("Unable to refresh Consul services: %v", err)
 if refreshFailed {
 p.Log.Debug(message)
 } else {


@@ -54,7 +54,7 @@ details.
 # version = 2
 ## Unconnected UDP socket
-## When true, SNMP reponses are accepted from any address not just
+## When true, SNMP responses are accepted from any address not just
 ## the requested address. This can be useful when gathering from
 ## redundant/failover systems.
 # unconnected_udp_socket = false


@@ -17,7 +17,7 @@
 # version = 2
 ## Unconnected UDP socket
-## When true, SNMP reponses are accepted from any address not just
+## When true, SNMP responses are accepted from any address not just
 ## the requested address. This can be useful when gathering from
 ## redundant/failover systems.
 # unconnected_udp_socket = false


@@ -132,7 +132,7 @@ func (ss *Socketstat) parseAndGather(acc telegraf.Accumulator, data *bytes.Buffe
 // formats depending on the protocol.
 tags, fields = getTagsAndState(proto, words, ss.Log)
-// This line containted metrics, so record that.
+// This line contained metrics, so record that.
 flushData = true
 }
 if flushData {


@@ -203,7 +203,7 @@ to use them.
 ## This setting/metric is optional and is disabled by default.
 # health_metric = false
-## Possible queries accross different versions of the collectors
+## Possible queries across different versions of the collectors
 ## Queries enabled by default for specific Database Type
 ## database_type = AzureSQLDB by default collects the following queries


@@ -71,7 +71,7 @@
 ## This setting/metric is optional and is disabled by default.
 # health_metric = false
-## Possible queries accross different versions of the collectors
+## Possible queries across different versions of the collectors
 ## Queries enabled by default for specific Database Type
 ## database_type = AzureSQLDB by default collects the following queries


@@ -21,7 +21,7 @@ func TestUniqueUsers(t *testing.T) {
 },
 },
 {
-name: "emptry entry",
+name: "empty entry",
 expected: 0,
 data: []host.UserStat{},
 },


@@ -64,7 +64,7 @@ by the tacacs server, or filled by telegraf in case of a timeout.
 ### field `responsetime_ms`
 The field responsetime_ms is response time of the tacacs server
-in miliseconds of the furthest achieved stage of auth.
+in milliseconds of the furthest achieved stage of auth.
 In case of timeout, its filled by telegraf to be the value of
 the configured response_timeout.


@@ -1,12 +1,20 @@
-# artifactory webhook
+# Artifactory Webhook
-You need to configure to orginizations artifactory instance/s as detailed via the artifactory webhook documentation: <https://www.jfrog.com/confluence/display/JFROG/Webhooks>. Multiple webhooks may need be needed to configure different domains.
+You need to configure the organization's artifactory instance(s) as detailed
+via the artifactory [webhook documentation][webhook docs]. Multiple webhooks may
+need be needed to configure different domains.
-You can also add a secret that will be used by telegraf to verify the authenticity of the requests.
+You can also add a secret that will be used by telegraf to verify the
+authenticity of the requests.
+[webhook docs]: https://www.jfrog.com/confluence/display/JFROG/Webhooks
 ## Events
-The different events type can be found found in the webhook documentation: <https://www.jfrog.com/confluence/display/JFROG/Webhooks>. Events are identified by their `domain` and `event`. The following sections break down each event by domain.
+The different events type can be found found in the webhook documentation:
+<https://www.jfrog.com/confluence/display/JFROG/Webhooks>.
+Events are identified by their `domain` and `event`.
+The following sections break down each event by domain.
 ### Artifact Domain
@@ -84,7 +92,8 @@ The Webhook is triggered when an artifact is copied from a repository.
 #### Properties Added Event
-The Webhook is triggered when a property is added to an artifact/folder in a repository, or the repository itself.
+The Webhook is triggered when a property is added to an artifact/folder
+in a repository, or the repository itself.
 **Tags:**
@@ -95,11 +104,12 @@ The Webhook is triggered when a property is added to an artifact/folder in a rep
 * 'name' string
 **Fields**
 * 'property_key' string
-* 'property_values' string (joined comma seperated list)
+* 'property_values' string (joined comma separated list)
 #### Properties Deleted Event
-The Webhook is triggered when a property is deleted from an artifact/folder in a repository, or the repository itself.
+The Webhook is triggered when a property is deleted from an artifact/folder in a
+repository, or the repository itself.
 **Tags:**
@@ -112,13 +122,14 @@ The Webhook is triggered when a property is deleted from an artifact/folder in a
 **Fields:**
 * 'property_key' string
-* 'property_values' string (joined comma seperated list)
+* 'property_values' string (joined comma separated list)
 ### Docker Domain
 #### Docker Pushed Event
-The Webhook is triggered when a new tag of a Docker image is pushed to a Docker repository.
+The Webhook is triggered when a new tag of a Docker image is pushed to a Docker
+repository.
 **Tags:**
@@ -135,12 +146,13 @@ The Webhook is triggered when a new tag of a Docker image is pushed to a Docker
 * 'sha256' string
 * 'tag' string
 * 'platforms' []object
-* 'achitecture' string
+* 'architecture' string
 * 'os' string
 #### Docker Deleted Event
-The Webhook is triggered when a tag of a Docker image is deleted from a Docker repository.
+The Webhook is triggered when a tag of a Docker image is deleted from a Docker
+repository.
 **Tags:**
@@ -157,7 +169,7 @@ The Webhook is triggered when a tag of a Docker image is deleted from a Docker r
 * 'sha256' string
 * 'tag' string
 * 'platforms' []object
-* 'achitecture' string
+* 'architecture' string
 * 'os' string
 #### Docker Promoted Event
@@ -179,7 +191,7 @@ The Webhook is triggered when a tag of a Docker image is promoted.
 * 'sha256' string
 * 'tag' string
 * 'platforms' []object
-* 'achitecture' string
+* 'architecture' string
 * 'os' string
 ### Build Domain
@@ -376,7 +388,8 @@ The Webhook is triggered when Release Bundle distribution has failed.
 #### Release Bundle Version Deletion Started EVent
-The Webhook is triggered when a Release Bundle version deletion has started on one or more Edge nodes.
+The Webhook is triggered when a Release Bundle version deletion has started on
+one or more Edge nodes.
 **Tags:**
@@ -398,7 +411,8 @@ The Webhook is triggered when a Release Bundle version deletion has started on o
 #### Release Bundle Version Deletion Completed Event
-The Webhook is triggered when a Release Bundle version deletion has completed from one or more Edge nodes.
+The Webhook is triggered when a Release Bundle version deletion has completed
+from one or more Edge nodes.
 **Tags:**
@@ -420,7 +434,8 @@ The Webhook is triggered when a Release Bundle version deletion has completed fr
 #### Release Bundle Version Deletion Failed Event
-The Webhook is triggered when a Release Bundle version deletion has failed on one or more Edge nodes.
+The Webhook is triggered when a Release Bundle version deletion has failed on
+one or more Edge nodes.
 **Tags:**
@@ -461,7 +476,8 @@ The Webhook is triggered when a Release Bundle was received on an Edge Node.
 ### Release Bundle Delete Started Event
-The Webhook is triggered when a Release Bundle deletion from an Edge Node completed.
+The Webhook is triggered when a Release Bundle deletion from an Edge Node
+completed.
 **Tags:**
@@ -478,7 +494,8 @@ The Webhook is triggered when a Release Bundle deletion from an Edge Node comple
 #### Release Bundle Delete Completed Event
-The Webhook is triggered when a Release Bundle deletion from an Edge Node completed.
+The Webhook is triggered when a Release Bundle deletion from an Edge Node
+completed.
 **Tags:**


@@ -119,7 +119,7 @@ type DockerEvent struct {
 ImageName string `json:"image_name"`
 Tag string `json:"tag"`
 Platforms []struct {
-Architecture string `json:"achitecture"`
+Architecture string `json:"architecture"`
 Os string `json:"os"`
 } `json:"platforms"`
 } `json:"data"`


@@ -43,7 +43,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 By default this plugin collects metrics about ZFS internals pool and dataset.
 These metrics are either counters or measure sizes
 in bytes. These metrics will be in the `zfs` measurement with the field
-names listed bellow.
+names listed below.
 If `poolMetrics` is enabled then additional metrics will be gathered for
 each pool.
@@ -52,7 +52,7 @@ If `datasetMetrics` is enabled then additional metrics will be gathered for
 each dataset.
 - zfs
-With fields listed bellow.
+With fields listed below.
 ### ARC Stats (FreeBSD and Linux)


@@ -304,7 +304,7 @@ func (o *Opensearch) Write(metrics []telegraf.Metric) error {
 return nil
 }
-// BulkIndexer supports pipeline at config level so seperate indexer instance for each unique pipeline
+// BulkIndexer supports pipeline at config level so separate indexer instance for each unique pipeline
 func getTargetIndexers(metrics []telegraf.Metric, osInst *Opensearch) map[string]opensearchutil.BulkIndexer {
 var indexers = make(map[string]opensearchutil.BulkIndexer)


@@ -99,12 +99,12 @@ func (sr *schemaRegistry) getSchemaAndCodec(id int) (*schemaAndCodec, error) {
 schema, ok := jsonResponse["schema"]
 if !ok {
-return nil, fmt.Errorf("malformed respose from schema registry: no 'schema' key")
+return nil, fmt.Errorf("malformed response from schema registry: no 'schema' key")
 }
 schemaValue, ok := schema.(string)
 if !ok {
-return nil, fmt.Errorf("malformed respose from schema registry: %v cannot be cast to string", schema)
+return nil, fmt.Errorf("malformed response from schema registry: %v cannot be cast to string", schema)
 }
 codec, err := goavro.NewCodec(schemaValue)
 if err != nil {
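The lookup-and-assert pattern in this hunk generalizes to any loosely typed JSON response. A minimal standalone sketch (the function name is invented):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// extractSchema pulls the "schema" value out of a schema-registry-style
// response, returning distinct errors for a missing key and a wrong type.
func extractSchema(body []byte) (string, error) {
	var resp map[string]interface{}
	if err := json.Unmarshal(body, &resp); err != nil {
		return "", err
	}
	schema, ok := resp["schema"]
	if !ok {
		return "", fmt.Errorf("malformed response from schema registry: no 'schema' key")
	}
	schemaValue, ok := schema.(string)
	if !ok {
		return "", fmt.Errorf("malformed response from schema registry: %v cannot be cast to string", schema)
	}
	return schemaValue, nil
}

func main() {
	s, err := extractSchema([]byte(`{"schema": "{\"type\":\"string\"}"}`))
	fmt.Println(s, err)
}
```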


@@ -45,7 +45,7 @@ values.
 ## A list of metadata separators. If csv_metadata_rows is set,
 ## csv_metadata_separators must contain at least one separator.
-## Please note that separators are case sensitive and the sequence of the seperators are respected.
+## Please note that separators are case sensitive and the sequence of the separators are respected.
 csv_metadata_separators = [":", "="]
 ## A set of metadata trim characters.


@@ -180,7 +180,7 @@ such as `America/New_York`, to `Local` to utilize the system timezone, or to `UT
 *Configuration to modify the resutling line protocol:*
 * **disable_prepend_keys (OPTIONAL)**: Set to true to prevent resulting nested data to contain the parent key prepended to its key **NOTE**: duplicate names can overwrite each other when this is enabled
-* **renames (OPTIONAL, defined in TOML as a table using single bracket)**: A table matching the json key with the desired name (oppossed to defaulting to using the key), use names that include the prepended keys of its parent keys for nested results
+* **renames (OPTIONAL, defined in TOML as a table using single bracket)**: A table matching the json key with the desired name (opposed to defaulting to using the key), use names that include the prepended keys of its parent keys for nested results
 * **fields (OPTIONAL, defined in TOML as a table using single bracket)**: A table matching the json key with the desired type (int,string,bool,float), if you define a key that is an array or object then all nested values will become that type
 ## Arrays and Objects
@@ -194,7 +194,7 @@ The following describes the high-level approach when parsing arrays and objects:
 When handling nested arrays and objects, these above rules continue to apply as
 the parser creates line protocol. When an object has multiple array's as values,
 the array's will become separate line protocol containing only non-array values
-from the obejct. Below you can see an example of this behavior, with an input
+from the object. Below you can see an example of this behavior, with an input
 json containing an array of book objects that has a nested array of characters.
 Example JSON:


@@ -24,7 +24,7 @@ type Parser struct {
 DefaultTags map[string]string `toml:"-"`
 Log telegraf.Logger `toml:"-"`
-// **** The struct fields bellow this comment are used for processing individual configs ****
+// **** The struct fields below this comment are used for processing individual configs ****
 // measurementName is the name of the current config used in each line protocol
 measurementName string


@@ -149,7 +149,7 @@ func TestMinMax(t *testing.T) {
 },
 },
 {
-name: "Missing field Fileds",
+name: "Missing field Fields",
 scale: []scalingValuesMinMax{
 {
 InMin: -1,


@@ -193,7 +193,7 @@ with an error.
 In case you need to call some code that may return an error, you can delegate
 the call to the built-in function `catch` which takes as argument a `Callable`
-and returns the error that occured if any, `None` otherwise.
+and returns the error that occurred if any, `None` otherwise.
 So for example:


@@ -3436,7 +3436,7 @@ def apply(metric):
 mu.Lock()
 defer mu.Unlock()
 return len(delivered) == 1
-}, 1*time.Second, 100*time.Millisecond, "orignal metric not delivered")
+}, 1*time.Second, 100*time.Millisecond, "original metric not delivered")
 })
 }
 }
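The polling pattern around that assertion message is testify's `require.Eventually`. A self-contained sketch of the same idiom (the `delivered` slice is a stand-in for the test's real state):

```go
package example_test

import (
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestEventuallyDelivered(t *testing.T) {
	var mu sync.Mutex
	var delivered []string

	// Simulate an asynchronous delivery.
	go func() {
		mu.Lock()
		defer mu.Unlock()
		delivered = append(delivered, "metric")
	}()

	// Poll every 100ms for up to 1s until the condition holds.
	require.Eventually(t, func() bool {
		mu.Lock()
		defer mu.Unlock()
		return len(delivered) == 1
	}, 1*time.Second, 100*time.Millisecond, "original metric not delivered")
}
```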


@@ -82,7 +82,7 @@ Referencing the secret within a plugin occurs by:
 password = "@{docker_secretstore:secret_for_plugin}"
 ```
-## Additonal Information
+## Additional Information
 [Docker Secrets in Swarm][2]


@@ -6,7 +6,7 @@ NAME="Telegraf"
 VERSION=$(cd ../../ && make version)
 FLAGS=()
-# If building for arm64, then incude the extra flags required.
+# If building for arm64, then include the extra flags required.
 if [ -n "${1+x}" ] && [ "$1" = "arm64" ]; then
 FLAGS=(-arm -64)
 fi