chore: Fix typos (#15114)

Andreas Deininger 2024-04-09 20:00:52 +02:00 committed by GitHub
parent c4849d344f
commit 01c00d69fb
32 changed files with 48 additions and 48 deletions

View File

@@ -9,7 +9,7 @@ Pull requests welcome.
 - [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS.
 - [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API.
-- [opcda](https://github.com/lpc921/telegraf-execd-opcda) - Gather data from [OPC Fundation's Data Access (DA)](https://opcfoundation.org/about/opc-technologies/opc-classic/) protocol for industrial automation.
+- [opcda](https://github.com/lpc921/telegraf-execd-opcda) - Gather data from [OPC Foundation's Data Access (DA)](https://opcfoundation.org/about/opc-technologies/opc-classic/) protocol for industrial automation.
 - [open-hardware-monitor](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) - Gather sensors data provided by [Open Hardware Monitor](http://openhardwaremonitor.org)
 - [plex](https://github.com/russorat/telegraf-webhooks-plex) - Listens for events from Plex Media Server [Webhooks](https://support.plex.tv/articles/115002267687-webhooks/).
 - [rand](https://github.com/ssoroka/rand) - Generate random numbers

View File

@@ -15,7 +15,7 @@ The review process is roughly structured as follows:
 1. Submit a pull request.
    Please check that you signed the [CLA](https://www.influxdata.com/legal/cla/) (and [Corporate CLA](https://www.influxdata.com/legal/ccla/) if you are contributing code on as an employee of your company). Provide a short description of your submission and reference issues that you potentially close. Make sure the CI tests are all green and there are no linter-issues.
 1. Get feedback from a first reviewer and a `ready for final review` tag.
-   Please constructively work with the reviewer to get your code into a mergable state (see also [below](#reviewing-plugin-code)).
+   Please constructively work with the reviewer to get your code into a mergeable state (see also [below](#reviewing-plugin-code)).
 1. Get a final review by one of the InfluxData maintainers.
    Please fix any issue raised.
 1. Wait for the pull-request to be merged.
@@ -154,7 +154,7 @@ one series be item in the list.
 Counters retrieved from other projects often are in one of two styles,
 monotonically increasing without reset and reset on each interval. No attempt
 should be made to switch between these two styles but if given the option it
-is preferred to use the non-reseting variant. This style is more resilient in
+is preferred to use the non-resetting variant. This style is more resilient in
 the face of downtime and does not contain a fixed time element.
 
 ### Source tag
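
A brief aside on the guideline in that hunk: the sketch below contrasts the two counter styles in Go (the `monotonic` and `resetting` types are hypothetical illustrations, not Telegraf APIs) to show why the non-resetting variant is more resilient to missed collection intervals.

```go
package main

import "fmt"

// monotonic never resets; a missed read loses nothing, since the next
// delta still covers the gap.
type monotonic struct{ total uint64 }

func (c *monotonic) Add(n uint64) { c.total += n }
func (c *monotonic) Read() uint64 { return c.total }

// resetting zeroes itself on every read, so its value is implicitly tied
// to the reset interval (the "fixed time element" mentioned above).
type resetting struct{ current uint64 }

func (c *resetting) Add(n uint64) { c.current += n }
func (c *resetting) Read() uint64 {
	v := c.current
	c.current = 0
	return v
}

func main() {
	m, r := &monotonic{}, &resetting{}
	for i := 0; i < 3; i++ {
		m.Add(10)
		r.Add(10)
	}
	fmt.Println(m.Read(), m.Read()) // 30 30 -- safe to re-read after downtime
	fmt.Println(r.Read(), r.Read()) // 30 0  -- a missed read cannot be recovered
}
```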

View File

@@ -120,7 +120,7 @@ func (t *Table) initBuild() error {
 func (t Table) Build(gs Connection, walk bool) (*RTable, error) {
     rows := map[string]RTableRow{}
 
-    //translation table for secondary index (when preforming join on two tables)
+    //translation table for secondary index (when performing join on two tables)
     secIdxTab := make(map[string]string)
     secGlobalOuterJoin := false
     for i, f := range t.Fields {

View File

@@ -95,7 +95,7 @@
 # ## settings are valid:
 # ##   unconditional -- always match
 # ##   name          -- match by the "name" key
-# ##                   This resembles the previsou 'tag-only' behavior.
+# ##                   This resembles the previous 'tag-only' behavior.
 # ##   elements      -- match by the keys in the path filtered by the path
 # ##                   parts specified `elements` below
 # ## By default, 'elements' is used if the 'elements' option is provided,

View File

@@ -95,7 +95,7 @@
 # ## settings are valid:
 # ##   unconditional -- always match
 # ##   name          -- match by the "name" key
-# ##                   This resembles the previsou 'tag-only' behavior.
+# ##                   This resembles the previous 'tag-only' behavior.
 # ##   elements      -- match by the keys in the path filtered by the path
 # ##                   parts specified `elements` below
 # ## By default, 'elements' is used if the 'elements' option is provided,

View File

@@ -126,7 +126,7 @@ func TestTwoFullEventsWithoutParameter(t *testing.T) {
     )
 }
 
-func TestTwoFullEventsInSeperatePushes(t *testing.T) {
+func TestTwoFullEventsInSeparatePushes(t *testing.T) {
     acc := testutil.Accumulator{}
     derivative := &Derivative{
         Variable: " parameter",
@@ -160,7 +160,7 @@ func TestTwoFullEventsInSeperatePushes(t *testing.T) {
     acc.AssertContainsTaggedFields(t, "TestMetric", expectedFields, expectedTags)
 }
 
-func TestTwoFullEventsInSeperatePushesWithSeveralRollOvers(t *testing.T) {
+func TestTwoFullEventsInSeparatePushesWithSeveralRollOvers(t *testing.T) {
     acc := testutil.Accumulator{}
     derivative := &Derivative{
         Variable: "parameter",
@@ -193,7 +193,7 @@ func TestTwoFullEventsInSeperatePushesWithSeveralRollOvers(t *testing.T) {
     acc.AssertContainsFields(t, "TestMetric", expectedFields)
 }
 
-func TestTwoFullEventsInSeperatePushesWithOutRollOver(t *testing.T) {
+func TestTwoFullEventsInSeparatePushesWithOutRollOver(t *testing.T) {
     acc := testutil.Accumulator{}
     derivative := &Derivative{
         Variable: "parameter",
@@ -265,7 +265,7 @@ func TestIgnoresMissingVariable(t *testing.T) {
     acc.AssertContainsTaggedFields(t, "TestMetric", expectedFields, expectedTags)
 }
 
-func TestMergesDifferenMetricsWithSameHash(t *testing.T) {
+func TestMergesDifferentMetricsWithSameHash(t *testing.T) {
     acc := testutil.Accumulator{}
     derivative := NewDerivative()
     derivative.Log = testutil.Logger{}

View File

@@ -85,7 +85,7 @@ func (am *AzureMonitor) Init() error {
     }
 
     if err = am.receiver.SplitResourceTargetsMetricsByMinTimeGrain(); err != nil {
-        return fmt.Errorf("error spliting resource targets metrics by min time grain: %w", err)
+        return fmt.Errorf("error splitting resource targets metrics by min time grain: %w", err)
     }
 
     am.receiver.SplitResourceTargetsWithMoreThanMaxMetrics()

View File

@@ -354,12 +354,12 @@ type CephStatus struct {
         DataBytes          float64 `json:"data_bytes"`
         DegradedObjects    float64 `json:"degraded_objects"`
         DegradedRatio      float64 `json:"degraded_ratio"`
-        DegraedTotal       float64 `json:"degraded_total"`
+        DegradedTotal      float64 `json:"degraded_total"`
         InactivePGsRatio   float64 `json:"inactive_pgs_ratio"`
         NumBytesRecovered  float64 `json:"num_bytes_recovered"`
         NumKeysRecovered   float64 `json:"num_keys_recovered"`
         NumObjects         float64 `json:"num_objects"`
-        NumOjbectRecovered float64 `json:"num_objects_recovered"`
+        NumObjectRecovered float64 `json:"num_objects_recovered"`
         NumPGs             float64 `json:"num_pgs"`
         NumPools           float64 `json:"num_pools"`
         OpPerSec           float64 `json:"op_per_sec"` // This field is no longer reported in ceph 10 and later
@@ -470,11 +470,11 @@ func decodeStatusPgmap(acc telegraf.Accumulator, data *CephStatus) error {
         "data_bytes":            data.PGMap.DataBytes,
         "degraded_objects":      data.PGMap.DegradedObjects,
         "degraded_ratio":        data.PGMap.DegradedRatio,
-        "degraded_total":        data.PGMap.DegraedTotal,
+        "degraded_total":        data.PGMap.DegradedTotal,
         "inactive_pgs_ratio":    data.PGMap.InactivePGsRatio,
         "num_bytes_recovered":   data.PGMap.NumBytesRecovered,
         "num_keys_recovered":    data.PGMap.NumKeysRecovered,
-        "num_objects_recovered": data.PGMap.NumOjbectRecovered,
+        "num_objects_recovered": data.PGMap.NumObjectRecovered,
         "num_objects":           data.PGMap.NumObjects,
         "num_pgs":               data.PGMap.NumPGs,
         "num_pools":             data.PGMap.NumPools,

View File

@@ -67,7 +67,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 ##     <replica><host>clickhouse-eu-1.local</host><port>9000</port></replica>
 ##     <replica><host>clickhouse-eu-2.local</host><port>9000</port></replica>
 ##    </shard>
-##   </my-onw-cluster>
+##   </my-own-cluster>
 ## </remote_servers>
 ##
 ## </yandex>

View File

@@ -47,7 +47,7 @@
 ##     <replica><host>clickhouse-eu-1.local</host><port>9000</port></replica>
 ##     <replica><host>clickhouse-eu-2.local</host><port>9000</port></replica>
 ##    </shard>
-##   </my-onw-cluster>
+##   </my-own-cluster>
 ## </remote_servers>
 ##
 ## </yandex>

View File

@@ -186,7 +186,7 @@ of the examples below.
 - `filter_query`: Lucene query to filter the results (default: "\*")
 - `metric_fields`: The list of fields to perform metric aggregation (these must
   be indexed as numeric fields)
-- `metric_funcion`: The single-value metric aggregation function to be performed
+- `metric_function`: The single-value metric aggregation function to be performed
   on the `metric_fields` defined. Currently supported aggregations are "avg",
   "min", "max", "sum". (see the [aggregation docs][agg]
 - `tags`: The list of fields to be used as tags (these must be indexed as

View File

@@ -137,7 +137,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 # ## settings are valid:
 # ##   unconditional -- always match
 # ##   name          -- match by the "name" key
-# ##                   This resembles the previsou 'tag-only' behavior.
+# ##                   This resembles the previous 'tag-only' behavior.
 # ##   elements      -- match by the keys in the path filtered by the path
 # ##                   parts specified `elements` below
 # ## By default, 'elements' is used if the 'elements' option is provided,

View File

@@ -98,7 +98,7 @@
 # ## settings are valid:
 # ##   unconditional -- always match
 # ##   name          -- match by the "name" key
-# ##                   This resembles the previsou 'tag-only' behavior.
+# ##                   This resembles the previous 'tag-only' behavior.
 # ##   elements      -- match by the keys in the path filtered by the path
 # ##                   parts specified `elements` below
 # ## By default, 'elements' is used if the 'elements' option is provided,

View File

@@ -32,7 +32,7 @@ func TestErrorBehaviorDefault(t *testing.T) {
     require.Error(t, plugin.Init())
 }
 
-func TestErorBehaviorIgnore(t *testing.T) {
+func TestErrorBehaviorIgnore(t *testing.T) {
     // make sure we can't find nvidia-smi in $PATH somewhere
     os.Unsetenv("PATH")
     plugin := &NvidiaSMI{

View File

@@ -108,7 +108,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 - `filter_query`: Lucene query to filter the results (default: "\*")
 - `metric_fields`: The list of fields to perform metric aggregation (these must
   be indexed as numeric fields)
-- `metric_funcion`: The single-value metric aggregation function to be performed
+- `metric_function`: The single-value metric aggregation function to be performed
   on the `metric_fields` defined. Currently supported aggregations are "avg",
   "min", "max", "sum", "value_count", "stats", "extended_stats", "percentiles".
   (see the [aggregation docs][agg]

View File

@@ -42,7 +42,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
   ## Format of stats to parse, set to "status" or "json"
   ## If the user configures the URL to return JSON (e.g.
   ## http://localhost/status?json), set to JSON. Otherwise, will attempt to
-  ## parse line-by-line. The JSON mode will produce additonal metrics.
+  ## parse line-by-line. The JSON mode will produce additional metrics.
   # format = "status"
 
   ## Duration allowed to complete HTTP requests.

View File

@@ -26,7 +26,7 @@
   ## Format of stats to parse, set to "status" or "json"
   ## If the user configures the URL to return JSON (e.g.
   ## http://localhost/status?json), set to JSON. Otherwise, will attempt to
-  ## parse line-by-line. The JSON mode will produce additonal metrics.
+  ## parse line-by-line. The JSON mode will produce additional metrics.
   # format = "status"
 
   ## Duration allowed to complete HTTP requests.

View File

@@ -72,7 +72,7 @@ var (
     // key normalized raw
     // program_fail_count : 100% 0
-    // REGEX patter supports deprecated metrics (nvme-cli version below 1.14) and metrics from nvme-cli 1.14 (and above).
+    // REGEX pattern supports deprecated metrics (nvme-cli version below 1.14) and metrics from nvme-cli 1.14 (and above).
     intelExpressionPattern = regexp.MustCompile(`^([A-Za-z0-9_\s]+)[:|\s]+(\d+)[%|\s]+(.+)`)
 
     // vid : 0x8086

View File

@@ -11,7 +11,7 @@ import (
 // (https://github.com/opserver/Opserver/blob/9c89c7e9936b58ad237b30e6f4cc6cd59c406889/Opserver.Core/Data/SQL/SQLInstance.Memory.cs)
 // for putting most of the memory clerk definitions online!
 /*
-The SQL scripts use a series of IF and CASE statemens to choose the correct query based on edition and version of SQL Server, below the meaning of the numbers:
+The SQL scripts use a series of IF and CASE statements to choose the correct query based on edition and version of SQL Server, below the meaning of the numbers:
 EngineEdition:
 1 = Personal or Desktop Engine (Not available in SQL Server 2005 (9.x) and later versions.)
 2 = Standard (This is returned for Standard, Web, and Business Intelligence.)

View File

@@ -121,7 +121,7 @@ func (*WinServices) SampleConfig() string {
 }
 
 func (m *WinServices) Init() error {
-    // For case insensitive comparision (see issue #8796) we need to transform the services
+    // For case insensitive comparison (see issue #8796) we need to transform the services
     // to lowercase
     servicesInclude := make([]string, 0, len(m.ServiceNames))
     for _, s := range m.ServiceNames {

View File

@@ -23,7 +23,7 @@ import (
 var sampleConfig string
 
 // matches any word that has a non valid backtick
-// `word` <- dosen't match
+// `word` <- doesn't match
 // “word , `wo`rd` , `word , word` <- match
 var forbiddenBacktick = regexp.MustCompile("^[^\x60].*?[\x60]+.*?[^\x60]$|^[\x60].*[\x60]+.*[\x60]$|^[\x60]+.*[^\x60]$|^[^\x60].*[\x60]+$")
 var allowedBacktick = regexp.MustCompile("^[\x60].*[\x60]$")

View File

@@ -769,7 +769,7 @@ func TestWriteIntegration_tagError(t *testing.T) {
     require.EqualValues(t, 2, dump[1]["v"])
 }
 
-// Verify that when using TagsAsForeignKeys and ForeignTagConstraing and a tag can't be written, that we drop the metrics.
+// Verify that when using TagsAsForeignKeys and ForeignTagConstraint and a tag can't be written, that we drop the metrics.
 func TestWriteIntegration_tagError_foreignConstraint(t *testing.T) {
     if testing.Short() {
         t.Skip("Skipping integration test in short mode")

View File

@@ -126,7 +126,7 @@ func (p *SQL) deriveDatatype(value interface{}) string {
     } else if p.Convert.ConversionStyle == "literal" {
         datatype = p.Convert.Unsigned
     } else {
-        p.Log.Errorf("unknown converstaion style: %s", p.Convert.ConversionStyle)
+        p.Log.Errorf("unknown conversion style: %s", p.Convert.ConversionStyle)
     }
     case float64:
         datatype = p.Convert.Real

View File

@@ -112,7 +112,7 @@ syslog messages.
 | APP-NAME | appname | - | default_appname = "Telegraf" |
 | TIMESTAMP | - | timestamp | Metric's own timestamp |
 | VERSION | - | version | 1 |
-| PRI | - | serverity_code + (8 * facility_code)| default_severity_code=5 (notice), default_facility_code=1 (user-level)|
+| PRI | - | severity_code + (8 * facility_code)| default_severity_code=5 (notice), default_facility_code=1 (user-level)|
 | HOSTNAME | hostname OR source OR host | - | os.Hostname() |
 | MSGID | - | msgid | Metric name |
 | PROCID | - | procid | - |
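
The PRI line fixed above encodes the RFC 5424 priority value. As a quick sanity check of the formula with the defaults listed in the table (an illustrative snippet, not plugin code):

```go
package main

import "fmt"

func main() {
	// Defaults from the table above:
	// default_severity_code = 5 (notice), default_facility_code = 1 (user-level).
	severity := 5
	facility := 1

	pri := severity + (8 * facility) // RFC 5424 priority value
	fmt.Printf("<%d>1 ...", pri)     // a version-1 message header starts with "<13>"
}
```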

View File

@@ -64,7 +64,7 @@ func (z *Zabbix) Connect() error {
     return nil
 }
 
-// Init initilizes LLD and autoregister maps. Copy config values to them. Configure Logger.
+// Init initializes LLD and autoregister maps. Copy config values to them. Configure Logger.
 func (z *Zabbix) Init() error {
     // Add port to address if not present
     if _, _, err := net.SplitHostPort(z.Address); err != nil {

View File

@@ -92,7 +92,7 @@ such as `America/New_York`, to `Local` to utilize the system timezone, or to `UTC`.
 the `field` and `tag` config tables to gather a single value or an array of
 values that all share the same type and name. With this you can add a field or
 tag to a line protocol from data stored anywhere in your JSON. If you define the
-GJSON path to return a single value then you will get a single resutling line
+GJSON path to return a single value then you will get a single resulting line
 protocol that contains the field/tag. If you define the GJSON path to return an
 array of values, then each field/tag will be put into a separate line protocol
 (you use the # character to retrieve JSON arrays, find examples
@@ -177,7 +177,7 @@ such as `America/New_York`, to `Local` to utilize the system timezone, or to `UTC`.
 * **field (OPTIONAL, defined in TOML as an array table using double brackets)**: Identical to the [field](#field) table you can define, but with two key differences. The path supports arrays and objects and is defined under the object table and therefore will adhere to how the JSON is structured. You want to use this if you want the field/tag to be added as it would if it were in the included_key list, but then use the GJSON path syntax.
 * **tag (OPTIONAL, defined in TOML as an array table using double brackets)**: Identical to the [tag](#tag) table you can define, but with two key differences. The path supports arrays and objects and is defined under the object table and therefore will adhere to how the JSON is structured. You want to use this if you want the field/tag to be added as it would if it were in the included_key list, but then use the GJSON path syntax.
 
-*Configuration to modify the resutling line protocol:*
+*Configuration to modify the resulting line protocol:*
 
 * **disable_prepend_keys (OPTIONAL)**: Set to true to prevent resulting nested data to contain the parent key prepended to its key **NOTE**: duplicate names can overwrite each other when this is enabled
 * **renames (OPTIONAL, defined in TOML as a table using single bracket)**: A table matching the json key with the desired name (opposed to defaulting to using the key), use names that include the prepended keys of its parent keys for nested results
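
To make the single-value vs. array behaviour described in the first hunk concrete, here is a small sketch using the gjson library whose path syntax the json_v2 docs refer to (the sample JSON is made up):

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	data := `{"books":[{"title":"A","pages":100},{"title":"B","pages":200}]}`

	// A path resolving to a single value yields one field, i.e. one
	// resulting line of line protocol.
	fmt.Println(gjson.Get(data, "books.0.pages").Int()) // 100

	// A '#' path resolves to an array; each element would end up in a
	// separate line of line protocol.
	for _, v := range gjson.Get(data, "books.#.pages").Array() {
		fmt.Println(v.Int()) // 100, then 200
	}
}
```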

View File

@@ -25,7 +25,7 @@
     path = "convertfloattostring"
     type = "string"
 
-# Parse int typess from JSON
+# Parse int types from JSON
 [[inputs.file]]
   files = ["./testdata/types/input.json"]
   data_format = "json_v2"

View File

@@ -192,12 +192,12 @@ XPath expressions.
 In this configuration mode, you explicitly specify the field and tags you want
 to scrape out of your data.
 
-A configuration can contain muliple _xpath_ subsections for e.g. the file plugin
-to process the xml-string multiple times. Consult the [XPath syntax][xpath] and
-the [underlying library's functions][xpath lib] for details and help regarding
-XPath queries. Consider using an XPath tester such as [xpather.com][xpather] or
-[Code Beautify's XPath Tester][xpath tester] for help developing and debugging
-your query.
+A configuration can contain multiple _xpath_ subsections for e.g. the
+file plugin to process the xml-string multiple times. Consult the
+[XPath syntax][xpath] and the [underlying library's functions][xpath lib]
+for details and help regarding XPath queries. Consider using an XPath tester
+such as [xpather.com][xpather] or [Code Beautify's XPath Tester][xpath tester]
+for help developing and debugging your query.
 
 ## Configuration (batch)
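
As a companion to the reflowed paragraph above, a short sketch of the kind of XPath query such an _xpath_ subsection would carry, written against github.com/antchfx/xmlquery (one of the libraries used by the parser; the XML document is made up):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/antchfx/xmlquery"
)

func main() {
	xml := `<data>
	  <device name="sensor-1"><temperature>21.3</temperature></device>
	  <device name="sensor-2"><temperature>19.8</temperature></device>
	</data>`

	doc, err := xmlquery.Parse(strings.NewReader(xml))
	if err != nil {
		panic(err)
	}

	// The same kind of expression a field definition would use.
	for _, node := range xmlquery.Find(doc, "//device") {
		name := node.SelectAttr("name")
		temp := xmlquery.FindOne(node, "temperature").InnerText()
		fmt.Printf("%s temperature=%s\n", name, temp)
	}
}
```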

View File

@@ -28,7 +28,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
   ## you'll want to consider memory use.
   cache_ttl = "24h"
 
-  ## lookup_timeout is how long should you wait for a single dns request to repsond.
+  ## lookup_timeout is how long should you wait for a single dns request to respond.
   ## this is also the maximum acceptable latency for a metric travelling through
   ## the reverse_dns processor. After lookup_timeout is exceeded, a metric will
   ## be passed on unaltered.

View File

@@ -9,7 +9,7 @@
   ## you'll want to consider memory use.
   cache_ttl = "24h"
 
-  ## lookup_timeout is how long should you wait for a single dns request to repsond.
+  ## lookup_timeout is how long should you wait for a single dns request to respond.
   ## this is also the maximum acceptable latency for a metric travelling through
   ## the reverse_dns processor. After lookup_timeout is exceeded, a metric will
   ## be passed on unaltered.

View File

@@ -91,8 +91,8 @@ func TestTrim(t *testing.T) {
 func TestTracking(t *testing.T) {
     inputRaw := []telegraf.Metric{
         metric.New("foo", map[string]string{"tag": "testing"}, map[string]interface{}{"value": 42}, time.Unix(0, 0)),
-        metric.New("bar", map[string]string{"tag": "other", "host": "locahost"}, map[string]interface{}{"value": 23}, time.Unix(0, 0)),
-        metric.New("baz", map[string]string{"tag": "value", "host": "locahost", "module": "main"}, map[string]interface{}{"value": 99}, time.Unix(0, 0)),
+        metric.New("bar", map[string]string{"tag": "other", "host": "localhost"}, map[string]interface{}{"value": 23}, time.Unix(0, 0)),
+        metric.New("baz", map[string]string{"tag": "value", "host": "localhost", "module": "main"}, map[string]interface{}{"value": 99}, time.Unix(0, 0)),
     }
 
     var mu sync.Mutex
@@ -118,13 +118,13 @@ func TestTracking(t *testing.T) {
         ),
         metric.New(
             "bar",
-            map[string]string{"tag": "other", "host": "locahost"},
+            map[string]string{"tag": "other", "host": "localhost"},
             map[string]interface{}{"value": 23},
             time.Unix(0, 0),
         ),
         metric.New(
             "baz",
-            map[string]string{"tag": "value", "host": "locahost"},
+            map[string]string{"tag": "value", "host": "localhost"},
             map[string]interface{}{"value": 99},
             time.Unix(0, 0),
         ),

View File

@@ -15,7 +15,7 @@ elif [ "$ARCH" = 'x86_64' ]; then
     GO_VERSION_SHA=${GO_VERSION_SHA_amd64}
 fi
 
-# This path is cachable. (Saving in /usr/local/ would cause issues restoring the cache.)
+# This path is cacheable. (Saving in /usr/local/ would cause issues restoring the cache.)
 path="/usr/local/Cellar"
 sudo mkdir -p ${path}