docs: Fix typos (#14376)
parent 0052fc3634
commit 5f5a56ed01
CHANGELOG.md

@@ -1854,7 +1854,7 @@ Thank you to @zak-pawel for lots of linter fixes!
 - [#10007](https://github.com/influxdata/telegraf/pull/10007) `processors.ifname` Parallelism fix for ifname processor
 - [#10208](https://github.com/influxdata/telegraf/pull/10208) `inputs.mqtt_consumer` Mqtt topic extracting no longer requires all three fields
 - [#9616](https://github.com/influxdata/telegraf/pull/9616) Windows Service - graceful shutdown of telegraf
-- [#10203](https://github.com/influxdata/telegraf/pull/10203) Revert unintented corruption of the Makefile
+- [#10203](https://github.com/influxdata/telegraf/pull/10203) Revert unintended corruption of the Makefile
 - [#10112](https://github.com/influxdata/telegraf/pull/10112) `inputs.cloudwatch` Cloudwatch metrics collection
 - [#10178](https://github.com/influxdata/telegraf/pull/10178) `outputs.all` Register bigquery to output plugins
 - [#10165](https://github.com/influxdata/telegraf/pull/10165) `inputs.sysstat` Sysstat to use unique temp file vs hard-coded

@@ -1963,7 +1963,7 @@ Thank you to @zak-pawel for lots of linter fixes!
 - [#9585](https://github.com/influxdata/telegraf/pull/9585) `inputs.kube_inventory` Fix segfault in ingress, persistentvolumeclaim, statefulset in kube_inventory
 - [#9901](https://github.com/influxdata/telegraf/pull/9901) `inputs.ethtool` Add normalization of tags for ethtool input plugin
 - [#9957](https://github.com/influxdata/telegraf/pull/9957) `inputs.internet_speed` Resolve missing latency field
-- [#9662](https://github.com/influxdata/telegraf/pull/9662) `inputs.prometheus` Decode Prometheus scrape path from Kuberentes labels
+- [#9662](https://github.com/influxdata/telegraf/pull/9662) `inputs.prometheus` Decode Prometheus scrape path from Kubernetes labels
 - [#9933](https://github.com/influxdata/telegraf/pull/9933) `inputs.procstat` Correct conversion of int with specific bit size
 - [#9940](https://github.com/influxdata/telegraf/pull/9940) `inputs.webhooks` Provide more fields for papertrail event webhook
 - [#9892](https://github.com/influxdata/telegraf/pull/9892) `inputs.mongodb` Solve compatibility issue for mongodb inputs when using 5.x relicaset

@@ -2301,7 +2301,7 @@ Thank you to @zak-pawel for lots of linter fixes!
 - [#9067](https://github.com/influxdata/telegraf/pull/9067) `inputs.nfsclient` Fix integer overflow in fields from mountstat
 - [#9050](https://github.com/influxdata/telegraf/pull/9050) `inputs.snmp` Fix init when no mibs are installed
 - [#9072](https://github.com/influxdata/telegraf/pull/9072) `inputs.ping` Always call SetPrivileged(true) in native mode
-- [#9043](https://github.com/influxdata/telegraf/pull/9043) `processors.ifname` Get interface name more effeciently
+- [#9043](https://github.com/influxdata/telegraf/pull/9043) `processors.ifname` Get interface name more efficiently
 - [#9056](https://github.com/influxdata/telegraf/pull/9056) `outputs.yandex_cloud_monitoring` Use correct compute metadata URL to get folder-id
 - [#9048](https://github.com/influxdata/telegraf/pull/9048) `outputs.azure_monitor` Handle error when initializing the auth object
 - [#8549](https://github.com/influxdata/telegraf/pull/8549) `inputs.sqlserver` Fix sqlserver_process_cpu calculation

@@ -2451,7 +2451,7 @@ Included a few more changes that add configuration options to plugins as it's be
 - [#8715](https://github.com/influxdata/telegraf/pull/8715) Bump github.com/Shopify/sarama from 1.27.1 to 1.27.2
 - [#8712](https://github.com/influxdata/telegraf/pull/8712) Bump github.com/newrelic/newrelic-telemetry-sdk-go from 0.2.0 to 0.5.1
 - [#8659](https://github.com/influxdata/telegraf/pull/8659) `inputs.gnmi` GNMI plugin should not take off the first character of field keys when no 'alias path' exists.
-- [#8609](https://github.com/influxdata/telegraf/pull/8609) `inputs.webhooks` Use the 'measurement' json field from the particle webhook as the measurment name, or if it's blank, use the 'name' field of the event's json.
+- [#8609](https://github.com/influxdata/telegraf/pull/8609) `inputs.webhooks` Use the 'measurement' json field from the particle webhook as the measurement name, or if it's blank, use the 'name' field of the event's json.
 - [#8658](https://github.com/influxdata/telegraf/pull/8658) `inputs.procstat` Procstat input plugin should use the same timestamp in all metrics in the same Gather() cycle.
 - [#8391](https://github.com/influxdata/telegraf/pull/8391) `aggregators.merge` Optimize SeriesGrouper & aggregators.merge
 - [#8545](https://github.com/influxdata/telegraf/pull/8545) `inputs.prometheus` Using mime-type in prometheus parser to handle protocol-buffer responses

@@ -2602,7 +2602,7 @@ Included a few more changes that add configuration options to plugins as it's be
 - [#7953](https://github.com/influxdata/telegraf/pull/7953) Add details to connect to InfluxDB OSS 2 and Cloud 2
 - [#8054](https://github.com/influxdata/telegraf/pull/8054) add guidelines run to external plugins with execd
 - [#8198](https://github.com/influxdata/telegraf/pull/8198) `inputs.influxdb_v2_listener` change default influxdb port from 9999 to 8086 to match OSS 2.0 release
-- [starlark](https://github.com/influxdata/telegraf/tree/release-1.16/plugins/processors/starlark/testdata) `processors.starlark` add various code exampels for the Starlark processor
+- [starlark](https://github.com/influxdata/telegraf/tree/release-1.16/plugins/processors/starlark/testdata) `processors.starlark` add various code examples for the Starlark processor

 ### Features

@@ -11,7 +11,7 @@
 b = "bar"

 [[inputs.statetest]]
-## What a wounderful world...
+## What a wonderful world...
 servers = ["myserver.org", "myserver.com"]
 port = 80
 method = "strange"

@@ -18,7 +18,7 @@ For client TLS support we have the following options:
 ## Root certificates for verifying server certificates encoded in PEM format.
 # tls_ca = "/etc/telegraf/ca.pem"

-## The public and private keypairs for the client encoded in PEM format. May
+## The public and private key pairs for the client encoded in PEM format. May
 ## contain intermediate certificates.
 # tls_cert = "/etc/telegraf/cert.pem"
 # tls_key = "/etc/telegraf/key.pem"

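For reference, the options in this hunk map straight onto Go's `crypto/tls` types. A minimal sketch of how a client could load these files — illustrative only, not Telegraf's actual loader; the paths are the sample values from the snippet:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"log"
	"os"
)

func main() {
	// Root CAs used to verify the server certificate (tls_ca).
	caPEM, err := os.ReadFile("/etc/telegraf/ca.pem")
	if err != nil {
		log.Fatal(err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		log.Fatal("no valid CA certificates found")
	}
	// Client key pair; the cert file may contain intermediate certificates.
	pair, err := tls.LoadX509KeyPair("/etc/telegraf/cert.pem", "/etc/telegraf/key.pem")
	if err != nil {
		log.Fatal(err)
	}
	cfg := &tls.Config{RootCAs: pool, Certificates: []tls.Certificate{pair}}
	_ = cfg // hand this to an http.Transport or tls.Dial
}
```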
@@ -607,12 +607,7 @@ func TestFilter_MetricPass(t *testing.T) {
 			expected:   true,
 		},
 		{
-			name:       "arithmetics",
-			expression: `fields.count + fields.errors < fields.total`,
-			expected:   true,
-		},
-		{
-			name:       "arithmetics",
+			name:       "arithmetic",
 			expression: `fields.count + fields.errors < fields.total`,
 			expected:   true,
 		},

@@ -622,7 +617,7 @@ func TestFilter_MetricPass(t *testing.T) {
 			expected:   true,
 		},
 		{
-			name:       "time arithmetics",
+			name:       "time arithmetic",
 			expression: `time >= timestamp("2023-04-25T00:00:00Z") - duration("24h")`,
 			expected:   true,
 		},

@@ -212,7 +212,7 @@ with only occasional changes.
 ### Tags

 No tags are applied by this aggregator.
-Existing tags are passed throug the aggregator untouched.
+Existing tags are passed through the aggregator untouched.

 ## Example Output

@@ -37,7 +37,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 ## Output strategy, supported values:
 ##   timeout  -- output a metric if no new input arrived for `series_timeout`;
 ##               useful for filling gaps in input data
-##   periodic -- ouput the last received metric every `period`; useful to
+##   periodic -- output the last received metric every `period`; useful to
 ##               downsample the input data
 # output_strategy = "timeout"
 ```

@@ -49,12 +49,12 @@ for the period if the last received one is older than the series_timeout. This
 will not guarantee a regular output of a `final` metric e.g. if the
 series-timeout is a multiple of the gathering interval for an input. In this
 case metric sporadically arrive in the timeout phase of the period and emitting
-the `final` metric is surpressed.
+the `final` metric is suppressed.
 This can be helpful to fill in gaps in the data if no input arrived in time.

 Contrary to this, `output_strategy = "periodic"` will always output a `final`
 metric at the end of the period irrespectively of when the last metric arrived,
-the `series_timout` is ignored.
+the `series_timeout` is ignored.
 This is helpful if you for example want to downsample input data arriving at a
 high rate and require a periodic output of the `final` metric.

@@ -12,6 +12,6 @@
 ## Output strategy, supported values:
 ##   timeout  -- output a metric if no new input arrived for `series_timeout`;
 ##               useful for filling gaps in input data
-##   periodic -- ouput the last received metric every `period`; useful to
+##   periodic -- output the last received metric every `period`; useful to
 ##               downsample the input data
 # output_strategy = "timeout"

@@ -88,7 +88,7 @@ result.

 ## Measurements

-Measurement names are passed trough this aggregator.
+Measurement names are passed through this aggregator.

 ### Fields

@@ -95,7 +95,7 @@ func (c *ClientConfig) TLSConfig() (*tls.Config, error) {
 	case "freely":
 		renegotiationMethod = tls.RenegotiateFreelyAsClient
 	default:
-		return nil, fmt.Errorf("unrecognized renegotation method %q, choose from: 'never', 'once', 'freely'", c.RenegotiationMethod)
+		return nil, fmt.Errorf("unrecognized renegotiation method %q, choose from: 'never', 'once', 'freely'", c.RenegotiationMethod)
 	}

 	tlsConfig := &tls.Config{

@@ -98,7 +98,7 @@ func (marc *mockAzureResourcesClient) ListByResourceGroup(
 		return responses, nil
 	}

-	return nil, fmt.Errorf("resouce group was not found")
+	return nil, fmt.Errorf("resource group was not found")
 }

 func (mamdc *mockAzureMetricDefinitionsClient) List(

@@ -56,7 +56,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.

 ## Consul checks' tag splitting
 # When tags are formatted like "key:value" with ":" as a delimiter then
-# they will be splitted and reported as proper key:value in Telegraf
+# they will be split and reported as proper key:value in Telegraf
 # tag_delimiter = ":"
 ```

@@ -33,5 +33,5 @@

 ## Consul checks' tag splitting
 # When tags are formatted like "key:value" with ":" as a delimiter then
-# they will be splitted and reported as proper key:value in Telegraf
+# they will be split and reported as proper key:value in Telegraf
 # tag_delimiter = ":"

@@ -30,7 +30,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 ## The directory to monitor and read files from (including sub-directories if "recursive" is true).
 directory = ""
 #
-## The directory to move finished files to (maintaining directory hierachy from source).
+## The directory to move finished files to (maintaining directory hierarchy from source).
 finished_directory = ""
 #
 ## Setting recursive to true will make the plugin recursively walk the directory and process all sub-directories.

@@ -334,7 +334,7 @@ func (monitor *DirectoryMonitor) moveFile(srcPath string, dstBaseDir string) {
 	dstPath := filepath.Join(dstBaseDir, basePath)
 	err := os.MkdirAll(filepath.Dir(dstPath), os.ModePerm)
 	if err != nil {
-		monitor.Log.Errorf("Error creating directory hierachy for " + srcPath + ". Error: " + err.Error())
+		monitor.Log.Errorf("Error creating directory hierarchy for " + srcPath + ". Error: " + err.Error())
 	}

 	inputFile, err := os.Open(srcPath)

@@ -3,7 +3,7 @@
 ## The directory to monitor and read files from (including sub-directories if "recursive" is true).
 directory = ""
 #
-## The directory to move finished files to (maintaining directory hierachy from source).
+## The directory to move finished files to (maintaining directory hierarchy from source).
 finished_directory = ""
 #
 ## Setting recursive to true will make the plugin recursively walk the directory and process all sub-directories.

@@ -6,20 +6,20 @@ type LinkRoomsSections struct {
 	SectionID uint16
 }

-// Sections contains sections informations
+// Sections contains sections information
 type Sections struct {
 	ID   uint16 `json:"id"`
 	Name string `json:"name"`
 }

-// Rooms contains rooms informations
+// Rooms contains rooms information
 type Rooms struct {
 	ID        uint16 `json:"id"`
 	Name      string `json:"name"`
 	SectionID uint16 `json:"sectionID"`
 }

-// Devices contains devices informations
+// Devices contains devices information
 type Devices struct {
 	ID   uint16 `json:"id"`
 	Name string `json:"name"`

@@ -6,20 +6,20 @@ type linkRoomsSections struct {
 	SectionID uint16
 }

-// Sections contains sections informations
+// Sections contains sections information
 type Sections struct {
 	ID   uint16 `json:"id"`
 	Name string `json:"name"`
 }

-// Rooms contains rooms informations
+// Rooms contains rooms information
 type Rooms struct {
 	ID        uint16 `json:"id"`
 	Name      string `json:"name"`
 	SectionID uint16 `json:"sectionID"`
 }

-// Devices contains devices informations
+// Devices contains devices information
 type Devices struct {
 	ID   uint16 `json:"id"`
 	Name string `json:"name"`

@@ -132,7 +132,7 @@ func (h *handler) handleSubscribeResponseUpdate(acc telegraf.Accumulator, respon

 	switch ext.GetRegisteredExt().Id {
 	case eidJuniperTelemetryHeader:
-		// Juniper Header extention
+		// Juniper Header extension
 		// Decode it only if user requested it
 		if choice.Contains("juniper_header", h.vendorExt) {
 			juniperHeader := &jnprHeader.GnmiJuniperTelemetryHeaderExtension{}

@@ -1 +1 @@
-fourty two
+forty two

@@ -180,7 +180,7 @@ func (i *IntelPMU) checkFileDescriptors() error {
 	}
 	uncoreFd, err := estimateUncoreFd(i.UncoreEntities)
 	if err != nil {
-		return fmt.Errorf("failed to estimate nubmer of uncore events file descriptors: %w", err)
+		return fmt.Errorf("failed to estimate number of uncore events file descriptors: %w", err)
 	}
 	if coreFd > math.MaxUint64-uncoreFd {
 		return fmt.Errorf("requested number of file descriptors exceeds uint64")

@@ -26,7 +26,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 ```toml @sample.conf
 # Intel PowerStat plugin enables monitoring of platform metrics (power, TDP)
 # and per-CPU metrics like temperature, power and utilization. Please see the
-# plugin readme for details on software and hardware compatability.
+# plugin readme for details on software and hardware compatibility.
 # This plugin ONLY supports Linux.
 [[inputs.intel_powerstat]]
 ## The user can choose which package metrics are monitored by the plugin with

@@ -1,6 +1,6 @@
 # Intel PowerStat plugin enables monitoring of platform metrics (power, TDP)
 # and per-CPU metrics like temperature, power and utilization. Please see the
-# plugin readme for details on software and hardware compatability.
+# plugin readme for details on software and hardware compatibility.
 # This plugin ONLY supports Linux.
 [[inputs.intel_powerstat]]
 ## The user can choose which package metrics are monitored by the plugin with

@@ -23,7 +23,7 @@ command to collect remote host sensor stats:
 ipmitool -I lan -H SERVER -U USERID -P PASSW0RD sdr
 ```

-Any of the following parameters will be added to the aformentioned query if
+Any of the following parameters will be added to the aforementioned query if
 they're configured:

 ```sh

@@ -83,7 +83,7 @@ Use `paths` to refine which fields to collect.
   paths = ["HeapMemoryUsage", "NonHeapMemoryUsage", "ObjectPendingFinalizationCount"]
 ```

-The preceeding `jvm_memory` `metric` declaration produces the following output:
+The preceding `jvm_memory` `metric` declaration produces the following output:

 ```text
 jvm_memory HeapMemoryUsage.committed=4294967296,HeapMemoryUsage.init=4294967296,HeapMemoryUsage.max=4294967296,HeapMemoryUsage.used=1750658992,NonHeapMemoryUsage.committed=67350528,NonHeapMemoryUsage.init=2555904,NonHeapMemoryUsage.max=-1,NonHeapMemoryUsage.used=65821352,ObjectPendingFinalizationCount=0 1503762436000000000

@@ -101,7 +101,7 @@ by capturing values into `tag_keys`.
 ```

 Since `name=*` matches both `G1 Old Generation` and `G1 Young Generation`, and
-`name` is used as a tag, the preceeding `jvm_garbage_collector` `metric`
+`name` is used as a tag, the preceding `jvm_garbage_collector` `metric`
 declaration produces two metrics.

 ```shell

@@ -120,7 +120,7 @@ Use `tag_prefix` along with `tag_keys` to add detail to tag names.
   tag_prefix = "pool_"
 ```

-The preceeding `jvm_memory_pool` `metric` declaration produces six metrics, each
+The preceding `jvm_memory_pool` `metric` declaration produces six metrics, each
 with a distinct `pool_name` tag.

 ```text

@@ -145,7 +145,7 @@ the property-key `name`, and `$2` represents the value of the property-key
   tag_keys = ["topic"]
 ```

-The preceeding `kafka_topic` `metric` declaration produces a metric per Kafka
+The preceding `kafka_topic` `metric` declaration produces a metric per Kafka
 topic. The `name` Mbean property-key is used as a field prefix to aid in
 gathering fields together into the single metric.

@@ -293,7 +293,7 @@ func (m *OpenConfigTelemetry) collectData(
 			return
 		}

-		m.Log.Debugf("Sucessfully subscribed to %s on %s", sensor.measurementName, grpcServer)
+		m.Log.Debugf("Successfully subscribed to %s on %s", sensor.measurementName, grpcServer)

 		for {
 			r, err := stream.Recv()

@@ -170,7 +170,7 @@ subjects:

 When monitoring [k3s](https://k3s.io) server instances one can re-use already
 generated administration token. This is less secure than using the more
-restrictive dedicated telegraf user but more convienient to set up.
+restrictive dedicated telegraf user but more convenient to set up.

 ```console
 # replace `telegraf` with the user the telegraf process is running as

@@ -366,7 +366,7 @@ tls_key = "/run/telegraf-kubernetes-key"
 - enddate
 - verification_code

-### kuberntes node status `status`
+### kubernetes node status `status`

 The node status ready can mean 3 different values.

@@ -1374,7 +1374,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
 		}
 	}

-	// Prepartions for the average health state of the replica-set
+	// Preparations for the average health state of the replica-set
 	replMemberCount := len(newReplStat.Members)
 	replMemberHealthyCount := 0

@@ -7,7 +7,7 @@ The internal field mappings for Netflow v5 fields are defined according to
 [Cisco's Netflow v5 documentation][CISCO NF5], Netflow v9 fields are defined
 according to [Cisco's Netflow v9 documentation][CISCO NF9] and the
 [ASA extensions][ASA extensions].
-Definitions for IPFIX are according to [IANA assignement document][IPFIX doc].
+Definitions for IPFIX are according to [IANA assignment document][IPFIX doc].

 [IANA assignments]: https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
 [CISCO NF5]: https://www.cisco.com/c/en/us/td/docs/net_mgmt/netflow_collection_engine/3-6/user/guide/format.html#wp1006186

@@ -120,7 +120,7 @@ following information
 - dst_port (uint64, destination port)
 - protocol (string, Layer 4 protocol name)
 - in_bytes (uint64, number of incoming bytes)
-- in_packets (uint64, number of incomming packets)
+- in_packets (uint64, number of incoming packets)
 - tcp_flags (string, TCP flags for the flow)

 ## Example Output

@@ -28,7 +28,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 ## Read more low-level metrics (optional, defaults to false)
 # fullstat = false

-## List of mounts to explictly include or exclude (optional)
+## List of mounts to explicitly include or exclude (optional)
 ## The pattern (Go regexp) is matched against the mount point (not the
 ## device being mounted). If include_mounts is set, all mounts are ignored
 ## unless present in the list. If a mount is listed in both include_mounts

@@ -37,7 +37,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 # exclude_mounts = []

 ## List of operations to include or exclude from collecting. This applies
-## only when fullstat=true. Symantics are similar to {include,exclude}_mounts:
+## only when fullstat=true. Semantics are similar to {include,exclude}_mounts:
 ## the default is to collect everything; when include_operations is set, only
 ## those OPs are collected; when exclude_operations is set, all are collected
 ## except those listed. If include and exclude are set, the OP is excluded.

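The include/exclude semantics spelled out in this hunk are a common allow/deny-list pattern; a rough sketch of the decision logic under those rules (illustrative only, not the plugin's code — the comments say the patterns are Go regexps, which is assumed here):

```go
package main

import (
	"fmt"
	"regexp"
)

// collect reports whether an item (a mount point or an OP name) should be
// kept: an empty include list means "collect everything", and a match in the
// exclude list always wins, even when the item is also included.
func collect(name string, include, exclude []string) bool {
	keep := len(include) == 0
	for _, pattern := range include {
		if matched, _ := regexp.MatchString(pattern, name); matched {
			keep = true
		}
	}
	for _, pattern := range exclude {
		if matched, _ := regexp.MatchString(pattern, name); matched {
			return false // listed in both include and exclude -> excluded
		}
	}
	return keep
}

func main() {
	fmt.Println(collect("/mnt/data", []string{"^/mnt/"}, []string{"/mnt/data"})) // false
}
```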
@@ -154,7 +154,7 @@ as it changes occasionally.
 - sillyrenames (int, count): Number of times an in-use file was removed (thus creating a temporary ".nfsXXXXXX" file)
 - shortreads (int, count): Number of times the NFS server returned less data than requested.
 - shortwrites (int, count): Number of times NFS server reports it wrote less data than requested.
-- delay (int, count): Occurances of EJUKEBOX ("Jukebox Delay", probably unused)
+- delay (int, count): Occurrences of EJUKEBOX ("Jukebox Delay", probably unused)
 - pnfsreads (int, count): Count of NFS v4.1+ pNFS reads.
 - pnfswrites (int, count): Count of NFS v4.1+ pNFS writes.

@@ -3,7 +3,7 @@
 ## Read more low-level metrics (optional, defaults to false)
 # fullstat = false

-## List of mounts to explictly include or exclude (optional)
+## List of mounts to explicitly include or exclude (optional)
 ## The pattern (Go regexp) is matched against the mount point (not the
 ## device being mounted). If include_mounts is set, all mounts are ignored
 ## unless present in the list. If a mount is listed in both include_mounts

@@ -12,7 +12,7 @@
 # exclude_mounts = []

 ## List of operations to include or exclude from collecting. This applies
-## only when fullstat=true. Symantics are similar to {include,exclude}_mounts:
+## only when fullstat=true. Semantics are similar to {include,exclude}_mounts:
 ## the default is to collect everything; when include_operations is set, only
 ## those OPs are collected; when exclude_operations is set, all are collected
 ## except those listed. If include and exclude are set, the OP is excluded.

@@ -143,7 +143,7 @@ func (ns *Nstat) loadGoodTable(table []byte) map[string]interface{} {
 }

 // loadUglyTable can be used to parse string heap that
-// the headers and values are splitted with a newline
+// the headers and values are split with a newline
 func (ns *Nstat) loadUglyTable(table []byte) map[string]interface{} {
 	entries := map[string]interface{}{}
 	// split the lines by newline

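For context, the two-line layout `loadUglyTable` deals with (as in files like `/proc/net/snmp`) pairs a header line of counter names with a line of values. A minimal sketch of parsing one such pair — an illustration of the format, not the plugin's implementation:

```go
package main

import (
	"fmt"
	"strings"
)

// parsePair maps each name from the header line to the matching value from
// the values line; trailing names without a value are skipped.
func parsePair(header, values string) map[string]string {
	names := strings.Fields(header)
	vals := strings.Fields(values)
	entries := make(map[string]string, len(names))
	for i, name := range names {
		if i < len(vals) {
			entries[name] = vals[i]
		}
	}
	return entries
}

func main() {
	fmt.Println(parsePair("Forwarding DefaultTTL InReceives", "1 64 1000"))
}
```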
@@ -23,7 +23,7 @@ func launchTestContainer(t *testing.T) *testutil.Container {
 		},
 		WaitingFor: wait.ForAll(
 			// the database comes up twice, once right away, then again a second
-			// time after the docker entrypoint starts configuraiton
+			// time after the docker entrypoint starts configuration
 			wait.ForLog("database system is ready to accept connections").WithOccurrence(2),
 			wait.ForListeningPort(nat.Port(servicePort)),
 		),

@@ -44,7 +44,7 @@ type ConsulQuery struct {
 	serviceURLTemplate       *template.Template
 	serviceExtraTagsTemplate map[string]*template.Template

-	// Store last error status and change log level depending on repeated occurence
+	// Store last error status and change log level depending on repeated occurrence
 	lastQueryFailed bool
 }

@@ -137,7 +137,7 @@ func gatherVMData(px *Proxmox, acc telegraf.Accumulator, rt ResourceType) {
 	tags := getTags(px, vmStat.Name, vmConfig, rt)
 	currentVMStatus, err := getCurrentVMStatus(px, rt, vmStat.ID)
 	if err != nil {
-		px.Log.Errorf("Error getting VM curent VM status: %v", err)
+		px.Log.Errorf("Error getting VM current VM status: %v", err)
 		return
 	}

@@ -39,7 +39,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.

 ## Specify username and password for ACL auth (Redis 6.0+). You can add this
 ## to the server URI above or specify it here. The values here take
-## precidence.
+## precedence.
 # username = ""
 # password = ""

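The precedence rule in this hunk — explicit settings win over credentials embedded in the server URI — could look roughly like the following; a hedged sketch, not the plugin's actual resolution code:

```go
package main

import (
	"fmt"
	"net/url"
)

// resolveCreds prefers explicitly configured credentials over any user info
// embedded in the server URI.
func resolveCreds(server *url.URL, username, password string) (string, string) {
	user, pass := "", ""
	if server.User != nil {
		user = server.User.Username()
		pass, _ = server.User.Password()
	}
	if username != "" {
		user = username
	}
	if password != "" {
		pass = password
	}
	return user, pass
}

func main() {
	u, _ := url.Parse("tcp://uri-user:uri-pass@localhost:6379")
	fmt.Println(resolveCreds(u, "acl-user", "")) // acl-user uri-pass
}
```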
@@ -23,7 +23,7 @@

 ## Specify username and password for ACL auth (Redis 6.0+). You can add this
 ## to the server URI above or specify it here. The values here take
-## precidence.
+## precedence.
 # username = ""
 # password = ""

@@ -248,7 +248,7 @@ One [metric][] is created for each row of the SNMP table.
 ## Controls if entries from secondary table should be added or not
 ## if joining index is present or not. I set to true, means that join
 ## is outer, and index is prepended with "Secondary." for missing values
-## to avoid overlaping indexes from both tables. Can be set per field or
+## to avoid overlapping indexes from both tables. Can be set per field or
 ## globally with SecondaryIndexTable, global true overrides per field false.
 # secondary_outer_join = false
 ```

@@ -223,7 +223,7 @@ type Field struct {
 	// Conversion controls any type conversion that is done on the value.
 	//  "float"/"float(0)" will convert the value into a float.
 	//  "float(X)" will convert the value into a float, and then move the decimal before Xth right-most digit.
-	//  "int" will conver the value into an integer.
+	//  "int" will convert the value into an integer.
 	//  "hwaddr" will convert a 6-byte string to a MAC address.
 	//  "ipaddr" will convert the value to an IPv4 or IPv6 address.
 	//  "enum"/"enum(1)" will convert the value according to its syntax. (Only supported with gosmi translator)

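The `float(X)` rule quoted above is just a decimal shift: interpret the raw value as a float and move the decimal point X digits to the left. A sketch of that conversion (hypothetical helper, not the plugin's code):

```go
package main

import (
	"fmt"
	"math"
)

// floatX mirrors the "float(X)" description above: shift the decimal point of
// a raw integer reading X places to the left, e.g. floatX(2145, 2) == 21.45.
func floatX(raw int64, x int) float64 {
	return float64(raw) / math.Pow(10, float64(x))
}

func main() {
	fmt.Println(floatX(2145, 2)) // 21.45
}
```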
@@ -238,7 +238,7 @@ type Field struct {
 	SecondaryIndexUse bool
 	// Controls if entries from secondary table should be added or not if joining
 	// index is present or not. I set to true, means that join is outer, and
-	// index is prepended with "Secondary." for missing values to avoid overlaping
+	// index is prepended with "Secondary." for missing values to avoid overlapping
 	// indexes from both tables.
 	// Can be set per field or globally with SecondaryIndexTable, global true overrides
 	// per field false.

@@ -625,7 +625,7 @@ atPhysAddress OBJECT-TYPE
             Setting this object to a null string (one of zero
             length) has the effect of invaliding the
             corresponding entry in the atTable object. That
-            is, it effectively dissasociates the interface
+            is, it effectively disassociates the interface
             identified with said entry from the mapping
             identified with said entry. It is an
             implementation-specific matter as to whether the

@@ -1140,7 +1140,7 @@ ipRouteType OBJECT-TYPE
             Setting this object to the value invalid(2) has
             the effect of invalidating the corresponding entry
             in the ipRouteTable object. That is, it
-            effectively dissasociates the destination
+            effectively disassociates the destination
             identified with said entry from the route
             identified with said entry. It is an
             implementation-specific matter as to whether the

@@ -1339,7 +1339,7 @@ ipNetToMediaType OBJECT-TYPE
             Setting this object to the value invalid(2) has
             the effect of invalidating the corresponding entry
             in the ipNetToMediaTable. That is, it effectively
-            dissasociates the interface identified with said
+            disassociates the interface identified with said
             entry from the mapping identified with said entry.
             It is an implementation-specific matter as to
             whether the agent removes an invalidated entry

@@ -112,8 +112,8 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 ## Delimiter used to split received data to messages consumed by the parser.
 ## The delimiter is a hex byte-sequence marking the end of a message
 ## e.g. "0x0D0A", "x0d0a" or "0d0a" marks a Windows line-break (CR LF).
-## The value is case-insensitive and can be specifed with "0x" or "x" prefix
-## or withou.
+## The value is case-insensitive and can be specified with "0x" or "x" prefix
+## or without.
 ## Note: This setting is only used for splitting_strategy = "delimiter".
 # splitting_delimiter = ""

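Normalizing the delimiter forms described in this hunk ("0x0D0A", "x0d0a", "0d0a") into raw bytes is a small exercise with `encoding/hex`; a minimal sketch, assuming only the prefix and case vary (not the plugin's exact parser):

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// parseDelimiter accepts "0x0D0A", "x0d0a" or "0d0a" (any case) and returns
// the raw byte sequence used to split the stream into messages.
func parseDelimiter(spec string) ([]byte, error) {
	s := strings.ToLower(spec)
	s = strings.TrimPrefix(s, "0x")
	s = strings.TrimPrefix(s, "x")
	return hex.DecodeString(s)
}

func main() {
	d, err := parseDelimiter("0x0D0A")
	fmt.Printf("%q %v\n", d, err) // "\r\n" <nil>
}
```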
@@ -81,8 +81,8 @@
 ## Delimiter used to split received data to messages consumed by the parser.
 ## The delimiter is a hex byte-sequence marking the end of a message
 ## e.g. "0x0D0A", "x0d0a" or "0d0a" marks a Windows line-break (CR LF).
-## The value is case-insensitive and can be specifed with "0x" or "x" prefix
-## or withou.
+## The value is case-insensitive and can be specified with "0x" or "x" prefix
+## or without.
 ## Note: This setting is only used for splitting_strategy = "delimiter".
 # splitting_delimiter = ""

@@ -103,7 +103,7 @@ func (l *streamListener) setupVsock(u *url.URL) error {
 	}
 	port, _ := strconv.ParseUint(addrTuple[1], 10, 32)
 	if (port >= uint64(math.Pow(2, 32))-1) && (port <= 0) {
-		return fmt.Errorf("Port numner %d is out of range", port)
+		return fmt.Errorf("Port number %d is out of range", port)
 	}

 	l.listener, err = vsock.Listen(uint32(port), nil)

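Incidentally, the guard in this hunk can never fire: an unsigned port cannot be both at least 2^32-1 and at most 0, and `strconv.ParseUint(..., 10, 32)` already rejects values that do not fit in 32 bits. A sketch of an equivalent check that leans on that error instead — an editor's illustration slotting into the function above, not the committed code:

```go
port, err := strconv.ParseUint(addrTuple[1], 10, 32)
if err != nil {
	// ParseUint with bitSize 32 fails for anything outside 0..2^32-1.
	return fmt.Errorf("invalid vsock port %q: %w", addrTuple[1], err)
}
l.listener, err = vsock.Listen(uint32(port), nil)
```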
@@ -79,7 +79,7 @@ to use them.
 # measurement_column = ""

 ## Column name containing the time of the measurement
-## If ommited, the time of the query will be used.
+## If omitted, the time of the query will be used.
 # time_column = ""

 ## Format of the time contained in 'time_col'

@@ -51,7 +51,7 @@
 # measurement_column = ""

 ## Column name containing the time of the measurement
-## If ommited, the time of the query will be used.
+## If omitted, the time of the query will be used.
 # time_column = ""

 ## Format of the time contained in 'time_col'

@@ -110,7 +110,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 ## By default, telegraf will pass names directly as they are received.
 ## However, upstream statsd now does sanitization of names which can be
 ## enabled by using the "upstream" method option. This option will a) replace
-## white space with '_', replace '/' with '-', and remove charachters not
+## white space with '_', replace '/' with '-', and remove characters not
 ## matching 'a-zA-Z_\-0-9\.;='.
 #sanitize_name_method = ""

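The "upstream" sanitization described in this hunk is mechanical enough to sketch. Assuming the comment's character class is authoritative, it could look like this (an illustration, not the plugin's implementation):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var (
	whitespace = regexp.MustCompile(`\s+`)
	disallowed = regexp.MustCompile(`[^a-zA-Z_\-0-9.;=]`)
)

// sanitizeUpstream mirrors the rules quoted in the comment: whitespace -> "_",
// "/" -> "-", then drop any character outside 'a-zA-Z_\-0-9\.;='.
func sanitizeUpstream(name string) string {
	name = whitespace.ReplaceAllString(name, "_")
	name = strings.ReplaceAll(name, "/", "-")
	return disallowed.ReplaceAllString(name, "")
}

func main() {
	fmt.Println(sanitizeUpstream("my app/requests total")) // my_app-requests_total
}
```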
@@ -127,7 +127,7 @@ func (s *Statsd) parseEventMessage(now time.Time, message string, defaultHostnam
 		}
 	}
 	// Use source tag because host is reserved tag key in Telegraf.
-	// In datadog the host tag and `h:` are interchangable, so we have to chech for the host tag.
+	// In datadog the host tag and `h:` are interchangeable, so we have to check for the host tag.
 	if host, ok := tags["host"]; ok {
 		delete(tags, "host")
 		tags["source"] = host

@@ -83,7 +83,7 @@
 ## By default, telegraf will pass names directly as they are received.
 ## However, upstream statsd now does sanitization of names which can be
 ## enabled by using the "upstream" method option. This option will a) replace
-## white space with '_', replace '/' with '-', and remove charachters not
+## white space with '_', replace '/' with '-', and remove characters not
 ## matching 'a-zA-Z_\-0-9\.;='.
 #sanitize_name_method = ""

@@ -224,7 +224,7 @@ Add the following lines to the rsyslog configuration file

 ```s
 # This makes rsyslog listen on 127.0.0.1:514 to receive RFC3164 udp
-# messages which can them be forwared to telegraf as RFC5424
+# messages which can them be forwarded to telegraf as RFC5424
 $ModLoad imudp #loads the udp module
 $UDPServerAddress 127.0.0.1
 $UDPServerRun 514

@@ -46,7 +46,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 # group = true

 ## Options for the sadf command. The values on the left represent the sadf options and
-## the values on the right their description (wich are used for grouping and prefixing metrics).
+## the values on the right their description (which are used for grouping and prefixing metrics).
 ##
 ## Run 'sar -h' or 'man sar' to find out the supported options for your sysstat version.
 [inputs.sysstat.options]

@@ -26,7 +26,7 @@
 # group = true

 ## Options for the sadf command. The values on the left represent the sadf options and
-## the values on the right their description (wich are used for grouping and prefixing metrics).
+## the values on the right their description (which are used for grouping and prefixing metrics).
 ##
 ## Run 'sar -h' or 'man sar' to find out the supported options for your sysstat version.
 [inputs.sysstat.options]

@@ -286,7 +286,7 @@
     "value": 38
   },
   "MAIN.s_synth": {
-    "description": "Total synthethic responses made",
+    "description": "Total synthetic responses made",
     "type": "MAIN", "flag": "c", "format": "i",
     "value": 0
   },

@@ -213,7 +213,7 @@ brevity, plugin takes only the first line. You can set

 `TimeCreated` field is a string in RFC3339Nano format. By default Telegraf
 parses it as an event timestamp. If there is a field parse error or
-`timestamp_from_event` configration parameter is set to `false`, then event
+`timestamp_from_event` configuration parameter is set to `false`, then event
 timestamp will be set to the exact time when Telegraf has parsed this event, so
 it will be rounded to the nearest minute.

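The fallback described in this hunk is straightforward to sketch; a hedged illustration of that decision (hypothetical helper, not the plugin's code):

```go
package main

import (
	"fmt"
	"time"
)

// eventTime parses TimeCreated as RFC3339Nano when timestamp_from_event is
// enabled, and otherwise (or on a parse error) falls back to "now".
func eventTime(timeCreated string, timestampFromEvent bool) time.Time {
	if timestampFromEvent {
		if ts, err := time.Parse(time.RFC3339Nano, timeCreated); err == nil {
			return ts
		}
	}
	return time.Now()
}

func main() {
	fmt.Println(eventTime("2023-04-25T12:34:56.789Z", true))
}
```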
@@ -103,7 +103,7 @@ func (w *WinEventLog) SetState(state interface{}) error {

 	ptr, err := syscall.UTF16PtrFromString(bookmarkXML)
 	if err != nil {
-		return fmt.Errorf("convertion to pointer failed: %w", err)
+		return fmt.Errorf("conversion to pointer failed: %w", err)
 	}

 	bookmark, err := _EvtCreateBookmark(ptr)

@@ -122,7 +122,7 @@ func (xio *XtremIO) Gather(acc telegraf.Accumulator) error {
 	// Each collector is ran in a goroutine so they can be run in parallel.
 	// Each collector does an initial query to build out the subqueries it
 	// needs to run, which are started here in nested goroutines. A future
-	// refactor opportunity would be for the intial collector goroutines to
+	// refactor opportunity would be for the initial collector goroutines to
 	// return the results while exiting the goroutine, and then a series of
 	// goroutines can be kicked off for the subqueries. That way there is no
 	// nesting of goroutines.

@ -150,7 +150,7 @@ These methods are:
3. Managed Service Identity (MSI) token

   - If you are running Telegraf from Azure VM or infrastructure, then this is
     the prefered authentication method.
     the preferred authentication method.

[register]: https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app#register-an-application

@ -233,7 +233,7 @@ stored as dynamic data type, multiple ways to query this data-
```

**Note** - This approach could have performance impact in case of large
volumes of data, use belwo mentioned approach for such cases.
volumes of data, use below mentioned approach for such cases.

1. Use [Update
   policy](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/updatepolicy)**:

@ -76,7 +76,7 @@ func TestMetricToTable(t *testing.T) {
			expectedTable: "table_with_hyphens",
		},
		{
			name: "custom hypens",
			name: "custom hyphens",
			replaceHyphenTo: "*",
			metricName: "table-with-hyphens",
			expectedTable: "table*with*hyphens",

@ -44,7 +44,7 @@ func (c *Clarify) Init() error {
	if c.Timeout <= 0 {
		c.Timeout = defaultTimeout
	}
	// Not blocking as it doesn't do any http requests, just sets up the necessarry Oauth2 client.
	// Not blocking as it doesn't do any http requests, just sets up the necessary Oauth2 client.
	ctx := context.Background()
	switch {
	case c.CredentialsFile != "":

@ -302,7 +302,7 @@ to use them.
  ## Set to true if you want telegraf to overwrite an existing template
  overwrite_template = false
  ## If set to true a unique ID hash will be sent as sha256(concat(timestamp,measurement,series-hash)) string
  ## it will enable data resend and update metric points avoiding duplicated metrics with diferent id's
  ## it will enable data resend and update metric points avoiding duplicated metrics with different id's
  force_document_id = false

  ## Specifies the handling of NaN and Inf values.

@ -329,9 +329,9 @@ to use them.

If you are using authentication within your Elasticsearch cluster, you need to
create a account and create a role with at least the manage role in the Cluster
Privileges category. Overwise, your account will not be able to connect to your
Elasticsearch cluster and send logs to your cluster. After that, you need to
add "create_indice" and "write" permission to your specific index pattern.
Privileges category. Otherwise, your account will not be able to connect to
your Elasticsearch cluster and send logs to your cluster. After that, you need
to add "create_indice" and "write" permission to your specific index pattern.

### Required parameters

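For illustration only, a role along those lines could be created through the Elasticsearch security API (`POST /_security/role/telegraf`); the role name and index pattern below are placeholders, and recent Elasticsearch versions spell the index privilege `create_index`:

```json
{
  "cluster": ["manage"],
  "indices": [
    {
      "names": ["telegraf-*"],
      "privileges": ["create_index", "write"]
    }
  ]
}
```
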
@ -373,7 +373,7 @@ the `default_tag_value` will be used instead.
  existing template.
* `force_document_id`: Set to true will compute a unique hash from as
  sha256(concat(timestamp,measurement,series-hash)),enables resend or update
  data withoud ES duplicated documents.
  data without ES duplicated documents.
* `float_handling`: Specifies how to handle `NaN` and infinite field
  values. `"none"` (default) will do nothing, `"drop"` will drop the field and
  `replace` will replace the field value by the number in

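As a rough sketch of that ID scheme (not the plugin's actual code; the series-hash input here is a stand-in for whatever the plugin computes):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// documentID illustrates sha256(concat(timestamp, measurement, series-hash)).
// The seriesHash argument stands in for the plugin's real series hash.
func documentID(timestamp int64, measurement, seriesHash string) string {
	sum := sha256.Sum256([]byte(fmt.Sprintf("%d%s%s", timestamp, measurement, seriesHash)))
	return fmt.Sprintf("%x", sum)
}

func main() {
	fmt.Println(documentID(1234567890, "cpu", "a1b2c3"))
}
```

The same inputs always yield the same ID, which is what lets Elasticsearch treat a resent metric as an update rather than a duplicate document.
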
@ -56,7 +56,7 @@
  ## Set to true if you want telegraf to overwrite an existing template
  overwrite_template = false
  ## If set to true a unique ID hash will be sent as sha256(concat(timestamp,measurement,series-hash)) string
  ## it will enable data resend and update metric points avoiding duplicated metrics with diferent id's
  ## it will enable data resend and update metric points avoiding duplicated metrics with different id's
  force_document_id = false

  ## Specifies the handling of NaN and Inf values.

@ -42,7 +42,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"

  ## Compress output data with the specifed algorithm.
  ## Compress output data with the specified algorithm.
  ## If empty, compression will be disabled and files will be plain text.
  ## Supported algorithms are "zstd", "gzip" and "zlib".
  # compression_algorithm = ""

@ -26,7 +26,7 @@
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"

  ## Compress output data with the specifed algorithm.
  ## Compress output data with the specified algorithm.
  ## If empty, compression will be disabled and files will be plain text.
  ## Supported algorithms are "zstd", "gzip" and "zlib".
  # compression_algorithm = ""

@ -52,7 +52,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
  # name_field_no_prefix = false

  ## Connection retry options
  ## Attempt to connect to the enpoints if the initial connection fails.
  ## Attempt to connect to the endpoints if the initial connection fails.
  ## If 'false', Telegraf will give up after 3 connection attempt and will
  ## exit with an error. If set to 'true', the plugin will retry to connect
  ## to the unconnected endpoints infinitely.

@ -266,12 +266,12 @@ func TCPServer(t *testing.T, wg *sync.WaitGroup, tlsConfig *tls.Config, errs cha

	// in TCP scenario only 3 messages are received, the 3rd is lost due to simulated connection break after the 2nd

	fmt.Println("server: receving packet 1")
	fmt.Println("server: receiving packet 1")
	err = recv(conn)
	if err != nil {
		fmt.Println(err)
	}
	fmt.Println("server: receving packet 2")
	fmt.Println("server: receiving packet 2")
	err = recv(conn)
	if err != nil {
		fmt.Println(err)

@ -295,7 +295,7 @@ func TCPServer(t *testing.T, wg *sync.WaitGroup, tlsConfig *tls.Config, errs cha
	}
	defer conn.Close()

	fmt.Println("server: receving packet 4")
	fmt.Println("server: receiving packet 4")
	err = recv(conn)
	if err != nil {
		fmt.Println(err)

@ -17,7 +17,7 @@
  # name_field_no_prefix = false

  ## Connection retry options
  ## Attempt to connect to the enpoints if the initial connection fails.
  ## Attempt to connect to the endpoints if the initial connection fails.
  ## If 'false', Telegraf will give up after 3 connection attempt and will
  ## exit with an error. If set to 'true', the plugin will retry to connect
  ## to the unconnected endpoints infinitely.

@ -392,7 +392,7 @@ func (c *httpClient) writeBatch(ctx context.Context, db, rp string, metrics []te
		return nil
	}

	// This error handles if there is an invaild or missing retention policy
	// This error handles if there is an invalid or missing retention policy
	if strings.Contains(desc, errStringRetentionPolicyNotFound) {
		c.log.Errorf("When writing to [%s]: received error %v", c.URL(), desc)
		return nil

@ -480,7 +480,7 @@ func (c *httpClient) makeWriteRequest(address string, body io.Reader) (*http.Req
	return req, nil
}

// requestBodyReader warp io.Reader from influx.NewReader to io.ReadCloser, which is usefully to fast close the write
// requestBodyReader warp io.Reader from influx.NewReader to io.ReadCloser, which is useful to fast close the write
// side of the connection in case of error
func (c *httpClient) requestBodyReader(metrics []telegraf.Metric) io.ReadCloser {
	reader := influx.NewReader(metrics, c.config.Serializer)

@ -397,7 +397,7 @@ func (c *httpClient) makeWriteRequest(address string, body io.Reader) (*http.Req
	return req, nil
}

// requestBodyReader warp io.Reader from influx.NewReader to io.ReadCloser, which is usefully to fast close the write
// requestBodyReader warp io.Reader from influx.NewReader to io.ReadCloser, which is useful to fast close the write
// side of the connection in case of error
func (c *httpClient) requestBodyReader(metrics []telegraf.Metric) io.ReadCloser {
	reader := influx.NewReader(metrics, c.serializer)

@ -53,7 +53,7 @@ to use them.

  ## The routing tag specifies a tagkey on the metric whose value is used as
  ## the message key. The message key is used to determine which partition to
  ## send the message to. This tag is prefered over the routing_key option.
  ## send the message to. This tag is preferred over the routing_key option.
  routing_tag = "host"

  ## The routing key is set as the message key and used to determine which

@ -27,7 +27,7 @@

  ## The routing tag specifies a tagkey on the metric whose value is used as
  ## the message key. The message key is used to determine which partition to
  ## send the message to. This tag is prefered over the routing_key option.
  ## send the message to. This tag is preferred over the routing_key option.
  routing_tag = "host"

  ## The routing key is set as the message key and used to determine which

@ -38,12 +38,12 @@ type Librato struct {
// https://www.librato.com/docs/kb/faq/best_practices/naming_convention_metrics_sources.html#naming-limitations-for-sources-and-metrics
var reUnacceptedChar = regexp.MustCompile("[^.a-zA-Z0-9_-]")

// LMetrics is the default struct for Librato's API fromat
// LMetrics is the default struct for Librato's API format
type LMetrics struct {
	Gauges []*Gauge `json:"gauges"`
}

// Gauge is the gauge format for Librato's API fromat
// Gauge is the gauge format for Librato's API format
type Gauge struct {
	Name string `json:"name"`
	Value float64 `json:"value"`

@ -106,7 +106,7 @@ func (l *Librato) Write(metrics []telegraf.Metric) error {
	}

	metricCounter := len(tempGauges)
	// make sur we send a batch of maximum 300
	// make sure we send a batch of maximum 300
	sizeBatch := 300
	for start := 0; start < metricCounter; start += sizeBatch {
		err := l.writeBatch(start, sizeBatch, metricCounter, tempGauges)

@ -322,7 +322,7 @@ func getTargetIndexers(metrics []telegraf.Metric, osInst *Opensearch) map[string
		}
		bulkIndxr, err := createBulkIndexer(osInst, pipelineName)
		if err != nil {
			osInst.Log.Errorf("error while intantiating OpenSearch NewBulkIndexer: %v for pipeline: %s", err, pipelineName)
			osInst.Log.Errorf("error while instantiating OpenSearch NewBulkIndexer: %v for pipeline: %s", err, pipelineName)
		} else {
			indexers[pipelineName] = bulkIndxr
		}

@ -332,7 +332,7 @@ func getTargetIndexers(metrics []telegraf.Metric, osInst *Opensearch) map[string

	bulkIndxr, err := createBulkIndexer(osInst, "")
	if err != nil {
		osInst.Log.Errorf("error while intantiating OpenSearch NewBulkIndexer: %v for default pipeline", err)
		osInst.Log.Errorf("error while instantiating OpenSearch NewBulkIndexer: %v for default pipeline", err)
	} else {
		indexers["default"] = bulkIndxr
	}

@ -28,7 +28,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
  ##
  ## Root certificates for verifying server certificates encoded in PEM format.
  # tls_ca = "/etc/telegraf/ca.pem"
  ## The public and private keypairs for the client encoded in PEM format.
  ## The public and private key pairs for the client encoded in PEM format.
  ## May contain intermediate certificates.
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"

@ -11,7 +11,7 @@
  ##
  ## Root certificates for verifying server certificates encoded in PEM format.
  # tls_ca = "/etc/telegraf/ca.pem"
  ## The public and private keypairs for the client encoded in PEM format.
  ## The public and private key pairs for the client encoded in PEM format.
  ## May contain intermediate certificates.
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"

@ -205,7 +205,7 @@ func (p *Postgresql) registerUint8(_ context.Context, conn *pgx.Conn) error {
		}
		row := conn.QueryRow(p.dbContext, "SELECT oid FROM pg_type WHERE typname=$1", dt.Name)
		if err := row.Scan(&dt.OID); err != nil {
			return fmt.Errorf("retreiving OID for uint8 data type: %w", err)
			return fmt.Errorf("retrieving OID for uint8 data type: %w", err)
		}
		p.pguint8 = &dt
	}

@ -220,7 +220,7 @@ func newPostgresqlTest(tb testing.TB) *PostgresqlTest {
		},
		WaitingFor: wait.ForAll(
			// the database comes up twice, once right away, then again a second
			// time after the docker entrypoint starts configuraiton
			// time after the docker entrypoint starts configuration
			wait.ForLog("database system is ready to accept connections").WithOccurrence(2),
			wait.ForListeningPort(nat.Port(servicePort)),
		),

@ -761,7 +761,7 @@ func TestWriteIntegration_UnsignedIntegers(t *testing.T) {
	p.Uint64Type = PgUint8
	_ = p.Init()
	if err := p.Connect(); err != nil {
		if strings.Contains(err.Error(), "retreiving OID for uint8 data type") {
		if strings.Contains(err.Error(), "retrieving OID for uint8 data type") {
			t.Skipf("pguint extension is not installed")
			t.SkipNow()
		}

@ -229,7 +229,7 @@ func TestTableManagerIntegration_MatchSource_UnsignedIntegers(t *testing.T) {
	p.Uint64Type = PgUint8
	_ = p.Init()
	if err := p.Connect(); err != nil {
		if strings.Contains(err.Error(), "retreiving OID for uint8 data type") {
		if strings.Contains(err.Error(), "retrieving OID for uint8 data type") {
			t.Skipf("pguint extension is not installed")
			t.SkipNow()
		}

@ -80,7 +80,7 @@ func GetTagID(metric telegraf.Metric) int64 {
	return int64(hash.Sum64())
}

// WaitGroup is similar to sync.WaitGroup, but allows interruptable waiting (e.g. a timeout).
// WaitGroup is similar to sync.WaitGroup, but allows interruptible waiting (e.g. a timeout).
type WaitGroup struct {
	count int32
	done chan struct{}

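The pattern such a WaitGroup enables can be sketched with the standard library alone; the helper below is an illustration of the idea, not this package's API:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// doneChan returns a channel that is closed once the WaitGroup's count
// reaches zero, so callers can select on completion versus a timeout.
func doneChan(wg *sync.WaitGroup) <-chan struct{} {
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()
	return done
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		time.Sleep(100 * time.Millisecond) // simulated work
	}()

	select {
	case <-doneChan(&wg):
		fmt.Println("all goroutines finished")
	case <-time.After(5 * time.Second):
		fmt.Println("gave up waiting")
	}
}
```
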
@ -34,7 +34,7 @@ to use them.

  ## You can optionally provide a custom ingest url instead of the
  ## signalfx_realm option above if you are using a gateway or proxy
  ## instance. This option takes precident over signalfx_realm.
  ## instance. This option takes precedence over signalfx_realm.
  ingest_url = "https://my-custom-ingest/"

  ## Event typed metrics are omitted by default,

@ -8,7 +8,7 @@

  ## You can optionally provide a custom ingest url instead of the
  ## signalfx_realm option above if you are using a gateway or proxy
  ## instance. This option takes precident over signalfx_realm.
  ## instance. This option takes precedence over signalfx_realm.
  ingest_url = "https://my-custom-ingest/"

  ## Event typed metrics are omitted by default,

@ -75,7 +75,7 @@ func (sw *SocketWriter) Connect() error {
	}
	port, _ := strconv.ParseUint(addrTuple[1], 10, 32)
	if (port >= uint64(math.Pow(2, 32))-1) && (port <= 0) {
		return fmt.Errorf("Port numner %d is out of range", port)
		return fmt.Errorf("Port number %d is out of range", port)
	}
	c, err = vsock.Dial(uint32(cid), uint32(port), nil)
	if err != nil {

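As an aside, the guard shown above can never fire: an unsigned value cannot be both at the top of the uint32 range and at most zero. A sketch of the apparently intended check, relying on the fact that `strconv.ParseUint` with a bit size of 32 already rejects values above the uint32 range:

```go
package main

import (
	"fmt"
	"strconv"
)

// parsePort returns an error for anything outside the valid uint32 range;
// checking ParseUint's error replaces the unreachable range comparison.
func parsePort(s string) (uint32, error) {
	port, err := strconv.ParseUint(s, 10, 32)
	if err != nil {
		return 0, fmt.Errorf("port number %q is out of range: %w", s, err)
	}
	return uint32(port), nil
}

func main() {
	fmt.Println(parsePort("8080"))       // ok
	fmt.Println(parsePort("4294967296")) // error: above the uint32 range
}
```
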
@ -37,7 +37,7 @@ driver selected.
Through the nature of the inputs plugins, the amounts of columns inserted within
rows for a given metric may differ. Since the tables are created based on the
tags and fields available within an input metric, it's possible the created
table won't contain all the neccessary columns. You might need to initialize
table won't contain all the necessary columns. You might need to initialize
the schema yourself, to avoid this scenario.

## Advanced options

@ -71,7 +71,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.

  ## Override metric type by metric name
  ## Metric names matching the values here, globbing supported, will have the
  ## metric type set to the cooresponding type.
  ## metric type set to the corresponding type.
  # metric_counter = []
  # metric_gauge = []
  # metric_histogram = []

@ -36,7 +36,7 @@

  ## Override metric type by metric name
  ## Metric names matching the values here, globbing supported, will have the
  ## metric type set to the cooresponding type.
  ## metric type set to the corresponding type.
  # metric_counter = []
  # metric_gauge = []
  # metric_histogram = []

@ -22,7 +22,7 @@ The metric name will be set according the following priority:
in the schema definition.

In case if the metric name could not be determined according to these steps
the error will be rised and the message will not be parsed.
the error will be raised and the message will not be parsed.

## Configuration

@ -282,7 +282,7 @@ func TestParseLine(t *testing.T) {
		require.NoError(t, err)

		if m.Name() != test.measurement {
			t.Fatalf("name parse failer. expected %v, got %v",
			t.Fatalf("name parse failed. expected %v, got %v",
				test.measurement, m.Name())
		}
		if len(m.Tags()) != len(test.tags) {

@ -394,7 +394,7 @@ func TestParse(t *testing.T) {
		require.NoError(t, err)

		if metrics[0].Name() != test.measurement {
			t.Fatalf("name parse failer. expected %v, got %v",
			t.Fatalf("name parse failed. expected %v, got %v",
				test.measurement, metrics[0].Name())
		}
		if len(metrics[0].Tags()) != len(test.tags) {

@ -271,7 +271,7 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected dataNode, config
	}

	// By default take the time the parser was invoked and override the value
	// with the queried timestamp if an expresion was specified.
	// with the queried timestamp if an expression was specified.
	timestamp = starttime
	if len(config.Timestamp) > 0 {
		v, err := p.executeQuery(doc, selected, config.Timestamp)

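In configuration terms, the override corresponds to the parser's timestamp query; a hypothetical sketch, with the file name, query, and format value as illustrative assumptions:

```toml
[[inputs.file]]
  files = ["example.xml"]
  data_format = "xpath_xml"

  [[inputs.file.xpath]]
    ## If no timestamp query is given, the time the parser was invoked is used.
    timestamp = "/Device/Timestamp"
    timestamp_format = "2006-01-02T15:04:05Z07:00"
```
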
@ -550,7 +550,7 @@ func (p *Parser) executeQuery(doc, selected dataNode, query string) (r interface
func splitLastPathElement(query string) []string {
	// This is a rudimentary xpath-parser that splits the path
	// into the last path element and the remaining path-part.
	// The last path element is then further splitted into
	// The last path element is then further split into
	// parts such as attributes or selectors. Each returned
	// element is a full path!

@ -434,7 +434,7 @@ func TestConverter(t *testing.T) {
			},
		},
		{
			name: "from string field hexidecimal",
			name: "from string field hexadecimal",
			converter: &Converter{
				Fields: &Conversion{
					Integer: []string{"a"},

@ -11,7 +11,7 @@ access the metric name via `{{.Name}}`, the tag values via `{{.Tag "mytag"}}`,
with `mytag` being the tag-name and field-values via `{{.Field "myfield"}}`,
with `myfield` being the field-name. Non-existing tags and field will result
in an empty string or `nil` respectively. In case the key cannot be found, the
metric is passed-trough unchanged. By default all matching tags are added and
metric is passed-through unchanged. By default all matching tags are added and
existing tag-values are overwritten.

Please note: The plugin only supports the addition of tags and thus all mapped

@ -177,7 +177,7 @@ func TestAddNoise(t *testing.T) {
	}
}

// Tests that int64 & uint64 overflow errors are catched
// Tests that int64 & uint64 overflow errors are caught
func TestAddNoiseOverflowCheck(t *testing.T) {
	tests := []struct {
		name string

@ -3378,7 +3378,7 @@ def apply(metric):
			source: `
def apply(metric):
    newmetric = Metric("new_metric")
    newmetric.fields["vaue"] = 42
    newmetric.fields["value"] = 42
    return newmetric
`,
		},

@ -3388,7 +3388,7 @@ def apply(metric):
			source: `
def apply(metric):
    newmetric = Metric("new_metric")
    newmetric.fields["vaue"] = 42
    newmetric.fields["value"] = 42
    return [newmetric]
`,
		},

@ -3398,7 +3398,7 @@ def apply(metric):
			source: `
def apply(metric):
    newmetric = Metric("new_metric")
    newmetric.fields["vaue"] = 42
    newmetric.fields["value"] = 42
    return [metric, newmetric]
`,
		},

@ -16,9 +16,9 @@
# - Deletes the host, type, topic, name and alias tags
#
# TODO:
# The requirment that a DBIRTH message has to be received before DDATA messages
# The requirement that a DBIRTH message has to be received before DDATA messages
# can be used creates a significant reliability issue and a debugging mess.
# I have to go into the Groov EPIC controller and restart the MQTT client everytime
# I have to go into the Groov EPIC controller and restart the MQTT client every time
# I restart the telegraf loader. This has caused many hours of needless frustration.
#
# I see two possible solutions:

@ -86,7 +86,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.

### Add all fields as a tag

Sometimes it is usefull to pass all fields with their values into a single
Sometimes it is useful to pass all fields with their values into a single
message for sending it to a monitoring system (e.g. Syslog, GroundWork), then
you can use `.Fields` or `.Tags`:

@ -43,7 +43,7 @@ store usage.
  # service = "custom"

  ## Setting to overwrite the queried token-endpoint
  ## This setting is optional for some serices but mandatory for others such
  ## This setting is optional for some services but mandatory for others such
  ## as "custom" or "auth0". Please check the documentation at
  ## https://github.com/influxdata/telegraf/blob/master/plugins/secretstores/oauth2/README.md
  # token_endpoint = ""

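For instance, with one of the services that requires an explicit endpoint (the store id and tenant URL below are placeholders):

```toml
[[secretstores.oauth2]]
  id = "mysecrets"
  service = "auth0"
  ## Mandatory for services such as "custom" or "auth0"
  token_endpoint = "https://mytenant.auth0.com/oauth/token"
```
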
@ -10,7 +10,7 @@
  # service = "custom"

  ## Setting to overwrite the queried token-endpoint
  ## This setting is optional for some serices but mandatory for others such
  ## This setting is optional for some services but mandatory for others such
  ## as "custom" or "auth0". Please check the documentation at
  ## https://github.com/influxdata/telegraf/blob/master/plugins/secretstores/oauth2/README.md
  # token_endpoint = ""

@ -41,12 +41,12 @@ metric=name field=field_N host=foo 59 1234567890
`Carbon2` serializer has a configuration option - `carbon2_format` - to change how
metrics names are being constructed.

By default `metric` will only inclue the metric name and a separate field `field`
By default `metric` will only include the metric name and a separate field `field`
will contain the field name.
This is the behavior of `carbon2_format = "field_separate"` which is the default
behavior (even if unspecified).

Optionally user can opt in to change this to make the metric inclue the field name
Optionally user can opt in to change this to make the metric include the field name
after the `_`.
This is the behavior of `carbon2_format = "metric_includes_field"` which would
make the above example look like:

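Selecting between the two behaviors is then a one-line serializer option on the output; a minimal sketch using the file output:

```toml
[[outputs.file]]
  files = ["stdout"]
  data_format = "carbon2"
  ## "field_separate" (default) or "metric_includes_field"
  carbon2_format = "metric_includes_field"
```
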
@ -159,7 +159,7 @@ to get
}
```

It is also possible to do arithmetics or renaming
It is also possible to do arithmetic or renaming

```json
{

@ -183,7 +183,7 @@ will result in

### Batch mode

When an output plugin emits multiple metrics in a batch fashion it might be usefull
When an output plugin emits multiple metrics in a batch fashion it might be useful
to restructure and/or combine the metric elements. We will use the following input
example in this section

@ -24,7 +24,7 @@ Output of this format is MessagePack binary representation of metrics that have
}
```

MessagePack has it's own timestamp representation. You can find additional informations from [MessagePack specification](https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type).
MessagePack has it's own timestamp representation. You can find additional information from [MessagePack specification](https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type).

## MessagePack Configuration

@ -95,7 +95,7 @@ func TestSerializeMultipleMetric(t *testing.T) {
	encoded, err := s.Serialize(m)
	require.NoError(t, err)

	// Multiple metrics in continous bytes stream
	// Multiple metrics in continuous bytes stream
	var buf []byte
	buf = append(buf, encoded...)
	buf = append(buf, encoded...)

@ -183,7 +183,7 @@ An example configuration of a file based output is:
Splunk supports only numeric field values, so serializer would silently drop metrics with the string values. For some cases it is possible to workaround using ENUM processor. Example, provided below doing this for the `docker_container_health.health_status` metric:

```toml
# splunkmetric does not support sting values
# splunkmetric does not support string values
[[processors.enum]]
  namepass = ["docker_container_health"]
  [[processors.enum.mapping]]

@ -22,7 +22,7 @@ type Container struct {
	packageManager string
}

// create contianer with given name and image
// create container with given name and image
func (c *Container) Create(image string) error {
	if c.Name == "" {
		return fmt.Errorf("unable to create container: no name given")