docs: Clean up markdown add warning about tables (#13792)

This commit is contained in:
Joshua Powers 2023-08-28 14:07:45 -06:00 committed by GitHub
parent cb488ad0f8
commit 318a4b69e5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
32 changed files with 642 additions and 464 deletions

View File

@ -106,46 +106,55 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
## How often the discovery API call executed (default 1m)
#discovery_interval = "1m"
## Metrics to Pull (Required)
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Metrics to Pull
## At least one metrics definition required
[[inputs.aliyuncms.metrics]]
## Metrics names to be requested,
## Description can be found here (per project):
## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
names = ["InstanceActiveConnection", "InstanceNewConnection"]
## Metrics names to be requested,
## Description can be found here (per project):
## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
names = ["InstanceActiveConnection", "InstanceNewConnection"]
## Dimension filters for Metric (optional).
## This allows to get additional metric dimension. If dimension is not
## specified it can be returned or the data can be aggregated - it depends
## on particular metric, you can find details here:
## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
##
## Note, that by default dimension filter includes the list of discovered
## objects in scope (if discovery is enabled).
# Values specified here would be added into the list of discovered objects.
## You can specify either single dimension:
#dimensions = '{"instanceId": "p-example"}'
## Dimension filters for Metric (optional)
## This allows to get additional metric dimension. If dimension is not
## specified it can be returned or the data can be aggregated - it depends
## on particular metric, you can find details here:
## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
##
## Note, that by default dimension filter includes the list of discovered
## objects in scope (if discovery is enabled). Values specified here would
## be added into the list of discovered objects. You can specify either
## single dimension:
# dimensions = '{"instanceId": "p-example"}'
## Or you can specify several dimensions at once:
#dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]'
## Or you can specify several dimensions at once:
# dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]'
## Enrichment tags, can be added from discovery (if supported)
## Notation is <measurement_tag_name>:<JMES query path (https://jmespath.org/tutorial.html)>
## To figure out which fields are available, consult the Describe<ObjectType>
## API per project.
## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO
#tag_query_path = [
# "address:Address",
# "name:LoadBalancerName",
# "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]"
# ]
## The following tags added by default:
## regionId (if discovery enabled), userId, instanceId.
## Tag Query Path
## The following tags added by default:
## * regionId (if discovery enabled)
## * userId
## * instanceId
## Enrichment tags, can be added from discovery (if supported)
## Notation is
## <measurement_tag_name>:<JMES query path (https://jmespath.org/tutorial.html)>
## To figure out which fields are available, consult the
## Describe<ObjectType> API per project. For example, for SLB see:
## https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO
# tag_query_path = [
# "address:Address",
# "name:LoadBalancerName",
# "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]"
# ]
## Allow metrics without discovery data, if discovery is enabled.
## If set to true, then metrics without discovery data will be emitted, otherwise dropped.
## This can be of help when debugging dimension filters, or partial coverage of
## discovery scope vs monitoring scope
# allow_dps_without_discovery = false
## Allow metrics without discovery data, if discovery is enabled.
## If set to true, then metrics without discovery data will be emitted, otherwise dropped.
## This can be of help when debugging dimension filters, or partial coverage of
## discovery scope vs monitoring scope
# allow_dps_without_discovery = false
```
### Requirements and Terminology

View File

@ -69,43 +69,52 @@
## How often the discovery API call executed (default 1m)
#discovery_interval = "1m"
## Metrics to Pull (Required)
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Metrics to Pull
## At least one metrics definition required
[[inputs.aliyuncms.metrics]]
## Metrics names to be requested,
## Description can be found here (per project):
## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
names = ["InstanceActiveConnection", "InstanceNewConnection"]
## Metrics names to be requested,
## Description can be found here (per project):
## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
names = ["InstanceActiveConnection", "InstanceNewConnection"]
## Dimension filters for Metric (optional).
## This allows to get additional metric dimension. If dimension is not
## specified it can be returned or the data can be aggregated - it depends
## on particular metric, you can find details here:
## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
##
## Note, that by default dimension filter includes the list of discovered
## objects in scope (if discovery is enabled).
# Values specified here would be added into the list of discovered objects.
## You can specify either single dimension:
#dimensions = '{"instanceId": "p-example"}'
## Dimension filters for Metric (optional)
## This allows to get additional metric dimension. If dimension is not
## specified it can be returned or the data can be aggregated - it depends
## on particular metric, you can find details here:
## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
##
## Note, that by default dimension filter includes the list of discovered
## objects in scope (if discovery is enabled). Values specified here would
## be added into the list of discovered objects. You can specify either
## single dimension:
# dimensions = '{"instanceId": "p-example"}'
## Or you can specify several dimensions at once:
#dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]'
## Or you can specify several dimensions at once:
# dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]'
## Enrichment tags, can be added from discovery (if supported)
## Notation is <measurement_tag_name>:<JMES query path (https://jmespath.org/tutorial.html)>
## To figure out which fields are available, consult the Describe<ObjectType>
## API per project.
## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO
#tag_query_path = [
# "address:Address",
# "name:LoadBalancerName",
# "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]"
# ]
## The following tags added by default:
## regionId (if discovery enabled), userId, instanceId.
## Tag Query Path
## The following tags added by default:
## * regionId (if discovery enabled)
## * userId
## * instanceId
## Enrichment tags, can be added from discovery (if supported)
## Notation is
## <measurement_tag_name>:<JMES query path (https://jmespath.org/tutorial.html)>
## To figure out which fields are available, consult the
## Describe<ObjectType> API per project. For example, for SLB see:
## https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO
# tag_query_path = [
# "address:Address",
# "name:LoadBalancerName",
# "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]"
# ]
## Allow metrics without discovery data, if discovery is enabled.
## If set to true, then metrics without discovery data will be emitted, otherwise dropped.
## This can be of help when debugging dimension filters, or partial coverage of
## discovery scope vs monitoring scope
# allow_dps_without_discovery = false
## Allow metrics without discovery data, if discovery is enabled.
## If set to true, then metrics without discovery data will be emitted, otherwise dropped.
## This can be of help when debugging dimension filters, or partial coverage of
## discovery scope vs monitoring scope
# allow_dps_without_discovery = false

View File

@ -40,7 +40,7 @@ to use them.
## Configuration
```toml @sample.conf
-# Read metrics from one or many vCenters
# Read metrics from one or many vCenters
[[inputs.vsphere]]
## List of vCenter URLs to be monitored. These three lines must be uncommented
## and edited for the plugin to work.

View File

@ -1,4 +1,4 @@
-# Read metrics from one or many vCenters
# Read metrics from one or many vCenters
[[inputs.vsphere]]
## List of vCenter URLs to be monitored. These three lines must be uncommented
## and edited for the plugin to work.

View File

@ -302,143 +302,172 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
## Configuration
```toml @sample.conf
# # Input plugin to counterPath Performance Counters on Windows operating systems
# # This plugin ONLY supports Windows
# [[inputs.win_perf_counters]]
# ## By default this plugin returns basic CPU and Disk statistics.
# ## See the README file for more examples.
# ## Uncomment examples below or write your own as you see fit. If the system
# ## being polled for data does not have the Object at startup of the Telegraf
# ## agent, it will not be gathered.
# ## Settings:
# # PrintValid = false # Print All matching performance counters
# # Whether to request a timestamp along with the PerfCounter data or just use current time
# # UsePerfCounterTime=true
# # If UseWildcardsExpansion params is set to true, wildcards (partial wildcards in instance names and wildcards in counters names) in configured counter paths will be expanded
# # and in case of localized Windows, counter paths will be also localized. It also returns instance indexes in instance names.
# # If false, wildcards (not partial) in instance names will still be expanded, but instance indexes will not be returned in instance names.
# #UseWildcardsExpansion = false
# # When running on a localized version of Windows and with UseWildcardsExpansion = true, Windows will
# # localize object and counter names. When LocalizeWildcardsExpansion = false, use the names in object.Counters instead
# # of the localized names. Only Instances can have wildcards in this case. ObjectName and Counters must not have wildcards when this
# # setting is false.
# #LocalizeWildcardsExpansion = true
# # Period after which counters will be reread from configuration and wildcards in counter paths expanded
# CountersRefreshInterval="1m"
# ## Accepts a list of PDH error codes which are defined in pdh.go, if this error is encountered it will be ignored
# ## For example, you can provide "PDH_NO_DATA" to ignore performance counters with no instances
# ## By default no errors are ignored
# ## You can find the list here: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/win_perf_counters/pdh.go
# ## e.g.: IgnoredErrors = ["PDH_NO_DATA"]
# # IgnoredErrors = []
#
# [[inputs.win_perf_counters.object]]
# # Processor usage, alternative to native, reports on a per core.
# ObjectName = "Processor"
# Instances = ["*"]
# Counters = [
# "% Idle Time",
# "% Interrupt Time",
# "% Privileged Time",
# "% User Time",
# "% Processor Time",
# "% DPC Time",
# ]
# Measurement = "win_cpu"
# # Set to true to include _Total instance when querying for all (*).
# # IncludeTotal=false
# # Print out when the performance counter is missing from object, counter or instance.
# # WarnOnMissing = false
# # Gather raw values instead of formatted. Raw value is stored in the field name with the "_Raw" suffix, e.g. "Disk_Read_Bytes_sec_Raw".
# # UseRawValues = true
#
# [[inputs.win_perf_counters.object]]
# # Disk times and queues
# ObjectName = "LogicalDisk"
# Instances = ["*"]
# Counters = [
# "% Idle Time",
# "% Disk Time",
# "% Disk Read Time",
# "% Disk Write Time",
# "% User Time",
# "% Free Space",
# "Current Disk Queue Length",
# "Free Megabytes",
# ]
# Measurement = "win_disk"
#
# [[inputs.win_perf_counters.object]]
# ObjectName = "PhysicalDisk"
# Instances = ["*"]
# Counters = [
# "Disk Read Bytes/sec",
# "Disk Write Bytes/sec",
# "Current Disk Queue Length",
# "Disk Reads/sec",
# "Disk Writes/sec",
# "% Disk Time",
# "% Disk Read Time",
# "% Disk Write Time",
# ]
# Measurement = "win_diskio"
#
# [[inputs.win_perf_counters.object]]
# ObjectName = "Network Interface"
# Instances = ["*"]
# Counters = [
# "Bytes Received/sec",
# "Bytes Sent/sec",
# "Packets Received/sec",
# "Packets Sent/sec",
# "Packets Received Discarded",
# "Packets Outbound Discarded",
# "Packets Received Errors",
# "Packets Outbound Errors",
# ]
# Measurement = "win_net"
#
#
# [[inputs.win_perf_counters.object]]
# ObjectName = "System"
# Counters = [
# "Context Switches/sec",
# "System Calls/sec",
# "Processor Queue Length",
# "System Up Time",
# ]
# Instances = ["------"]
# Measurement = "win_system"
#
# [[inputs.win_perf_counters.object]]
# # Example counterPath where the Instance portion must be removed to get data back,
# # such as from the Memory object.
# ObjectName = "Memory"
# Counters = [
# "Available Bytes",
# "Cache Faults/sec",
# "Demand Zero Faults/sec",
# "Page Faults/sec",
# "Pages/sec",
# "Transition Faults/sec",
# "Pool Nonpaged Bytes",
# "Pool Paged Bytes",
# "Standby Cache Reserve Bytes",
# "Standby Cache Normal Priority Bytes",
# "Standby Cache Core Bytes",
# ]
# Instances = ["------"] # Use 6 x - to remove the Instance bit from the counterPath.
# Measurement = "win_mem"
#
# [[inputs.win_perf_counters.object]]
# # Example query where the Instance portion must be removed to get data back,
# # such as from the Paging File object.
# ObjectName = "Paging File"
# Counters = [
# "% Usage",
# ]
# Instances = ["_Total"]
# Measurement = "win_swap"
# Input plugin to counterPath Performance Counters on Windows operating systems
# This plugin ONLY supports Windows
[[inputs.win_perf_counters]]
## By default this plugin returns basic CPU and Disk statistics. See the
## README file for more examples. Uncomment examples below or write your own
## as you see fit. If the system being polled for data does not have the
## Object at startup of the Telegraf agent, it will not be gathered.
## Print All matching performance counters
# PrintValid = false
## Whether to request a timestamp along with the PerfCounter data or use
## current time
# UsePerfCounterTime = true
## If UseWildcardsExpansion params is set to true, wildcards (partial
## wildcards in instance names and wildcards in counters names) in configured
## counter paths will be expanded and in case of localized Windows, counter
## paths will be also localized. It also returns instance indexes in instance
## names. If false, wildcards (not partial) in instance names will still be
## expanded, but instance indexes will not be returned in instance names.
# UseWildcardsExpansion = false
## When running on a localized version of Windows and with
## UseWildcardsExpansion = true, Windows will localize object and counter
## names. When LocalizeWildcardsExpansion = false, use the names in
## object.Counters instead of the localized names. Only Instances can have
## wildcards in this case. ObjectName and Counters must not have wildcards
## when this setting is false.
# LocalizeWildcardsExpansion = true
## Period after which counters will be reread from configuration and
## wildcards in counter paths expanded
# CountersRefreshInterval="1m"
## Accepts a list of PDH error codes which are defined in pdh.go, if this
## error is encountered it will be ignored. For example, you can provide
## "PDH_NO_DATA" to ignore performance counters with no instances. By default
## no errors are ignored. You can find the list here:
## https://github.com/influxdata/telegraf/blob/master/plugins/inputs/win_perf_counters/pdh.go
## e.g. IgnoredErrors = ["PDH_NO_DATA"]
# IgnoredErrors = []
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
# [[inputs.win_perf_counters.object]]
# Measurement = ""
# ObjectName = ""
# Instances = [""]
# Counters = []
## Additional Object Settings
## * IncludeTotal: set to true to include _Total instance when querying
## for all metrics via '*'
## * WarnOnMissing: print out when the performance counter is missing
## from object, counter or instance
## * UseRawValues: gather raw values instead of formatted. Raw values are
## stored in the field name with the "_Raw" suffix, e.g.
## "Disk_Read_Bytes_sec_Raw".
# IncludeTotal = false
# WarnOnMissing = false
# UseRawValues = false
## Processor usage, alternative to native, reports on a per core.
# [[inputs.win_perf_counters.object]]
# Measurement = "win_cpu"
# ObjectName = "Processor"
# Instances = ["*"]
# UseRawValues = true
# Counters = [
# "% Idle Time",
# "% Interrupt Time",
# "% Privileged Time",
# "% User Time",
# "% Processor Time",
# "% DPC Time",
# ]
## Disk times and queues
# [[inputs.win_perf_counters.object]]
# Measurement = "win_disk"
# ObjectName = "LogicalDisk"
# Instances = ["*"]
# Counters = [
# "% Idle Time",
# "% Disk Time",
# "% Disk Read Time",
# "% Disk Write Time",
# "% User Time",
# "% Free Space",
# "Current Disk Queue Length",
# "Free Megabytes",
# ]
# [[inputs.win_perf_counters.object]]
# Measurement = "win_diskio"
# ObjectName = "PhysicalDisk"
# Instances = ["*"]
# Counters = [
# "Disk Read Bytes/sec",
# "Disk Write Bytes/sec",
# "Current Disk Queue Length",
# "Disk Reads/sec",
# "Disk Writes/sec",
# "% Disk Time",
# "% Disk Read Time",
# "% Disk Write Time",
# ]
# [[inputs.win_perf_counters.object]]
# Measurement = "win_net"
# ObjectName = "Network Interface"
# Instances = ["*"]
# Counters = [
# "Bytes Received/sec",
# "Bytes Sent/sec",
# "Packets Received/sec",
# "Packets Sent/sec",
# "Packets Received Discarded",
# "Packets Outbound Discarded",
# "Packets Received Errors",
# "Packets Outbound Errors",
# ]
# [[inputs.win_perf_counters.object]]
# Measurement = "win_system"
# ObjectName = "System"
# Instances = ["------"]
# Counters = [
# "Context Switches/sec",
# "System Calls/sec",
# "Processor Queue Length",
# "System Up Time",
# ]
## Example counterPath where the Instance portion must be removed to get
## data back, such as from the Memory object.
# [[inputs.win_perf_counters.object]]
# Measurement = "win_mem"
# ObjectName = "Memory"
## Use 6 x - to remove the Instance bit from the counterPath.
# Instances = ["------"]
# Counters = [
# "Available Bytes",
# "Cache Faults/sec",
# "Demand Zero Faults/sec",
# "Page Faults/sec",
# "Pages/sec",
# "Transition Faults/sec",
# "Pool Nonpaged Bytes",
# "Pool Paged Bytes",
# "Standby Cache Reserve Bytes",
# "Standby Cache Normal Priority Bytes",
# "Standby Cache Core Bytes",
# ]
## Example query where the Instance portion must be removed to get data back,
## such as from the Paging File object.
# [[inputs.win_perf_counters.object]]
# Measurement = "win_swap"
# ObjectName = "Paging File"
# Instances = ["_Total"]
# Counters = [
# "% Usage",
# ]
```
### Generic Queries

View File

@ -1,137 +1,166 @@
# # Input plugin to counterPath Performance Counters on Windows operating systems
# # This plugin ONLY supports Windows
# [[inputs.win_perf_counters]]
# ## By default this plugin returns basic CPU and Disk statistics.
# ## See the README file for more examples.
# ## Uncomment examples below or write your own as you see fit. If the system
# ## being polled for data does not have the Object at startup of the Telegraf
# ## agent, it will not be gathered.
# ## Settings:
# # PrintValid = false # Print All matching performance counters
# # Whether to request a timestamp along with the PerfCounter data or just use current time
# # UsePerfCounterTime=true
# # If UseWildcardsExpansion params is set to true, wildcards (partial wildcards in instance names and wildcards in counters names) in configured counter paths will be expanded
# # and in case of localized Windows, counter paths will be also localized. It also returns instance indexes in instance names.
# # If false, wildcards (not partial) in instance names will still be expanded, but instance indexes will not be returned in instance names.
# #UseWildcardsExpansion = false
# # When running on a localized version of Windows and with UseWildcardsExpansion = true, Windows will
# # localize object and counter names. When LocalizeWildcardsExpansion = false, use the names in object.Counters instead
# # of the localized names. Only Instances can have wildcards in this case. ObjectName and Counters must not have wildcards when this
# # setting is false.
# #LocalizeWildcardsExpansion = true
# # Period after which counters will be reread from configuration and wildcards in counter paths expanded
# CountersRefreshInterval="1m"
# ## Accepts a list of PDH error codes which are defined in pdh.go, if this error is encountered it will be ignored
# ## For example, you can provide "PDH_NO_DATA" to ignore performance counters with no instances
# ## By default no errors are ignored
# ## You can find the list here: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/win_perf_counters/pdh.go
# ## e.g.: IgnoredErrors = ["PDH_NO_DATA"]
# # IgnoredErrors = []
#
# [[inputs.win_perf_counters.object]]
# # Processor usage, alternative to native, reports on a per core.
# ObjectName = "Processor"
# Instances = ["*"]
# Counters = [
# "% Idle Time",
# "% Interrupt Time",
# "% Privileged Time",
# "% User Time",
# "% Processor Time",
# "% DPC Time",
# ]
# Measurement = "win_cpu"
# # Set to true to include _Total instance when querying for all (*).
# # IncludeTotal=false
# # Print out when the performance counter is missing from object, counter or instance.
# # WarnOnMissing = false
# # Gather raw values instead of formatted. Raw value is stored in the field name with the "_Raw" suffix, e.g. "Disk_Read_Bytes_sec_Raw".
# # UseRawValues = true
#
# [[inputs.win_perf_counters.object]]
# # Disk times and queues
# ObjectName = "LogicalDisk"
# Instances = ["*"]
# Counters = [
# "% Idle Time",
# "% Disk Time",
# "% Disk Read Time",
# "% Disk Write Time",
# "% User Time",
# "% Free Space",
# "Current Disk Queue Length",
# "Free Megabytes",
# ]
# Measurement = "win_disk"
#
# [[inputs.win_perf_counters.object]]
# ObjectName = "PhysicalDisk"
# Instances = ["*"]
# Counters = [
# "Disk Read Bytes/sec",
# "Disk Write Bytes/sec",
# "Current Disk Queue Length",
# "Disk Reads/sec",
# "Disk Writes/sec",
# "% Disk Time",
# "% Disk Read Time",
# "% Disk Write Time",
# ]
# Measurement = "win_diskio"
#
# [[inputs.win_perf_counters.object]]
# ObjectName = "Network Interface"
# Instances = ["*"]
# Counters = [
# "Bytes Received/sec",
# "Bytes Sent/sec",
# "Packets Received/sec",
# "Packets Sent/sec",
# "Packets Received Discarded",
# "Packets Outbound Discarded",
# "Packets Received Errors",
# "Packets Outbound Errors",
# ]
# Measurement = "win_net"
#
#
# [[inputs.win_perf_counters.object]]
# ObjectName = "System"
# Counters = [
# "Context Switches/sec",
# "System Calls/sec",
# "Processor Queue Length",
# "System Up Time",
# ]
# Instances = ["------"]
# Measurement = "win_system"
#
# [[inputs.win_perf_counters.object]]
# # Example counterPath where the Instance portion must be removed to get data back,
# # such as from the Memory object.
# ObjectName = "Memory"
# Counters = [
# "Available Bytes",
# "Cache Faults/sec",
# "Demand Zero Faults/sec",
# "Page Faults/sec",
# "Pages/sec",
# "Transition Faults/sec",
# "Pool Nonpaged Bytes",
# "Pool Paged Bytes",
# "Standby Cache Reserve Bytes",
# "Standby Cache Normal Priority Bytes",
# "Standby Cache Core Bytes",
# ]
# Instances = ["------"] # Use 6 x - to remove the Instance bit from the counterPath.
# Measurement = "win_mem"
#
# [[inputs.win_perf_counters.object]]
# # Example query where the Instance portion must be removed to get data back,
# # such as from the Paging File object.
# ObjectName = "Paging File"
# Counters = [
# "% Usage",
# ]
# Instances = ["_Total"]
# Measurement = "win_swap"
# Input plugin to counterPath Performance Counters on Windows operating systems
# This plugin ONLY supports Windows
[[inputs.win_perf_counters]]
## By default this plugin returns basic CPU and Disk statistics. See the
## README file for more examples. Uncomment examples below or write your own
## as you see fit. If the system being polled for data does not have the
## Object at startup of the Telegraf agent, it will not be gathered.
## Print All matching performance counters
# PrintValid = false
## Whether to request a timestamp along with the PerfCounter data or use
## current time
# UsePerfCounterTime = true
## If UseWildcardsExpansion params is set to true, wildcards (partial
## wildcards in instance names and wildcards in counters names) in configured
## counter paths will be expanded and in case of localized Windows, counter
## paths will be also localized. It also returns instance indexes in instance
## names. If false, wildcards (not partial) in instance names will still be
## expanded, but instance indexes will not be returned in instance names.
# UseWildcardsExpansion = false
## When running on a localized version of Windows and with
## UseWildcardsExpansion = true, Windows will localize object and counter
## names. When LocalizeWildcardsExpansion = false, use the names in
## object.Counters instead of the localized names. Only Instances can have
## wildcards in this case. ObjectName and Counters must not have wildcards
## when this setting is false.
# LocalizeWildcardsExpansion = true
## Period after which counters will be reread from configuration and
## wildcards in counter paths expanded
# CountersRefreshInterval="1m"
## Accepts a list of PDH error codes which are defined in pdh.go, if this
## error is encountered it will be ignored. For example, you can provide
## "PDH_NO_DATA" to ignore performance counters with no instances. By default
## no errors are ignored. You can find the list here:
## https://github.com/influxdata/telegraf/blob/master/plugins/inputs/win_perf_counters/pdh.go
## e.g. IgnoredErrors = ["PDH_NO_DATA"]
# IgnoredErrors = []
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
# [[inputs.win_perf_counters.object]]
# Measurement = ""
# ObjectName = ""
# Instances = [""]
# Counters = []
## Additional Object Settings
## * IncludeTotal: set to true to include _Total instance when querying
## for all metrics via '*'
## * WarnOnMissing: print out when the performance counter is missing
## from object, counter or instance
## * UseRawValues: gather raw values instead of formatted. Raw values are
## stored in the field name with the "_Raw" suffix, e.g.
## "Disk_Read_Bytes_sec_Raw".
# IncludeTotal = false
# WarnOnMissing = false
# UseRawValues = false
## Processor usage, alternative to native, reports on a per core.
# [[inputs.win_perf_counters.object]]
# Measurement = "win_cpu"
# ObjectName = "Processor"
# Instances = ["*"]
# UseRawValues = true
# Counters = [
# "% Idle Time",
# "% Interrupt Time",
# "% Privileged Time",
# "% User Time",
# "% Processor Time",
# "% DPC Time",
# ]
## Disk times and queues
# [[inputs.win_perf_counters.object]]
# Measurement = "win_disk"
# ObjectName = "LogicalDisk"
# Instances = ["*"]
# Counters = [
# "% Idle Time",
# "% Disk Time",
# "% Disk Read Time",
# "% Disk Write Time",
# "% User Time",
# "% Free Space",
# "Current Disk Queue Length",
# "Free Megabytes",
# ]
# [[inputs.win_perf_counters.object]]
# Measurement = "win_diskio"
# ObjectName = "PhysicalDisk"
# Instances = ["*"]
# Counters = [
# "Disk Read Bytes/sec",
# "Disk Write Bytes/sec",
# "Current Disk Queue Length",
# "Disk Reads/sec",
# "Disk Writes/sec",
# "% Disk Time",
# "% Disk Read Time",
# "% Disk Write Time",
# ]
# [[inputs.win_perf_counters.object]]
# Measurement = "win_net"
# ObjectName = "Network Interface"
# Instances = ["*"]
# Counters = [
# "Bytes Received/sec",
# "Bytes Sent/sec",
# "Packets Received/sec",
# "Packets Sent/sec",
# "Packets Received Discarded",
# "Packets Outbound Discarded",
# "Packets Received Errors",
# "Packets Outbound Errors",
# ]
# [[inputs.win_perf_counters.object]]
# Measurement = "win_system"
# ObjectName = "System"
# Instances = ["------"]
# Counters = [
# "Context Switches/sec",
# "System Calls/sec",
# "Processor Queue Length",
# "System Up Time",
# ]
## Example counterPath where the Instance portion must be removed to get
## data back, such as from the Memory object.
# [[inputs.win_perf_counters.object]]
# Measurement = "win_mem"
# ObjectName = "Memory"
## Use 6 x - to remove the Instance bit from the counterPath.
# Instances = ["------"]
# Counters = [
# "Available Bytes",
# "Cache Faults/sec",
# "Demand Zero Faults/sec",
# "Page Faults/sec",
# "Pages/sec",
# "Transition Faults/sec",
# "Pool Nonpaged Bytes",
# "Pool Paged Bytes",
# "Standby Cache Reserve Bytes",
# "Standby Cache Normal Priority Bytes",
# "Standby Cache Core Bytes",
# ]
## Example query where the Instance portion must be removed to get data back,
## such as from the Paging File object.
# [[inputs.win_perf_counters.object]]
# Measurement = "win_swap"
# ObjectName = "Paging File"
# Instances = ["_Total"]
# Counters = [
# "% Usage",
# ]

View File

@ -29,6 +29,10 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
## Enable additional diagnostic logging.
# enable_diagnostic_logging = false
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Context Tag Sources add Application Insights context tags to a tag value.
##
## For list of allowed context tag keys see:

View File

@ -12,6 +12,10 @@
## Enable additional diagnostic logging.
# enable_diagnostic_logging = false
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Context Tag Sources add Application Insights context tags to a tag value.
##
## For list of allowed context tag keys see:

View File

@ -65,6 +65,10 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
## Optional. If true, published PubSub message data will be base64-encoded.
# base64_data = false
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Optional. PubSub attributes to add to metrics.
# [outputs.cloud_pubsub.attributes]
# my_attr = "tag_value"

View File

@ -48,6 +48,10 @@
## Optional. If true, published PubSub message data will be base64-encoded.
# base64_data = false
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Optional. PubSub attributes to add to metrics.
# [outputs.cloud_pubsub.attributes]
# my_attr = "tag_value"

View File

@ -144,6 +144,10 @@ to use them.
## If you want metrics to be treated and reported as delta counters, add the metric names here
additional_counters = [ ]
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Optional dimensions to be added to every metric
# [outputs.dynatrace.default_dimensions]
# default_key = "default value"

View File

@ -31,6 +31,10 @@
## If you want metrics to be treated and reported as delta counters, add the metric names here
additional_counters = [ ]
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Optional dimensions to be added to every metric
# [outputs.dynatrace.default_dimensions]
# default_key = "default value"

View File

@ -42,6 +42,10 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## One or more check sub-tables should be defined, it is also recommended to
## use metric filtering to limit the metrics that flow into this output.
##

View File

@ -21,6 +21,10 @@
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## One or more check sub-tables should be defined, it is also recommended to
## use metric filtering to limit the metrics that flow into this output.
##

View File

@ -82,11 +82,6 @@ to use them.
## compress body or "identity" to apply no encoding.
# content_encoding = "identity"
## Additional HTTP headers
# [outputs.http.headers]
# # Should be set manually to "application/json" for json data_format
# Content-Type = "text/plain; charset=utf-8"
## MaxIdleConns controls the maximum number of idle (keep-alive)
## connections across all hosts. Zero means no limit.
# max_idle_conn = 0
@ -129,6 +124,15 @@ to use them.
## Optional list of statuscodes (<200 or >300) upon which requests should not be retried
# non_retryable_statuscodes = [409, 413]
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Additional HTTP headers
# [outputs.http.headers]
# ## Should be set manually to "application/json" for json data_format
# Content-Type = "text/plain; charset=utf-8"
```
### Google API Auth

View File

@ -55,11 +55,6 @@
## compress body or "identity" to apply no encoding.
# content_encoding = "identity"
## Additional HTTP headers
# [outputs.http.headers]
# # Should be set manually to "application/json" for json data_format
# Content-Type = "text/plain; charset=utf-8"
## MaxIdleConns controls the maximum number of idle (keep-alive)
## connections across all hosts. Zero means no limit.
# max_idle_conn = 0
@ -102,3 +97,12 @@
## Optional list of statuscodes (<200 or >300) upon which requests should not be retried
# non_retryable_statuscodes = [409, 413]
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Additional HTTP headers
# [outputs.http.headers]
# ## Should be set manually to "application/json" for json data_format
# Content-Type = "text/plain; charset=utf-8"

View File

@ -51,33 +51,6 @@ to use them.
## ex: version = "1.1.0"
# version = ""
## Optional topic suffix configuration.
## If the section is omitted, no suffix is used.
## Following topic suffix methods are supported:
## measurement - suffix equals to separator + measurement's name
## tags - suffix equals to separator + specified tags' values
## interleaved with separator
## Suffix equals to "_" + measurement name
# [outputs.kafka.topic_suffix]
# method = "measurement"
# separator = "_"
## Suffix equals to "__" + measurement's "foo" tag value.
## If there is no such tag, suffix equals to an empty string
# [outputs.kafka.topic_suffix]
# method = "tags"
# keys = ["foo"]
# separator = "__"
## Suffix equals to "_" + measurement's "foo" and "bar"
## tag values, separated by "_". If there are no such tags,
## their values are treated as empty strings.
# [outputs.kafka.topic_suffix]
# method = "tags"
# keys = ["foo", "bar"]
# separator = "_"
## The routing tag specifies a tagkey on the metric whose value is used as
## the message key. The message key is used to determine which partition to
## send the message to. This tag is preferred over the routing_key option.
@ -188,6 +161,37 @@ to use them.
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Optional topic suffix configuration.
## If the section is omitted, no suffix is used.
## Following topic suffix methods are supported:
## measurement - suffix equals to separator + measurement's name
## tags - suffix equals to separator + specified tags' values
## interleaved with separator
## Suffix equals to "_" + measurement name
# [outputs.kafka.topic_suffix]
# method = "measurement"
# separator = "_"
## Suffix equals to "__" + measurement's "foo" tag value.
## If there is no such tag, suffix equals to an empty string
# [outputs.kafka.topic_suffix]
# method = "tags"
# keys = ["foo"]
# separator = "__"
## Suffix equals to "_" + measurement's "foo" and "bar"
## tag values, separated by "_". If there are no such tags,
## their values are treated as empty strings.
# [outputs.kafka.topic_suffix]
# method = "tags"
# keys = ["foo", "bar"]
# separator = "_"
```
### `max_retry`

View File

@ -25,33 +25,6 @@
## ex: version = "1.1.0"
# version = ""
## Optional topic suffix configuration.
## If the section is omitted, no suffix is used.
## Following topic suffix methods are supported:
## measurement - suffix equals to separator + measurement's name
## tags - suffix equals to separator + specified tags' values
## interleaved with separator
## Suffix equals to "_" + measurement name
# [outputs.kafka.topic_suffix]
# method = "measurement"
# separator = "_"
## Suffix equals to "__" + measurement's "foo" tag value.
## If there is no such tag, suffix equals to an empty string
# [outputs.kafka.topic_suffix]
# method = "tags"
# keys = ["foo"]
# separator = "__"
## Suffix equals to "_" + measurement's "foo" and "bar"
## tag values, separated by "_". If there are no such tags,
## their values are treated as empty strings.
# [outputs.kafka.topic_suffix]
# method = "tags"
# keys = ["foo", "bar"]
# separator = "_"
## The routing tag specifies a tagkey on the metric whose value is used as
## the message key. The message key is used to determine which partition to
## send the message to. This tag is preferred over the routing_key option.
@ -162,3 +135,34 @@
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Optional topic suffix configuration.
## If the section is omitted, no suffix is used.
## Following topic suffix methods are supported:
## measurement - suffix equals to separator + measurement's name
## tags - suffix equals to separator + specified tags' values
## interleaved with separator
## Suffix equals to "_" + measurement name
# [outputs.kafka.topic_suffix]
# method = "measurement"
# separator = "_"
## Suffix equals to "__" + measurement's "foo" tag value.
## If there is no such tag, suffix equals to an empty string
# [outputs.kafka.topic_suffix]
# method = "tags"
# keys = ["foo"]
# separator = "__"
## Suffix equals to "_" + measurement's "foo" and "bar"
## tag values, separated by "_". If there are no such tags,
## their values are treated as empty strings.
# [outputs.kafka.topic_suffix]
# method = "tags"
# keys = ["foo", "bar"]
# separator = "_"

View File

@ -78,6 +78,19 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
## Kinesis StreamName must exist prior to starting telegraf.
streamname = "StreamName"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
## debug will show upstream aws messages.
debug = false
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## The partition key can be calculated using one of several methods:
##
## Use a static value for all writes:
@ -99,16 +112,6 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
# method = "tag"
# key = "host"
# default = "mykey"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
## debug will show upstream aws messages.
debug = false
```
For this output plugin to function correctly the following variables must be

View File

@ -30,6 +30,19 @@
## Kinesis StreamName must exist prior to starting telegraf.
streamname = "StreamName"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
## debug will show upstream aws messages.
debug = false
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## The partition key can be calculated using one of several methods:
##
## Use a static value for all writes:
@ -51,13 +64,3 @@
# method = "tag"
# key = "host"
# default = "mykey"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
## debug will show upstream aws messages.
debug = false

View File

@ -125,6 +125,10 @@ to use them.
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Optional MQTT 5 publish properties
## These setting only apply if the "protocol" property is set to 5. This must
## be defined at the end of the plugin settings, otherwise TOML will assume

View File

@ -90,6 +90,10 @@
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Optional MQTT 5 publish properties
## These setting only apply if the "protocol" property is set to 5. This must
## be defined at the end of the plugin settings, otherwise TOML will assume

View File

@ -41,6 +41,10 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
## Supports: "gzip", "none"
# compression = "gzip"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Configuration options for the Coralogix dialect
## Enable the following section if you use this plugin with a Coralogix endpoint
# [outputs.opentelemetry.coralogix]

View File

@ -24,6 +24,10 @@
## Supports: "gzip", "none"
# compression = "gzip"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Configuration options for the Coralogix dialect
## Enable the following section if you use this plugin with a Coralogix endpoint
# [outputs.opentelemetry.coralogix]

View File

@ -68,6 +68,10 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
## compress body or "identity" to apply no encoding.
# content_encoding = "identity"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Sensu Event details
##
## Below are the event details to be sent to Sensu. The main portions of the

View File

@ -51,6 +51,10 @@
## compress body or "identity" to apply no encoding.
# content_encoding = "identity"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Sensu Event details
##
## Below are the event details to be sent to Sensu. The main portions of the

View File

@ -106,23 +106,6 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
## Initialization SQL
# init_sql = ""
## Metric type to SQL type conversion
## The values on the left are the data types Telegraf has and the values on
## the right are the data types Telegraf will use when sending to a database.
##
## The database values used must be data types the destination database
## understands. It is up to the user to ensure that the selected data type is
## available in the database they are using. Refer to your database
## documentation for what data types are available and supported.
#[outputs.sql.convert]
# integer = "INT"
# real = "DOUBLE"
# text = "TEXT"
# timestamp = "TIMESTAMP"
# defaultvalue = "TEXT"
# unsigned = "UNSIGNED"
# bool = "BOOL"
## This setting controls the behavior of the unsigned value. By default the
## setting will take the integer value and append the unsigned value to it. The other
## option is "literal", which will use the actual value the user provides to
@ -143,6 +126,27 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
## Maximum number of open connections to the database. 0 means unlimited.
# connection_max_open = 0
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Metric type to SQL type conversion
## The values on the left are the data types Telegraf has and the values on
## the right are the data types Telegraf will use when sending to a database.
##
## The database values used must be data types the destination database
## understands. It is up to the user to ensure that the selected data type is
## available in the database they are using. Refer to your database
## documentation for what data types are available and supported.
#[outputs.sql.convert]
# integer = "INT"
# real = "DOUBLE"
# text = "TEXT"
# timestamp = "TIMESTAMP"
# defaultvalue = "TEXT"
# unsigned = "UNSIGNED"
# bool = "BOOL"
```
## Driver-specific information

View File

@ -28,23 +28,6 @@
## Initialization SQL
# init_sql = ""
## Metric type to SQL type conversion
## The values on the left are the data types Telegraf has and the values on
## the right are the data types Telegraf will use when sending to a database.
##
## The database values used must be data types the destination database
## understands. It is up to the user to ensure that the selected data type is
## available in the database they are using. Refer to your database
## documentation for what data types are available and supported.
#[outputs.sql.convert]
# integer = "INT"
# real = "DOUBLE"
# text = "TEXT"
# timestamp = "TIMESTAMP"
# defaultvalue = "TEXT"
# unsigned = "UNSIGNED"
# bool = "BOOL"
## This setting controls the behavior of the unsigned value. By default the
## setting will take the integer value and append the unsigned value to it. The other
## option is "literal", which will use the actual value the user provides to
@ -65,3 +48,24 @@
## Maximum number of open connections to the database. 0 means unlimited.
# connection_max_open = 0
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Metric type to SQL type conversion
## The values on the left are the data types Telegraf has and the values on
## the right are the data types Telegraf will use when sending to a database.
##
## The database values used must be data types the destination database
## understands. It is up to the user to ensure that the selected data type is
## available in the database they are using. Refer to your database
## documentation for what data types are available and supported.
#[outputs.sql.convert]
# integer = "INT"
# real = "DOUBLE"
# text = "TEXT"
# timestamp = "TIMESTAMP"
# defaultvalue = "TEXT"
# unsigned = "UNSIGNED"
# bool = "BOOL"

View File

@ -69,6 +69,10 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
## Custom resource type
# resource_type = "generic_node"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Additional resource labels
# [outputs.stackdriver.resource_labels]
# node_id = "$HOSTNAME"

View File

@ -34,6 +34,10 @@
## Custom resource type
# resource_type = "generic_node"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Additional resource labels
# [outputs.stackdriver.resource_labels]
# node_id = "$HOSTNAME"

View File

@ -54,6 +54,10 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Additional HTTP Upgrade headers
# [outputs.websocket.headers]
# Authorization = "Bearer <TOKEN>"

View File

@ -34,6 +34,10 @@
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Additional HTTP Upgrade headers
# [outputs.websocket.headers]
# Authorization = "Bearer <TOKEN>"