diff --git a/plugins/inputs/aliyuncms/README.md b/plugins/inputs/aliyuncms/README.md index a2413248f..086c54682 100644 --- a/plugins/inputs/aliyuncms/README.md +++ b/plugins/inputs/aliyuncms/README.md @@ -106,46 +106,55 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## How often the discovery API call executed (default 1m) #discovery_interval = "1m" - ## Metrics to Pull (Required) + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + + ## Metrics to Pull + ## At least one metrics definition required [[inputs.aliyuncms.metrics]] - ## Metrics names to be requested, - ## Description can be found here (per project): - ## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq - names = ["InstanceActiveConnection", "InstanceNewConnection"] + ## Metrics names to be requested, + ## Description can be found here (per project): + ## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq + names = ["InstanceActiveConnection", "InstanceNewConnection"] - ## Dimension filters for Metric (optional). - ## This allows to get additional metric dimension. If dimension is not - ## specified it can be returned or the data can be aggregated - it depends - ## on particular metric, you can find details here: - ## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq - ## - ## Note, that by default dimension filter includes the list of discovered - ## objects in scope (if discovery is enabled). - # Values specified here would be added into the list of discovered objects. - ## You can specify either single dimension: - #dimensions = '{"instanceId": "p-example"}' + ## Dimension filters for Metric (optional) + ## This allows to get additional metric dimension. 
If dimension is not + ## specified it can be returned or the data can be aggregated - it depends + ## on particular metric, you can find details here: + ## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq + ## + ## Note, that by default dimension filter includes the list of discovered + ## objects in scope (if discovery is enabled). Values specified here would + ## be added into the list of discovered objects. You can specify either + ## single dimension: + # dimensions = '{"instanceId": "p-example"}' - ## Or you can specify several dimensions at once: - #dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]' + ## Or you can specify several dimensions at once: + # dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]' - ## Enrichment tags, can be added from discovery (if supported) - ## Notation is : - ## To figure out which fields are available, consult the Describe - ## API per project. - ## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers¶ms={}&tab=MOCK&lang=GO - #tag_query_path = [ - # "address:Address", - # "name:LoadBalancerName", - # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]" - # ] - ## The following tags added by default: - ## regionId (if discovery enabled), userId, instanceId. + ## Tag Query Path + ## The following tags added by default: + ## * regionId (if discovery enabled) + ## * userId + ## * instanceId + ## Enrichment tags, can be added from discovery (if supported) + ## Notation is + ## : + ## To figure out which fields are available, consult the + ## Describe API per project. 
For example, for SLB see: + ## https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO + # tag_query_path = [ + # "address:Address", + # "name:LoadBalancerName", + # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]" + # ] - ## Allow metrics without discovery data, if discovery is enabled. - ## If set to true, then metric without discovery data would be emitted, otherwise dropped. - ## This cane be of help, in case debugging dimension filters, or partial coverage of - ## discovery scope vs monitoring scope - # allow_dps_without_discovery = false + ## Allow metrics without discovery data, if discovery is enabled. + ## If set to true, then metric without discovery data would be emitted, otherwise dropped. + ## This can be of help, in case debugging dimension filters, or partial coverage of + ## discovery scope vs monitoring scope + # allow_dps_without_discovery = false ``` ### Requirements and Terminology diff --git a/plugins/inputs/aliyuncms/sample.conf b/plugins/inputs/aliyuncms/sample.conf index b11f6ba62..ab343ed78 100644 --- a/plugins/inputs/aliyuncms/sample.conf +++ b/plugins/inputs/aliyuncms/sample.conf @@ -69,43 +69,52 @@ ## How often the discovery API call executed (default 1m) #discovery_interval = "1m" - ## Metrics to Pull (Required) + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + + ## Metrics to Pull + ## At least one metrics definition required [[inputs.aliyuncms.metrics]] - ## Metrics names to be requested, - ## Description can be found here (per project): - ## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq - names = ["InstanceActiveConnection", "InstanceNewConnection"] + ## Metrics names to be requested, + ## Description can be found here (per project): + ## 
https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq + names = ["InstanceActiveConnection", "InstanceNewConnection"] - ## Dimension filters for Metric (optional). - ## This allows to get additional metric dimension. If dimension is not - ## specified it can be returned or the data can be aggregated - it depends - ## on particular metric, you can find details here: - ## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq - ## - ## Note, that by default dimension filter includes the list of discovered - ## objects in scope (if discovery is enabled). - # Values specified here would be added into the list of discovered objects. - ## You can specify either single dimension: - #dimensions = '{"instanceId": "p-example"}' + ## Dimension filters for Metric (optional) + ## This allows to get additional metric dimension. If dimension is not + ## specified it can be returned or the data can be aggregated - it depends + ## on particular metric, you can find details here: + ## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq + ## + ## Note, that by default dimension filter includes the list of discovered + ## objects in scope (if discovery is enabled). Values specified here would + ## be added into the list of discovered objects. You can specify either + ## single dimension: + # dimensions = '{"instanceId": "p-example"}' - ## Or you can specify several dimensions at once: - #dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]' + ## Or you can specify several dimensions at once: + # dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]' - ## Enrichment tags, can be added from discovery (if supported) - ## Notation is : - ## To figure out which fields are available, consult the Describe - ## API per project. 
- ## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers¶ms={}&tab=MOCK&lang=GO - #tag_query_path = [ - # "address:Address", - # "name:LoadBalancerName", - # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]" - # ] - ## The following tags added by default: - ## regionId (if discovery enabled), userId, instanceId. + ## Tag Query Path + ## The following tags added by default: + ## * regionId (if discovery enabled) + ## * userId + ## * instanceId + ## Enrichment tags, can be added from discovery (if supported) + ## Notation is + ## : + ## To figure out which fields are available, consult the + ## Describe API per project. For example, for SLB see: + ## https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO + # tag_query_path = [ + # "address:Address", + # "name:LoadBalancerName", + # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]" + # ] - ## Allow metrics without discovery data, if discovery is enabled. - ## If set to true, then metric without discovery data would be emitted, otherwise dropped. - ## This cane be of help, in case debugging dimension filters, or partial coverage of - ## discovery scope vs monitoring scope - # allow_dps_without_discovery = false + ## Allow metrics without discovery data, if discovery is enabled. + ## If set to true, then metric without discovery data would be emitted, otherwise dropped. + ## This can be of help, in case debugging dimension filters, or partial coverage of + ## discovery scope vs monitoring scope + # allow_dps_without_discovery = false diff --git a/plugins/inputs/vsphere/README.md b/plugins/inputs/vsphere/README.md index 9879e6c91..6d9370ce1 100644 --- a/plugins/inputs/vsphere/README.md +++ b/plugins/inputs/vsphere/README.md @@ -40,7 +40,7 @@ to use them. 
## Configuration ```toml @sample.conf --# Read metrics from one or many vCenters +# Read metrics from one or many vCenters [[inputs.vsphere]] ## List of vCenter URLs to be monitored. These three lines must be uncommented ## and edited for the plugin to work. diff --git a/plugins/inputs/vsphere/sample.conf b/plugins/inputs/vsphere/sample.conf index 0b94967d3..8bb103d4e 100644 --- a/plugins/inputs/vsphere/sample.conf +++ b/plugins/inputs/vsphere/sample.conf @@ -1,4 +1,4 @@ --# Read metrics from one or many vCenters +# Read metrics from one or many vCenters [[inputs.vsphere]] ## List of vCenter URLs to be monitored. These three lines must be uncommented ## and edited for the plugin to work. diff --git a/plugins/inputs/win_perf_counters/README.md b/plugins/inputs/win_perf_counters/README.md index 4639a6289..c6810dd13 100644 --- a/plugins/inputs/win_perf_counters/README.md +++ b/plugins/inputs/win_perf_counters/README.md @@ -302,143 +302,172 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## Configuration ```toml @sample.conf -# # Input plugin to counterPath Performance Counters on Windows operating systems -# # This plugin ONLY supports Windows -# [[inputs.win_perf_counters]] -# ## By default this plugin returns basic CPU and Disk statistics. -# ## See the README file for more examples. -# ## Uncomment examples below or write your own as you see fit. If the system -# ## being polled for data does not have the Object at startup of the Telegraf -# ## agent, it will not be gathered. -# ## Settings: -# # PrintValid = false # Print All matching performance counters -# # Whether request a timestamp along with the PerfCounter data or just use current time -# # UsePerfCounterTime=true -# # If UseWildcardsExpansion params is set to true, wildcards (partial wildcards in instance names and wildcards in counters names) in configured counter paths will be expanded -# # and in case of localized Windows, counter paths will be also localized. 
It also returns instance indexes in instance names. -# # If false, wildcards (not partial) in instance names will still be expanded, but instance indexes will not be returned in instance names. -# #UseWildcardsExpansion = false -# # When running on a localized version of Windows and with UseWildcardsExpansion = true, Windows will -# # localize object and counter names. When LocalizeWildcardsExpansion = false, use the names in object.Counters instead -# # of the localized names. Only Instances can have wildcards in this case. ObjectName and Counters must not have wildcards when this -# # setting is false. -# #LocalizeWildcardsExpansion = true -# # Period after which counters will be reread from configuration and wildcards in counter paths expanded -# CountersRefreshInterval="1m" -# ## Accepts a list of PDH error codes which are defined in pdh.go, if this error is encountered it will be ignored -# ## For example, you can provide "PDH_NO_DATA" to ignore performance counters with no instances -# ## By default no errors are ignored -# ## You can find the list here: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/win_perf_counters/pdh.go -# ## e.g.: IgnoredErrors = ["PDH_NO_DATA"] -# # IgnoredErrors = [] -# -# [[inputs.win_perf_counters.object]] -# # Processor usage, alternative to native, reports on a per core. -# ObjectName = "Processor" -# Instances = ["*"] -# Counters = [ -# "% Idle Time", -# "% Interrupt Time", -# "% Privileged Time", -# "% User Time", -# "% Processor Time", -# "% DPC Time", -# ] -# Measurement = "win_cpu" -# # Set to true to include _Total instance when querying for all (*). -# # IncludeTotal=false -# # Print out when the performance counter is missing from object, counter or instance. -# # WarnOnMissing = false -# # Gather raw values instead of formatted. Raw value is stored in the field name with the "_Raw" suffix, e.g. "Disk_Read_Bytes_sec_Raw". 
-# # UseRawValues = true -# -# [[inputs.win_perf_counters.object]] -# # Disk times and queues -# ObjectName = "LogicalDisk" -# Instances = ["*"] -# Counters = [ -# "% Idle Time", -# "% Disk Time", -# "% Disk Read Time", -# "% Disk Write Time", -# "% User Time", -# "% Free Space", -# "Current Disk Queue Length", -# "Free Megabytes", -# ] -# Measurement = "win_disk" -# -# [[inputs.win_perf_counters.object]] -# ObjectName = "PhysicalDisk" -# Instances = ["*"] -# Counters = [ -# "Disk Read Bytes/sec", -# "Disk Write Bytes/sec", -# "Current Disk Queue Length", -# "Disk Reads/sec", -# "Disk Writes/sec", -# "% Disk Time", -# "% Disk Read Time", -# "% Disk Write Time", -# ] -# Measurement = "win_diskio" -# -# [[inputs.win_perf_counters.object]] -# ObjectName = "Network Interface" -# Instances = ["*"] -# Counters = [ -# "Bytes Received/sec", -# "Bytes Sent/sec", -# "Packets Received/sec", -# "Packets Sent/sec", -# "Packets Received Discarded", -# "Packets Outbound Discarded", -# "Packets Received Errors", -# "Packets Outbound Errors", -# ] -# Measurement = "win_net" -# -# -# [[inputs.win_perf_counters.object]] -# ObjectName = "System" -# Counters = [ -# "Context Switches/sec", -# "System Calls/sec", -# "Processor Queue Length", -# "System Up Time", -# ] -# Instances = ["------"] -# Measurement = "win_system" -# -# [[inputs.win_perf_counters.object]] -# # Example counterPath where the Instance portion must be removed to get data back, -# # such as from the Memory object. -# ObjectName = "Memory" -# Counters = [ -# "Available Bytes", -# "Cache Faults/sec", -# "Demand Zero Faults/sec", -# "Page Faults/sec", -# "Pages/sec", -# "Transition Faults/sec", -# "Pool Nonpaged Bytes", -# "Pool Paged Bytes", -# "Standby Cache Reserve Bytes", -# "Standby Cache Normal Priority Bytes", -# "Standby Cache Core Bytes", -# ] -# Instances = ["------"] # Use 6 x - to remove the Instance bit from the counterPath. 
-# Measurement = "win_mem" -# -# [[inputs.win_perf_counters.object]] -# # Example query where the Instance portion must be removed to get data back, -# # such as from the Paging File object. -# ObjectName = "Paging File" -# Counters = [ -# "% Usage", -# ] -# Instances = ["_Total"] -# Measurement = "win_swap" +# Input plugin to counterPath Performance Counters on Windows operating systems +# This plugin ONLY supports Windows +[[inputs.win_perf_counters]] + ## By default this plugin returns basic CPU and Disk statistics. See the + ## README file for more examples. Uncomment examples below or write your own + ## as you see fit. If the system being polled for data does not have the + ## Object at startup of the Telegraf agent, it will not be gathered. + + ## Print All matching performance counters + # PrintValid = false + + ## Whether request a timestamp along with the PerfCounter data or use current + ## time + # UsePerfCounterTime = true + + ## If UseWildcardsExpansion params is set to true, wildcards (partial + ## wildcards in instance names and wildcards in counters names) in configured + ## counter paths will be expanded and in case of localized Windows, counter + ## paths will be also localized. It also returns instance indexes in instance + ## names. If false, wildcards (not partial) in instance names will still be + ## expanded, but instance indexes will not be returned in instance names. + # UseWildcardsExpansion = false + + ## When running on a localized version of Windows and with + ## UseWildcardsExpansion = true, Windows will localize object and counter + ## names. When LocalizeWildcardsExpansion = false, use the names in + ## object.Counters instead of the localized names. Only Instances can have + ## wildcards in this case. ObjectName and Counters must not have wildcards + ## when this setting is false. 
+ # LocalizeWildcardsExpansion = true + + ## Period after which counters will be reread from configuration and + ## wildcards in counter paths expanded + # CountersRefreshInterval="1m" + + ## Accepts a list of PDH error codes which are defined in pdh.go, if this + ## error is encountered it will be ignored. For example, you can provide + ## "PDH_NO_DATA" to ignore performance counters with no instances. By default + ## no errors are ignored. You can find the list here: + ## https://github.com/influxdata/telegraf/blob/master/plugins/inputs/win_perf_counters/pdh.go + ## e.g. IgnoredErrors = ["PDH_NO_DATA"] + # IgnoredErrors = [] + + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + + # [[inputs.win_perf_counters.object]] + # Measurement = "" + # ObjectName = "" + # Instances = [""] + # Counters = [] + ## Additional Object Settings + ## * IncludeTotal: set to true to include _Total instance when querying + ## for all metrics via '*' + ## * WarnOnMissing: print out when the performance counter is missing + ## from object, counter or instance + ## * UseRawValues: gather raw values instead of formatted. Raw values are + ## stored in the field name with the "_Raw" suffix, e.g. + ## "Disk_Read_Bytes_sec_Raw". + # IncludeTotal = false + # WarnOnMissing = false + # UseRawValues = false + + ## Processor usage, alternative to native, reports on a per core. 
+ # [[inputs.win_perf_counters.object]] + # Measurement = "win_cpu" + # ObjectName = "Processor" + # Instances = ["*"] + # UseRawValues = true + # Counters = [ + # "% Idle Time", + # "% Interrupt Time", + # "% Privileged Time", + # "% User Time", + # "% Processor Time", + # "% DPC Time", + # ] + + ## Disk times and queues + # [[inputs.win_perf_counters.object]] + # Measurement = "win_disk" + # ObjectName = "LogicalDisk" + # Instances = ["*"] + # Counters = [ + # "% Idle Time", + # "% Disk Time", + # "% Disk Read Time", + # "% Disk Write Time", + # "% User Time", + # "% Free Space", + # "Current Disk Queue Length", + # "Free Megabytes", + # ] + + # [[inputs.win_perf_counters.object]] + # Measurement = "win_diskio" + # ObjectName = "PhysicalDisk" + # Instances = ["*"] + # Counters = [ + # "Disk Read Bytes/sec", + # "Disk Write Bytes/sec", + # "Current Disk Queue Length", + # "Disk Reads/sec", + # "Disk Writes/sec", + # "% Disk Time", + # "% Disk Read Time", + # "% Disk Write Time", + # ] + + # [[inputs.win_perf_counters.object]] + # Measurement = "win_net" + # ObjectName = "Network Interface" + # Instances = ["*"] + # Counters = [ + # "Bytes Received/sec", + # "Bytes Sent/sec", + # "Packets Received/sec", + # "Packets Sent/sec", + # "Packets Received Discarded", + # "Packets Outbound Discarded", + # "Packets Received Errors", + # "Packets Outbound Errors", + # ] + + # [[inputs.win_perf_counters.object]] + # Measurement = "win_system" + # ObjectName = "System" + # Instances = ["------"] + # Counters = [ + # "Context Switches/sec", + # "System Calls/sec", + # "Processor Queue Length", + # "System Up Time", + # ] + + ## Example counterPath where the Instance portion must be removed to get + ## data back, such as from the Memory object. + # [[inputs.win_perf_counters.object]] + # Measurement = "win_mem" + # ObjectName = "Memory" + ## Use 6 x - to remove the Instance bit from the counterPath. 
+ # Instances = ["------"] + # Counters = [ + # "Available Bytes", + # "Cache Faults/sec", + # "Demand Zero Faults/sec", + # "Page Faults/sec", + # "Pages/sec", + # "Transition Faults/sec", + # "Pool Nonpaged Bytes", + # "Pool Paged Bytes", + # "Standby Cache Reserve Bytes", + # "Standby Cache Normal Priority Bytes", + # "Standby Cache Core Bytes", + # ] + + ## Example query where the Instance portion must be removed to get data back, + ## such as from the Paging File object. + # [[inputs.win_perf_counters.object]] + # Measurement = "win_swap" + # ObjectName = "Paging File" + # Instances = ["_Total"] + # Counters = [ + # "% Usage", + # ] ``` ### Generic Queries diff --git a/plugins/inputs/win_perf_counters/sample.conf b/plugins/inputs/win_perf_counters/sample.conf index 6996ceff3..4736ab11b 100644 --- a/plugins/inputs/win_perf_counters/sample.conf +++ b/plugins/inputs/win_perf_counters/sample.conf @@ -1,137 +1,166 @@ -# # Input plugin to counterPath Performance Counters on Windows operating systems -# # This plugin ONLY supports Windows -# [[inputs.win_perf_counters]] -# ## By default this plugin returns basic CPU and Disk statistics. -# ## See the README file for more examples. -# ## Uncomment examples below or write your own as you see fit. If the system -# ## being polled for data does not have the Object at startup of the Telegraf -# ## agent, it will not be gathered. -# ## Settings: -# # PrintValid = false # Print All matching performance counters -# # Whether request a timestamp along with the PerfCounter data or just use current time -# # UsePerfCounterTime=true -# # If UseWildcardsExpansion params is set to true, wildcards (partial wildcards in instance names and wildcards in counters names) in configured counter paths will be expanded -# # and in case of localized Windows, counter paths will be also localized. It also returns instance indexes in instance names. 
-# # If false, wildcards (not partial) in instance names will still be expanded, but instance indexes will not be returned in instance names. -# #UseWildcardsExpansion = false -# # When running on a localized version of Windows and with UseWildcardsExpansion = true, Windows will -# # localize object and counter names. When LocalizeWildcardsExpansion = false, use the names in object.Counters instead -# # of the localized names. Only Instances can have wildcards in this case. ObjectName and Counters must not have wildcards when this -# # setting is false. -# #LocalizeWildcardsExpansion = true -# # Period after which counters will be reread from configuration and wildcards in counter paths expanded -# CountersRefreshInterval="1m" -# ## Accepts a list of PDH error codes which are defined in pdh.go, if this error is encountered it will be ignored -# ## For example, you can provide "PDH_NO_DATA" to ignore performance counters with no instances -# ## By default no errors are ignored -# ## You can find the list here: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/win_perf_counters/pdh.go -# ## e.g.: IgnoredErrors = ["PDH_NO_DATA"] -# # IgnoredErrors = [] -# -# [[inputs.win_perf_counters.object]] -# # Processor usage, alternative to native, reports on a per core. -# ObjectName = "Processor" -# Instances = ["*"] -# Counters = [ -# "% Idle Time", -# "% Interrupt Time", -# "% Privileged Time", -# "% User Time", -# "% Processor Time", -# "% DPC Time", -# ] -# Measurement = "win_cpu" -# # Set to true to include _Total instance when querying for all (*). -# # IncludeTotal=false -# # Print out when the performance counter is missing from object, counter or instance. -# # WarnOnMissing = false -# # Gather raw values instead of formatted. Raw value is stored in the field name with the "_Raw" suffix, e.g. "Disk_Read_Bytes_sec_Raw". 
-# # UseRawValues = true -# -# [[inputs.win_perf_counters.object]] -# # Disk times and queues -# ObjectName = "LogicalDisk" -# Instances = ["*"] -# Counters = [ -# "% Idle Time", -# "% Disk Time", -# "% Disk Read Time", -# "% Disk Write Time", -# "% User Time", -# "% Free Space", -# "Current Disk Queue Length", -# "Free Megabytes", -# ] -# Measurement = "win_disk" -# -# [[inputs.win_perf_counters.object]] -# ObjectName = "PhysicalDisk" -# Instances = ["*"] -# Counters = [ -# "Disk Read Bytes/sec", -# "Disk Write Bytes/sec", -# "Current Disk Queue Length", -# "Disk Reads/sec", -# "Disk Writes/sec", -# "% Disk Time", -# "% Disk Read Time", -# "% Disk Write Time", -# ] -# Measurement = "win_diskio" -# -# [[inputs.win_perf_counters.object]] -# ObjectName = "Network Interface" -# Instances = ["*"] -# Counters = [ -# "Bytes Received/sec", -# "Bytes Sent/sec", -# "Packets Received/sec", -# "Packets Sent/sec", -# "Packets Received Discarded", -# "Packets Outbound Discarded", -# "Packets Received Errors", -# "Packets Outbound Errors", -# ] -# Measurement = "win_net" -# -# -# [[inputs.win_perf_counters.object]] -# ObjectName = "System" -# Counters = [ -# "Context Switches/sec", -# "System Calls/sec", -# "Processor Queue Length", -# "System Up Time", -# ] -# Instances = ["------"] -# Measurement = "win_system" -# -# [[inputs.win_perf_counters.object]] -# # Example counterPath where the Instance portion must be removed to get data back, -# # such as from the Memory object. -# ObjectName = "Memory" -# Counters = [ -# "Available Bytes", -# "Cache Faults/sec", -# "Demand Zero Faults/sec", -# "Page Faults/sec", -# "Pages/sec", -# "Transition Faults/sec", -# "Pool Nonpaged Bytes", -# "Pool Paged Bytes", -# "Standby Cache Reserve Bytes", -# "Standby Cache Normal Priority Bytes", -# "Standby Cache Core Bytes", -# ] -# Instances = ["------"] # Use 6 x - to remove the Instance bit from the counterPath. 
-# Measurement = "win_mem" -# -# [[inputs.win_perf_counters.object]] -# # Example query where the Instance portion must be removed to get data back, -# # such as from the Paging File object. -# ObjectName = "Paging File" -# Counters = [ -# "% Usage", -# ] -# Instances = ["_Total"] -# Measurement = "win_swap" +# Input plugin to counterPath Performance Counters on Windows operating systems +# This plugin ONLY supports Windows +[[inputs.win_perf_counters]] + ## By default this plugin returns basic CPU and Disk statistics. See the + ## README file for more examples. Uncomment examples below or write your own + ## as you see fit. If the system being polled for data does not have the + ## Object at startup of the Telegraf agent, it will not be gathered. + + ## Print All matching performance counters + # PrintValid = false + + ## Whether request a timestamp along with the PerfCounter data or use current + ## time + # UsePerfCounterTime = true + + ## If UseWildcardsExpansion params is set to true, wildcards (partial + ## wildcards in instance names and wildcards in counters names) in configured + ## counter paths will be expanded and in case of localized Windows, counter + ## paths will be also localized. It also returns instance indexes in instance + ## names. If false, wildcards (not partial) in instance names will still be + ## expanded, but instance indexes will not be returned in instance names. + # UseWildcardsExpansion = false + + ## When running on a localized version of Windows and with + ## UseWildcardsExpansion = true, Windows will localize object and counter + ## names. When LocalizeWildcardsExpansion = false, use the names in + ## object.Counters instead of the localized names. Only Instances can have + ## wildcards in this case. ObjectName and Counters must not have wildcards + ## when this setting is false. 
+ # LocalizeWildcardsExpansion = true + + ## Period after which counters will be reread from configuration and + ## wildcards in counter paths expanded + # CountersRefreshInterval="1m" + + ## Accepts a list of PDH error codes which are defined in pdh.go, if this + ## error is encountered it will be ignored. For example, you can provide + ## "PDH_NO_DATA" to ignore performance counters with no instances. By default + ## no errors are ignored. You can find the list here: + ## https://github.com/influxdata/telegraf/blob/master/plugins/inputs/win_perf_counters/pdh.go + ## e.g. IgnoredErrors = ["PDH_NO_DATA"] + # IgnoredErrors = [] + + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + + # [[inputs.win_perf_counters.object]] + # Measurement = "" + # ObjectName = "" + # Instances = [""] + # Counters = [] + ## Additional Object Settings + ## * IncludeTotal: set to true to include _Total instance when querying + ## for all metrics via '*' + ## * WarnOnMissing: print out when the performance counter is missing + ## from object, counter or instance + ## * UseRawValues: gather raw values instead of formatted. Raw values are + ## stored in the field name with the "_Raw" suffix, e.g. + ## "Disk_Read_Bytes_sec_Raw". + # IncludeTotal = false + # WarnOnMissing = false + # UseRawValues = false + + ## Processor usage, alternative to native, reports on a per core. 
+ # [[inputs.win_perf_counters.object]] + # Measurement = "win_cpu" + # ObjectName = "Processor" + # Instances = ["*"] + # UseRawValues = true + # Counters = [ + # "% Idle Time", + # "% Interrupt Time", + # "% Privileged Time", + # "% User Time", + # "% Processor Time", + # "% DPC Time", + # ] + + ## Disk times and queues + # [[inputs.win_perf_counters.object]] + # Measurement = "win_disk" + # ObjectName = "LogicalDisk" + # Instances = ["*"] + # Counters = [ + # "% Idle Time", + # "% Disk Time", + # "% Disk Read Time", + # "% Disk Write Time", + # "% User Time", + # "% Free Space", + # "Current Disk Queue Length", + # "Free Megabytes", + # ] + + # [[inputs.win_perf_counters.object]] + # Measurement = "win_diskio" + # ObjectName = "PhysicalDisk" + # Instances = ["*"] + # Counters = [ + # "Disk Read Bytes/sec", + # "Disk Write Bytes/sec", + # "Current Disk Queue Length", + # "Disk Reads/sec", + # "Disk Writes/sec", + # "% Disk Time", + # "% Disk Read Time", + # "% Disk Write Time", + # ] + + # [[inputs.win_perf_counters.object]] + # Measurement = "win_net" + # ObjectName = "Network Interface" + # Instances = ["*"] + # Counters = [ + # "Bytes Received/sec", + # "Bytes Sent/sec", + # "Packets Received/sec", + # "Packets Sent/sec", + # "Packets Received Discarded", + # "Packets Outbound Discarded", + # "Packets Received Errors", + # "Packets Outbound Errors", + # ] + + # [[inputs.win_perf_counters.object]] + # Measurement = "win_system" + # ObjectName = "System" + # Instances = ["------"] + # Counters = [ + # "Context Switches/sec", + # "System Calls/sec", + # "Processor Queue Length", + # "System Up Time", + # ] + + ## Example counterPath where the Instance portion must be removed to get + ## data back, such as from the Memory object. + # [[inputs.win_perf_counters.object]] + # Measurement = "win_mem" + # ObjectName = "Memory" + ## Use 6 x - to remove the Instance bit from the counterPath. 
+ # Instances = ["------"] + # Counters = [ + # "Available Bytes", + # "Cache Faults/sec", + # "Demand Zero Faults/sec", + # "Page Faults/sec", + # "Pages/sec", + # "Transition Faults/sec", + # "Pool Nonpaged Bytes", + # "Pool Paged Bytes", + # "Standby Cache Reserve Bytes", + # "Standby Cache Normal Priority Bytes", + # "Standby Cache Core Bytes", + # ] + + ## Example query where the Instance portion must be removed to get data back, + ## such as from the Paging File object. + # [[inputs.win_perf_counters.object]] + # Measurement = "win_swap" + # ObjectName = "Paging File" + # Instances = ["_Total"] + # Counters = [ + # "% Usage", + # ] diff --git a/plugins/outputs/application_insights/README.md b/plugins/outputs/application_insights/README.md index 83e204a35..6fc8d05fb 100644 --- a/plugins/outputs/application_insights/README.md +++ b/plugins/outputs/application_insights/README.md @@ -29,6 +29,10 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## Enable additional diagnostic logging. # enable_diagnostic_logging = false + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + ## Context Tag Sources add Application Insights context tags to a tag value. ## ## For list of allowed context tag keys see: diff --git a/plugins/outputs/application_insights/sample.conf b/plugins/outputs/application_insights/sample.conf index 75df9d7be..9cea42af2 100644 --- a/plugins/outputs/application_insights/sample.conf +++ b/plugins/outputs/application_insights/sample.conf @@ -12,6 +12,10 @@ ## Enable additional diagnostic logging. # enable_diagnostic_logging = false + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + ## Context Tag Sources add Application Insights context tags to a tag value. 
## ## For list of allowed context tag keys see: diff --git a/plugins/outputs/cloud_pubsub/README.md b/plugins/outputs/cloud_pubsub/README.md index d1edf10a0..979f98088 100644 --- a/plugins/outputs/cloud_pubsub/README.md +++ b/plugins/outputs/cloud_pubsub/README.md @@ -65,6 +65,10 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## Optional. If true, published PubSub message data will be base64-encoded. # base64_data = false + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + ## Optional. PubSub attributes to add to metrics. # [outputs.cloud_pubsub.attributes] # my_attr = "tag_value" diff --git a/plugins/outputs/cloud_pubsub/sample.conf b/plugins/outputs/cloud_pubsub/sample.conf index 4f259163e..6521fa616 100644 --- a/plugins/outputs/cloud_pubsub/sample.conf +++ b/plugins/outputs/cloud_pubsub/sample.conf @@ -48,6 +48,10 @@ ## Optional. If true, published PubSub message data will be base64-encoded. # base64_data = false + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + ## Optional. PubSub attributes to add to metrics. # [outputs.cloud_pubsub.attributes] # my_attr = "tag_value" diff --git a/plugins/outputs/dynatrace/README.md b/plugins/outputs/dynatrace/README.md index 8f12cf5ab..c618338a2 100644 --- a/plugins/outputs/dynatrace/README.md +++ b/plugins/outputs/dynatrace/README.md @@ -144,6 +144,10 @@ to use them. 
## If you want metrics to be treated and reported as delta counters, add the metric names here additional_counters = [ ] + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + ## Optional dimensions to be added to every metric # [outputs.dynatrace.default_dimensions] # default_key = "default value" diff --git a/plugins/outputs/dynatrace/sample.conf b/plugins/outputs/dynatrace/sample.conf index 500b49d29..ff1eb1b2c 100644 --- a/plugins/outputs/dynatrace/sample.conf +++ b/plugins/outputs/dynatrace/sample.conf @@ -31,6 +31,10 @@ ## If you want metrics to be treated and reported as delta counters, add the metric names here additional_counters = [ ] + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + ## Optional dimensions to be added to every metric # [outputs.dynatrace.default_dimensions] # default_key = "default value" diff --git a/plugins/outputs/health/README.md b/plugins/outputs/health/README.md index 1f4c81bef..fcb280eca 100644 --- a/plugins/outputs/health/README.md +++ b/plugins/outputs/health/README.md @@ -42,6 +42,10 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + ## One or more check sub-tables should be defined, it is also recommended to ## use metric filtering to limit the metrics that flow into this output. 
## diff --git a/plugins/outputs/health/sample.conf b/plugins/outputs/health/sample.conf index eb3e4a45e..07d5a4b3d 100644 --- a/plugins/outputs/health/sample.conf +++ b/plugins/outputs/health/sample.conf @@ -21,6 +21,10 @@ # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + ## One or more check sub-tables should be defined, it is also recommended to ## use metric filtering to limit the metrics that flow into this output. ## diff --git a/plugins/outputs/http/README.md b/plugins/outputs/http/README.md index f30388fa8..69f970ed0 100644 --- a/plugins/outputs/http/README.md +++ b/plugins/outputs/http/README.md @@ -82,11 +82,6 @@ to use them. ## compress body or "identity" to apply no encoding. # content_encoding = "identity" - ## Additional HTTP headers - # [outputs.http.headers] - # # Should be set manually to "application/json" for json data_format - # Content-Type = "text/plain; charset=utf-8" - ## MaxIdleConns controls the maximum number of idle (keep-alive) ## connections across all hosts. Zero means no limit. # max_idle_conn = 0 @@ -129,6 +124,15 @@ to use them. 
## Optional list of statuscodes (<200 or >300) upon which requests should not be retried # non_retryable_statuscodes = [409, 413] + + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + + ## Additional HTTP headers + # [outputs.http.headers] + # ## Should be set manually to "application/json" for json data_format + # Content-Type = "text/plain; charset=utf-8" ``` ### Google API Auth diff --git a/plugins/outputs/http/sample.conf b/plugins/outputs/http/sample.conf index 742e1ae57..4d89c7e3b 100644 --- a/plugins/outputs/http/sample.conf +++ b/plugins/outputs/http/sample.conf @@ -55,11 +55,6 @@ ## compress body or "identity" to apply no encoding. # content_encoding = "identity" - ## Additional HTTP headers - # [outputs.http.headers] - # # Should be set manually to "application/json" for json data_format - # Content-Type = "text/plain; charset=utf-8" - ## MaxIdleConns controls the maximum number of idle (keep-alive) ## connections across all hosts. Zero means no limit. # max_idle_conn = 0 @@ -102,3 +97,12 @@ ## Optional list of statuscodes (<200 or >300) upon which requests should not be retried # non_retryable_statuscodes = [409, 413] + + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + + ## Additional HTTP headers + # [outputs.http.headers] + # ## Should be set manually to "application/json" for json data_format + # Content-Type = "text/plain; charset=utf-8" diff --git a/plugins/outputs/kafka/README.md b/plugins/outputs/kafka/README.md index 635c70746..3176e98f8 100644 --- a/plugins/outputs/kafka/README.md +++ b/plugins/outputs/kafka/README.md @@ -51,33 +51,6 @@ to use them. ## ex: version = "1.1.0" # version = "" - ## Optional topic suffix configuration. - ## If the section is omitted, no suffix is used. 
- ## Following topic suffix methods are supported: - ## measurement - suffix equals to separator + measurement's name - ## tags - suffix equals to separator + specified tags' values - ## interleaved with separator - - ## Suffix equals to "_" + measurement name - # [outputs.kafka.topic_suffix] - # method = "measurement" - # separator = "_" - - ## Suffix equals to "__" + measurement's "foo" tag value. - ## If there's no such a tag, suffix equals to an empty string - # [outputs.kafka.topic_suffix] - # method = "tags" - # keys = ["foo"] - # separator = "__" - - ## Suffix equals to "_" + measurement's "foo" and "bar" - ## tag values, separated by "_". If there is no such tags, - ## their values treated as empty strings. - # [outputs.kafka.topic_suffix] - # method = "tags" - # keys = ["foo", "bar"] - # separator = "_" - ## The routing tag specifies a tagkey on the metric whose value is used as ## the message key. The message key is used to determine which partition to ## send the message to. This tag is prefered over the routing_key option. @@ -188,6 +161,37 @@ to use them. ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md # data_format = "influx" + + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + + ## Optional topic suffix configuration. + ## If the section is omitted, no suffix is used. + ## Following topic suffix methods are supported: + ## measurement - suffix equals to separator + measurement's name + ## tags - suffix equals to separator + specified tags' values + ## interleaved with separator + + ## Suffix equals to "_" + measurement name + # [outputs.kafka.topic_suffix] + # method = "measurement" + # separator = "_" + + ## Suffix equals to "__" + measurement's "foo" tag value. 
+ ## If there's no such a tag, suffix equals to an empty string + # [outputs.kafka.topic_suffix] + # method = "tags" + # keys = ["foo"] + # separator = "__" + + ## Suffix equals to "_" + measurement's "foo" and "bar" + ## tag values, separated by "_". If there is no such tags, + ## their values treated as empty strings. + # [outputs.kafka.topic_suffix] + # method = "tags" + # keys = ["foo", "bar"] + # separator = "_" ``` ### `max_retry` diff --git a/plugins/outputs/kafka/sample.conf b/plugins/outputs/kafka/sample.conf index f6c4d7968..241758dc7 100644 --- a/plugins/outputs/kafka/sample.conf +++ b/plugins/outputs/kafka/sample.conf @@ -25,33 +25,6 @@ ## ex: version = "1.1.0" # version = "" - ## Optional topic suffix configuration. - ## If the section is omitted, no suffix is used. - ## Following topic suffix methods are supported: - ## measurement - suffix equals to separator + measurement's name - ## tags - suffix equals to separator + specified tags' values - ## interleaved with separator - - ## Suffix equals to "_" + measurement name - # [outputs.kafka.topic_suffix] - # method = "measurement" - # separator = "_" - - ## Suffix equals to "__" + measurement's "foo" tag value. - ## If there's no such a tag, suffix equals to an empty string - # [outputs.kafka.topic_suffix] - # method = "tags" - # keys = ["foo"] - # separator = "__" - - ## Suffix equals to "_" + measurement's "foo" and "bar" - ## tag values, separated by "_". If there is no such tags, - ## their values treated as empty strings. - # [outputs.kafka.topic_suffix] - # method = "tags" - # keys = ["foo", "bar"] - # separator = "_" - ## The routing tag specifies a tagkey on the metric whose value is used as ## the message key. The message key is used to determine which partition to ## send the message to. This tag is prefered over the routing_key option. 
@@ -162,3 +135,34 @@ ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md # data_format = "influx" + + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + + ## Optional topic suffix configuration. + ## If the section is omitted, no suffix is used. + ## Following topic suffix methods are supported: + ## measurement - suffix equals to separator + measurement's name + ## tags - suffix equals to separator + specified tags' values + ## interleaved with separator + + ## Suffix equals to "_" + measurement name + # [outputs.kafka.topic_suffix] + # method = "measurement" + # separator = "_" + + ## Suffix equals to "__" + measurement's "foo" tag value. + ## If there's no such a tag, suffix equals to an empty string + # [outputs.kafka.topic_suffix] + # method = "tags" + # keys = ["foo"] + # separator = "__" + + ## Suffix equals to "_" + measurement's "foo" and "bar" + ## tag values, separated by "_". If there is no such tags, + ## their values treated as empty strings. + # [outputs.kafka.topic_suffix] + # method = "tags" + # keys = ["foo", "bar"] + # separator = "_" diff --git a/plugins/outputs/kinesis/README.md b/plugins/outputs/kinesis/README.md index 2c4d1caf1..cce8f14b9 100644 --- a/plugins/outputs/kinesis/README.md +++ b/plugins/outputs/kinesis/README.md @@ -78,6 +78,19 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## Kinesis StreamName must exist prior to starting telegraf. streamname = "StreamName" + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "influx" + + ## debug will show upstream aws messages. 
+ debug = false + + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + ## The partition key can be calculated using one of several methods: ## ## Use a static value for all writes: @@ -99,16 +112,6 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. # method = "tag" # key = "host" # default = "mykey" - - - ## Data format to output. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - data_format = "influx" - - ## debug will show upstream aws messages. - debug = false ``` For this output plugin to function correctly the following variables must be diff --git a/plugins/outputs/kinesis/sample.conf b/plugins/outputs/kinesis/sample.conf index 5b24149b0..828913465 100644 --- a/plugins/outputs/kinesis/sample.conf +++ b/plugins/outputs/kinesis/sample.conf @@ -30,6 +30,19 @@ ## Kinesis StreamName must exist prior to starting telegraf. streamname = "StreamName" + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "influx" + + ## debug will show upstream aws messages. + debug = false + + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + ## The partition key can be calculated using one of several methods: ## ## Use a static value for all writes: @@ -51,13 +64,3 @@ # method = "tag" # key = "host" # default = "mykey" - - - ## Data format to output. 
- ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - data_format = "influx" - - ## debug will show upstream aws messages. - debug = false diff --git a/plugins/outputs/mqtt/README.md b/plugins/outputs/mqtt/README.md index 30819848a..6ebf09fda 100644 --- a/plugins/outputs/mqtt/README.md +++ b/plugins/outputs/mqtt/README.md @@ -125,6 +125,10 @@ to use them. ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + ## Optional MQTT 5 publish properties ## These setting only apply if the "protocol" property is set to 5. This must ## be defined at the end of the plugin settings, otherwise TOML will assume diff --git a/plugins/outputs/mqtt/sample.conf b/plugins/outputs/mqtt/sample.conf index ba2ce2d6d..850be63e5 100644 --- a/plugins/outputs/mqtt/sample.conf +++ b/plugins/outputs/mqtt/sample.conf @@ -90,6 +90,10 @@ ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + ## Optional MQTT 5 publish properties ## These setting only apply if the "protocol" property is set to 5. This must ## be defined at the end of the plugin settings, otherwise TOML will assume diff --git a/plugins/outputs/opentelemetry/README.md b/plugins/outputs/opentelemetry/README.md index df5179e34..3066daa69 100644 --- a/plugins/outputs/opentelemetry/README.md +++ b/plugins/outputs/opentelemetry/README.md @@ -41,6 +41,10 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. 
## Supports: "gzip", "none" # compression = "gzip" + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + ## Configuration options for the Coralogix dialect ## Enable the following section of you use this plugin with a Coralogix endpoint # [outputs.opentelemetry.coralogix] diff --git a/plugins/outputs/opentelemetry/sample.conf b/plugins/outputs/opentelemetry/sample.conf index 9ab1eafe5..3464ef256 100644 --- a/plugins/outputs/opentelemetry/sample.conf +++ b/plugins/outputs/opentelemetry/sample.conf @@ -24,6 +24,10 @@ ## Supports: "gzip", "none" # compression = "gzip" + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + ## Configuration options for the Coralogix dialect ## Enable the following section of you use this plugin with a Coralogix endpoint # [outputs.opentelemetry.coralogix] diff --git a/plugins/outputs/sensu/README.md b/plugins/outputs/sensu/README.md index 7cee56dc1..c3bae7a76 100644 --- a/plugins/outputs/sensu/README.md +++ b/plugins/outputs/sensu/README.md @@ -68,6 +68,10 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## compress body or "identity" to apply no encoding. # content_encoding = "identity" + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + ## Sensu Event details ## ## Below are the event details to be sent to Sensu. The main portions of the diff --git a/plugins/outputs/sensu/sample.conf b/plugins/outputs/sensu/sample.conf index 29b938d92..ac5580cd7 100644 --- a/plugins/outputs/sensu/sample.conf +++ b/plugins/outputs/sensu/sample.conf @@ -51,6 +51,10 @@ ## compress body or "identity" to apply no encoding. 
# content_encoding = "identity" + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + ## Sensu Event details ## ## Below are the event details to be sent to Sensu. The main portions of the diff --git a/plugins/outputs/sql/README.md b/plugins/outputs/sql/README.md index 7fa5821a8..5c3e4fb0a 100644 --- a/plugins/outputs/sql/README.md +++ b/plugins/outputs/sql/README.md @@ -106,23 +106,6 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## Initialization SQL # init_sql = "" - ## Metric type to SQL type conversion - ## The values on the left are the data types Telegraf has and the values on - ## the right are the data types Telegraf will use when sending to a database. - ## - ## The database values used must be data types the destination database - ## understands. It is up to the user to ensure that the selected data type is - ## available in the database they are using. Refer to your database - ## documentation for what data types are available and supported. - #[outputs.sql.convert] - # integer = "INT" - # real = "DOUBLE" - # text = "TEXT" - # timestamp = "TIMESTAMP" - # defaultvalue = "TEXT" - # unsigned = "UNSIGNED" - # bool = "BOOL" - ## This setting controls the behavior of the unsigned value. By default the ## setting will take the integer value and append the unsigned value to it. The other ## option is "literal", which will use the actual value the user provides to @@ -143,6 +126,27 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## Maximum number of open connections to the database. 0 means unlimited. 
# connection_max_open = 0 + + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + + ## Metric type to SQL type conversion + ## The values on the left are the data types Telegraf has and the values on + ## the right are the data types Telegraf will use when sending to a database. + ## + ## The database values used must be data types the destination database + ## understands. It is up to the user to ensure that the selected data type is + ## available in the database they are using. Refer to your database + ## documentation for what data types are available and supported. + #[outputs.sql.convert] + # integer = "INT" + # real = "DOUBLE" + # text = "TEXT" + # timestamp = "TIMESTAMP" + # defaultvalue = "TEXT" + # unsigned = "UNSIGNED" + # bool = "BOOL" ``` ## Driver-specific information diff --git a/plugins/outputs/sql/sample.conf b/plugins/outputs/sql/sample.conf index c9528d5e9..78d9852e8 100644 --- a/plugins/outputs/sql/sample.conf +++ b/plugins/outputs/sql/sample.conf @@ -28,23 +28,6 @@ ## Initialization SQL # init_sql = "" - ## Metric type to SQL type conversion - ## The values on the left are the data types Telegraf has and the values on - ## the right are the data types Telegraf will use when sending to a database. - ## - ## The database values used must be data types the destination database - ## understands. It is up to the user to ensure that the selected data type is - ## available in the database they are using. Refer to your database - ## documentation for what data types are available and supported. - #[outputs.sql.convert] - # integer = "INT" - # real = "DOUBLE" - # text = "TEXT" - # timestamp = "TIMESTAMP" - # defaultvalue = "TEXT" - # unsigned = "UNSIGNED" - # bool = "BOOL" - ## This setting controls the behavior of the unsigned value. By default the ## setting will take the integer value and append the unsigned value to it. 
The other ## option is "literal", which will use the actual value the user provides to @@ -65,3 +48,24 @@ ## Maximum number of open connections to the database. 0 means unlimited. # connection_max_open = 0 + + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + + ## Metric type to SQL type conversion + ## The values on the left are the data types Telegraf has and the values on + ## the right are the data types Telegraf will use when sending to a database. + ## + ## The database values used must be data types the destination database + ## understands. It is up to the user to ensure that the selected data type is + ## available in the database they are using. Refer to your database + ## documentation for what data types are available and supported. + #[outputs.sql.convert] + # integer = "INT" + # real = "DOUBLE" + # text = "TEXT" + # timestamp = "TIMESTAMP" + # defaultvalue = "TEXT" + # unsigned = "UNSIGNED" + # bool = "BOOL" diff --git a/plugins/outputs/stackdriver/README.md b/plugins/outputs/stackdriver/README.md index 6495c6f64..53c5a483d 100644 --- a/plugins/outputs/stackdriver/README.md +++ b/plugins/outputs/stackdriver/README.md @@ -69,6 +69,10 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. 
## Custom resource type # resource_type = "generic_node" + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + ## Additional resource labels # [outputs.stackdriver.resource_labels] # node_id = "$HOSTNAME" diff --git a/plugins/outputs/stackdriver/sample.conf b/plugins/outputs/stackdriver/sample.conf index 824be6924..1550d558a 100644 --- a/plugins/outputs/stackdriver/sample.conf +++ b/plugins/outputs/stackdriver/sample.conf @@ -34,6 +34,10 @@ ## Custom resource type # resource_type = "generic_node" + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + ## Additional resource labels # [outputs.stackdriver.resource_labels] # node_id = "$HOSTNAME" diff --git a/plugins/outputs/websocket/README.md b/plugins/outputs/websocket/README.md index a659cdc26..56cf63d7e 100644 --- a/plugins/outputs/websocket/README.md +++ b/plugins/outputs/websocket/README.md @@ -54,6 +54,10 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. 
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md # data_format = "influx" + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + ## Additional HTTP Upgrade headers # [outputs.websocket.headers] # Authorization = "Bearer " diff --git a/plugins/outputs/websocket/sample.conf b/plugins/outputs/websocket/sample.conf index 5c0c6271f..adbe21889 100644 --- a/plugins/outputs/websocket/sample.conf +++ b/plugins/outputs/websocket/sample.conf @@ -34,6 +34,10 @@ ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md # data_format = "influx" + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the + ## plugin definition, otherwise additional config options are read as part of + ## the table + ## Additional HTTP Upgrade headers # [outputs.websocket.headers] # Authorization = "Bearer "