docs: Clean up markdown add warning about tables (#13792)

This commit is contained in:
Joshua Powers 2023-08-28 14:07:45 -06:00 committed by GitHub
parent cb488ad0f8
commit 318a4b69e5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
32 changed files with 642 additions and 464 deletions

View File

@ -106,46 +106,55 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
## How often the discovery API call executed (default 1m) ## How often the discovery API call executed (default 1m)
#discovery_interval = "1m" #discovery_interval = "1m"
## Metrics to Pull (Required) ## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Metrics to Pull
## At least one metrics definition required
[[inputs.aliyuncms.metrics]] [[inputs.aliyuncms.metrics]]
## Metrics names to be requested, ## Metrics names to be requested,
## Description can be found here (per project): ## Description can be found here (per project):
## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq ## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
names = ["InstanceActiveConnection", "InstanceNewConnection"] names = ["InstanceActiveConnection", "InstanceNewConnection"]
## Dimension filters for Metric (optional). ## Dimension filters for Metric (optional)
## This allows to get additional metric dimension. If dimension is not ## This allows to get additional metric dimension. If dimension is not
## specified it can be returned or the data can be aggregated - it depends ## specified it can be returned or the data can be aggregated - it depends
## on particular metric, you can find details here: ## on particular metric, you can find details here:
## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq ## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
## ##
## Note, that by default dimension filter includes the list of discovered ## Note, that by default dimension filter includes the list of discovered
## objects in scope (if discovery is enabled). ## objects in scope (if discovery is enabled). Values specified here would
# Values specified here would be added into the list of discovered objects. ## be added into the list of discovered objects. You can specify either
## You can specify either single dimension: ## single dimension:
#dimensions = '{"instanceId": "p-example"}' # dimensions = '{"instanceId": "p-example"}'
## Or you can specify several dimensions at once: ## Or you can specify several dimensions at once:
#dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]' # dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]'
## Enrichment tags, can be added from discovery (if supported) ## Tag Query Path
## Notation is <measurement_tag_name>:<JMES query path (https://jmespath.org/tutorial.html)> ## The following tags added by default:
## To figure out which fields are available, consult the Describe<ObjectType> ## * regionId (if discovery enabled)
## API per project. ## * userId
## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO ## * instanceId
#tag_query_path = [ ## Enrichment tags, can be added from discovery (if supported)
# "address:Address", ## Notation is
# "name:LoadBalancerName", ## <measurement_tag_name>:<JMES query path (https://jmespath.org/tutorial.html)>
# "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]" ## To figure out which fields are available, consult the
# ] ## Describe<ObjectType> API per project. For example, for SLB see:
## The following tags added by default: ## https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO
## regionId (if discovery enabled), userId, instanceId. # tag_query_path = [
# "address:Address",
# "name:LoadBalancerName",
# "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]"
# ]
## Allow metrics without discovery data, if discovery is enabled. ## Allow metrics without discovery data, if discovery is enabled.
## If set to true, then metric without discovery data would be emitted, otherwise dropped. ## If set to true, then metric without discovery data would be emitted, otherwise dropped.
## This cane be of help, in case debugging dimension filters, or partial coverage of ## This cane be of help, in case debugging dimension filters, or partial coverage of
## discovery scope vs monitoring scope ## discovery scope vs monitoring scope
# allow_dps_without_discovery = false # allow_dps_without_discovery = false
``` ```
### Requirements and Terminology ### Requirements and Terminology

View File

@ -69,43 +69,52 @@
## How often the discovery API call executed (default 1m) ## How often the discovery API call executed (default 1m)
#discovery_interval = "1m" #discovery_interval = "1m"
## Metrics to Pull (Required) ## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Metrics to Pull
## At least one metrics definition required
[[inputs.aliyuncms.metrics]] [[inputs.aliyuncms.metrics]]
## Metrics names to be requested, ## Metrics names to be requested,
## Description can be found here (per project): ## Description can be found here (per project):
## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq ## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
names = ["InstanceActiveConnection", "InstanceNewConnection"] names = ["InstanceActiveConnection", "InstanceNewConnection"]
## Dimension filters for Metric (optional). ## Dimension filters for Metric (optional)
## This allows to get additional metric dimension. If dimension is not ## This allows to get additional metric dimension. If dimension is not
## specified it can be returned or the data can be aggregated - it depends ## specified it can be returned or the data can be aggregated - it depends
## on particular metric, you can find details here: ## on particular metric, you can find details here:
## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq ## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
## ##
## Note, that by default dimension filter includes the list of discovered ## Note, that by default dimension filter includes the list of discovered
## objects in scope (if discovery is enabled). ## objects in scope (if discovery is enabled). Values specified here would
# Values specified here would be added into the list of discovered objects. ## be added into the list of discovered objects. You can specify either
## You can specify either single dimension: ## single dimension:
#dimensions = '{"instanceId": "p-example"}' # dimensions = '{"instanceId": "p-example"}'
## Or you can specify several dimensions at once: ## Or you can specify several dimensions at once:
#dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]' # dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]'
## Enrichment tags, can be added from discovery (if supported) ## Tag Query Path
## Notation is <measurement_tag_name>:<JMES query path (https://jmespath.org/tutorial.html)> ## The following tags added by default:
## To figure out which fields are available, consult the Describe<ObjectType> ## * regionId (if discovery enabled)
## API per project. ## * userId
## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO ## * instanceId
#tag_query_path = [ ## Enrichment tags, can be added from discovery (if supported)
# "address:Address", ## Notation is
# "name:LoadBalancerName", ## <measurement_tag_name>:<JMES query path (https://jmespath.org/tutorial.html)>
# "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]" ## To figure out which fields are available, consult the
# ] ## Describe<ObjectType> API per project. For example, for SLB see:
## The following tags added by default: ## https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO
## regionId (if discovery enabled), userId, instanceId. # tag_query_path = [
# "address:Address",
# "name:LoadBalancerName",
# "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]"
# ]
## Allow metrics without discovery data, if discovery is enabled. ## Allow metrics without discovery data, if discovery is enabled.
## If set to true, then metric without discovery data would be emitted, otherwise dropped. ## If set to true, then metric without discovery data would be emitted, otherwise dropped.
## This cane be of help, in case debugging dimension filters, or partial coverage of ## This cane be of help, in case debugging dimension filters, or partial coverage of
## discovery scope vs monitoring scope ## discovery scope vs monitoring scope
# allow_dps_without_discovery = false # allow_dps_without_discovery = false

View File

@ -40,7 +40,7 @@ to use them.
## Configuration ## Configuration
```toml @sample.conf ```toml @sample.conf
-# Read metrics from one or many vCenters # Read metrics from one or many vCenters
[[inputs.vsphere]] [[inputs.vsphere]]
## List of vCenter URLs to be monitored. These three lines must be uncommented ## List of vCenter URLs to be monitored. These three lines must be uncommented
## and edited for the plugin to work. ## and edited for the plugin to work.

View File

@ -1,4 +1,4 @@
-# Read metrics from one or many vCenters # Read metrics from one or many vCenters
[[inputs.vsphere]] [[inputs.vsphere]]
## List of vCenter URLs to be monitored. These three lines must be uncommented ## List of vCenter URLs to be monitored. These three lines must be uncommented
## and edited for the plugin to work. ## and edited for the plugin to work.

View File

@ -302,143 +302,172 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
## Configuration ## Configuration
```toml @sample.conf ```toml @sample.conf
# # Input plugin to counterPath Performance Counters on Windows operating systems # Input plugin to counterPath Performance Counters on Windows operating systems
# # This plugin ONLY supports Windows # This plugin ONLY supports Windows
# [[inputs.win_perf_counters]] [[inputs.win_perf_counters]]
# ## By default this plugin returns basic CPU and Disk statistics. ## By default this plugin returns basic CPU and Disk statistics. See the
# ## See the README file for more examples. ## README file for more examples. Uncomment examples below or write your own
# ## Uncomment examples below or write your own as you see fit. If the system ## as you see fit. If the system being polled for data does not have the
# ## being polled for data does not have the Object at startup of the Telegraf ## Object at startup of the Telegraf agent, it will not be gathered.
# ## agent, it will not be gathered.
# ## Settings: ## Print All matching performance counters
# # PrintValid = false # Print All matching performance counters # PrintValid = false
# # Whether request a timestamp along with the PerfCounter data or just use current time
# # UsePerfCounterTime=true ## Whether request a timestamp along with the PerfCounter data or use current
# # If UseWildcardsExpansion params is set to true, wildcards (partial wildcards in instance names and wildcards in counters names) in configured counter paths will be expanded ## time
# # and in case of localized Windows, counter paths will be also localized. It also returns instance indexes in instance names. # UsePerfCounterTime = true
# # If false, wildcards (not partial) in instance names will still be expanded, but instance indexes will not be returned in instance names.
# #UseWildcardsExpansion = false ## If UseWildcardsExpansion params is set to true, wildcards (partial
# # When running on a localized version of Windows and with UseWildcardsExpansion = true, Windows will ## wildcards in instance names and wildcards in counters names) in configured
# # localize object and counter names. When LocalizeWildcardsExpansion = false, use the names in object.Counters instead ## counter paths will be expanded and in case of localized Windows, counter
# # of the localized names. Only Instances can have wildcards in this case. ObjectName and Counters must not have wildcards when this ## paths will be also localized. It also returns instance indexes in instance
# # setting is false. ## names. If false, wildcards (not partial) in instance names will still be
# #LocalizeWildcardsExpansion = true ## expanded, but instance indexes will not be returned in instance names.
# # Period after which counters will be reread from configuration and wildcards in counter paths expanded # UseWildcardsExpansion = false
# CountersRefreshInterval="1m"
# ## Accepts a list of PDH error codes which are defined in pdh.go, if this error is encountered it will be ignored ## When running on a localized version of Windows and with
# ## For example, you can provide "PDH_NO_DATA" to ignore performance counters with no instances ## UseWildcardsExpansion = true, Windows will localize object and counter
# ## By default no errors are ignored ## names. When LocalizeWildcardsExpansion = false, use the names in
# ## You can find the list here: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/win_perf_counters/pdh.go ## object.Counters instead of the localized names. Only Instances can have
# ## e.g.: IgnoredErrors = ["PDH_NO_DATA"] ## wildcards in this case. ObjectName and Counters must not have wildcards
# # IgnoredErrors = [] ## when this setting is false.
# # LocalizeWildcardsExpansion = true
# [[inputs.win_perf_counters.object]]
# # Processor usage, alternative to native, reports on a per core. ## Period after which counters will be reread from configuration and
# ObjectName = "Processor" ## wildcards in counter paths expanded
# Instances = ["*"] # CountersRefreshInterval="1m"
# Counters = [
# "% Idle Time", ## Accepts a list of PDH error codes which are defined in pdh.go, if this
# "% Interrupt Time", ## error is encountered it will be ignored. For example, you can provide
# "% Privileged Time", ## "PDH_NO_DATA" to ignore performance counters with no instances. By default
# "% User Time", ## no errors are ignored You can find the list here:
# "% Processor Time", ## https://github.com/influxdata/telegraf/blob/master/plugins/inputs/win_perf_counters/pdh.go
# "% DPC Time", ## e.g. IgnoredErrors = ["PDH_NO_DATA"]
# ] # IgnoredErrors = []
# Measurement = "win_cpu"
# # Set to true to include _Total instance when querying for all (*). ## NOTE: Due to the way TOML is parsed, tables must be at the END of the
# # IncludeTotal=false ## plugin definition, otherwise additional config options are read as part of
# # Print out when the performance counter is missing from object, counter or instance. ## the table
# # WarnOnMissing = false
# # Gather raw values instead of formatted. Raw value is stored in the field name with the "_Raw" suffix, e.g. "Disk_Read_Bytes_sec_Raw". # [[inputs.win_perf_counters.object]]
# # UseRawValues = true # Measurement = ""
# # ObjectName = ""
# [[inputs.win_perf_counters.object]] # Instances = [""]
# # Disk times and queues # Counters = []
# ObjectName = "LogicalDisk" ## Additional Object Settings
# Instances = ["*"] ## * IncludeTotal: set to true to include _Total instance when querying
# Counters = [ ## for all metrics via '*'
# "% Idle Time", ## * WarnOnMissing: print out when the performance counter is missing
# "% Disk Time", ## from object, counter or instance
# "% Disk Read Time", ## * UseRawValues: gather raw values instead of formatted. Raw values are
# "% Disk Write Time", ## stored in the field name with the "_Raw" suffix, e.g.
# "% User Time", ## "Disk_Read_Bytes_sec_Raw".
# "% Free Space", # IncludeTotal = false
# "Current Disk Queue Length", # WarnOnMissing = false
# "Free Megabytes", # UseRawValues = false
# ]
# Measurement = "win_disk" ## Processor usage, alternative to native, reports on a per core.
# # [[inputs.win_perf_counters.object]]
# [[inputs.win_perf_counters.object]] # Measurement = "win_cpu"
# ObjectName = "PhysicalDisk" # ObjectName = "Processor"
# Instances = ["*"] # Instances = ["*"]
# Counters = [ # UseRawValues = true
# "Disk Read Bytes/sec", # Counters = [
# "Disk Write Bytes/sec", # "% Idle Time",
# "Current Disk Queue Length", # "% Interrupt Time",
# "Disk Reads/sec", # "% Privileged Time",
# "Disk Writes/sec", # "% User Time",
# "% Disk Time", # "% Processor Time",
# "% Disk Read Time", # "% DPC Time",
# "% Disk Write Time", # ]
# ]
# Measurement = "win_diskio" ## Disk times and queues
# # [[inputs.win_perf_counters.object]]
# [[inputs.win_perf_counters.object]] # Measurement = "win_disk"
# ObjectName = "Network Interface" # ObjectName = "LogicalDisk"
# Instances = ["*"] # Instances = ["*"]
# Counters = [ # Counters = [
# "Bytes Received/sec", # "% Idle Time",
# "Bytes Sent/sec", # "% Disk Time",
# "Packets Received/sec", # "% Disk Read Time",
# "Packets Sent/sec", # "% Disk Write Time",
# "Packets Received Discarded", # "% User Time",
# "Packets Outbound Discarded", # "% Free Space",
# "Packets Received Errors", # "Current Disk Queue Length",
# "Packets Outbound Errors", # "Free Megabytes",
# ] # ]
# Measurement = "win_net"
# # [[inputs.win_perf_counters.object]]
# # Measurement = "win_diskio"
# [[inputs.win_perf_counters.object]] # ObjectName = "PhysicalDisk"
# ObjectName = "System" # Instances = ["*"]
# Counters = [ # Counters = [
# "Context Switches/sec", # "Disk Read Bytes/sec",
# "System Calls/sec", # "Disk Write Bytes/sec",
# "Processor Queue Length", # "Current Disk Queue Length",
# "System Up Time", # "Disk Reads/sec",
# ] # "Disk Writes/sec",
# Instances = ["------"] # "% Disk Time",
# Measurement = "win_system" # "% Disk Read Time",
# # "% Disk Write Time",
# [[inputs.win_perf_counters.object]] # ]
# # Example counterPath where the Instance portion must be removed to get data back,
# # such as from the Memory object. # [[inputs.win_perf_counters.object]]
# ObjectName = "Memory" # Measurement = "win_net"
# Counters = [ # ObjectName = "Network Interface"
# "Available Bytes", # Instances = ["*"]
# "Cache Faults/sec", # Counters = [
# "Demand Zero Faults/sec", # "Bytes Received/sec",
# "Page Faults/sec", # "Bytes Sent/sec",
# "Pages/sec", # "Packets Received/sec",
# "Transition Faults/sec", # "Packets Sent/sec",
# "Pool Nonpaged Bytes", # "Packets Received Discarded",
# "Pool Paged Bytes", # "Packets Outbound Discarded",
# "Standby Cache Reserve Bytes", # "Packets Received Errors",
# "Standby Cache Normal Priority Bytes", # "Packets Outbound Errors",
# "Standby Cache Core Bytes", # ]
# ]
# Instances = ["------"] # Use 6 x - to remove the Instance bit from the counterPath. # [[inputs.win_perf_counters.object]]
# Measurement = "win_mem" # Measurement = "win_system"
# # ObjectName = "System"
# [[inputs.win_perf_counters.object]] # Instances = ["------"]
# # Example query where the Instance portion must be removed to get data back, # Counters = [
# # such as from the Paging File object. # "Context Switches/sec",
# ObjectName = "Paging File" # "System Calls/sec",
# Counters = [ # "Processor Queue Length",
# "% Usage", # "System Up Time",
# ] # ]
# Instances = ["_Total"]
# Measurement = "win_swap" ## Example counterPath where the Instance portion must be removed to get
## data back, such as from the Memory object.
# [[inputs.win_perf_counters.object]]
# Measurement = "win_mem"
# ObjectName = "Memory"
## Use 6 x - to remove the Instance bit from the counterPath.
# Instances = ["------"]
# Counters = [
# "Available Bytes",
# "Cache Faults/sec",
# "Demand Zero Faults/sec",
# "Page Faults/sec",
# "Pages/sec",
# "Transition Faults/sec",
# "Pool Nonpaged Bytes",
# "Pool Paged Bytes",
# "Standby Cache Reserve Bytes",
# "Standby Cache Normal Priority Bytes",
# "Standby Cache Core Bytes",
# ]
## Example query where the Instance portion must be removed to get data back,
## such as from the Paging File object.
# [[inputs.win_perf_counters.object]]
# Measurement = "win_swap"
# ObjectName = "Paging File"
# Instances = ["_Total"]
# Counters = [
# "% Usage",
# ]
``` ```
### Generic Queries ### Generic Queries

View File

@ -1,137 +1,166 @@
# # Input plugin to counterPath Performance Counters on Windows operating systems # Input plugin to counterPath Performance Counters on Windows operating systems
# # This plugin ONLY supports Windows # This plugin ONLY supports Windows
# [[inputs.win_perf_counters]] [[inputs.win_perf_counters]]
# ## By default this plugin returns basic CPU and Disk statistics. ## By default this plugin returns basic CPU and Disk statistics. See the
# ## See the README file for more examples. ## README file for more examples. Uncomment examples below or write your own
# ## Uncomment examples below or write your own as you see fit. If the system ## as you see fit. If the system being polled for data does not have the
# ## being polled for data does not have the Object at startup of the Telegraf ## Object at startup of the Telegraf agent, it will not be gathered.
# ## agent, it will not be gathered.
# ## Settings: ## Print All matching performance counters
# # PrintValid = false # Print All matching performance counters # PrintValid = false
# # Whether request a timestamp along with the PerfCounter data or just use current time
# # UsePerfCounterTime=true ## Whether request a timestamp along with the PerfCounter data or use current
# # If UseWildcardsExpansion params is set to true, wildcards (partial wildcards in instance names and wildcards in counters names) in configured counter paths will be expanded ## time
# # and in case of localized Windows, counter paths will be also localized. It also returns instance indexes in instance names. # UsePerfCounterTime = true
# # If false, wildcards (not partial) in instance names will still be expanded, but instance indexes will not be returned in instance names.
# #UseWildcardsExpansion = false ## If UseWildcardsExpansion params is set to true, wildcards (partial
# # When running on a localized version of Windows and with UseWildcardsExpansion = true, Windows will ## wildcards in instance names and wildcards in counters names) in configured
# # localize object and counter names. When LocalizeWildcardsExpansion = false, use the names in object.Counters instead ## counter paths will be expanded and in case of localized Windows, counter
# # of the localized names. Only Instances can have wildcards in this case. ObjectName and Counters must not have wildcards when this ## paths will be also localized. It also returns instance indexes in instance
# # setting is false. ## names. If false, wildcards (not partial) in instance names will still be
# #LocalizeWildcardsExpansion = true ## expanded, but instance indexes will not be returned in instance names.
# # Period after which counters will be reread from configuration and wildcards in counter paths expanded # UseWildcardsExpansion = false
# CountersRefreshInterval="1m"
# ## Accepts a list of PDH error codes which are defined in pdh.go, if this error is encountered it will be ignored ## When running on a localized version of Windows and with
# ## For example, you can provide "PDH_NO_DATA" to ignore performance counters with no instances ## UseWildcardsExpansion = true, Windows will localize object and counter
# ## By default no errors are ignored ## names. When LocalizeWildcardsExpansion = false, use the names in
# ## You can find the list here: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/win_perf_counters/pdh.go ## object.Counters instead of the localized names. Only Instances can have
# ## e.g.: IgnoredErrors = ["PDH_NO_DATA"] ## wildcards in this case. ObjectName and Counters must not have wildcards
# # IgnoredErrors = [] ## when this setting is false.
# # LocalizeWildcardsExpansion = true
# [[inputs.win_perf_counters.object]]
# # Processor usage, alternative to native, reports on a per core. ## Period after which counters will be reread from configuration and
# ObjectName = "Processor" ## wildcards in counter paths expanded
# Instances = ["*"] # CountersRefreshInterval="1m"
# Counters = [
# "% Idle Time", ## Accepts a list of PDH error codes which are defined in pdh.go, if this
# "% Interrupt Time", ## error is encountered it will be ignored. For example, you can provide
# "% Privileged Time", ## "PDH_NO_DATA" to ignore performance counters with no instances. By default
# "% User Time", ## no errors are ignored You can find the list here:
# "% Processor Time", ## https://github.com/influxdata/telegraf/blob/master/plugins/inputs/win_perf_counters/pdh.go
# "% DPC Time", ## e.g. IgnoredErrors = ["PDH_NO_DATA"]
# ] # IgnoredErrors = []
# Measurement = "win_cpu"
# # Set to true to include _Total instance when querying for all (*). ## NOTE: Due to the way TOML is parsed, tables must be at the END of the
# # IncludeTotal=false ## plugin definition, otherwise additional config options are read as part of
# # Print out when the performance counter is missing from object, counter or instance. ## the table
# # WarnOnMissing = false
# # Gather raw values instead of formatted. Raw value is stored in the field name with the "_Raw" suffix, e.g. "Disk_Read_Bytes_sec_Raw". # [[inputs.win_perf_counters.object]]
# # UseRawValues = true # Measurement = ""
# # ObjectName = ""
# [[inputs.win_perf_counters.object]] # Instances = [""]
# # Disk times and queues # Counters = []
# ObjectName = "LogicalDisk" ## Additional Object Settings
# Instances = ["*"] ## * IncludeTotal: set to true to include _Total instance when querying
# Counters = [ ## for all metrics via '*'
# "% Idle Time", ## * WarnOnMissing: print out when the performance counter is missing
# "% Disk Time", ## from object, counter or instance
# "% Disk Read Time", ## * UseRawValues: gather raw values instead of formatted. Raw values are
# "% Disk Write Time", ## stored in the field name with the "_Raw" suffix, e.g.
# "% User Time", ## "Disk_Read_Bytes_sec_Raw".
# "% Free Space", # IncludeTotal = false
# "Current Disk Queue Length", # WarnOnMissing = false
# "Free Megabytes", # UseRawValues = false
# ]
# Measurement = "win_disk" ## Processor usage, alternative to native, reports on a per core.
# # [[inputs.win_perf_counters.object]]
# [[inputs.win_perf_counters.object]] # Measurement = "win_cpu"
# ObjectName = "PhysicalDisk" # ObjectName = "Processor"
# Instances = ["*"] # Instances = ["*"]
# Counters = [ # UseRawValues = true
# "Disk Read Bytes/sec", # Counters = [
# "Disk Write Bytes/sec", # "% Idle Time",
# "Current Disk Queue Length", # "% Interrupt Time",
# "Disk Reads/sec", # "% Privileged Time",
# "Disk Writes/sec", # "% User Time",
# "% Disk Time", # "% Processor Time",
# "% Disk Read Time", # "% DPC Time",
# "% Disk Write Time", # ]
# ]
# Measurement = "win_diskio" ## Disk times and queues
# # [[inputs.win_perf_counters.object]]
# [[inputs.win_perf_counters.object]] # Measurement = "win_disk"
# ObjectName = "Network Interface" # ObjectName = "LogicalDisk"
# Instances = ["*"] # Instances = ["*"]
# Counters = [ # Counters = [
# "Bytes Received/sec", # "% Idle Time",
# "Bytes Sent/sec", # "% Disk Time",
# "Packets Received/sec", # "% Disk Read Time",
# "Packets Sent/sec", # "% Disk Write Time",
# "Packets Received Discarded", # "% User Time",
# "Packets Outbound Discarded", # "% Free Space",
# "Packets Received Errors", # "Current Disk Queue Length",
# "Packets Outbound Errors", # "Free Megabytes",
# ] # ]
# Measurement = "win_net"
# # [[inputs.win_perf_counters.object]]
# # Measurement = "win_diskio"
# [[inputs.win_perf_counters.object]] # ObjectName = "PhysicalDisk"
# ObjectName = "System" # Instances = ["*"]
# Counters = [ # Counters = [
# "Context Switches/sec", # "Disk Read Bytes/sec",
# "System Calls/sec", # "Disk Write Bytes/sec",
# "Processor Queue Length", # "Current Disk Queue Length",
# "System Up Time", # "Disk Reads/sec",
# ] # "Disk Writes/sec",
# Instances = ["------"] # "% Disk Time",
# Measurement = "win_system" # "% Disk Read Time",
# # "% Disk Write Time",
# [[inputs.win_perf_counters.object]] # ]
# # Example counterPath where the Instance portion must be removed to get data back,
# # such as from the Memory object. # [[inputs.win_perf_counters.object]]
# ObjectName = "Memory" # Measurement = "win_net"
# Counters = [ # ObjectName = "Network Interface"
# "Available Bytes", # Instances = ["*"]
# "Cache Faults/sec", # Counters = [
# "Demand Zero Faults/sec", # "Bytes Received/sec",
# "Page Faults/sec", # "Bytes Sent/sec",
# "Pages/sec", # "Packets Received/sec",
# "Transition Faults/sec", # "Packets Sent/sec",
# "Pool Nonpaged Bytes", # "Packets Received Discarded",
# "Pool Paged Bytes", # "Packets Outbound Discarded",
# "Standby Cache Reserve Bytes", # "Packets Received Errors",
# "Standby Cache Normal Priority Bytes", # "Packets Outbound Errors",
# "Standby Cache Core Bytes", # ]
# ]
# Instances = ["------"] # Use 6 x - to remove the Instance bit from the counterPath. # [[inputs.win_perf_counters.object]]
# Measurement = "win_mem" # Measurement = "win_system"
# # ObjectName = "System"
# [[inputs.win_perf_counters.object]] # Instances = ["------"]
# # Example query where the Instance portion must be removed to get data back, # Counters = [
# # such as from the Paging File object. # "Context Switches/sec",
# ObjectName = "Paging File" # "System Calls/sec",
# Counters = [ # "Processor Queue Length",
# "% Usage", # "System Up Time",
# ] # ]
# Instances = ["_Total"]
# Measurement = "win_swap" ## Example counterPath where the Instance portion must be removed to get
## data back, such as from the Memory object.
# [[inputs.win_perf_counters.object]]
# Measurement = "win_mem"
# ObjectName = "Memory"
## Use 6 x - to remove the Instance bit from the counterPath.
# Instances = ["------"]
# Counters = [
# "Available Bytes",
# "Cache Faults/sec",
# "Demand Zero Faults/sec",
# "Page Faults/sec",
# "Pages/sec",
# "Transition Faults/sec",
# "Pool Nonpaged Bytes",
# "Pool Paged Bytes",
# "Standby Cache Reserve Bytes",
# "Standby Cache Normal Priority Bytes",
# "Standby Cache Core Bytes",
# ]
## Example query where the Instance portion must be removed to get data back,
## such as from the Paging File object.
# [[inputs.win_perf_counters.object]]
# Measurement = "win_swap"
# ObjectName = "Paging File"
# Instances = ["_Total"]
# Counters = [
# "% Usage",
# ]

View File

@ -29,6 +29,10 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
## Enable additional diagnostic logging. ## Enable additional diagnostic logging.
# enable_diagnostic_logging = false # enable_diagnostic_logging = false
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Context Tag Sources add Application Insights context tags to a tag value. ## Context Tag Sources add Application Insights context tags to a tag value.
## ##
## For list of allowed context tag keys see: ## For list of allowed context tag keys see:

View File

@ -12,6 +12,10 @@
## Enable additional diagnostic logging. ## Enable additional diagnostic logging.
# enable_diagnostic_logging = false # enable_diagnostic_logging = false
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Context Tag Sources add Application Insights context tags to a tag value. ## Context Tag Sources add Application Insights context tags to a tag value.
## ##
## For list of allowed context tag keys see: ## For list of allowed context tag keys see:

View File

@ -65,6 +65,10 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
## Optional. If true, published PubSub message data will be base64-encoded. ## Optional. If true, published PubSub message data will be base64-encoded.
# base64_data = false # base64_data = false
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Optional. PubSub attributes to add to metrics. ## Optional. PubSub attributes to add to metrics.
# [outputs.cloud_pubsub.attributes] # [outputs.cloud_pubsub.attributes]
# my_attr = "tag_value" # my_attr = "tag_value"

View File

@ -48,6 +48,10 @@
## Optional. If true, published PubSub message data will be base64-encoded. ## Optional. If true, published PubSub message data will be base64-encoded.
# base64_data = false # base64_data = false
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Optional. PubSub attributes to add to metrics. ## Optional. PubSub attributes to add to metrics.
# [outputs.cloud_pubsub.attributes] # [outputs.cloud_pubsub.attributes]
# my_attr = "tag_value" # my_attr = "tag_value"

View File

@ -144,6 +144,10 @@ to use them.
## If you want metrics to be treated and reported as delta counters, add the metric names here ## If you want metrics to be treated and reported as delta counters, add the metric names here
additional_counters = [ ] additional_counters = [ ]
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Optional dimensions to be added to every metric ## Optional dimensions to be added to every metric
# [outputs.dynatrace.default_dimensions] # [outputs.dynatrace.default_dimensions]
# default_key = "default value" # default_key = "default value"

View File

@ -31,6 +31,10 @@
## If you want metrics to be treated and reported as delta counters, add the metric names here ## If you want metrics to be treated and reported as delta counters, add the metric names here
additional_counters = [ ] additional_counters = [ ]
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Optional dimensions to be added to every metric ## Optional dimensions to be added to every metric
# [outputs.dynatrace.default_dimensions] # [outputs.dynatrace.default_dimensions]
# default_key = "default value" # default_key = "default value"

View File

@ -42,6 +42,10 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
# tls_cert = "/etc/telegraf/cert.pem" # tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem" # tls_key = "/etc/telegraf/key.pem"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## One or more check sub-tables should be defined, it is also recommended to ## One or more check sub-tables should be defined, it is also recommended to
## use metric filtering to limit the metrics that flow into this output. ## use metric filtering to limit the metrics that flow into this output.
## ##

View File

@ -21,6 +21,10 @@
# tls_cert = "/etc/telegraf/cert.pem" # tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem" # tls_key = "/etc/telegraf/key.pem"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## One or more check sub-tables should be defined, it is also recommended to ## One or more check sub-tables should be defined, it is also recommended to
## use metric filtering to limit the metrics that flow into this output. ## use metric filtering to limit the metrics that flow into this output.
## ##

View File

@ -82,11 +82,6 @@ to use them.
## compress body or "identity" to apply no encoding. ## compress body or "identity" to apply no encoding.
# content_encoding = "identity" # content_encoding = "identity"
## Additional HTTP headers
# [outputs.http.headers]
# # Should be set manually to "application/json" for json data_format
# Content-Type = "text/plain; charset=utf-8"
## MaxIdleConns controls the maximum number of idle (keep-alive) ## MaxIdleConns controls the maximum number of idle (keep-alive)
## connections across all hosts. Zero means no limit. ## connections across all hosts. Zero means no limit.
# max_idle_conn = 0 # max_idle_conn = 0
@ -129,6 +124,15 @@ to use them.
## Optional list of statuscodes (<200 or >300) upon which requests should not be retried ## Optional list of statuscodes (<200 or >300) upon which requests should not be retried
# non_retryable_statuscodes = [409, 413] # non_retryable_statuscodes = [409, 413]
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Additional HTTP headers
# [outputs.http.headers]
# ## Should be set manually to "application/json" for json data_format
# Content-Type = "text/plain; charset=utf-8"
``` ```
### Google API Auth ### Google API Auth

View File

@ -55,11 +55,6 @@
## compress body or "identity" to apply no encoding. ## compress body or "identity" to apply no encoding.
# content_encoding = "identity" # content_encoding = "identity"
## Additional HTTP headers
# [outputs.http.headers]
# # Should be set manually to "application/json" for json data_format
# Content-Type = "text/plain; charset=utf-8"
## MaxIdleConns controls the maximum number of idle (keep-alive) ## MaxIdleConns controls the maximum number of idle (keep-alive)
## connections across all hosts. Zero means no limit. ## connections across all hosts. Zero means no limit.
# max_idle_conn = 0 # max_idle_conn = 0
@ -102,3 +97,12 @@
## Optional list of statuscodes (<200 or >300) upon which requests should not be retried ## Optional list of statuscodes (<200 or >300) upon which requests should not be retried
# non_retryable_statuscodes = [409, 413] # non_retryable_statuscodes = [409, 413]
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Additional HTTP headers
# [outputs.http.headers]
# ## Should be set manually to "application/json" for json data_format
# Content-Type = "text/plain; charset=utf-8"

View File

@ -51,33 +51,6 @@ to use them.
## ex: version = "1.1.0" ## ex: version = "1.1.0"
# version = "" # version = ""
## Optional topic suffix configuration.
## If the section is omitted, no suffix is used.
## Following topic suffix methods are supported:
## measurement - suffix equals to separator + measurement's name
## tags - suffix equals to separator + specified tags' values
## interleaved with separator
## Suffix equals to "_" + measurement name
# [outputs.kafka.topic_suffix]
# method = "measurement"
# separator = "_"
## Suffix equals to "__" + measurement's "foo" tag value.
# ## If there is no such tag, suffix equals to an empty string
# [outputs.kafka.topic_suffix]
# method = "tags"
# keys = ["foo"]
# separator = "__"
## Suffix equals to "_" + measurement's "foo" and "bar"
# ## tag values, separated by "_". If there are no such tags,
# ## their values are treated as empty strings.
# [outputs.kafka.topic_suffix]
# method = "tags"
# keys = ["foo", "bar"]
# separator = "_"
## The routing tag specifies a tagkey on the metric whose value is used as ## The routing tag specifies a tagkey on the metric whose value is used as
## the message key. The message key is used to determine which partition to ## the message key. The message key is used to determine which partition to
## send the message to. This tag is preferred over the routing_key option. ## send the message to. This tag is preferred over the routing_key option.
@ -188,6 +161,37 @@ to use them.
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx" # data_format = "influx"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Optional topic suffix configuration.
## If the section is omitted, no suffix is used.
## Following topic suffix methods are supported:
## measurement - suffix equals to separator + measurement's name
## tags - suffix equals to separator + specified tags' values
## interleaved with separator
## Suffix equals to "_" + measurement name
# [outputs.kafka.topic_suffix]
# method = "measurement"
# separator = "_"
## Suffix equals to "__" + measurement's "foo" tag value.
## If there is no such tag, suffix equals to an empty string
# [outputs.kafka.topic_suffix]
# method = "tags"
# keys = ["foo"]
# separator = "__"
## Suffix equals to "_" + measurement's "foo" and "bar"
## tag values, separated by "_". If there are no such tags,
## their values are treated as empty strings.
# [outputs.kafka.topic_suffix]
# method = "tags"
# keys = ["foo", "bar"]
# separator = "_"
``` ```
### `max_retry` ### `max_retry`

View File

@ -25,33 +25,6 @@
## ex: version = "1.1.0" ## ex: version = "1.1.0"
# version = "" # version = ""
## Optional topic suffix configuration.
## If the section is omitted, no suffix is used.
## Following topic suffix methods are supported:
## measurement - suffix equals to separator + measurement's name
## tags - suffix equals to separator + specified tags' values
## interleaved with separator
## Suffix equals to "_" + measurement name
# [outputs.kafka.topic_suffix]
# method = "measurement"
# separator = "_"
## Suffix equals to "__" + measurement's "foo" tag value.
# ## If there's no such tag, suffix equals to an empty string
# [outputs.kafka.topic_suffix]
# method = "tags"
# keys = ["foo"]
# separator = "__"
## Suffix equals to "_" + measurement's "foo" and "bar"
## tag values, separated by "_". If there is no such tags,
## their values treated as empty strings.
# [outputs.kafka.topic_suffix]
# method = "tags"
# keys = ["foo", "bar"]
# separator = "_"
## The routing tag specifies a tagkey on the metric whose value is used as ## The routing tag specifies a tagkey on the metric whose value is used as
## the message key. The message key is used to determine which partition to ## the message key. The message key is used to determine which partition to
## send the message to. This tag is preferred over the routing_key option. ## send the message to. This tag is preferred over the routing_key option.
@ -162,3 +135,34 @@
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx" # data_format = "influx"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Optional topic suffix configuration.
## If the section is omitted, no suffix is used.
## Following topic suffix methods are supported:
## measurement - suffix equals to separator + measurement's name
## tags - suffix equals to separator + specified tags' values
## interleaved with separator
## Suffix equals to "_" + measurement name
# [outputs.kafka.topic_suffix]
# method = "measurement"
# separator = "_"
## Suffix equals to "__" + measurement's "foo" tag value.
## If there is no such tag, suffix equals to an empty string
# [outputs.kafka.topic_suffix]
# method = "tags"
# keys = ["foo"]
# separator = "__"
## Suffix equals to "_" + measurement's "foo" and "bar"
## tag values, separated by "_". If there is no such tags,
## their values treated as empty strings.
# [outputs.kafka.topic_suffix]
# method = "tags"
# keys = ["foo", "bar"]
# separator = "_"

View File

@ -78,6 +78,19 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
## Kinesis StreamName must exist prior to starting telegraf. ## Kinesis StreamName must exist prior to starting telegraf.
streamname = "StreamName" streamname = "StreamName"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
## debug will show upstream aws messages.
debug = false
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## The partition key can be calculated using one of several methods: ## The partition key can be calculated using one of several methods:
## ##
## Use a static value for all writes: ## Use a static value for all writes:
@ -99,16 +112,6 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
# method = "tag" # method = "tag"
# key = "host" # key = "host"
# default = "mykey" # default = "mykey"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
## debug will show upstream aws messages.
debug = false
``` ```
For this output plugin to function correctly the following variables must be For this output plugin to function correctly the following variables must be

View File

@ -30,6 +30,19 @@
## Kinesis StreamName must exist prior to starting telegraf. ## Kinesis StreamName must exist prior to starting telegraf.
streamname = "StreamName" streamname = "StreamName"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
## debug will show upstream aws messages.
debug = false
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## The partition key can be calculated using one of several methods: ## The partition key can be calculated using one of several methods:
## ##
## Use a static value for all writes: ## Use a static value for all writes:
@ -51,13 +64,3 @@
# method = "tag" # method = "tag"
# key = "host" # key = "host"
# default = "mykey" # default = "mykey"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
## debug will show upstream aws messages.
debug = false

View File

@ -125,6 +125,10 @@ to use them.
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx" data_format = "influx"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Optional MQTT 5 publish properties ## Optional MQTT 5 publish properties
## These setting only apply if the "protocol" property is set to 5. This must ## These setting only apply if the "protocol" property is set to 5. This must
## be defined at the end of the plugin settings, otherwise TOML will assume ## be defined at the end of the plugin settings, otherwise TOML will assume

View File

@ -90,6 +90,10 @@
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx" data_format = "influx"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Optional MQTT 5 publish properties ## Optional MQTT 5 publish properties
## These setting only apply if the "protocol" property is set to 5. This must ## These setting only apply if the "protocol" property is set to 5. This must
## be defined at the end of the plugin settings, otherwise TOML will assume ## be defined at the end of the plugin settings, otherwise TOML will assume

View File

@ -41,6 +41,10 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
## Supports: "gzip", "none" ## Supports: "gzip", "none"
# compression = "gzip" # compression = "gzip"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Configuration options for the Coralogix dialect ## Configuration options for the Coralogix dialect
## Enable the following section if you use this plugin with a Coralogix endpoint ## Enable the following section if you use this plugin with a Coralogix endpoint
# [outputs.opentelemetry.coralogix] # [outputs.opentelemetry.coralogix]

View File

@ -24,6 +24,10 @@
## Supports: "gzip", "none" ## Supports: "gzip", "none"
# compression = "gzip" # compression = "gzip"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Configuration options for the Coralogix dialect ## Configuration options for the Coralogix dialect
## Enable the following section if you use this plugin with a Coralogix endpoint ## Enable the following section if you use this plugin with a Coralogix endpoint
# [outputs.opentelemetry.coralogix] # [outputs.opentelemetry.coralogix]

View File

@ -68,6 +68,10 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
## compress body or "identity" to apply no encoding. ## compress body or "identity" to apply no encoding.
# content_encoding = "identity" # content_encoding = "identity"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Sensu Event details ## Sensu Event details
## ##
## Below are the event details to be sent to Sensu. The main portions of the ## Below are the event details to be sent to Sensu. The main portions of the

View File

@ -51,6 +51,10 @@
## compress body or "identity" to apply no encoding. ## compress body or "identity" to apply no encoding.
# content_encoding = "identity" # content_encoding = "identity"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Sensu Event details ## Sensu Event details
## ##
## Below are the event details to be sent to Sensu. The main portions of the ## Below are the event details to be sent to Sensu. The main portions of the

View File

@ -106,23 +106,6 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
## Initialization SQL ## Initialization SQL
# init_sql = "" # init_sql = ""
## Metric type to SQL type conversion
## The values on the left are the data types Telegraf has and the values on
## the right are the data types Telegraf will use when sending to a database.
##
## The database values used must be data types the destination database
## understands. It is up to the user to ensure that the selected data type is
## available in the database they are using. Refer to your database
## documentation for what data types are available and supported.
#[outputs.sql.convert]
# integer = "INT"
# real = "DOUBLE"
# text = "TEXT"
# timestamp = "TIMESTAMP"
# defaultvalue = "TEXT"
# unsigned = "UNSIGNED"
# bool = "BOOL"
## This setting controls the behavior of the unsigned value. By default the ## This setting controls the behavior of the unsigned value. By default the
## setting will take the integer value and append the unsigned value to it. The other ## setting will take the integer value and append the unsigned value to it. The other
## option is "literal", which will use the actual value the user provides to ## option is "literal", which will use the actual value the user provides to
@ -143,6 +126,27 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
## Maximum number of open connections to the database. 0 means unlimited. ## Maximum number of open connections to the database. 0 means unlimited.
# connection_max_open = 0 # connection_max_open = 0
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Metric type to SQL type conversion
## The values on the left are the data types Telegraf has and the values on
## the right are the data types Telegraf will use when sending to a database.
##
## The database values used must be data types the destination database
## understands. It is up to the user to ensure that the selected data type is
## available in the database they are using. Refer to your database
## documentation for what data types are available and supported.
#[outputs.sql.convert]
# integer = "INT"
# real = "DOUBLE"
# text = "TEXT"
# timestamp = "TIMESTAMP"
# defaultvalue = "TEXT"
# unsigned = "UNSIGNED"
# bool = "BOOL"
``` ```
## Driver-specific information ## Driver-specific information

View File

@ -28,23 +28,6 @@
## Initialization SQL ## Initialization SQL
# init_sql = "" # init_sql = ""
## Metric type to SQL type conversion
## The values on the left are the data types Telegraf has and the values on
## the right are the data types Telegraf will use when sending to a database.
##
## The database values used must be data types the destination database
## understands. It is up to the user to ensure that the selected data type is
## available in the database they are using. Refer to your database
## documentation for what data types are available and supported.
#[outputs.sql.convert]
# integer = "INT"
# real = "DOUBLE"
# text = "TEXT"
# timestamp = "TIMESTAMP"
# defaultvalue = "TEXT"
# unsigned = "UNSIGNED"
# bool = "BOOL"
## This setting controls the behavior of the unsigned value. By default the ## This setting controls the behavior of the unsigned value. By default the
## setting will take the integer value and append the unsigned value to it. The other ## setting will take the integer value and append the unsigned value to it. The other
## option is "literal", which will use the actual value the user provides to ## option is "literal", which will use the actual value the user provides to
@ -65,3 +48,24 @@
## Maximum number of open connections to the database. 0 means unlimited. ## Maximum number of open connections to the database. 0 means unlimited.
# connection_max_open = 0 # connection_max_open = 0
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Metric type to SQL type conversion
## The values on the left are the data types Telegraf has and the values on
## the right are the data types Telegraf will use when sending to a database.
##
## The database values used must be data types the destination database
## understands. It is up to the user to ensure that the selected data type is
## available in the database they are using. Refer to your database
## documentation for what data types are available and supported.
#[outputs.sql.convert]
# integer = "INT"
# real = "DOUBLE"
# text = "TEXT"
# timestamp = "TIMESTAMP"
# defaultvalue = "TEXT"
# unsigned = "UNSIGNED"
# bool = "BOOL"

View File

@ -69,6 +69,10 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
## Custom resource type ## Custom resource type
# resource_type = "generic_node" # resource_type = "generic_node"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Additional resource labels ## Additional resource labels
# [outputs.stackdriver.resource_labels] # [outputs.stackdriver.resource_labels]
# node_id = "$HOSTNAME" # node_id = "$HOSTNAME"

View File

@ -34,6 +34,10 @@
## Custom resource type ## Custom resource type
# resource_type = "generic_node" # resource_type = "generic_node"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Additional resource labels ## Additional resource labels
# [outputs.stackdriver.resource_labels] # [outputs.stackdriver.resource_labels]
# node_id = "$HOSTNAME" # node_id = "$HOSTNAME"

View File

@ -54,6 +54,10 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx" # data_format = "influx"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Additional HTTP Upgrade headers ## Additional HTTP Upgrade headers
# [outputs.websocket.headers] # [outputs.websocket.headers]
# Authorization = "Bearer <TOKEN>" # Authorization = "Bearer <TOKEN>"

View File

@ -34,6 +34,10 @@
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx" # data_format = "influx"
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table
## Additional HTTP Upgrade headers ## Additional HTTP Upgrade headers
# [outputs.websocket.headers] # [outputs.websocket.headers]
# Authorization = "Bearer <TOKEN>" # Authorization = "Bearer <TOKEN>"