feat: update etc/telegraf.conf and etc/telegraf_windows.conf (#10760)

Co-authored-by: Tiger Bot <>
telegraf-tiger[bot] 2022-03-01 17:08:49 -06:00 committed by GitHub
parent a4f6b27bd5
commit 2357a88bf3
2 changed files with 22 additions and 182 deletions

etc/telegraf.conf

@ -202,10 +202,6 @@
# # Publishes metrics to an AMQP broker
# [[outputs.amqp]]
# ## Broker to publish to.
# ## deprecated in 1.7; use the brokers option
# # url = "amqp://localhost:5672/influxdb"
#
# ## Brokers to publish to. If multiple brokers are specified a random broker
# ## will be selected anytime a connection is established. This can be
# ## helpful for load balancing when not using a dedicated load balancer.
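The removed 'url' option maps directly onto the 'brokers' list that remains in the sample. A minimal non-deprecated [[outputs.amqp]] section would look roughly like this (a sketch; the broker address and exchange name are just the sample defaults):

[[outputs.amqp]]
  ## One or more brokers; a random one is selected per connection.
  brokers = ["amqp://localhost:5672/influxdb"]
  ## Exchange to declare and publish to.
  exchange = "telegraf"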
@ -254,14 +250,6 @@
# ## One of "transient" or "persistent".
# # delivery_mode = "transient"
#
# ## InfluxDB database added as a message header.
# ## deprecated in 1.7; use the headers option
# # database = "telegraf"
#
# ## InfluxDB retention policy added as a message header
# ## deprecated in 1.7; use the headers option
# # retention_policy = "default"
#
# ## Static headers added to each published message.
# # headers = { }
# # headers = {"database" = "telegraf", "retention_policy" = "default"}
@ -1255,12 +1243,7 @@
#
# ## Kinesis StreamName must exist prior to starting telegraf.
# streamname = "StreamName"
# ## DEPRECATED: PartitionKey as used for sharding data.
# partitionkey = "PartitionKey"
# ## DEPRECATED: If set the partitionKey will be a random UUID on every put.
# ## This allows for scaling across multiple shards in a stream.
# ## This will cause issues with ordering.
# use_random_partitionkey = false
#
# ## The partition key can be calculated using one of several methods:
# ##
# ## Use a static value for all writes:
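For reference, the removed partitionkey / use_random_partitionkey pair is replaced by the partition sub-table that the remaining comments describe. A static-key sketch (region, stream name and key are placeholders):

[[outputs.kinesis]]
  region = "us-east-1"
  streamname = "StreamName"
  [outputs.kinesis.partition]
    method = "static"
    key = "howdy"

Setting method = "random" reproduces the old use_random_partitionkey behaviour (with the same ordering caveat), while "measurement" or "tag" shard by metric name or a tag value.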
@ -2224,13 +2207,6 @@
# ## of metrics will block for a longer time, but this will be handled gracefully by the internal buffering in
# ## Telegraf.
# #immediate_flush = true
#
# ## Define a mapping, namespaced by metric prefix, from string values to numeric values
# ## deprecated in 1.9; use the enum processor plugin
# #[[outputs.wavefront.string_to_number.elasticsearch]]
# # green = 1.0
# # yellow = 0.5
# # red = 0.0
# # Generic WebSocket output writer.
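The string_to_number mapping removed from outputs.wavefront above is superseded by the enum processor plugin; an equivalent mapping there would look roughly like this (the field name "status" is only an illustration):

[[processors.enum]]
  [[processors.enum.mapping]]
    ## Field whose string values should be converted
    field = "status"
    [processors.enum.mapping.value_mappings]
      green = 1.0
      yellow = 0.5
      red = 0.0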
@ -3223,11 +3199,6 @@
# ## ActiveMQ WebConsole URL
# url = "http://127.0.0.1:8161"
#
# ## Required ActiveMQ Endpoint
# ## deprecated in 1.11; use the url option
# # server = "127.0.0.1"
# # port = 8161
#
# ## Credentials for basic HTTP authentication
# # username = "admin"
# # password = "admin"
@ -3877,9 +3848,6 @@
# ## Set to true to collect Swarm metrics (desired_replicas, running_replicas)
# gather_services = false
#
# ## Only collect metrics for these containers, collect all if empty
# container_names = []
#
# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
# source_tag = false
#
@ -3898,24 +3866,11 @@
# ## Timeout for docker list, info, and stats commands
# timeout = "5s"
#
# ## Whether to report for each container per-device blkio (8:0, 8:1...),
# ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not.
# ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'.
# ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting
# ## is honored.
# perdevice = true
#
# ## Specifies for which classes a per-device metric should be issued
# ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...)
# ## Please note that this setting has no effect if 'perdevice' is set to 'true'
# # perdevice_include = ["cpu"]
#
# ## Whether to report for each container total blkio and network stats or not.
# ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'.
# ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting
# ## is honored.
# total = false
#
# ## Specifies for which classes a total metric should be issued. Total is an aggregate of the 'perdevice' values.
# ## Possible values are 'cpu', 'blkio' and 'network'
# ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin.
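With the deprecated perdevice and total flags dropped from the sample, per-class reporting is controlled entirely by the include lists kept above; a sketch of the non-deprecated form (the class selections are illustrative):

[[inputs.docker]]
  endpoint = "unix:///var/run/docker.sock"
  perdevice_include = ["cpu"]
  total_include = ["cpu", "blkio", "network"]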
@ -4241,10 +4196,6 @@
# # Count files in a directory
# [[inputs.filecount]]
# ## Directory to gather stats about.
# ## deprecated in 1.9; use the directories option
# # directory = "/var/cache/apt/archives"
#
# ## Directories to gather stats about.
# ## This accepts standard unix glob matching rules, but with the addition of
# ## ** as a "super asterisk", e.g.:
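The removed singular directory option is covered by the directories list introduced just above; for example, reusing the old sample path:

[[inputs.filecount]]
  directories = ["/var/cache/apt/archives"]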
@ -4502,10 +4453,6 @@
# # HTTP/HTTPS request given an address a method and a timeout
# [[inputs.http_response]]
# ## Deprecated in 1.12, use 'urls'
# ## Server address (default http://localhost)
# # address = "http://localhost"
#
# ## List of urls to query.
# # urls = ["http://localhost"]
#
@ -4580,12 +4527,6 @@
# ## NOTE: This plugin only reads numerical measurements; strings and booleans
# ## will be ignored.
#
# ## Name for the service being polled. Will be appended to the name of the
# ## measurement e.g. httpjson_webserver_stats
# ##
# ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.
# name = "webserver_stats"
#
# ## URL of each server in the service's cluster
# servers = [
# "http://localhost:9999/stats/",
@ -6400,8 +6341,6 @@
# [[inputs.rabbitmq]]
# ## Management Plugin url. (default: http://localhost:15672)
# # url = "http://localhost:15672"
# ## Tag added to rabbitmq_overview series; deprecated: use tags
# # name = "rmq-server-1"
# ## Credentials
# # username = "guest"
# # password = "guest"
@ -6427,10 +6366,6 @@
# ## specified, metrics for all nodes are gathered.
# # nodes = ["rabbit@node1", "rabbit@node2"]
#
# ## A list of queues to gather as the rabbitmq_queue measurement. If not
# ## specified, metrics for all queues are gathered.
# # queues = ["telegraf"]
#
# ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not
# ## specified, metrics for all exchanges are gathered.
# # exchanges = ["telegraf"]
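The queues list removed above was deprecated in favour of the queue name filters; a sketch of the replacement, assuming the queue_name_include option documented for this plugin (queue_name_exclude is its counterpart):

[[inputs.rabbitmq]]
  # url = "http://localhost:15672"
  ## Gather only these queues (all queues if empty); globs are accepted.
  queue_name_include = ["telegraf"]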
@ -7394,10 +7329,6 @@
# # AMQP consumer plugin
# [[inputs.amqp_consumer]]
# ## Broker to consume from.
# ## deprecated in 1.7; use the brokers option
# # url = "amqp://localhost:5672/influxdb"
#
# ## Brokers to consume from. If multiple brokers are specified a random broker
# ## will be selected anytime a connection is established. This can be
# ## helpful for load balancing when not using a dedicated load balancer.
@ -7986,6 +7917,7 @@
# # Accept metrics over InfluxDB 1.x HTTP API
# [[inputs.http_listener]]
# ## DEPRECATED: The 'http_listener' plugin is deprecated in version 1.9.0, has been renamed to 'influxdb_listener', use 'inputs.influxdb_listener' or 'inputs.http_listener_v2' instead.
# ## Address and port to host InfluxDB listener on
# service_address = ":8186"
#
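Given the deprecation notice added above, the like-for-like replacement is the renamed plugin; a minimal sketch:

[[inputs.influxdb_listener]]
  ## Address and port to host the InfluxDB listener on
  service_address = ":8186"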
@ -8027,10 +7959,6 @@
# ## Address and port to host HTTP listener on
# service_address = ":8080"
#
# ## Path to listen to.
# ## This option is deprecated and only available for backward-compatibility. Please use paths instead.
# # path = ""
#
# ## Paths to listen to.
# # paths = ["/telegraf"]
#
@ -8683,9 +8611,6 @@
# # Read NSQ topic for metrics.
# [[inputs.nsq_consumer]]
# ## The 'server' option still works but is deprecated; it is simply prepended to the nsqd array.
# # server = "localhost:4150"
#
# ## An array representing the NSQD TCP HTTP Endpoints
# nsqd = ["localhost:4150"]
#
@ -9198,7 +9123,7 @@
# ## valid methods: "connection_string", "AAD"
# # auth_method = "connection_string"
#
# ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2
# ## "database_type" enables a specific set of queries depending on the database type.
# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type.
# ## Possible values for database_type are - "SQLServer" or "AzureSQLDB" or "AzureSQLManagedInstance" or "AzureSQLPool"
#
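A sketch of the database_type-based configuration that replaces the removed query_version / azuredb flags (the connection string is a placeholder):

[[inputs.sqlserver]]
  servers = ["Server=localhost;Port=1433;User Id=telegraf;Password=secret;app name=telegraf;log=1;"]
  database_type = "SQLServer"
  ## Optionally narrow the enabled query sets:
  # include_query = []
  # exclude_query = ["SQLServerSchedulers"]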
@ -9225,19 +9150,6 @@
# ## Queries enabled by default for database_type = "AzureSQLPool" are -
# ## AzureSQLPoolResourceStats, AzureSQLPoolResourceGovernance, AzureSQLPoolDatabaseIO, AzureSQLPoolWaitStats,
# ## AzureSQLPoolMemoryClerks, AzureSQLPoolPerformanceCounters, AzureSQLPoolSchedulers
#
# ## The following are old config settings.
# ## You may use them only if you are using the earlier flavor of queries; however, it is recommended
# ## to use the new mechanism of identifying the database_type and thereby use its corresponding queries.
#
# ## Optional parameter, setting this to 2 will use a new version
# ## of the collection queries that break compatibility with the original
# ## dashboards.
# ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB
# # query_version = 2
#
# ## If you are using AzureDB, setting this to true will gather resource utilization metrics
# # azuredb = false
# # Statsd UDP/TCP Server
@ -9305,6 +9217,14 @@
#
# ## Max duration (TTL) for each metric to stay cached/reported without being updated.
# #max_ttl = "1000h"
#
# ## Sanitize name method
# ## By default, telegraf will pass names directly as they are received.
# ## However, upstream statsd now sanitizes names, and this behaviour can be
# ## enabled by using the "upstream" method option. This option replaces
# ## white space with '_', replaces '/' with '-', and removes characters not
# ## matching 'a-zA-Z_\-0-9\.;='.
# #sanitize_name_method = ""
# # Suricata stats and alerts plugin
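To opt in to the upstream-style sanitization that the new sanitize_name_method option above describes, it would be set like this (a sketch; leaving it empty keeps the default pass-through behaviour):

[[inputs.statsd]]
  service_address = ":8125"
  sanitize_name_method = "upstream"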

etc/telegraf_windows.conf

@ -202,10 +202,6 @@
# # Publishes metrics to an AMQP broker
# [[outputs.amqp]]
# ## Broker to publish to.
# ## deprecated in 1.7; use the brokers option
# # url = "amqp://localhost:5672/influxdb"
#
# ## Brokers to publish to. If multiple brokers are specified a random broker
# ## will be selected anytime a connection is established. This can be
# ## helpful for load balancing when not using a dedicated load balancer.
@ -254,14 +250,6 @@
# ## One of "transient" or "persistent".
# # delivery_mode = "transient"
#
# ## InfluxDB database added as a message header.
# ## deprecated in 1.7; use the headers option
# # database = "telegraf"
#
# ## InfluxDB retention policy added as a message header
# ## deprecated in 1.7; use the headers option
# # retention_policy = "default"
#
# ## Static headers added to each published message.
# # headers = { }
# # headers = {"database" = "telegraf", "retention_policy" = "default"}
@ -1255,12 +1243,7 @@
#
# ## Kinesis StreamName must exist prior to starting telegraf.
# streamname = "StreamName"
# ## DEPRECATED: PartitionKey as used for sharding data.
# partitionkey = "PartitionKey"
# ## DEPRECATED: If set the partitionKey will be a random UUID on every put.
# ## This allows for scaling across multiple shards in a stream.
# ## This will cause issues with ordering.
# use_random_partitionkey = false
#
# ## The partition key can be calculated using one of several methods:
# ##
# ## Use a static value for all writes:
@ -2224,13 +2207,6 @@
# ## of metrics will block for a longer time, but this will be handled gracefully by the internal buffering in
# ## Telegraf.
# #immediate_flush = true
#
# ## Define a mapping, namespaced by metric prefix, from string values to numeric values
# ## deprecated in 1.9; use the enum processor plugin
# #[[outputs.wavefront.string_to_number.elasticsearch]]
# # green = 1.0
# # yellow = 0.5
# # red = 0.0
# # Generic WebSocket output writer.
@ -3223,11 +3199,6 @@
# ## ActiveMQ WebConsole URL
# url = "http://127.0.0.1:8161"
#
# ## Required ActiveMQ Endpoint
# ## deprecated in 1.11; use the url option
# # server = "127.0.0.1"
# # port = 8161
#
# ## Credentials for basic HTTP authentication
# # username = "admin"
# # password = "admin"
@ -3849,9 +3820,6 @@
# ## Set to true to collect Swarm metrics (desired_replicas, running_replicas)
# gather_services = false
#
# ## Only collect metrics for these containers, collect all if empty
# container_names = []
#
# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
# source_tag = false
#
@ -3870,24 +3838,11 @@
# ## Timeout for docker list, info, and stats commands
# timeout = "5s"
#
# ## Whether to report for each container per-device blkio (8:0, 8:1...),
# ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not.
# ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'.
# ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting
# ## is honored.
# perdevice = true
#
# ## Specifies for which classes a per-device metric should be issued
# ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...)
# ## Please note that this setting has no effect if 'perdevice' is set to 'true'
# # perdevice_include = ["cpu"]
#
# ## Whether to report for each container total blkio and network stats or not.
# ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'.
# ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting
# ## is honored.
# total = false
#
# ## Specifies for which classes a total metric should be issued. Total is an aggregate of the 'perdevice' values.
# ## Possible values are 'cpu', 'blkio' and 'network'
# ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin.
@ -4179,10 +4134,6 @@
# # Count files in a directory
# [[inputs.filecount]]
# ## Directory to gather stats about.
# ## deprecated in 1.9; use the directories option
# # directory = "/var/cache/apt/archives"
#
# ## Directories to gather stats about.
# ## This accepts standard unix glob matching rules, but with the addition of
# ## ** as a "super asterisk", e.g.:
@ -4440,10 +4391,6 @@
# # HTTP/HTTPS request given an address a method and a timeout
# [[inputs.http_response]]
# ## Deprecated in 1.12, use 'urls'
# ## Server address (default http://localhost)
# # address = "http://localhost"
#
# ## List of urls to query.
# # urls = ["http://localhost"]
#
@ -4518,12 +4465,6 @@
# ## NOTE: This plugin only reads numerical measurements; strings and booleans
# ## will be ignored.
#
# ## Name for the service being polled. Will be appended to the name of the
# ## measurement e.g. httpjson_webserver_stats
# ##
# ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.
# name = "webserver_stats"
#
# ## URL of each server in the service's cluster
# servers = [
# "http://localhost:9999/stats/",
@ -6267,8 +6208,6 @@
# [[inputs.rabbitmq]]
# ## Management Plugin url. (default: http://localhost:15672)
# # url = "http://localhost:15672"
# ## Tag added to rabbitmq_overview series; deprecated: use tags
# # name = "rmq-server-1"
# ## Credentials
# # username = "guest"
# # password = "guest"
@ -6294,10 +6233,6 @@
# ## specified, metrics for all nodes are gathered.
# # nodes = ["rabbit@node1", "rabbit@node2"]
#
# ## A list of queues to gather as the rabbitmq_queue measurement. If not
# ## specified, metrics for all queues are gathered.
# # queues = ["telegraf"]
#
# ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not
# ## specified, metrics for all exchanges are gathered.
# # exchanges = ["telegraf"]
@ -7356,10 +7291,6 @@
# # AMQP consumer plugin
# [[inputs.amqp_consumer]]
# ## Broker to consume from.
# ## deprecated in 1.7; use the brokers option
# # url = "amqp://localhost:5672/influxdb"
#
# ## Brokers to consume from. If multiple brokers are specified a random broker
# ## will be selected anytime a connection is established. This can be
# ## helpful for load balancing when not using a dedicated load balancer.
@ -7948,6 +7879,7 @@
# # Accept metrics over InfluxDB 1.x HTTP API
# [[inputs.http_listener]]
# ## DEPRECATED: The 'http_listener' plugin is deprecated in version 1.9.0, has been renamed to 'influxdb_listener', use 'inputs.influxdb_listener' or 'inputs.http_listener_v2' instead.
# ## Address and port to host InfluxDB listener on
# service_address = ":8186"
#
@ -7989,10 +7921,6 @@
# ## Address and port to host HTTP listener on
# service_address = ":8080"
#
# ## Path to listen to.
# ## This option is deprecated and only available for backward-compatibility. Please use paths instead.
# # path = ""
#
# ## Paths to listen to.
# # paths = ["/telegraf"]
#
@ -8566,9 +8494,6 @@
# # Read NSQ topic for metrics.
# [[inputs.nsq_consumer]]
# ## The 'server' option still works but is deprecated; it is simply prepended to the nsqd array.
# # server = "localhost:4150"
#
# ## An array representing the NSQD TCP HTTP Endpoints
# nsqd = ["localhost:4150"]
#
@ -9074,7 +8999,7 @@
# ## valid methods: "connection_string", "AAD"
# # auth_method = "connection_string"
#
# ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2
# ## "database_type" enables a specific set of queries depending on the database type.
# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type.
# ## Possible values for database_type are - "SQLServer" or "AzureSQLDB" or "AzureSQLManagedInstance" or "AzureSQLPool"
#
@ -9101,19 +9026,6 @@
# ## Queries enabled by default for database_type = "AzureSQLPool" are -
# ## AzureSQLPoolResourceStats, AzureSQLPoolResourceGovernance, AzureSQLPoolDatabaseIO, AzureSQLPoolWaitStats,
# ## AzureSQLPoolMemoryClerks, AzureSQLPoolPerformanceCounters, AzureSQLPoolSchedulers
#
# ## The following are old config settings.
# ## You may use them only if you are using the earlier flavor of queries; however, it is recommended
# ## to use the new mechanism of identifying the database_type and thereby use its corresponding queries.
#
# ## Optional parameter, setting this to 2 will use a new version
# ## of the collection queries that break compatibility with the original
# ## dashboards.
# ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB
# # query_version = 2
#
# ## If you are using AzureDB, setting this to true will gather resource utilization metrics
# # azuredb = false
# # Statsd UDP/TCP Server
@ -9181,6 +9093,14 @@
#
# ## Max duration (TTL) for each metric to stay cached/reported without being updated.
# #max_ttl = "1000h"
#
# ## Sanitize name method
# ## By default, telegraf will pass names directly as they are received.
# ## However, upstream statsd now sanitizes names, and this behaviour can be
# ## enabled by using the "upstream" method option. This option replaces
# ## white space with '_', replaces '/' with '-', and removes characters not
# ## matching 'a-zA-Z_\-0-9\.;='.
# #sanitize_name_method = ""
# # Suricata stats and alerts plugin