diff --git a/etc/telegraf.conf b/etc/telegraf.conf index bb52d620e..fb6e72b30 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -211,6 +211,10 @@ # # Publishes metrics to an AMQP broker # [[outputs.amqp]] +# ## Broker to publish to. +# ## deprecated in 1.7; use the brokers option +# # url = "amqp://localhost:5672/influxdb" +# # ## Brokers to publish to. If multiple brokers are specified a random broker # ## will be selected anytime a connection is established. This can be # ## helpful for load balancing when not using a dedicated load balancer. @@ -259,6 +263,14 @@ # ## One of "transient" or "persistent". # # delivery_mode = "transient" # +# ## InfluxDB database added as a message header. +# ## deprecated in 1.7; use the headers option +# # database = "telegraf" +# +# ## InfluxDB retention policy added as a message header +# ## deprecated in 1.7; use the headers option +# # retention_policy = "default" +# # ## Static headers added to each published message. # # headers = { } # # headers = {"database" = "telegraf", "retention_policy" = "default"} @@ -319,8 +331,8 @@ # # Sends metrics to Azure Data Explorer # [[outputs.azure_data_explorer]] -# ## Azure Data Explorer cluster endpoint -# ## ex: endpoint_url = "https://clustername.australiasoutheast.kusto.windows.net" +# ## The URI property of the Azure Data Explorer resource on Azure +# ## ex: endpoint_url = https://myadxresource.australiasoutheast.kusto.windows.net # endpoint_url = "" # # ## The Azure Data Explorer database that the metrics will be ingested into. @@ -369,8 +381,8 @@ # ## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/" # # resource_id = "" # -# ## Optionally, if in Azure US Government, China or other sovereign -# ## cloud environment, set appropriate REST endpoint for receiving +# ## Optionally, if in Azure US Government, China, or other sovereign +# ## cloud environment, set the appropriate REST endpoint for receiving # ## metrics. (Note: region may be unused in this context) # # endpoint_url = "https://monitoring.core.usgovcloudapi.net" @@ -490,62 +502,62 @@ # # Configuration for AWS CloudWatchLogs output. # [[outputs.cloudwatch_logs]] -# ## The region is the Amazon region that you wish to connect to. -# ## Examples include but are not limited to: -# ## - us-west-1 -# ## - us-west-2 -# ## - us-east-1 -# ## - ap-southeast-1 -# ## - ap-southeast-2 -# ## ... -# region = "us-east-1" +# ## The region is the Amazon region that you wish to connect to. +# ## Examples include but are not limited to: +# ## - us-west-1 +# ## - us-west-2 +# ## - us-east-1 +# ## - ap-southeast-1 +# ## - ap-southeast-2 +# ## ... 
+# region = "us-east-1" # -# ## Amazon Credentials -# ## Credentials are loaded in the following order -# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified -# ## 2) Assumed credentials via STS if role_arn is specified -# ## 3) explicit credentials from 'access_key' and 'secret_key' -# ## 4) shared profile from 'profile' -# ## 5) environment variables -# ## 6) shared credentials file -# ## 7) EC2 Instance Profile -# #access_key = "" -# #secret_key = "" -# #token = "" -# #role_arn = "" -# #web_identity_token_file = "" -# #role_session_name = "" -# #profile = "" -# #shared_credential_file = "" +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" # -# ## Endpoint to make request against, the correct endpoint is automatically -# ## determined and this option should only be set if you wish to override the -# ## default. -# ## ex: endpoint_url = "http://localhost:8000" -# # endpoint_url = "" +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" # -# ## Cloud watch log group. Must be created in AWS cloudwatch logs upfront! -# ## For example, you can specify the name of the k8s cluster here to group logs from all cluster in oine place -# log_group = "my-group-name" +# ## Cloud watch log group. Must be created in AWS cloudwatch logs upfront! +# ## For example, you can specify the name of the k8s cluster here to group logs from all cluster in oine place +# log_group = "my-group-name" # -# ## Log stream in log group -# ## Either log group name or reference to metric attribute, from which it can be parsed: -# ## tag: or field:. If log stream is not exist, it will be created. -# ## Since AWS is not automatically delete logs streams with expired logs entries (i.e. empty log stream) -# ## you need to put in place appropriate house-keeping (https://forums.aws.amazon.com/thread.jspa?threadID=178855) -# log_stream = "tag:location" +# ## Log stream in log group +# ## Either log group name or reference to metric attribute, from which it can be parsed: +# ## tag: or field:. If log stream is not exist, it will be created. +# ## Since AWS is not automatically delete logs streams with expired logs entries (i.e. empty log stream) +# ## you need to put in place appropriate house-keeping (https://forums.aws.amazon.com/thread.jspa?threadID=178855) +# log_stream = "tag:location" # -# ## Source of log data - metric name -# ## specify the name of the metric, from which the log data should be retrieved. -# ## I.e., if you are using docker_log plugin to stream logs from container, then -# ## specify log_data_metric_name = "docker_log" -# log_data_metric_name = "docker_log" +# ## Source of log data - metric name +# ## specify the name of the metric, from which the log data should be retrieved. 
+# ## I.e., if you are using docker_log plugin to stream logs from container, then +# ## specify log_data_metric_name = "docker_log" +# log_data_metric_name = "docker_log" # -# ## Specify from which metric attribute the log data should be retrieved: -# ## tag: or field:. -# ## I.e., if you are using docker_log plugin to stream logs from container, then -# ## specify log_data_source = "field:message" -# log_data_source = "field:message" +# ## Specify from which metric attribute the log data should be retrieved: +# ## tag: or field:. +# ## I.e., if you are using docker_log plugin to stream logs from container, then +# ## specify log_data_source = "field:message" +# log_data_source = "field:message" # # Configuration for CrateDB to send metrics to. @@ -611,11 +623,9 @@ # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" -# # ## Optional flag for ignoring tls certificate check # # insecure_skip_verify = false # -# # ## Connection timeout, defaults to "5s" if not set. # timeout = "5s" # @@ -631,18 +641,21 @@ # [[outputs.elasticsearch]] # ## The full HTTP endpoint URL for your Elasticsearch instance # ## Multiple urls can be specified as part of the same cluster, -# ## this means that only ONE of the urls will be written to each interval. +# ## this means that only ONE of the urls will be written to each interval # urls = [ "http://node1.es.example.com:9200" ] # required. # ## Elasticsearch client timeout, defaults to "5s" if not set. # timeout = "5s" # ## Set to true to ask Elasticsearch a list of all cluster nodes, -# ## thus it is not necessary to list all nodes in the urls config option. +# ## thus it is not necessary to list all nodes in the urls config option # enable_sniffer = false # ## Set to true to enable gzip compression # enable_gzip = false # ## Set the interval to check if the Elasticsearch nodes are available # ## Setting to "0s" will disable the health check (not recommended in production) # health_check_interval = "10s" +# ## Set the timeout for periodic health checks. +# # health_check_timeout = "1s" +# ## HTTP basic authentication details. # ## HTTP basic authentication details # # username = "telegraf" # # password = "mypassword" @@ -714,6 +727,12 @@ # ## Client timeout (defaults to 30s) # # timeout = "30s" # +# ## Partition key +# ## Metric tag or field name to use for the event partition key. The value of +# ## this tag or field is set as the key for events if it exists. If both, tag +# ## and field, exist the tag is preferred. +# # partition_key = "" +# # ## Data format to output. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -726,6 +745,12 @@ # ## Command to ingest metrics via stdin. # command = ["tee", "-a", "/dev/null"] # +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. "KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] +# # ## Timeout for command to complete. # # timeout = "5s" # @@ -738,9 +763,16 @@ # # Run executable as long-running output plugin # [[outputs.execd]] -# ## Program to run as daemon +# ## One program to run as daemon. +# ## NOTE: process and each argument should each be their own string # command = ["my-telegraf-output", "--some-flag", "value"] # +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. 
"KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] +# # ## Delay before the process is restarted after an unexpected termination # restart_delay = "10s" # @@ -758,12 +790,12 @@ # # ## Use batch serialization format instead of line based delimiting. The # ## batch format allows for the production of non line based output formats and -# ## may more efficiently encode metric groups. +# ## may more efficiently encode and write metrics. # # use_batch_format = false # # ## The file will be rotated after the time interval specified. When set # ## to 0 no time based rotation is performed. -# # rotation_interval = "0d" +# # rotation_interval = "0h" # # ## The logfile will be rotated when it becomes larger than the specified # ## size. When set to 0 no size based rotation is performed. @@ -783,7 +815,7 @@ # # Configuration for Graphite server to send metrics to # [[outputs.graphite]] # ## TCP endpoint for your graphite instance. -# ## If multiple endpoints are configured, output will be load balanced. +# ## If multiple endpoints are configured, the output will be load balanced. # ## Only one of the endpoints will be written to with each iteration. # servers = ["localhost:2003"] # ## Prefix metrics name @@ -936,6 +968,9 @@ # # token_url = "https://indentityprovider/oauth2/v1/token" # # scopes = ["urn:opc:idm:__myscopes__"] # +# ## Goole API Auth +# # google_application_credentials = "/etc/telegraf/example_secret.json" +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" @@ -973,6 +1008,15 @@ # # # Should be set manually to "application/json" for json data_format # # Content-Type = "text/plain; charset=utf-8" # +# ## MaxIdleConns controls the maximum number of idle (keep-alive) +# ## connections across all hosts. Zero means no limit. +# # max_idle_conn = 0 +# +# ## MaxIdleConnsPerHost, if non-zero, controls the maximum idle +# ## (keep-alive) connections to keep per-host. If zero, +# ## DefaultMaxIdleConnsPerHost is used(2). +# # max_idle_conn_per_host = 2 +# # ## Idle (keep-alive) connection timeout. # ## Maximum amount of time before idle connection is closed. # ## Zero means no limit. @@ -998,9 +1042,12 @@ # #role_session_name = "" # #profile = "" # #shared_credential_file = "" +# +# ## Optional list of statuscodes (<200 or >300) upon which requests should not be retried +# # non_retryable_statuscodes = [409, 413] -# # Configuration for sending metrics to InfluxDB +# # Configuration for sending metrics to InfluxDB 2.0 # [[outputs.influxdb_v2]] # ## The URLs of the InfluxDB cluster nodes. # ## @@ -1012,7 +1059,7 @@ # ## Token for authentication. # token = "" # -# ## Organization is the name of the organization you wish to write to; must exist. +# ## Organization is the name of the organization you wish to write to. # organization = "" # # ## Destination bucket to write into. 
@@ -1056,7 +1103,7 @@ # # Configuration for sending metrics to an Instrumental project # [[outputs.instrumental]] # ## Project API Token (required) -# api_token = "API Token" # required +# api_token = "API Token" # required # ## Prefix the metrics with a given name # prefix = "" # ## Stats output template (Graphite formatting) @@ -1064,7 +1111,7 @@ # template = "host.tags.measurement.field" # ## Timeout in seconds to connect # timeout = "2s" -# ## Display Communication to Instrumental +# ## Debug true - Print communication to Instrumental # debug = false @@ -1086,7 +1133,7 @@ # # client_id = "Telegraf" # # ## Set the minimal supported Kafka version. Setting this enables the use of new -# ## Kafka features and APIs. Of particular interest, lz4 compression +# ## Kafka features and APIs. Of particular interested, lz4 compression # ## requires at least version 0.10.0.0. # ## ex: version = "1.1.0" # # version = "" @@ -1144,7 +1191,7 @@ # ## 2 : Snappy # ## 3 : LZ4 # ## 4 : ZSTD -# # compression_codec = 0 +# # compression_codec = 0 # # ## Idempotent Writes # ## If enabled, exactly one copy of each message is written. @@ -1302,27 +1349,34 @@ # ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite # ## This template is used in librato's source (not metric's name) # template = "host" -# -# # Send aggregate metrics to Logz.io +# # A plugin that can send metrics over HTTPs to Logz.io # [[outputs.logzio]] -# ## Connection timeout, defaults to "5s" if not set. -# timeout = "5s" +# ## Set to true if Logz.io sender checks the disk space before adding metrics to the disk queue. +# # check_disk_space = true # -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" +# ## The percent of used file system space at which the sender will stop queueing. +# ## When we will reach that percentage, the file system in which the queue is stored will drop +# ## all new logs until the percentage of used space drops below that threshold. +# # disk_threshold = 98 +# +# ## How often Logz.io sender should drain the queue. +# ## Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". +# # drain_duration = "3s" +# +# ## Where Logz.io sender should store the queue +# ## queue_dir = Sprintf("%s%s%s%s%d", os.TempDir(), string(os.PathSeparator), +# ## "logzio-buffer", string(os.PathSeparator), time.Now().UnixNano()) # # ## Logz.io account token -# token = "your logz.io token" # required +# token = "your Logz.io token" # required # # ## Use your listener URL for your Logz.io account region. # # url = "https://listener.logz.io:8071" -# # Send logs to Loki +# # A plugin that can transmit logs to Loki # [[outputs.loki]] # ## The domain of Loki # domain = "https://loki.domain.tld" @@ -1349,7 +1403,7 @@ # # tls_key = "/etc/telegraf/key.pem" -# # Sends metrics to MongoDB +# # A plugin that can transmit logs to mongodb # [[outputs.mongodb]] # # connection string examples for mongodb # dsn = "mongodb://localhost:27017" @@ -1587,15 +1641,12 @@ # # Configuration for the Prometheus client to spawn # [[outputs.prometheus_client]] -# ## Address to listen on +# ## Address to listen on. # listen = ":9273" # -# ## Metric version controls the mapping from Telegraf metrics into -# ## Prometheus format. When using the prometheus input, use the same value in -# ## both plugins to ensure metrics are round-tripped without modification. 
-# ## -# ## example: metric_version = 1; -# ## metric_version = 2; recommended version +# ## Metric version controls the mapping from Prometheus metrics into Telegraf metrics. +# ## See "Metric Format Configuration" in plugins/inputs/prometheus/README.md for details. +# ## Valid options: 1, 2 # # metric_version = 1 # # ## Use HTTP Basic Authentication. @@ -1632,7 +1683,7 @@ # # export_timestamp = false -# # Configuration for the Riemann server to send metrics to +# # Configuration for Riemann to send metrics to # [[outputs.riemann]] # ## The full TCP or UDP URL of the Riemann server # url = "tcp://localhost:5555" @@ -1666,9 +1717,9 @@ # # timeout = "5s" +# ## DEPRECATED: The 'riemann_legacy' plugin is deprecated in version 1.3.0, use 'outputs.riemann' instead (see https://github.com/influxdata/telegraf/issues/1878). # # Configuration for the Riemann server to send metrics to # [[outputs.riemann_legacy]] -# ## DEPRECATED: The 'riemann_legacy' plugin is deprecated in version 1.3.0, use 'outputs.riemann' instead (see https://github.com/influxdata/telegraf/issues/1878). # ## URL of server # url = "localhost:5555" # ## transport protocol to use either tcp or udp @@ -1744,7 +1795,7 @@ # ## The check name is the name to give the Sensu check associated with the event # ## created. This maps to check.metatadata.name in the event. # [outputs.sensu.check] -# name = "telegraf" +# name = "telegraf" # # ## Entity specification # ## Configure the entity name and namespace, if necessary. This will be part of @@ -1770,21 +1821,21 @@ # # Send metrics and events to SignalFx # [[outputs.signalfx]] -# ## SignalFx Org Access Token -# access_token = "my-secret-token" +# ## SignalFx Org Access Token +# access_token = "my-secret-token" # -# ## The SignalFx realm that your organization resides in -# signalfx_realm = "us9" # Required if ingest_url is not set +# ## The SignalFx realm that your organization resides in +# signalfx_realm = "us9" # Required if ingest_url is not set # -# ## You can optionally provide a custom ingest url instead of the -# ## signalfx_realm option above if you are using a gateway or proxy -# ## instance. This option takes precident over signalfx_realm. -# ingest_url = "https://my-custom-ingest/" +# ## You can optionally provide a custom ingest url instead of the +# ## signalfx_realm option above if you are using a gateway or proxy +# ## instance. This option takes precident over signalfx_realm. +# ingest_url = "https://my-custom-ingest/" # -# ## Event typed metrics are omitted by default, -# ## If you require an event typed metric you must specify the -# ## metric name in the following list. -# included_event_names = ["plugin.metric_name"] +# ## Event typed metrics are omitted by default, +# ## If you require an event typed metric you must specify the +# ## metric name in the following list. +# included_event_names = ["plugin.metric_name"] # # Generic socket writer capable of handling multiple socket types. @@ -1814,19 +1865,19 @@ # ## Defaults to the OS configuration. # # keep_alive_period = "5m" # -# ## Content encoding for packet-based connections (i.e. UDP, unixgram). -# ## Can be set to "gzip" or to "identity" to apply no encoding. +# ## Content encoding for message payloads, can be set to "gzip" or to +# ## "identity" to apply no encoding. # ## # # content_encoding = "identity" # # ## Data format to generate. 
# ## Each data format has its own unique set of configuration options, read # ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md # # data_format = "influx" -# # Send metrics to SQL Database +# # Save metrics to an SQL Database # [[outputs.sql]] # ## Database driver # ## Valid options: mssql (Microsoft SQL Server), mysql (MySQL), pgx (Postgres), @@ -1899,7 +1950,7 @@ # # location = "eu-north0" -# # A plugin that can transmit metrics to Sumo Logic HTTP Source +# # A plugin that can send metrics to Sumo Logic HTTP metric collector. # [[outputs.sumologic]] # ## Unique URL generated for your HTTP Metrics Source. # ## This is the address to send metrics to. @@ -2030,13 +2081,13 @@ # # default_appname = "Telegraf" -# # Configuration for Amazon Timestream output. +# # Configuration for sending metrics to Amazon Timestream. # [[outputs.timestream]] # ## Amazon Region # region = "us-east-1" # # ## Amazon Credentials -# ## Credentials are loaded in the following order: +# ## Credentials are loaded in the following order # ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified # ## 2) Assumed credentials via STS if role_arn is specified # ## 3) explicit credentials from 'access_key' and 'secret_key' @@ -2180,7 +2231,7 @@ # # Configuration for Wavefront server to send metrics to # [[outputs.wavefront]] # ## Url for Wavefront Direct Ingestion. For Wavefront Proxy Ingestion, see -# ## the 'host' and 'port' optioins below. +# ## the 'host' and 'port' options below. # url = "https://metrics.wavefront.com" # # ## Authentication Token for Wavefront. Only required if using Direct Ingestion @@ -2230,10 +2281,10 @@ # #immediate_flush = true -# # Generic WebSocket output writer. +# # A plugin that can transmit metrics over WebSocket. # [[outputs.websocket]] # ## URL is the address to send metrics to. Make sure ws or wss scheme is used. -# url = "ws://127.0.0.1:8080/telegraf" +# url = "ws://127.0.0.1:3000/telegraf" # # ## Timeouts (make sure read_timeout is larger than server ping interval or set to zero). # # connect_timeout = "30s" @@ -2333,7 +2384,7 @@ # max_parallel_calls = 10 -# # Clone metrics and apply modifications. +# # Apply metric modifications using override semantics. # [[processors.clone]] # ## All modifications on inputs and aggregators can be overridden: # # name_override = "new_name" @@ -2377,28 +2428,28 @@ # # Dates measurements, tags, and fields that pass through this filter. # [[processors.date]] -# ## New tag to create -# tag_key = "month" +# ## New tag to create +# tag_key = "month" # -# ## New field to create (cannot set both field_key and tag_key) -# # field_key = "month" +# ## New field to create (cannot set both field_key and tag_key) +# # field_key = "month" # -# ## Date format string, must be a representation of the Go "reference time" -# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006". -# date_format = "Jan" +# ## Date format string, must be a representation of the Go "reference time" +# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006". +# date_format = "Jan" # -# ## If destination is a field, date format can also be one of -# ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field. -# # date_format = "unix" +# ## If destination is a field, date format can also be one of +# ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field. 
+# # date_format = "unix" # -# ## Offset duration added to the date string when writing the new tag. -# # date_offset = "0s" +# ## Offset duration added to the date string when writing the new tag. +# # date_offset = "0s" # -# ## Timezone to use when creating the tag or field using a reference time -# ## string. This can be set to one of "UTC", "Local", or to a location name -# ## in the IANA Time Zone database. -# ## example: timezone = "America/Los_Angeles" -# # timezone = "UTC" +# ## Timezone to use when creating the tag or field using a reference time +# ## string. This can be set to one of "UTC", "Local", or to a location name +# ## in the IANA Time Zone database. +# ## example: timezone = "America/Los_Angeles" +# # timezone = "UTC" # # Filter metrics with repeating field values @@ -2407,7 +2458,7 @@ # dedup_interval = "600s" -# # Defaults sets default value(s) for specified fields that are not set on incoming metrics. +# ## Set default fields on your metric(s) when they are nil or empty # [[processors.defaults]] # ## Ensures a set of fields always exists on your metric(s) with their # ## respective default value. @@ -2418,10 +2469,10 @@ # ## or it is not nil but its value is an empty string or is a string # ## of one or more spaces. # ## = -# # [processors.defaults.fields] -# # field_1 = "bar" -# # time_idle = 0 -# # is_error = true +# [processors.defaults.fields] +# field_1 = "bar" +# time_idle = 0 +# is_error = true # # Map enum values according to given table. @@ -2438,8 +2489,8 @@ # dest = "status_code" # # ## Default value to be used for all values not contained in the mapping -# ## table. When unset, the unmodified value for the field will be used if no -# ## match is found. +# ## table. When unset and no match is found, the original field will remain +# ## unmodified and the destination tag or field will not be created. # # default = 0 # # ## Table of mappings @@ -2451,12 +2502,19 @@ # # Run executable as long-running processor plugin # [[processors.execd]] -# ## Program to run as daemon -# ## eg: command = ["/path/to/your_program", "arg1", "arg2"] -# command = ["cat"] +# ## One program to run as daemon. +# ## NOTE: process and each argument should each be their own string +# ## eg: command = ["/path/to/your_program", "arg1", "arg2"] +# command = ["cat"] +# +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. "KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] # # ## Delay before the process is restarted after an unexpected termination -# restart_delay = "10s" +# # restart_delay = "10s" # # Performs file path manipulations on tags and fields @@ -2553,25 +2611,25 @@ # # Adds noise to numerical fields # [[processors.noise]] -# ## Specified the type of the random distribution. -# ## Can be "laplacian", "gaussian" or "uniform". -# # type = "laplacian +# ## Specified the type of the random distribution. +# ## Can be "laplacian", "gaussian" or "uniform". +# # type = "laplacian # -# ## Center of the distribution. -# ## Only used for Laplacian and Gaussian distributions. -# # mu = 0.0 +# ## Center of the distribution. +# ## Only used for Laplacian and Gaussian distributions. 
+# # mu = 0.0 # -# ## Scale parameter for the Laplacian or Gaussian distribution -# # scale = 1.0 +# ## Scale parameter for the Laplacian or Gaussian distribution +# # scale = 1.0 # -# ## Upper and lower bound of the Uniform distribution -# # min = -1.0 -# # max = 1.0 +# ## Upper and lower bound of the Uniform distribution +# # min = -1.0 +# # max = 1.0 # -# ## Apply the noise only to numeric fields matching the filter criteria below. -# ## Excludes takes precedence over includes. -# # include_fields = [] -# # exclude_fields = [] +# ## Apply the noise only to numeric fields matching the filter criteria below. +# ## Excludes takes precedence over includes. +# # include_fields = [] +# # exclude_fields = [] # # Apply metric modifications using override semantics. @@ -2589,7 +2647,7 @@ # # Parse a value in a specified field/tag(s) and add the result in a new metric # [[processors.parser]] # ## The name of the fields whose value will be parsed. -# parse_fields = [] +# parse_fields = ["message"] # # ## If true, incoming metrics are not emitted. # drop_original = false @@ -2615,7 +2673,6 @@ # # Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file # [[processors.port_name]] -# [[processors.port_name]] # ## Name of tag holding the port number # # tag = "port" # ## Or name of the field holding the port number @@ -2640,48 +2697,50 @@ # # Transforms tag and field values as well as measurement, tag and field names with regex pattern # [[processors.regex]] -# ## Tag and field conversions defined in a separate sub-tables -# # [[processors.regex.tags]] -# # ## Tag to change -# # key = "resp_code" -# # ## Regular expression to match on a tag value -# # pattern = "^(\\d)\\d\\d$" -# # ## Matches of the pattern will be replaced with this string. Use ${1} -# # ## notation to use the text of the first submatch. -# # replacement = "${1}xx" +# namepass = ["nginx_requests"] # -# # [[processors.regex.fields]] -# # ## Field to change -# # key = "request" -# # ## All the power of the Go regular expressions available here -# # ## For example, named subgroups -# # pattern = "^/api(?P/[\\w/]+)\\S*" -# # replacement = "${method}" -# # ## If result_key is present, a new field will be created -# # ## instead of changing existing field -# # result_key = "method" +# # Tag and field conversions defined in a separate sub-tables +# [[processors.regex.tags]] +# ## Tag to change, "*" will change every tag +# key = "resp_code" +# ## Regular expression to match on a tag value +# pattern = "^(\\d)\\d\\d$" +# ## Matches of the pattern will be replaced with this string. Use ${1} +# ## notation to use the text of the first submatch. 
+# replacement = "${1}xx" # -# ## Multiple conversions may be applied for one field sequentially -# ## Let's extract one more value -# # [[processors.regex.fields]] -# # key = "request" -# # pattern = ".*category=(\\w+).*" -# # replacement = "${1}" -# # result_key = "search_category" +# [[processors.regex.fields]] +# ## Field to change +# key = "request" +# ## All the power of the Go regular expressions available here +# ## For example, named subgroups +# pattern = "^/api(?P/[\\w/]+)\\S*" +# replacement = "${method}" +# ## If result_key is present, a new field will be created +# ## instead of changing existing field +# result_key = "method" # -# ## Rename metric fields -# # [[processors.regex.field_rename]] -# # ## Regular expression to match on a field name -# # pattern = "^search_(\\w+)d$" -# # ## Matches of the pattern will be replaced with this string. Use ${1} -# # ## notation to use the text of the first submatch. -# # replacement = "${1}" -# # ## If the new field name already exists, you can either "overwrite" the -# # ## existing one with the value of the renamed field OR you can "keep" -# # ## both the existing and source field. -# # # result_key = "keep" +# # Multiple conversions may be applied for one field sequentially +# # Let's extract one more value +# [[processors.regex.fields]] +# key = "request" +# pattern = ".*category=(\\w+).*" +# replacement = "${1}" +# result_key = "search_category" # -# ## Rename metric tags +# # Rename metric fields +# [[processors.regex.field_rename]] +# ## Regular expression to match on a field name +# pattern = "^search_(\\w+)d$" +# ## Matches of the pattern will be replaced with this string. Use ${1} +# ## notation to use the text of the first submatch. +# replacement = "${1}" +# ## If the new field name already exists, you can either "overwrite" the +# ## existing one with the value of the renamed field OR you can "keep" +# ## both the existing and source field. +# # result_key = "keep" +# +# # Rename metric tags # # [[processors.regex.tag_rename]] # # ## Regular expression to match on a tag name # # pattern = "^search_(\\w+)d$" @@ -2693,7 +2752,7 @@ # # ## both the existing and source tag. # # # result_key = "keep" # -# ## Rename metrics +# # Rename metrics # # [[processors.regex.metric_rename]] # # ## Regular expression to match on an metric name # # pattern = "^search_(\\w+)d$" @@ -2704,6 +2763,22 @@ # # Rename measurements, tags, and fields that pass through this filter. # [[processors.rename]] +# ## Specify one sub-table per rename operation. +# [[processors.rename.replace]] +# measurement = "network_interface_throughput" +# dest = "throughput" +# +# [[processors.rename.replace]] +# tag = "hostname" +# dest = "host" +# +# [[processors.rename.replace]] +# field = "lower" +# dest = "min" +# +# [[processors.rename.replace]] +# field = "upper" +# dest = "max" # # ReverseDNS does a reverse lookup on IP addresses to retrieve the DNS name @@ -2773,11 +2848,11 @@ # ## The Starlark source can be set as a string in this configuration file, or # ## by referencing a file containing the script. Only one source or script # ## should be set at once. -# ## +# # ## Source of the Starlark script. # source = ''' # def apply(metric): -# return metric +# return metric # ''' # # ## File containing a Starlark script. 
@@ -2793,15 +2868,15 @@ # # Perform string processing on tags, fields, and measurements # [[processors.strings]] -# ## Convert a tag value to uppercase -# # [[processors.strings.uppercase]] -# # tag = "method" -# # ## Convert a field value to lowercase and store in a new field # # [[processors.strings.lowercase]] # # field = "uri_stem" # # dest = "uri_stem_normalised" # +# ## Convert a tag value to uppercase +# # [[processors.strings.uppercase]] +# # tag = "method" +# # ## Convert a field value to titlecase # # [[processors.strings.titlecase]] # # field = "status" @@ -2855,10 +2930,10 @@ # # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit. # [[processors.tag_limit]] # ## Maximum number of tags to preserve -# limit = 10 +# limit = 3 # # ## List of tags to preferentially preserve -# keep = ["foo", "bar", "baz"] +# keep = ["environment", "region"] # # Uses a Go template to create a new tag @@ -2895,7 +2970,7 @@ # ## the defaults processor plugin to ensure fields are set if required. # # fields = ["value"] # -# ## What aggregation to use. Options: sum, mean, min, max +# ## What aggregation function to use. Options: sum, mean, min, max # # aggregation = "mean" # # ## Instead of the top k largest metrics, return the bottom k lowest metrics @@ -2952,50 +3027,26 @@ # drop_original = false # # ## Configures which basic stats to push as fields -# # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"] +# # stats = ["count","diff","rate","min","max","mean","non_negative_diff","non_negative_rate","stdev","s2","sum","interval"] # # Calculates a derivative for every field. # [[aggregators.derivative]] -# ## The period in which to flush the aggregator. -# period = "30s" -# ## -# ## If true, the original metric will be dropped by the -# ## aggregator and will not get sent to the output plugins. -# drop_original = false -# ## -# ## This aggregator will estimate a derivative for each field, which is -# ## contained in both the first and last metric of the aggregation interval. -# ## Without further configuration the derivative will be calculated with -# ## respect to the time difference between these two measurements in seconds. -# ## The formula applied is for every field: -# ## -# ## value_last - value_first -# ## derivative = -------------------------- -# ## time_difference_in_seconds -# ## -# ## The resulting derivative will be named *fieldname_rate*. The suffix -# ## "_rate" can be configured by the *suffix* parameter. When using a -# ## derivation variable you can include its name for more clarity. -# # suffix = "_rate" -# ## -# ## As an abstraction the derivative can be calculated not only by the time -# ## difference but by the difference of a field, which is contained in the -# ## measurement. This field is assumed to be monotonously increasing. This -# ## feature is used by specifying a *variable*. -# ## Make sure the specified variable is not filtered and exists in the metrics -# ## passed to this aggregator! -# # variable = "" -# ## -# ## When using a field as the derivation parameter the name of that field will -# ## be used for the resulting derivative, e.g. *fieldname_by_parameter*. -# ## -# ## Note, that the calculation is based on the actual timestamp of the -# ## measurements. When there is only one measurement during that period, the -# ## measurement will be rolled over to the next period. The maximum number of -# ## such roll-overs can be configured with a default of 10. 
-# # max_roll_over = 10 -# ## +# ## Specific Derivative Aggregator Arguments: +# +# ## Configure a custom derivation variable. Timestamp is used if none is given. +# # variable = "" +# +# ## Suffix to add to the field name for the derivative name. +# # suffix = "_rate" +# +# ## Roll-Over last measurement to first measurement of next period +# # max_roll_over = 10 +# +# ## General Aggregator Arguments: +# +# ## calculate derivative every 30 seconds +# period = "30s" # # Report the final metric of a series @@ -3010,7 +3061,7 @@ # series_timeout = "5m" -# # Create aggregate histograms. +# # Configuration for aggregate histogram metrics # [[aggregators.histogram]] # ## The period in which to flush the aggregator. # period = "30s" @@ -3137,7 +3188,7 @@ # ## aggregator and will not get sent to the output plugins. # drop_original = false # ## The fields for which the values will be counted -# fields = [] +# fields = ["status"] ############################################################################### @@ -3155,6 +3206,8 @@ collect_cpu_time = false ## If true, compute and report the sum of all non-idle CPU states report_active = false + ## If true and the info is available then add core_id and physical_id tags + core_tags = false # Read metrics about disk usage by mount point @@ -3166,6 +3219,11 @@ ## Ignore mount points by filesystem type. ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] + ## Ignore mount points by mount options. + ## The 'mount' command reports options of all mounts in parathesis. + ## Bind mounts can be ignored with the special 'bind' option. + # ignore_mount_opts = [] + # Read metrics about disk IO by device [[inputs.diskio]] @@ -3218,8 +3276,7 @@ # Read metrics about system load & uptime [[inputs.system]] - ## Uncomment to remove deprecated metrics. - # fielddrop = ["uptime_format"] + # no configuration # # Gather ActiveMQ metrics @@ -3227,6 +3284,11 @@ # ## ActiveMQ WebConsole URL # url = "http://127.0.0.1:8161" # +# ## Required ActiveMQ Endpoint +# ## deprecated in 1.11; use the url option +# # server = "192.168.50.10" +# # port = 8161 +# # ## Credentials for basic HTTP authentication # # username = "admin" # # password = "admin" @@ -3290,11 +3352,11 @@ # # Query statistics from AMD Graphics cards using rocm-smi binary # [[inputs.amd_rocm_smi]] -# ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath -# # bin_path = "/opt/rocm/bin/rocm-smi" +# ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath +# # bin_path = "/opt/rocm/bin/rocm-smi" # -# ## Optional: timeout for GPU polling -# # timeout = "5s" +# ## Optional: timeout for GPU polling +# # timeout = "5s" # # Read Apache status information (mod_status) @@ -3454,7 +3516,6 @@ # ## Tries to collect additional bond details from /sys/class/net/{bond} # ## currently only useful for LACP (mode 4) bonds # # collect_sys_details = false -# # # Collect Kafka topics and consumers status from Burrow HTTP API. @@ -3665,18 +3726,18 @@ # # Collects conntrack stats from the configured directories and files. # [[inputs.conntrack]] -# ## The following defaults would work with multiple versions of conntrack. -# ## Note the nf_ and ip_ filename prefixes are mutually exclusive across -# ## kernel versions, as are the directory locations. +# ## The following defaults would work with multiple versions of conntrack. +# ## Note the nf_ and ip_ filename prefixes are mutually exclusive across +# ## kernel versions, as are the directory locations. 
# -# ## Superset of filenames to look for within the conntrack dirs. -# ## Missing files will be ignored. -# files = ["ip_conntrack_count","ip_conntrack_max", -# "nf_conntrack_count","nf_conntrack_max"] +# ## Superset of filenames to look for within the conntrack dirs. +# ## Missing files will be ignored. +# files = ["ip_conntrack_count","ip_conntrack_max", +# "nf_conntrack_count","nf_conntrack_max"] # -# ## Directories to search within for the conntrack files above. -# ## Missing directories will be ignored. -# dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"] +# ## Directories to search within for the conntrack files above. +# ## Missing directories will be ignored. +# dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"] # # Gather health check statuses from services registered in Consul @@ -3688,9 +3749,10 @@ # # scheme = "http" # # ## Metric version controls the mapping from Consul metrics into -# ## Telegraf metrics. +# ## Telegraf metrics. Version 2 moved all fields with string values +# ## to tags. # ## -# ## example: metric_version = 1; deprecated in 1.15 +# ## example: metric_version = 1; deprecated in 1.16 # ## metric_version = 2; recommended version # # metric_version = 1 # @@ -3723,7 +3785,8 @@ # # url = "http://127.0.0.1:8500" # # ## Use auth token for authorization. -# ## Only one of the options can be set. Leave empty to not use any token. +# ## If both are set, an error is thrown. +# ## If both are empty, no token will be used. # # token_file = "/path/to/auth/token" # ## OR # # token = "a1234567-40c7-9048-7bae-378687048181" @@ -3788,7 +3851,7 @@ # # Input plugin for DC/OS metrics # [[inputs.dcos]] # ## The DC/OS cluster URL. -# cluster_url = "https://dcos-ee-master-1" +# cluster_url = "https://dcos-master-1" # # ## The ID of the service account. # service_account_id = "telegraf" @@ -3874,13 +3937,19 @@ # endpoint = "unix:///var/run/docker.sock" # # ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) +# ## Note: configure this in one of the manager nodes in a Swarm cluster. +# ## configuring in multiple Swarm managers results in duplication of metrics. # gather_services = false # +# ## Only collect metrics for these containers. Values will be appended to +# ## container_name_include. +# ## Deprecated (1.4.0), use container_name_include +# container_names = [] +# # ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars # source_tag = false # -# ## Containers to include and exclude. Globs accepted. -# ## Note that an empty array for both will include all containers +# ## Containers to include and exclude. Collect all if empty. Globs accepted. # container_name_include = [] # container_name_exclude = [] # @@ -3894,25 +3963,38 @@ # ## Timeout for docker list, info, and stats commands # timeout = "5s" # +# ## Whether to report for each container per-device blkio (8:0, 8:1...), +# ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not. +# ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'. +# ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting +# ## is honored. +# perdevice = true +# # ## Specifies for which classes a per-device metric should be issued # ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...) 
# ## Please note that this setting has no effect if 'perdevice' is set to 'true' # # perdevice_include = ["cpu"] # +# ## Whether to report for each container total blkio and network stats or not. +# ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'. +# ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting +# ## is honored. +# total = false +# # ## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values. # ## Possible values are 'cpu', 'blkio' and 'network' # ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin. # ## Please note that this setting has no effect if 'total' is set to 'false' # # total_include = ["cpu", "blkio", "network"] # -# ## Which environment variables should we use as a tag -# ##tag_env = ["JAVA_HOME", "HEAP_SIZE"] -# # ## docker labels to include and exclude as tags. Globs accepted. # ## Note that an empty array for both will include all labels as tags # docker_label_include = [] # docker_label_exclude = [] # +# ## Which environment variables should we use as a tag +# tag_env = ["JAVA_HOME", "HEAP_SIZE"] +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" @@ -3921,11 +4003,14 @@ # # insecure_skip_verify = false -# # Read statistics from one or many dovecot servers +# # Read metrics about dovecot servers # [[inputs.dovecot]] # ## specify dovecot servers via an address:port list # ## e.g. # ## localhost:24242 +# ## or as an UDS socket +# ## e.g. +# ## /var/run/dovecot/old-stats # ## # ## If no servers are specified, then localhost is used as the host. # servers = ["localhost:24242"] @@ -3972,7 +4057,7 @@ # ## dpdk_instance = "my-fwd-app" -# # Read metrics about docker containers from Fargate/ECS v2, v3 meta endpoints. +# # Read metrics about ECS containers # [[inputs.ecs]] # ## ECS metadata url. # ## Metadata v2 API is used if set explicitly. Otherwise, @@ -4003,8 +4088,8 @@ # # Read stats from one or more Elasticsearch servers or clusters # [[inputs.elasticsearch]] # ## specify a list of one or more Elasticsearch servers -# # you can add username and password to your url to use basic authentication: -# # servers = ["http://user:pass@localhost:9200"] +# ## you can add username and password to your url to use basic authentication: +# ## servers = ["http://user:pass@localhost:9200"] # servers = ["http://localhost:9200"] # # ## Timeout for HTTP requests to the elastic search server(s) @@ -4015,16 +4100,16 @@ # ## of the cluster. # local = true # -# ## Set cluster_health to true when you want to also obtain cluster health stats +# ## Set cluster_health to true when you want to obtain cluster health stats # cluster_health = false # -# ## Adjust cluster_health_level when you want to also obtain detailed health stats +# ## Adjust cluster_health_level when you want to obtain detailed health stats # ## The options are # ## - indices (default) # ## - cluster # # cluster_health_level = "indices" # -# ## Set cluster_stats to true when you want to also obtain cluster stats. +# ## Set cluster_stats to true when you want to obtain cluster stats. # cluster_stats = false # # ## Only gather cluster_stats from the master node. 
To work this require local = true @@ -4035,6 +4120,7 @@ # indices_include = ["_all"] # # ## One of "shards", "cluster", "indices" +# ## Currently only "shards" is implemented # indices_level = "shards" # # ## node_stats is a list of sub-stats that you want to have gathered. Valid options @@ -4054,8 +4140,9 @@ # # insecure_skip_verify = false # # ## Sets the number of most recent indices to return for indices that are configured with a date-stamped suffix. -# ## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all indices that match it, and sort them -# ## by the date or number after the wildcard. Metrics then are gathered for only the 'num_most_recent_indices' amount of most recent indices. +# ## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all indices that match it, and +# ## sort them by the date or number after the wildcard. Metrics then are gathered for only the 'num_most_recent_indices' amount of most +# ## recent indices. # # num_most_recent_indices = 0 @@ -4159,6 +4246,12 @@ # "/tmp/collect_*.sh" # ] # +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. "KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] +# # ## Timeout for each command to complete. # timeout = "5s" # @@ -4198,14 +4291,6 @@ # ## as well as ** to match recursive files and directories. # files = ["/tmp/metrics.out"] # -# -# ## Name a tag containing the name of the file the data was parsed from. Leave empty -# ## to disable. Cautious when file name variation is high, this can increase the cardinality -# ## significantly. Read more about cardinality here: -# ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality -# # file_tag = "" -# # -# # ## Character encoding to use when interpreting the file contents. Invalid # ## characters are replaced using the unicode replacement character. When set # ## to the empty string the data is not decoded to text. @@ -4215,11 +4300,18 @@ # ## character_encoding = "" # # character_encoding = "" # -# ## The dataformat to be read from files +# ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # data_format = "influx" +# +# +# ## Name a tag containing the name of the file the data was parsed from. Leave empty +# ## to disable. Cautious when file name variation is high, this can increase the cardinality +# ## significantly. Read more about cardinality here: +# ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality +# # file_tag = "" # # Count files in a directory @@ -4230,13 +4322,13 @@ # ## /var/log/** -> recursively find all directories in /var/log and count files in each directories # ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directories # ## /var/log -> count all files in /var/log and all of its subdirectories -# directories = ["/var/cache/apt/archives"] +# directories = ["/var/cache/apt", "/tmp"] # # ## Only count files that match the name pattern. Defaults to "*". -# name = "*.deb" +# name = "*" # # ## Count files in subdirectories. Defaults to true. -# recursive = false +# recursive = true # # ## Only count regular files. Defaults to true. 
# regular_only = true @@ -4260,14 +4352,8 @@ # [[inputs.filestat]] # ## Files to gather stats about. # ## These accept standard unix glob matching rules, but with the addition of -# ## ** as a "super asterisk". ie: -# ## "/var/log/**.log" -> recursively find all .log files in /var/log -# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log -# ## "/var/log/apache.log" -> just tail the apache log file -# ## -# ## See https://github.com/gobwas/glob for more examples -# ## -# files = ["/var/log/**.log"] +# ## ** as a "super asterisk". See https://github.com/gobwas/glob. +# files = ["/etc/telegraf/telegraf.conf", "/var/log/**.log"] # # ## If true, read the entire file and calculate an md5 checksum. # md5 = false @@ -4296,17 +4382,17 @@ # # ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent) # exclude = [ -# "monitor_agent", -# "dummy", +# "monitor_agent", +# "dummy", # ] # # Gather repository information from GitHub hosted repositories. # [[inputs.github]] -# ## List of repositories to monitor. +# ## List of repositories to monitor # repositories = [ -# "influxdata/telegraf", -# "influxdata/influxdb" +# "influxdata/telegraf", +# "influxdata/influxdb" # ] # # ## Github API access token. Unauthenticated requests are limited to 60 per hour. @@ -4319,11 +4405,11 @@ # # http_timeout = "5s" # # ## List of additional fields to query. -# ## NOTE: Getting those fields might involve issuing additional API-calls, so please -# ## make sure you do not exceed the rate-limit of GitHub. -# ## -# ## Available fields are: -# ## - pull-requests -- number of open and closed pull requests (2 API-calls per repository) +# ## NOTE: Getting those fields might involve issuing additional API-calls, so please +# ## make sure you do not exceed the rate-limit of GitHub. +# ## +# ## Available fields are: +# ## - pull-requests -- number of open and closed pull requests (2 API-calls per repository) # # additional_fields = [] @@ -4367,20 +4453,20 @@ # # insecure_skip_verify = false -# # Read metrics of haproxy, via socket or csv stats page +# # Read metrics of HAProxy, via socket or HTTP stats page # [[inputs.haproxy]] # ## An array of address to gather stats about. Specify an ip on hostname # ## with optional port. ie localhost, 10.10.3.33:1936, etc. # ## Make sure you specify the complete path to the stats endpoint # ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats # -# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats -# servers = ["http://myhaproxy.com:1936/haproxy?stats"] -# # ## Credentials for basic HTTP authentication # # username = "admin" # # password = "admin" # +# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats +# servers = ["http://myhaproxy.com:1936/haproxy?stats"] +# # ## You can also use local socket with standard wildcard globbing. # ## Server address not starting with 'http' will be treated as a possible # ## socket, so both examples below are valid. @@ -4425,6 +4511,13 @@ # ## Optional HTTP headers # # headers = {"X-Special-Header" = "Special-Value"} # +# ## HTTP entity-body to send with POST/PUT requests. +# # body = "" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. 
+# # content_encoding = "identity" +# # ## Optional file with Bearer token # ## file content is added as an Authorization header # # bearer_token = "/path/to/file" @@ -4433,22 +4526,15 @@ # # username = "username" # # password = "pa$$word" # -# ## HTTP entity-body to send with POST/PUT requests. -# # body = "" -# -# ## HTTP Content-Encoding for write request body, can be set to "gzip" to -# ## compress body or "identity" to apply no encoding. -# # content_encoding = "identity" -# -# ## HTTP Proxy support -# # http_proxy_url = "" -# -# ## OAuth2 Client Credentials Grant +# ## OAuth2 Client Credentials. The options 'client_id', 'client_secret', and 'token_url' are required to use OAuth2. # # client_id = "clientid" # # client_secret = "secret" # # token_url = "https://indentityprovider/oauth2/v1/token" # # scopes = ["urn:opc:idm:__myscopes__"] # +# ## HTTP Proxy support +# # http_proxy_url = "" +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" @@ -4477,6 +4563,7 @@ # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # # data_format = "influx" +# # # HTTP/HTTPS request given an address a method and a timeout @@ -4535,6 +4622,8 @@ # # tls_key = "/etc/telegraf/key.pem" # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false +# ## Use the given name as the SNI server name on each URL +# # tls_server_name = "" # # ## HTTP Request Headers (all values must be strings) # # [inputs.http_response.headers] @@ -4549,12 +4638,18 @@ # # interface = "eth0" +# ## DEPRECATED: The 'httpjson' plugin is deprecated in version 1.6.0, use 'inputs.http' instead. # # Read flattened metrics from one or more JSON HTTP endpoints # [[inputs.httpjson]] -# ## DEPRECATED: The 'httpjson' plugin is deprecated in version 1.6.0, use 'inputs.http' instead. # ## NOTE This plugin only reads numerical measurements, strings and booleans # ## will be ignored. # +# ## Name for the service being polled. Will be appended to the name of the +# ## measurement e.g. "httpjson_webserver_stats". +# ## +# ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead. +# name = "webserver_stats" +# # ## URL of each server in the service's cluster # servers = [ # "http://localhost:9999/stats/", @@ -4566,7 +4661,7 @@ # ## HTTP method to use: GET or POST (case-sensitive) # method = "GET" # -# ## List of tag names to extract from top-level of JSON server response +# ## Tags to extract from top-level of JSON server response. # # tag_keys = [ # # "my_tag_1", # # "my_tag_2" @@ -4579,14 +4674,14 @@ # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # -# ## HTTP parameters (all values must be strings). For "GET" requests, data +# ## HTTP Request Parameters (all values must be strings). For "GET" requests, data # ## will be included in the query. For "POST" requests, data will be included # ## in the request body as "x-www-form-urlencoded". # # [inputs.httpjson.parameters] # # event_type = "cpu_spike" # # threshold = "0.75" # -# ## HTTP Headers (all values must be strings) +# ## HTTP Request Headers (all values must be strings). # # [inputs.httpjson.headers] # # X-Auth-Token = "my-xauth-token" # # apiVersion = "v1" @@ -4656,14 +4751,21 @@ # timeout = "5s" -# # Intel PowerStat plugin enables monitoring of platform metrics (power, TDP) and Core metrics like temperature, power and utilization. 
+# # Intel PowerStat plugin enables monitoring of platform metrics (power, TDP) and per-CPU metrics like temperature, power and utilization. # [[inputs.intel_powerstat]] -# ## All global metrics are always collected by Intel PowerStat plugin. -# ## User can choose which per-CPU metrics are monitored by the plugin in cpu_metrics array. -# ## Empty array means no per-CPU specific metrics will be collected by the plugin - in this case only platform level -# ## telemetry will be exposed by Intel PowerStat plugin. +# ## The user can choose which package metrics are monitored by the plugin with the package_metrics setting: +# ## - The default, will collect "current_power_consumption", "current_dram_power_consumption" and "thermal_design_power" +# ## - Setting this value to an empty array means no package metrics will be collected +# ## - Finally, a user can specify individual metrics to capture from the supported options list # ## Supported options: -# ## "cpu_frequency", "cpu_busy_frequency", "cpu_temperature", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles" +# ## "current_power_consumption", "current_dram_power_consumption", "thermal_design_power", "max_turbo_frequency", "uncore_frequency" +# # package_metrics = ["current_power_consumption", "current_dram_power_consumption", "thermal_design_power"] +# +# ## The user can choose which per-CPU metrics are monitored by the plugin in cpu_metrics array. +# ## Empty or missing array means no per-CPU specific metrics will be collected by the plugin. +# ## Supported options: +# ## "cpu_frequency", "cpu_c0_state_residency", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles", "cpu_temperature", "cpu_busy_frequency" +# ## ATTENTION: cpu_busy_cycles option is DEPRECATED - superseded by cpu_c0_state_residency # # cpu_metrics = [] @@ -4675,6 +4777,11 @@ # # Monitors internet speed using speedtest.net service # [[inputs.internet_speed]] +# ## This plugin downloads many MB of data each time it is run. As such +# ## consider setting a higher interval for this plugin to reduce the +# ## demand on your internet connection. +# # interval = "60m" +# # ## Sets if runs file download test # # enable_file_download = false # @@ -4723,7 +4830,7 @@ # ## gaps or overlap in pulled data # interval = "30s" # -# ## Timeout for the ipmitool command to complete +# ## Timeout for the ipmitool command to complete. Default is 20 seconds. # timeout = "20s" # # ## Schema Version: (Optional, defaults to version 1) @@ -4743,14 +4850,17 @@ # # Gather packets and bytes counters from Linux ipsets -# [[inputs.ipset]] -# ## By default, we only show sets which have already matched at least 1 packet. -# ## set include_unmatched_sets = true to gather them all. -# include_unmatched_sets = false -# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save") -# use_sudo = false -# ## The default timeout of 1s for ipset execution can be overridden here: -# # timeout = "1s" +# [[inputs.ipset]] +# ## By default, we only show sets which have already matched at least 1 packet. +# ## set include_unmatched_sets = true to gather them all. +# include_unmatched_sets = false +# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save") +# ## You can avoid using sudo or root, by setting appropriate privileges for +# ## the telegraf.service systemd service. 
+# use_sudo = false +# ## The default timeout of 1s for ipset execution can be overridden here: +# # timeout = "1s" +# # # Gather packets and bytes throughput from iptables @@ -4827,16 +4937,11 @@ # # max_connections = 5 +# ## DEPRECATED: The 'jolokia' plugin is deprecated in version 1.5.0, use 'inputs.jolokia2' instead. # # Read JMX metrics through Jolokia # [[inputs.jolokia]] -# ## DEPRECATED: The 'jolokia' plugin is deprecated in version 1.5.0, use 'inputs.jolokia2' instead. -# # DEPRECATED: the jolokia plugin has been deprecated in favor of the -# # jolokia2 plugin -# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2 -# # ## This is the context root used to compose the jolokia url # ## NOTE that Jolokia requires a trailing slash at the end of the context root -# ## NOTE that your jolokia security policy must allow for POST requests. # context = "/jolokia/" # # ## This specifies the mode used @@ -4859,13 +4964,6 @@ # ## Includes connection time, any redirects, and reading the response body. # # client_timeout = "4s" # -# ## Attribute delimiter -# ## -# ## When multiple attributes are returned for a single -# ## [inputs.jolokia.metrics], the field name is a concatenation of the metric -# ## name, and the attribute name, separated by the given delimiter. -# # delimiter = "_" -# # ## List of servers exposing jolokia read service # [[inputs.jolokia.servers]] # name = "as-server-01" @@ -5033,12 +5131,19 @@ # # selector_exclude = ["*"] # # ## Optional TLS Config +# ## Trusted root certificates for server # # tls_ca = "/path/to/cafile" +# ## Used for TLS client certificate authentication # # tls_cert = "/path/to/certfile" +# ## Used for TLS client certificate authentication # # tls_key = "/path/to/keyfile" +# ## Send the specified TLS server name via SNI # # tls_server_name = "kubernetes.example.com" # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false +# +# ## Uncomment to remove deprecated metrics. +# # fielddrop = ["terminated_reason"] # # Read metrics from the kubernetes kubelet api @@ -5073,7 +5178,7 @@ # [[inputs.leofs]] # ## An array of URLs of the form: # ## host [ ":" port] -# servers = ["127.0.0.1:4020"] +# servers = ["127.0.0.1:4010"] # # Provides Linux sysctl fs metrics @@ -5123,17 +5228,19 @@ # # "/proc/fs/lustre/obdfilter/*/stats", # # "/proc/fs/lustre/osd-ldiskfs/*/stats", # # "/proc/fs/lustre/obdfilter/*/job_stats", +# # "/proc/fs/lustre/obdfilter/*/exports/*/stats", # # ] # # mds_procfiles = [ # # "/proc/fs/lustre/mdt/*/md_stats", # # "/proc/fs/lustre/mdt/*/job_stats", +# # "/proc/fs/lustre/mdt/*/exports/*/stats", # # ] # # Read metrics about LVM physical volumes, volume groups, logical volumes. # [[inputs.lvm]] -# ## Use sudo to run LVM commands -# use_sudo = false +# ## Use sudo to run LVM commands +# use_sudo = false # # Gathers metrics from the /3.0/reports MailChimp API @@ -5141,9 +5248,11 @@ # ## MailChimp API key # ## get from https://admin.mailchimp.com/account/api/ # api_key = "" # required +# # ## Reports for campaigns sent more than days_old ago will not be collected. -# ## 0 means collect all. +# ## 0 means collect all and is the default value. # days_old = 0 +# # ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old # # campaign_id = "" @@ -5168,28 +5277,29 @@ # # insecure_skip_verify = false -# # Read metrics from one or many mcrouter servers +# # Read metrics from one or many mcrouter servers. # [[inputs.mcrouter]] # ## An array of address to gather stats about. 
Specify an ip or hostname # ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc. -# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"] +# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"] # -# ## Timeout for metric collections from all servers. Minimum timeout is "1s". +# ## Timeout for metric collections from all servers. Minimum timeout is "1s". # # timeout = "5s" -# # Get md array statistics from /proc/mdstat +# # Get kernel statistics from /proc/mdstat # [[inputs.mdstat]] -# ## Sets file path -# ## If not specified, then default is /proc/mdstat -# # file_name = "/proc/mdstat" +# ## Sets file path +# ## If not specified, then default is /proc/mdstat +# # file_name = "/proc/mdstat" -# # Read metrics from one or many memcached servers +# # Read metrics from one or many memcached servers. # [[inputs.memcached]] -# ## An array of address to gather stats about. Specify an ip on hostname -# ## with optional port. ie localhost, 10.0.0.1:11211, etc. +# # An array of address to gather stats about. Specify an ip on hostname +# # with optional port. ie localhost, 10.0.0.1:11211, etc. # servers = ["localhost:11211"] +# # An array of unix memcached sockets to gather stats about. # # unix_sockets = ["/var/run/memcached.sock"] # # ## Optional TLS Config @@ -5271,6 +5381,9 @@ # # ## One or more mock data fields *must* be defined. # ## +# ## [[inputs.mock.constant]] +# ## name = "constant" +# ## value = value_of_any_type # ## [[inputs.mock.random]] # ## name = "rand" # ## min = 1.0 @@ -5321,21 +5434,23 @@ # # parity = "N" # # stop_bits = 1 # -# ## Trace the connection to the modbus device as debug messages -# ## Note: You have to enable telegraf's debug mode to see those messages! -# # debug_connection = false -# # ## For Modbus over TCP you can choose between "TCP", "RTUoverTCP" and "ASCIIoverTCP" # ## default behaviour is "TCP" if the controller is TCP # ## For Serial you can choose between "RTU" and "ASCII" # # transmission_mode = "RTU" # -# ## Define the configuration schema +# ## Trace the connection to the modbus device as debug messages +# ## Note: You have to enable telegraf's debug mode to see those messages! 
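+# ## (Debug mode can be enabled, for instance, by starting telegraf with the
+# ## --debug flag or by setting debug = true in the [agent] section.)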
+# # debug_connection = false +# +# ## Define the configuration schema # ## |---register -- define fields per register type in the original style (only supports one slave ID) # ## |---request -- define fields on a requests base # configuration_type = "register" # -# ## Per register definition +# ## --- "register" configuration style --- +# +# ## Measurements # ## # # ## Digital Variables, Discrete Inputs and Coils @@ -5363,11 +5478,11 @@ # ## |---BA, DCBA - Little Endian # ## |---BADC - Mid-Big Endian # ## |---CDAB - Mid-Little Endian -# ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, -# ## FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation) -# ## FLOAT32, FIXED, UFIXED (fixed-point representation on input) -# ## scale - the final numeric variable representation -# ## address - variable address +# ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, +# ## FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation) +# ## FLOAT32, FIXED, UFIXED (fixed-point representation on input) +# ## scale - the final numeric variable representation +# ## address - variable address # # holding_registers = [ # { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]}, @@ -5384,26 +5499,28 @@ # ] # # +# ## --- "request" configuration style --- +# # ## Per request definition # ## # # ## Define a request sent to the device # ## Multiple of those requests can be defined. Data will be collated into metrics at the end of data collection. -# # [[inputs.modbus.request]] +# [[inputs.modbus.request]] # ## ID of the modbus slave device to query. # ## If you need to query multiple slave-devices, create several "request" definitions. -# # slave_id = 0 +# slave_id = 1 # # ## Byte order of the data. -# ## |---ABCD or MSW-BE -- Big Endian (Motorola) -# ## |---DCBA or LSW-LE -- Little Endian (Intel) -# ## |---BADC or MSW-LE -- Big Endian with byte swap -# ## |---CDAB or LSW-BE -- Little Endian with byte swap -# # byte_order = "ABCD" +# ## |---ABCD -- Big Endian (Motorola) +# ## |---DCBA -- Little Endian (Intel) +# ## |---BADC -- Big Endian with byte swap +# ## |---CDAB -- Little Endian with byte swap +# byte_order = "ABCD" # # ## Type of the register for the request # ## Can be "coil", "discrete", "holding" or "input" -# # register = "holding" +# register = "coil" # # ## Name of the measurement. # ## Can be overriden by the individual field definitions. Defaults to "modbus" @@ -5428,41 +5545,51 @@ # ## the fields are output as zero or one in UINT64 format by default. # # ## Coil / discrete input example -# # fields = [ -# # { address=0, name="motor1_run"}, -# # { address=1, name="jog", measurement="motor"}, -# # { address=2, name="motor1_stop", omit=true}, -# # { address=3, name="motor1_overheating"}, -# # ] +# fields = [ +# { address=0, name="motor1_run"}, +# { address=1, name="jog", measurement="motor"}, +# { address=2, name="motor1_stop", omit=true}, +# { address=3, name="motor1_overheating"}, +# ] # -# ## Per-request tags -# ## These tags take precedence over predefined tags. 
-# # [[inputs.modbus.request.tags]] -# # name = "value" +# [[inputs.modbus.request.tags]] +# machine = "impresser" +# location = "main building" # -# ## Holding / input example +# [[inputs.modbus.request]] +# ## Holding example # ## All of those examples will result in FLOAT64 field outputs -# # fields = [ -# # { address=0, name="voltage", type="INT16", scale=0.1 }, -# # { address=1, name="current", type="INT32", scale=0.001 }, -# # { address=3, name="power", type="UINT32", omit=true }, -# # { address=5, name="energy", type="FLOAT32", scale=0.001, measurement="W" }, -# # { address=7, name="frequency", type="UINT32", scale=0.1 }, -# # { address=8, name="power_factor", type="INT64", scale=0.01 }, -# # ] +# slave_id = 1 +# byte_order = "DCBA" +# register = "holding" +# fields = [ +# { address=0, name="voltage", type="INT16", scale=0.1 }, +# { address=1, name="current", type="INT32", scale=0.001 }, +# { address=3, name="power", type="UINT32", omit=true }, +# { address=5, name="energy", type="FLOAT32", scale=0.001, measurement="W" }, +# { address=7, name="frequency", type="UINT32", scale=0.1 }, +# { address=8, name="power_factor", type="INT64", scale=0.01 }, +# ] # -# ## Holding / input example with type conversions -# # fields = [ -# # { address=0, name="rpm", type="INT16" }, # will result in INT64 field -# # { address=1, name="temperature", type="INT16", scale=0.1 }, # will result in FLOAT64 field -# # { address=2, name="force", type="INT32", output="FLOAT64" }, # will result in FLOAT64 field -# # { address=4, name="hours", type="UINT32" }, # will result in UIN64 field -# # ] +# [[inputs.modbus.request.tags]] +# machine = "impresser" +# location = "main building" # -# ## Per-request tags -# ## These tags take precedence over predefined tags. -# # [[inputs.modbus.request.tags]] -# # name = "value" +# [[inputs.modbus.request]] +# ## Input example with type conversions +# slave_id = 1 +# byte_order = "ABCD" +# register = "input" +# fields = [ +# { address=0, name="rpm", type="INT16" }, # will result in INT64 field +# { address=1, name="temperature", type="INT16", scale=0.1 }, # will result in FLOAT64 field +# { address=2, name="force", type="INT32", output="FLOAT64" }, # will result in FLOAT64 field +# { address=4, name="hours", type="UINT32" }, # will result in UIN64 field +# ] +# +# [[inputs.modbus.request.tags]] +# machine = "impresser" +# location = "main building" # # # @@ -5483,9 +5610,13 @@ # ## For example: # ## mongodb://user:auth_key@10.10.3.30:27017, # ## mongodb://10.10.3.33:18832, -# servers = ["mongodb://127.0.0.1:27017?connect=direct"] +# ## +# ## If connecting to a cluster, users must include the "?connect=direct" in +# ## the URL to ensure that the connection goes directly to the specified node +# ## and not have all connections passed to the master node. +# servers = ["mongodb://127.0.0.1:27017/?connect=direct"] # -# ## When true, collect cluster status +# ## When true, collect cluster status. # ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which # ## may have an impact on performance. # # gather_cluster_status = true @@ -5538,7 +5669,7 @@ # ## Omit this option to use absolute paths. # base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0" # -# ## If true, Telegraf discard all data when a single file can't be read. +# ## If true discard all data when a single file can't be read. # ## Else, Telegraf omits the field generated from this file. 
# # fail_early = true # @@ -5600,19 +5731,19 @@ # ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS # # gather_innodb_metrics = false # -# ## gather metrics from SHOW SLAVE STATUS command output -# # gather_slave_status = false -# # ## gather metrics from all channels from SHOW SLAVE STATUS command output # # gather_all_slave_channels = false # -# ## use MariaDB dialect for all channels SHOW SLAVE STATUS +# ## gather metrics from SHOW SLAVE STATUS command output +# # gather_slave_status = false +# +# ## use SHOW ALL SLAVES STATUS command output for MariaDB # # mariadb_dialect = false # # ## gather metrics from SHOW BINARY LOGS command output # # gather_binary_logs = false # -# ## gather metrics from PERFORMANCE_SCHEMA.GLOBAL_VARIABLES +# ## gather metrics from SHOW GLOBAL VARIABLES command output # # gather_global_variables = true # # ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE @@ -5631,6 +5762,15 @@ # # gather_file_events_stats = false # # ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST +# # gather_perf_events_statements = false +# # +# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME +# # gather_perf_sum_per_acc_per_event = false +# # +# ## list of events to be gathered for gather_perf_sum_per_acc_per_event +# ## in case of empty list all events will be gathered +# # perf_summary_events = [] +# # # # gather_perf_events_statements = false # # ## the limits for metrics form perf_events_statements @@ -5638,13 +5778,6 @@ # # perf_events_statements_limit = 250 # # perf_events_statements_time_limit = 86400 # -# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME -# # gather_perf_sum_per_acc_per_event = false -# -# ## list of events to be gathered for gather_perf_sum_per_acc_per_event -# ## in case of empty list all events will be gathered -# # perf_summary_events = [] -# # ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES) # ## example: interval_slow = "30m" # # interval_slow = "" @@ -5679,15 +5812,17 @@ # # ## The response_timeout specifies how long to wait for a reply from the Apex. # #response_timeout = "5s" +# -# # Read metrics about network interface usage +# # Gather metrics about network interfaces # [[inputs.net]] # ## By default, telegraf gathers stats from any up interface (excluding loopback) # ## Setting interfaces will tell it to gather these explicit interfaces, -# ## regardless of status. +# ## regardless of status. When specifying an interface, glob-style +# ## patterns are also supported. # ## -# # interfaces = ["eth0"] +# # interfaces = ["eth*", "enp0s[0-1]", "lo"] # ## # ## On linux systems telegraf also collects protocol stats. # ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics. @@ -5719,7 +5854,7 @@ # ## expected string in answer # # expect = "ssh" # -# ## Uncomment to remove deprecated fields +# ## Uncomment to remove deprecated fields; recommended for new deploys # # fielddrop = ["result_type", "string_found"] @@ -5759,23 +5894,23 @@ # # Read Nginx's basic status information (ngx_http_stub_status_module) # [[inputs.nginx]] -# # An array of Nginx stub_status URI to gather stats. +# ## An array of Nginx stub_status URI to gather stats. 
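+# ## Each URI must be served by ngx_http_stub_status_module. For example, to
+# ## poll several servers at once (addresses below are placeholders):
+# ##   urls = ["http://localhost/server_status", "http://10.0.0.2/server_status"]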
# urls = ["http://localhost/server_status"] # # ## Optional TLS Config -# tls_ca = "/etc/telegraf/ca.pem" -# tls_cert = "/etc/telegraf/cert.cer" -# tls_key = "/etc/telegraf/key.key" +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" # ## Use TLS but skip chain & host verification -# insecure_skip_verify = false +# # insecure_skip_verify = false # -# # HTTP response timeout (default: 5s) +# ## HTTP response timeout (default: 5s) # response_timeout = "5s" -# # Read Nginx Plus' full status information (ngx_http_status_module) +# # Read Nginx Plus' advanced status information # [[inputs.nginx_plus]] -# ## An array of ngx_http_status_module or status URI to gather stats. +# ## An array of Nginx status URIs to gather stats. # urls = ["http://localhost/status"] # # # HTTP response timeout (default: 5s) @@ -5789,11 +5924,10 @@ # # insecure_skip_verify = false -# # Read Nginx Plus Api documentation +# # Read Nginx Plus API advanced status information # [[inputs.nginx_plus_api]] -# ## An array of API URI to gather stats. +# ## An array of Nginx API URIs to gather stats. # urls = ["http://localhost/api"] -# # # Nginx API version, default: 3 # # api_version = 3 # @@ -5884,7 +6018,7 @@ # # tls_key = /path/to/keyfile -# # A plugin to collect stats from the NSD authoritative DNS name server +# # A plugin to collect stats from the NSD DNS resolver # [[inputs.nsd]] # ## Address of server to connect to, optionally ':port'. Defaults to the # ## address in the nsd config file. @@ -5995,8 +6129,9 @@ # ## namespace - OPC UA namespace of the node (integer value 0 thru 3) # ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque) # ## identifier - OPC UA ID (tag as shown in opcua browser) +# ## tags - extra tags to be added to the output metric (optional) # ## Example: -# ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262"} +# ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", tags=[["tag1","value1"],["tag2","value2]]} # # nodes = [ # # {name="", namespace="", identifier_type="", identifier=""}, # # {name="", namespace="", identifier_type="", identifier=""}, @@ -6053,8 +6188,8 @@ # bind_dn = "" # bind_password = "" # -# # Reverse metric names so they sort more naturally. Recommended. 
-# # This defaults to false if unset, but is set to true when generating a new config +# # reverse metric names so they sort more naturally +# # Defaults to false if unset, but is set to true when generating a new config # reverse_metric_names = true @@ -6070,16 +6205,16 @@ # # timeout = "5ms" -# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver -# [[inputs.opensmtpd]] -# ## If running as a restricted user you can prepend sudo for additional access: -# #use_sudo = false +# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver +# [[inputs.opensmtpd]] +# ## If running as a restricted user you can prepend sudo for additional access: +# #use_sudo = false # -# ## The default location of the smtpctl binary can be overridden with: -# binary = "/usr/sbin/smtpctl" +# ## The default location of the smtpctl binary can be overridden with: +# binary = "/usr/sbin/smtpctl" # -# ## The default timeout of 1000ms can be overridden with (in milliseconds): -# timeout = 1000 +# # The default timeout of 1s can be overridden with: +# #timeout = "1s" # # Collects performance metrics from OpenStack services @@ -6162,7 +6297,7 @@ # ## "metric", "imperial", or "standard". # # units = "metric" # -# ## Query interval; OpenWeatherMap updates their weather data every 10 +# ## Query interval; OpenWeatherMap weather data is updated every 10 # ## minutes. # interval = "10m" @@ -6203,6 +6338,8 @@ # ## "/var/run/php5-fpm.sock" # ## or using a custom fpm status path: # ## "/var/run/php5-fpm.sock:fpm-custom-status-path" +# ## glob patterns are also supported: +# ## "/var/run/php*.sock" # ## # ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie: # ## "fcgi://10.0.0.12:9000/status" @@ -6285,8 +6422,10 @@ # # Read metrics from one or many PowerDNS servers # [[inputs.powerdns]] -# ## An array of sockets to gather stats about. -# ## Specify a path to unix socket. +# # An array of sockets to gather stats about. +# # Specify a path to unix socket. +# # +# # If no servers are specified, then '/var/run/pdns.controlsocket' is used as the path. # unix_sockets = ["/var/run/pdns.controlsocket"] @@ -6378,6 +6517,8 @@ # [[inputs.rabbitmq]] # ## Management Plugin url. (default: http://localhost:15672) # # url = "http://localhost:15672" +# ## Tag added to rabbitmq_overview series; deprecated: use tags +# # name = "rmq-server-1" # ## Credentials # # username = "guest" # # password = "guest" @@ -6403,6 +6544,11 @@ # ## specified, metrics for all nodes are gathered. # # nodes = ["rabbit@node1", "rabbit@node2"] # +# ## A list of queues to gather as the rabbitmq_queue measurement. If not +# ## specified, metrics for all queues are gathered. +# ## Deprecated in 1.6: Use queue_name_include instead. +# # queues = ["telegraf"] +# # ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not # ## specified, metrics for all exchanges are gathered. # # exchanges = ["telegraf"] @@ -6415,16 +6561,13 @@ # # ## Queues to include and exclude. Globs accepted. # ## Note that an empty array for both will include all queues -# queue_name_include = [] -# queue_name_exclude = [] +# # queue_name_include = [] +# # queue_name_exclude = [] # -# ## Federation upstreams include and exclude when gathering the rabbitmq_federation measurement. -# ## If neither are specified, metrics for all federation upstreams are gathered. 
-# ## Federation link metrics will only be gathered for queues and exchanges -# ## whose non-federation metrics will be collected (e.g a queue excluded -# ## by the 'queue_name_exclude' option will also be excluded from federation). -# ## Globs accepted. -# # federation_upstream_include = ["dataCentre-*"] +# ## Federation upstreams to include and exclude specified as an array of glob +# ## pattern strings. Federation links can also be limited by the queue and +# ## exchange filters. +# # federation_upstream_include = [] # # federation_upstream_exclude = [] @@ -6474,15 +6617,15 @@ # # Read CPU, Fans, Powersupply and Voltage metrics of hardware server through redfish APIs # [[inputs.redfish]] -# ## Server url +# ## Redfish API Base URL. # address = "https://127.0.0.1:5000" # -# ## Username, Password for hardware server +# ## Credentials for the Redfish API. # username = "root" # password = "password123456" # -# ## ComputerSystemId -# computer_system_id="2M220100SL" +# ## System Id to collect data for in Redfish APIs. +# computer_system_id="System.Embedded.1" # # ## Amount of time allowed to complete the HTTP request # # timeout = "5s" @@ -6495,40 +6638,6 @@ # # insecure_skip_verify = false -# # Read metrics from one or many redis servers -# [[inputs.redis]] -# ## specify servers via a url matching: -# ## [protocol://][:password]@address[:port] -# ## e.g. -# ## tcp://localhost:6379 -# ## tcp://:password@192.168.99.100 -# ## unix:///var/run/redis.sock -# ## -# ## If no servers are specified, then localhost is used as the host. -# ## If no port is specified, 6379 is used -# servers = ["tcp://localhost:6379"] -# -# ## Optional. Specify redis commands to retrieve values -# # [[inputs.redis.commands]] -# # # The command to run where each argument is a separate element -# # command = ["get", "sample-key"] -# # # The field to store the result in -# # field = "sample-key-value" -# # # The type of the result -# # # Can be "string", "integer", or "float" -# # type = "string" -# -# ## specify server password -# # password = "s#cr@t%" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = true - - # # Read metrics from one or many redis-sentinel servers # [[inputs.redis_sentinel]] # ## specify servers via a url matching: @@ -6558,11 +6667,11 @@ # ## rethinkdb://10.10.3.33:18832, # ## 10.0.0.1:10000, etc. # servers = ["127.0.0.1:28015"] -# ## +# # ## If you use actual rethinkdb of > 2.3.0 with username/password authorization, # ## protocol have to be named "rethinkdb2" - it will use 1_0 H. # # servers = ["rethinkdb2://username:password@127.0.0.1:28015"] -# ## +# # ## If you use older versions of rethinkdb (<2.2) with auth_key, protocol # ## have to be named "rethinkdb". # # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"] @@ -6604,55 +6713,61 @@ # # timeout = "5s" +# # Get slab statistics from procfs +# [[inputs.slab]] +# # no configuration - please see the plugin's README for steps to configure +# # sudo properly + + # # Read metrics from storage devices supporting S.M.A.R.T. 
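+# # (Requires the smartctl binary from smartmontools and, for vendor-specific
+# #  NVMe attributes, the nvme-cli utility; both paths can be overridden below.)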
# [[inputs.smart]] -# ## Optionally specify the path to the smartctl executable -# # path_smartctl = "/usr/bin/smartctl" +# ## Optionally specify the path to the smartctl executable +# # path_smartctl = "/usr/bin/smartctl" # -# ## Optionally specify the path to the nvme-cli executable -# # path_nvme = "/usr/bin/nvme" +# ## Optionally specify the path to the nvme-cli executable +# # path_nvme = "/usr/bin/nvme" # -# ## Optionally specify if vendor specific attributes should be propagated for NVMe disk case -# ## ["auto-on"] - automatically find and enable additional vendor specific disk info -# ## ["vendor1", "vendor2", ...] - e.g. "Intel" enable additional Intel specific disk info -# # enable_extensions = ["auto-on"] +# ## Optionally specify if vendor specific attributes should be propagated for NVMe disk case +# ## ["auto-on"] - automatically find and enable additional vendor specific disk info +# ## ["vendor1", "vendor2", ...] - e.g. "Intel" enable additional Intel specific disk info +# # enable_extensions = ["auto-on"] # -# ## On most platforms used cli utilities requires root access. -# ## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli. -# ## Sudo must be configured to allow the telegraf user to run smartctl or nvme-cli -# ## without a password. -# # use_sudo = false +# ## On most platforms used cli utilities requires root access. +# ## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli. +# ## Sudo must be configured to allow the telegraf user to run smartctl or nvme-cli +# ## without a password. +# # use_sudo = false # -# ## Skip checking disks in this power mode. Defaults to -# ## "standby" to not wake up disks that have stopped rotating. -# ## See --nocheck in the man pages for smartctl. -# ## smartctl version 5.41 and 5.42 have faulty detection of -# ## power mode and might require changing this value to -# ## "never" depending on your disks. -# # nocheck = "standby" +# ## Skip checking disks in this power mode. Defaults to +# ## "standby" to not wake up disks that have stopped rotating. +# ## See --nocheck in the man pages for smartctl. +# ## smartctl version 5.41 and 5.42 have faulty detection of +# ## power mode and might require changing this value to +# ## "never" depending on your disks. +# # nocheck = "standby" # -# ## Gather all returned S.M.A.R.T. attribute metrics and the detailed -# ## information from each drive into the 'smart_attribute' measurement. -# # attributes = false +# ## Gather all returned S.M.A.R.T. attribute metrics and the detailed +# ## information from each drive into the 'smart_attribute' measurement. +# # attributes = false # -# ## Optionally specify devices to exclude from reporting if disks auto-discovery is performed. -# # excludes = [ "/dev/pass6" ] +# ## Optionally specify devices to exclude from reporting if disks auto-discovery is performed. +# # excludes = [ "/dev/pass6" ] # -# ## Optionally specify devices and device type, if unset -# ## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done -# ## and all found will be included except for the excluded in excludes. -# # devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"] +# ## Optionally specify devices and device type, if unset +# ## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done +# ## and all found will be included except for the excluded in excludes. +# # devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"] # -# ## Timeout for the cli command to complete. 
-# # timeout = "30s" +# ## Timeout for the cli command to complete. +# # timeout = "30s" # -# ## Optionally call smartctl and nvme-cli with a specific concurrency policy. -# ## By default, smartctl and nvme-cli are called in separate threads (goroutines) to gather disk attributes. -# ## Some devices (e.g. disks in RAID arrays) may have access limitations that require sequential reading of -# ## SMART data - one individual array drive at the time. In such case please set this configuration option -# ## to "sequential" to get readings for all drives. -# ## valid options: concurrent, sequential -# # read_method = "concurrent" +# ## Optionally call smartctl and nvme-cli with a specific concurrency policy. +# ## By default, smartctl and nvme-cli are called in separate threads (goroutines) to gather disk attributes. +# ## Some devices (e.g. disks in RAID arrays) may have access limitations that require sequential reading of +# ## SMART data - one individual array drive at the time. In such case please set this configuration option +# ## to "sequential" to get readings for all drives. +# ## valid options: concurrent, sequential +# # read_method = "concurrent" # # Retrieves SNMP values from remote agents @@ -6678,12 +6793,12 @@ # ## To add paths when translating with netsnmp, use the MIBDIRS environment variable # # path = ["/usr/share/snmp/mibs"] # -# ## Agent host tag; the tag used to reference the source host -# # agent_host_tag = "agent_host" -# # ## SNMP community string. # # community = "public" # +# ## Agent host tag +# # agent_host_tag = "agent_host" +# # ## Number of retries to attempt. # # retries = 3 # @@ -6702,7 +6817,9 @@ # # sec_level = "authNoPriv" # ## Context Name. # # context_name = "" -# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". +# ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C", or "". +# ### Protocols "AES192", "AES192", "AES256", and "AES256C" require the underlying net-snmp tools +# ### to be compiled with --enable-blumenthal-aes (http://www.net-snmp.org/docs/INSTALL.html) # # priv_protocol = "" # ## Privacy password used for encrypted messages. # # priv_password = "" @@ -6710,11 +6827,29 @@ # ## Add fields and tables defining the variables you wish to collect. This # ## example collects the system uptime and interface variables. Reference the # ## full plugin documentation for configuration details. +# [[inputs.snmp.field]] +# oid = "RFC1213-MIB::sysUpTime.0" +# name = "uptime" +# +# [[inputs.snmp.field]] +# oid = "RFC1213-MIB::sysName.0" +# name = "source" +# is_tag = true +# +# [[inputs.snmp.table]] +# oid = "IF-MIB::ifTable" +# name = "interface" +# inherit_tags = ["source"] +# +# [[inputs.snmp.table.field]] +# oid = "IF-MIB::ifDescr" +# name = "ifDescr" +# is_tag = true +# ## DEPRECATED: The 'snmp_legacy' plugin is deprecated in version 1.0.0, use 'inputs.snmp' instead. # # DEPRECATED! PLEASE USE inputs.snmp INSTEAD. # [[inputs.snmp_legacy]] -# ## DEPRECATED: The 'snmp_legacy' plugin is deprecated in version 1.0.0, use 'inputs.snmp' instead. 
# ## Use 'oids.txt' file to translate oids to names # ## To generate 'oids.txt' you need to run: # ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt @@ -6736,7 +6871,6 @@ # collect = ["mybulk", "sysservices", "sysdescr"] # # Simple list of OIDs to get, in addition to "collect" # get_oids = [] -# # [[inputs.snmp.host]] # address = "192.168.2.3:161" # community = "public" @@ -6748,31 +6882,25 @@ # "ifNumber", # ".1.3.6.1.2.1.1.3.0", # ] -# # [[inputs.snmp.get]] # name = "ifnumber" # oid = "ifNumber" -# # [[inputs.snmp.get]] # name = "interface_speed" # oid = "ifSpeed" # instance = "0" -# # [[inputs.snmp.get]] # name = "sysuptime" # oid = ".1.3.6.1.2.1.1.3.0" # unit = "second" -# # [[inputs.snmp.bulk]] # name = "mybulk" # max_repetition = 127 # oid = ".1.3.6.1.2.1.1" -# # [[inputs.snmp.bulk]] # name = "ifoutoctets" # max_repetition = 127 # oid = "ifOutOctets" -# # [[inputs.snmp.host]] # address = "192.168.2.13:161" # #address = "127.0.0.1:161" @@ -6785,19 +6913,16 @@ # [[inputs.snmp.host.table]] # name = "iftable3" # include_instances = ["enp5s0", "eth1"] -# # # SNMP TABLEs # # table without mapping neither subtables # [[inputs.snmp.table]] # name = "iftable1" # oid = ".1.3.6.1.2.1.31.1.1.1" -# # # table without mapping but with subtables # [[inputs.snmp.table]] # name = "iftable2" # oid = ".1.3.6.1.2.1.31.1.1.1" # sub_tables = [".1.3.6.1.2.1.2.2.1.13"] -# # # table with mapping but without subtables # [[inputs.snmp.table]] # name = "iftable3" @@ -6805,7 +6930,6 @@ # # if empty. get all instances # mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" # # if empty, get all subtables -# # # table with both mapping and subtables # [[inputs.snmp.table]] # name = "iftable4" @@ -6817,11 +6941,11 @@ # sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] -# # Gather indicators from established connections, using iproute2's `ss` command. +# # Gather indicators from established connections, using iproute2's ss command. # [[inputs.socketstat]] # ## ss can display information about tcp, udp, raw, unix, packet, dccp and sctp sockets -# ## List of protocol types to collect -# # protocols = [ "tcp", "udp" ] +# ## Specify here the types you want to gather +# socket_types = [ "tcp", "udp" ] # ## The default timeout of 1s for ss execution can be overridden here: # # timeout = "1s" @@ -6830,10 +6954,10 @@ # [[inputs.solr]] # ## specify a list of one or more Solr servers # servers = ["http://localhost:8983"] -# +# ## # ## specify a list of one or more Solr cores (default - all) # # cores = ["main"] -# +# ## # ## Optional HTTP Basic Auth Credentials # # username = "username" # # password = "pa$$word" @@ -6852,8 +6976,8 @@ # ## Exclude timeseries that start with the given metric type. # # metric_type_prefix_exclude = [] # -# ## Many metrics are updated once per minute; it is recommended to override -# ## the agent level interval with a value of 1m or greater. +# ## Most metrics are updated no more than once per minute; it is recommended +# ## to override the agent level interval with a value of 1m or greater. # interval = "1m" # # ## Maximum number of API calls to make per second. 
The quota for accounts @@ -6889,9 +7013,9 @@ # ## For a list of aligner strings see: # ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner # # distribution_aggregation_aligners = [ -# # "ALIGN_PERCENTILE_99", -# # "ALIGN_PERCENTILE_95", -# # "ALIGN_PERCENTILE_50", +# # "ALIGN_PERCENTILE_99", +# # "ALIGN_PERCENTILE_95", +# # "ALIGN_PERCENTILE_50", # # ] # # ## Filters can be added to reduce the number of time series matched. All @@ -6915,12 +7039,9 @@ # ## Metric labels refine the time series selection with the following expression: # ## metric.labels. = # # [[inputs.stackdriver.filter.metric_labels]] -# # key = "device_name" -# # value = 'one_of("sda", "sdb")' +# # key = "device_name" +# # value = 'one_of("sda", "sdb")' -# # Get slab statistics from procfs -# [[inputs.slab]] -# # no configuration # # Get synproxy counter statistics from procfs # [[inputs.synproxy]] @@ -6953,12 +7074,10 @@ # ## If Group is true, corresponding metrics are grouped to a single measurement. # # group = true # -# ## Options for the sadf command. The values on the left represent the sadf -# ## options and the values on the right their description (which are used for -# ## grouping and prefixing metrics). +# ## Options for the sadf command. The values on the left represent the sadf options and +# ## the values on the right their description (wich are used for grouping and prefixing metrics). # ## -# ## Run 'sar -h' or 'man sar' to find out the supported options for your -# ## sysstat version. +# ## Run 'sar -h' or 'man sar' to find out the supported options for your sysstat version. # [inputs.sysstat.options] # -C = "cpu" # -B = "paging" @@ -6974,12 +7093,11 @@ # -v = "inode" # -W = "swap" # -w = "task" -# # -H = "hugepages" # only available for newer linux distributions -# # "-I ALL" = "interrupts" # requires INT activity +# # -H = "hugepages" # only available for newer linux distributions +# # "-I ALL" = "interrupts" # requires INT activity # -# ## Device tags can be used to add additional tags for devices. -# ## For example the configuration below adds a tag vg with value rootvg for -# ## all metrics with sda devices. +# ## Device tags can be used to add additional tags for devices. For example the configuration below +# ## adds a tag vg with value rootvg for all metrics with sda devices. # # [[inputs.sysstat.device_tags.sda]] # # vg = "rootvg" @@ -7010,6 +7128,8 @@ # username = "serverqueryuser" # ## Password for ServerQuery # password = "secret" +# ## Nickname of the ServerQuery client +# nickname = "telegraf" # ## Array of virtual servers # # virtual_servers = [1] @@ -7021,16 +7141,16 @@ # # Read Tengine's basic status information (ngx_http_reqstat_module) # [[inputs.tengine]] -# # An array of Tengine reqstat module URI to gather stats. +# ## An array of Tengine reqstat module URI to gather stats. # urls = ["http://127.0.0.1/us"] # -# # HTTP response timeout (default: 5s) +# ## HTTP response timeout (default: 5s) # # response_timeout = "5s" # # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.cer" -# # tls_key = "/etc/telegraf/key.key" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false @@ -7097,7 +7217,7 @@ # # Read uWSGI metrics. # [[inputs.uwsgi]] -# ## List with urls of uWSGI Stats servers. URL must match pattern: +# ## List with urls of uWSGI Stats servers. 
Url must match pattern: # ## scheme://address[:port] # ## # ## For example: @@ -7119,19 +7239,20 @@ # ## Additional custom arguments for the varnishstat command # # binary_args = ["-f", "MAIN.*"] # -# ## The default location of the varnishadm binary can be overriden with: +# ## The default location of the varnishadm binary can be overridden with: # adm_binary = "/usr/bin/varnishadm" # # ## Custom arguments for the varnishadm command # # adm_binary_args = [""] # -# ## Metric version defaults to metric_version=1, use metric_version=2 for removal of nonactive vcls. +# ## Metric version defaults to metric_version=1, use metric_version=2 for removal of nonactive vcls +# ## Varnish 6.0.2 and newer is required for metric_version=2. # metric_version = 1 # # ## Additional regexps to override builtin conversion of varnish metrics into telegraf metrics. -# ## Regexp group "_vcl" is used for extracting the VCL name. Metrics that contains nonactive VCL's are skipped. -# ## Regexp group "_field" overrides field name. Other named regexp groups are used as tags. -# # regexps = ['XCNT\.(?P<_vcl>[\w\-]*)\.(?P[\w\-.+]*)\.(?P<_field>[\w\-.+]*)\.val'] +# ## Regexp group "_vcl" is used for extracting the VCL name. Metrics that contain nonactive VCL's are skipped. +# ## Regexp group "_field" overrides the field name. Other named regexp groups are used as tags. +# # regexps = ['^XCNT\.(?P<_vcl>[\w\-]*)(\.)*(?P[\w\-.+]*)\.(?P<_field>[\w\-.+]*)\.val'] # # ## By default, telegraf gather stats for 3 metric points. # ## Setting stats will override the defaults shown below. @@ -7184,29 +7305,30 @@ # # Reads metrics from a SSL certificate # [[inputs.x509_cert]] -# ## List certificate sources +# ## List certificate sources, support wildcard expands for files # ## Prefix your entry with 'file://' if you intend to use relative paths # sources = ["tcp://example.org:443", "https://influxdata.com:443", -# "udp://127.0.0.1:4433", "/etc/ssl/certs/ssl-cert-snakeoil.pem", +# "smtp://mail.localhost:25", "udp://127.0.0.1:4433", +# "/etc/ssl/certs/ssl-cert-snakeoil.pem", # "/etc/mycerts/*.mydomain.org.pem", "file:///path/to/*.pem"] # # ## Timeout for SSL connection # # timeout = "5s" # -# ## Pass a different name into the TLS request (Server Name Indication) +# ## Pass a different name into the TLS request (Server Name Indication). +# ## This is synonymous with tls_server_name, and only one of the two +# ## options may be specified at one time. 
# ## example: server_name = "myhost.example.org" -# # server_name = "" -# -# ## Don't include root or intermediate certificates in output -# # exclude_root_certs = false +# # server_name = "myhost.example.org" # # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" +# # tls_server_name = "myhost.example.org" -# # Gathers Metrics From a Dell EMC XtremIO Storage Array's V3 API +# # Gathers Metrics From a Dell EMC XtremIO Storage Array's V3 API # [[inputs.xtremio]] # ## XtremIO User Interface Endpoint # url = "https://xtremio.example.com/" # required @@ -7233,14 +7355,19 @@ # # kstatPath = "/proc/spl/kstat/zfs" # # ## By default, telegraf gather all zfs stats -# ## If not specified, then default is: +# ## Override the stats list using the kstatMetrics array: +# ## For FreeBSD, the default is: # # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"] # ## For Linux, the default is: # # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats", -# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"] +# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"] +# # ## By default, don't gather zpool stats # # poolMetrics = false -# ## By default, don't gather zdataset stats +# +# ## By default, don't gather dataset stats +# ## On FreeBSD, if the user has enabled listsnapshots in the pool property, +# ## telegraf may not be able to correctly parse the output. # # datasetMetrics = false @@ -7448,9 +7575,9 @@ # data_format = "influx" +# ## DEPRECATED: The 'cassandra' plugin is deprecated in version 1.7.0, use 'inputs.jolokia2' with the 'cassandra.conf' example configuration instead. # # Read Cassandra metrics through Jolokia # [[inputs.cassandra]] -# ## DEPRECATED: The 'cassandra' plugin is deprecated in version 1.7.0, use 'inputs.jolokia2' with the 'cassandra.conf' example configuration instead. # ## DEPRECATED: The cassandra plugin has been deprecated. Please use the # ## jolokia2 plugin instead. # ## @@ -7480,6 +7607,9 @@ # ## Address and port to host telemetry listener # service_address = ":57000" # +# ## Grpc Maximum Message Size, default is 4MB, increase the size. +# max_msg_size = 4000000 +# # ## Enable TLS; grpc transport only. # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" @@ -7494,10 +7624,20 @@ # ## Define aliases to map telemetry encoding paths to simple measurement names # [inputs.cisco_telemetry_mdt.aliases] # ifstats = "ietf-interfaces:interfaces-state/interface/statistics" -# ##Define Property Xformation, please refer README and https://pubhub.devnetcloud.com/media/dme-docs-9-3-3/docs/appendix/ for Model details. +# ## Define Property Xformation, please refer README and https://pubhub.devnetcloud.com/media/dme-docs-9-3-3/docs/appendix/ for Model details. # [inputs.cisco_telemetry_mdt.dmes] -# ModTs = "ignore" -# CreateTs = "ignore" +# # Global Property Xformation. +# # prop1 = "uint64 to int" +# # prop2 = "uint64 to string" +# # prop3 = "string to uint64" +# # prop4 = "string to int64" +# # prop5 = "string to float64" +# # auto-prop-xfrom = "auto-float-xfrom" #Xform any property which is string, and has float number to type float64 +# # Per Path property xformation, Name is telemetry configuration under sensor-group, path configuration "WORD Distinguished Name" +# # Per Path configuration is better as it avoid property collision issue of types. 
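+# # Each dnpath entry below is a JSON string giving the path "Name" and the
+# # properties ("Key") with the type ("Value") they should be transformed to,
+# # as in the following examples: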
+# # dnpath = '{"Name": "show ip route summary","prop": [{"Key": "routes","Value": "string"}, {"Key": "best-paths","Value": "string"}]}' +# # dnpath2 = '{"Name": "show processes cpu","prop": [{"Key": "kernel_percent","Value": "float"}, {"Key": "idle_percent","Value": "float"}, {"Key": "process","Value": "string"}, {"Key": "user_percent","Value": "float"}, {"Key": "onesec","Value": "float"}]}' +# # dnpath3 = '{"Name": "show processes memory physical","prop": [{"Key": "processname","Value": "string"}]}' # # Read metrics from one or many ClickHouse servers @@ -7599,7 +7739,7 @@ # # max_message_len = 1000000 # # ## Optional. Maximum messages to read from PubSub that have not been written -# ## to an output. Defaults to 1000. +# ## to an output. Defaults to %d. # ## For best throughput set based on the number of metrics within # ## each message and the size of the output's metric_batch_size. # ## @@ -7638,7 +7778,8 @@ # # max_receiver_go_routines = 0 # # ## Optional. If true, Telegraf will attempt to base64 decode the -# ## PubSub message data before parsing +# ## PubSub message data before parsing. Many GCP services that +# ## output JSON to Google PubSub base64-encode the JSON payload. # # base64_data = false @@ -7723,7 +7864,7 @@ # # # ## The maximum amount of file paths to queue up for processing at once, before waiting until files are processed to find more files. # ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary. -# # file_queue_size = 100000 +# # file_queue_size = 100000 # # # ## Name a tag containing the name of the file the data was parsed from. Leave empty # ## to disable. Cautious when file name variation is high, this can increase the cardinality @@ -7731,11 +7872,14 @@ # ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality # # file_tag = "" # # +# ## Specify if the file can be read completely at once or if it needs to be read line by line (default). +# ## Possible values: "line-by-line", "at-once" +# # parse_method = "line-by-line" +# # # ## The dataformat to be read from the files. # ## Each data format has its own unique set of configuration options, read # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# ## NOTE: We currently only support parsing newline-delimited JSON. See the format here: https://github.com/ndjson/ndjson-spec # data_format = "influx" @@ -7867,15 +8011,22 @@ # # Run executable as long-running input plugin # [[inputs.execd]] -# ## Program to run as daemon +# ## One program to run as daemon. +# ## NOTE: process and each argument should each be their own string # command = ["telegraf-smartctl", "-d", "/dev/sda"] # +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. "KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] +# # ## Define how the process is signaled on each collection interval. # ## Valid values are: -# ## "none" : Do not signal anything. -# ## The process must output metrics by itself. -# ## "STDIN" : Send a newline on STDIN. -# ## "SIGHUP" : Send a HUP signal. Not available on Windows. +# ## "none" : Do not signal anything. (Recommended for service inputs) +# ## The process must output metrics by itself. +# ## "STDIN" : Send a newline on STDIN. (Recommended for gather inputs) +# ## "SIGHUP" : Send a HUP signal. Not available on Windows. 
(not recommended) # ## "SIGUSR1" : Send a USR1 signal. Not available on Windows. # ## "SIGUSR2" : Send a USR2 signal. Not available on Windows. # signal = "none" @@ -7892,60 +8043,60 @@ # # gNMI telemetry input plugin # [[inputs.gnmi]] -# ## Address and port of the gNMI GRPC server -# addresses = ["10.49.234.114:57777"] +# ## Address and port of the gNMI GRPC server +# addresses = ["10.49.234.114:57777"] # -# ## define credentials -# username = "cisco" -# password = "cisco" +# ## define credentials +# username = "cisco" +# password = "cisco" # -# ## gNMI encoding requested (one of: "proto", "json", "json_ietf", "bytes") -# # encoding = "proto" +# ## gNMI encoding requested (one of: "proto", "json", "json_ietf", "bytes") +# # encoding = "proto" # -# ## redial in case of failures after -# redial = "10s" +# ## redial in case of failures after +# redial = "10s" # -# ## enable client-side TLS and define CA to authenticate the device -# # enable_tls = true -# # tls_ca = "/etc/telegraf/ca.pem" -# # insecure_skip_verify = true +# ## enable client-side TLS and define CA to authenticate the device +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # insecure_skip_verify = true # -# ## define client-side TLS certificate & key to authenticate to the device -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" +# ## define client-side TLS certificate & key to authenticate to the device +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" # -# ## gNMI subscription prefix (optional, can usually be left empty) -# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths -# # origin = "" -# # prefix = "" -# # target = "" -# -# ## Define additional aliases to map telemetry encoding paths to simple measurement names -# #[inputs.gnmi.aliases] -# # ifcounters = "openconfig:/interfaces/interface/state/counters" -# -# [[inputs.gnmi.subscription]] -# ## Name of the measurement that will be emitted -# name = "ifcounters" -# -# ## Origin and path of the subscription +# ## gNMI subscription prefix (optional, can usually be left empty) # ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths -# ## -# ## origin usually refers to a (YANG) data model implemented by the device -# ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath) -# ## YANG models can be found e.g. 
here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr -# origin = "openconfig-interfaces" -# path = "/interfaces/interface/state/counters" +# # origin = "" +# # prefix = "" +# # target = "" # -# # Subscription mode (one of: "target_defined", "sample", "on_change") and interval -# subscription_mode = "sample" -# sample_interval = "10s" +# ## Define additional aliases to map telemetry encoding paths to simple measurement names +# # [inputs.gnmi.aliases] +# # ifcounters = "openconfig:/interfaces/interface/state/counters" # -# ## Suppress redundant transmissions when measured values are unchanged -# # suppress_redundant = false +# [[inputs.gnmi.subscription]] +# ## Name of the measurement that will be emitted +# name = "ifcounters" # -# ## If suppression is enabled, send updates at least every X seconds anyway -# # heartbeat_interval = "60s" +# ## Origin and path of the subscription +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# ## +# ## origin usually refers to a (YANG) data model implemented by the device +# ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath) +# ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr +# origin = "openconfig-interfaces" +# path = "/interfaces/interface/state/counters" +# +# # Subscription mode (one of: "target_defined", "sample", "on_change") and interval +# subscription_mode = "sample" +# sample_interval = "10s" +# +# ## Suppress redundant transmissions when measured values are unchanged +# # suppress_redundant = false +# +# ## If suppression is enabled, send updates at least every X seconds anyway +# # heartbeat_interval = "60s" # # #[[inputs.gnmi.subscription]] # # name = "descr" @@ -7960,10 +8111,10 @@ # # tag_only = true +# ## DEPRECATED: The 'http_listener' plugin is deprecated in version 1.9.0, has been renamed to 'influxdb_listener', use 'inputs.influxdb_listener' or 'inputs.http_listener_v2' instead. # # Accept metrics over InfluxDB 1.x HTTP API -# [[inputs.http_listener]] -# ## DEPRECATED: The 'http_listener' plugin is deprecated in version 1.9.0, has been renamed to 'influxdb_listener', use 'inputs.influxdb_listener' or 'inputs.http_listener_v2' instead. -# ## Address and port to host InfluxDB listener on +# [[inputs.influxdb_listener]] +# ## Address and port to host HTTP listener on # service_address = ":8186" # # ## maximum duration before timing out read of the request @@ -7973,17 +8124,11 @@ # # ## Maximum allowed HTTP request body size in bytes. # ## 0 means to use the default of 32MiB. -# max_body_size = "32MiB" +# max_body_size = 0 # -# ## Optional tag name used to store the database. -# ## If the write has a database in the query string then it will be kept in this tag name. -# ## This tag can be used in downstream outputs. -# ## The default value of nothing means it will be off and the database will not be recorded. -# # database_tag = "" -# -# ## If set the retention policy specified in the write query will be added as -# ## the value of this tag name. -# # retention_policy_tag = "" +# ## Maximum line size allowed to be sent in bytes. 
+# ## deprecated in 1.14; parser now handles lines of unlimited length and option is ignored +# # max_line_size = 0 # # ## Set one or more allowed client CA certificate file names to # ## enable mutually authenticated TLS connections @@ -7993,6 +8138,18 @@ # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" # +# ## Optional tag name used to store the database name. +# ## If the write has a database in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# ## If you have a tag that is the same as the one specified below, and supply a database, +# ## the tag will be overwritten with the database supplied. +# # database_tag = "" +# +# ## If set the retention policy specified in the write query will be added as +# ## the value of this tag name. +# # retention_policy_tag = "" +# # ## Optional username and password to accept for HTTP basic authentication. # ## You probably want to make sure you have TLS configured above for this. # # basic_username = "foobar" @@ -8058,7 +8215,7 @@ # # Accept metrics over InfluxDB 1.x HTTP API # [[inputs.influxdb_listener]] -# ## Address and port to host InfluxDB listener on +# ## Address and port to host HTTP listener on # service_address = ":8186" # # ## maximum duration before timing out read of the request @@ -8068,17 +8225,11 @@ # # ## Maximum allowed HTTP request body size in bytes. # ## 0 means to use the default of 32MiB. -# max_body_size = "32MiB" +# max_body_size = 0 # -# ## Optional tag name used to store the database. -# ## If the write has a database in the query string then it will be kept in this tag name. -# ## This tag can be used in downstream outputs. -# ## The default value of nothing means it will be off and the database will not be recorded. -# # database_tag = "" -# -# ## If set the retention policy specified in the write query will be added as -# ## the value of this tag name. -# # retention_policy_tag = "" +# ## Maximum line size allowed to be sent in bytes. +# ## deprecated in 1.14; parser now handles lines of unlimited length and option is ignored +# # max_line_size = 0 # # ## Set one or more allowed client CA certificate file names to # ## enable mutually authenticated TLS connections @@ -8088,6 +8239,18 @@ # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" # +# ## Optional tag name used to store the database name. +# ## If the write has a database in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# ## If you have a tag that is the same as the one specified below, and supply a database, +# ## the tag will be overwritten with the database supplied. +# # database_tag = "" +# +# ## If set the retention policy specified in the write query will be added as +# ## the value of this tag name. +# # retention_policy_tag = "" +# # ## Optional username and password to accept for HTTP basic authentication. # ## You probably want to make sure you have TLS configured above for this. # # basic_username = "foobar" @@ -8182,37 +8345,37 @@ # # events_tag = "" -# # Intel Resource Director Technology plugin +# # Read Intel RDT metrics # [[inputs.intel_rdt]] -# ## Optionally set sampling interval to Nx100ms. -# ## This value is propagated to pqos tool. Interval format is defined by pqos itself. 
-# ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. -# # sampling_interval = "10" -# -# ## Optionally specify the path to pqos executable. -# ## If not provided, auto discovery will be performed. -# # pqos_path = "/usr/local/bin/pqos" +# ## Optionally set sampling interval to Nx100ms. +# ## This value is propagated to pqos tool. Interval format is defined by pqos itself. +# ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. +# # sampling_interval = "10" # -# ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. -# ## If not provided, default value is false. -# # shortened_metrics = false -# -# ## Specify the list of groups of CPU core(s) to be provided as pqos input. -# ## Mandatory if processes aren't set and forbidden if processes are specified. -# ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] -# # cores = ["0-3"] -# -# ## Specify the list of processes for which Metrics will be collected. -# ## Mandatory if cores aren't set and forbidden if cores are specified. -# ## e.g. ["qemu", "pmd"] -# # processes = ["process"] +# ## Optionally specify the path to pqos executable. +# ## If not provided, auto discovery will be performed. +# # pqos_path = "/usr/local/bin/pqos" # -# ## Specify if the pqos process should be called with sudo. -# ## Mandatory if the telegraf process does not run as root. -# # use_sudo = false +# ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. +# ## If not provided, default value is false. +# # shortened_metrics = false +# +# ## Specify the list of groups of CPU core(s) to be provided as pqos input. +# ## Mandatory if processes aren't set and forbidden if processes are specified. +# ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] +# # cores = ["0-3"] +# +# ## Specify the list of processes for which Metrics will be collected. +# ## Mandatory if cores aren't set and forbidden if cores are specified. +# ## e.g. ["qemu", "pmd"] +# # processes = ["process"] +# +# ## Specify if the pqos process should be called with sudo. +# ## Mandatory if the telegraf process does not run as root. +# # use_sudo = false -# # Read JTI OpenConfig Telemetry from listed sensors +# # Subscribe and receive OpenConfig Telemetry data using JTI # [[inputs.jti_openconfig_telemetry]] # ## List of device addresses to collect telemetry from # servers = ["localhost:1883"] @@ -8327,8 +8490,7 @@ # ## 2 : Snappy # ## 3 : LZ4 # ## 4 : ZSTD -# # compression_codec = 0 -# +# # compression_codec = 0 # ## Initial offset position; one of "oldest" or "newest". # # offset = "oldest" # @@ -8365,9 +8527,10 @@ # data_format = "influx" +# ## DEPRECATED: The 'kafka_consumer_legacy' plugin is deprecated in version 1.4.0, use 'inputs.kafka_consumer' instead, NOTE: 'kafka_consumer' only supports Kafka v0.8+. +# ## DEPRECATED: The 'kafka_consumer_legacy' plugin is deprecated in version 1.4.0, use 'inputs.kafka_consumer' instead, NOTE: 'kafka_consumer' only supports Kafka v0.8+. # # Read metrics from Kafka topic(s) # [[inputs.kafka_consumer_legacy]] -# ## DEPRECATED: The 'kafka_consumer_legacy' plugin is deprecated in version 1.4.0, use 'inputs.kafka_consumer' instead, NOTE: 'kafka_consumer' only supports Kafka v0.8+. 
# ## topic(s) to consume # topics = ["telegraf"] # @@ -8457,9 +8620,9 @@ # ## Optional # ## Configuration for a dynamodb checkpoint # [inputs.kinesis_consumer.checkpoint_dynamodb] -# ## unique name for this consumer -# app_name = "default" -# table_name = "default" +# ## unique name for this consumer +# app_name = "default" +# table_name = "default" # # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. @@ -8490,13 +8653,15 @@ # [[inputs.lanz]] # ## URL to Arista LANZ endpoint # servers = [ -# "tcp://127.0.0.1:50001" +# "tcp://switch1.int.example.com:50001", +# "tcp://switch2.int.example.com:50001", # ] -# # Stream and parse log file(s). +# ## DEPRECATED: The 'logparser' plugin is deprecated in version 1.15.0, use 'inputs.tail' with 'grok' data format instead. +# ## DEPRECATED: The 'logparser' plugin is deprecated in version 1.15.0, use 'inputs.tail' with 'grok' data format instead. +# # Read metrics off Arista LANZ, via socket # [[inputs.logparser]] -# ## DEPRECATED: The 'logparser' plugin is deprecated in version 1.15.0, use 'inputs.tail' with 'grok' data format instead. # ## Log files to parse. # ## These accept standard unix glob matching rules, but with the addition of # ## ** as a "super asterisk". ie: @@ -8544,8 +8709,8 @@ # ## 3. UTC -- or blank/unspecified, will return timestamp in UTC # # timezone = "Canada/Eastern" # -# ## When set to "disable", timestamp will not incremented if there is a -# ## duplicate. +# ## When set to "disable", timestamp will not incremented if there is a +# ## duplicate. # # unique_timestamp = "auto" @@ -8557,16 +8722,18 @@ # ## servers = ["ssl://localhost:1883"] # ## servers = ["ws://localhost:1883"] # servers = ["tcp://127.0.0.1:1883"] +# # ## Topics that will be subscribed to. # topics = [ # "telegraf/host01/cpu", # "telegraf/+/mem", # "sensors/#", # ] -# # topic_fields = "_/_/_/temperature" +# # ## The message topic will be stored in a tag specified by this value. If set # ## to the empty string no topic tag will be created. # # topic_tag = "topic" +# # ## QoS policy for messages # ## 0 = at most once # ## 1 = at least once @@ -8575,8 +8742,10 @@ # ## When using a QoS of 1 or 2, you should enable persistent_session to allow # ## resuming unacknowledged messages. # # qos = 0 +# # ## Connection timeout for initial connection in seconds # # connection_timeout = "30s" +# # ## Maximum messages to read from the broker that have not been written by an # ## output. For best throughput set based on the number of metrics within # ## each message and the size of the output's metric_batch_size. @@ -8586,37 +8755,44 @@ # ## full batch is collected and the write is triggered immediately without # ## waiting until the next flush_interval. # # max_undelivered_messages = 1000 +# # ## Persistent session disables clearing of the client session on connection. # ## In order for this option to work you must also set client_id to identify # ## the client. To receive messages that arrived while the client is offline, # ## also set the qos option to 1 or 2 and don't forget to also set the QoS when # ## publishing. # # persistent_session = false +# # ## If unset, a random client ID will be generated. # # client_id = "" +# # ## Username and password to connect MQTT server. 
# # username = "telegraf" # # password = "metricsmetricsmetricsmetrics" +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false +# # ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # data_format = "influx" +# # ## Enable extracting tag values from MQTT topics # ## _ denotes an ignored entry in the topic path -# ## [[inputs.mqtt_consumer.topic_parsing]] -# ## topic = "" -# ## measurement = "" -# ## tags = "" -# ## fields = "" -# ## [inputs.mqtt_consumer.topic_parsing.types] -# ## +# # [[inputs.mqtt_consumer.topic_parsing]] +# # topic = "" +# # measurement = "" +# # tags = "" +# # fields = "" +# ## Value supported is int, float, unit +# # [[inputs.mqtt_consumer.topic.types]] +# # key = type # # Read metrics from NATS subject(s) @@ -8669,8 +8845,11 @@ # data_format = "influx" -# # Read NSQ topic for metrics. +# # Read metrics from NSQD topic(s) # [[inputs.nsq_consumer]] +# ## Server option still works but is deprecated, we just prepend it to the nsqd array. +# # server = "localhost:4150" +# # ## An array representing the NSQD TCP HTTP Endpoints # nsqd = ["localhost:4150"] # @@ -8726,10 +8905,10 @@ # # Read metrics from one or many pgbouncer servers # [[inputs.pgbouncer]] # ## specify address via a url matching: -# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## postgres://[pqgotest[:password]]@host:port[/dbname]\ # ## ?sslmode=[disable|verify-ca|verify-full] # ## or a simple string: -# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production +# ## host=localhost port=5432 user=pqgotest password=... sslmode=... dbname=app_production # ## # ## All connection parameters are optional. # ## @@ -8739,8 +8918,7 @@ # # Read metrics from one or many postgresql servers # [[inputs.postgresql]] # ## specify address via a url matching: -# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ -# ## ?sslmode=[disable|verify-ca|verify-full] +# ## postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] # ## or a simple string: # ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production # ## @@ -8760,7 +8938,7 @@ # ## connection configuration. # ## maxlifetime - specify the maximum lifetime of a connection. # ## default is forever (0s) -# max_lifetime = "0s" +# # max_lifetime = "0s" # # ## A list of databases to explicitly ignore. If not specified, metrics for all # ## databases are gathered. Do NOT use with the 'databases' option. @@ -8773,89 +8951,63 @@ # ## Whether to use prepared statements when connecting to the database. # ## This should be set to false when connecting through a PgBouncer instance # ## with pool_mode set to transaction. -# # prepared_statements = true +# prepared_statements = true # # Read metrics from one or many postgresql servers # [[inputs.postgresql_extensible]] -# ## specify address via a url matching: -# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ -# ## ?sslmode=[disable|verify-ca|verify-full] -# ## or a simple string: -# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production +# # specify address via a url matching: +# # postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=... 
+# # or a simple string: +# # host=localhost port=5432 user=pqgotest password=... sslmode=... dbname=app_production # # -# ## All connection parameters are optional. # -# ## Without the dbname parameter, the driver will default to a database -# ## with the same name as the user. This dbname is just for instantiating a -# ## connection with the server and doesn't restrict the databases we are trying -# ## to grab metrics for. +# # All connection parameters are optional. +# # Without the dbname parameter, the driver will default to a database +# # with the same name as the user. This dbname is just for instantiating a +# # connection with the server and doesn't restrict the databases we are trying +# # to grab metrics for. # # # address = "host=localhost user=postgres sslmode=disable" # -# ## connection configuration. -# ## maxlifetime - specify the maximum lifetime of a connection. -# ## default is forever (0s) -# max_lifetime = "0s" +# ## A list of databases to pull metrics about. +# ## deprecated in 1.22.3; use the sqlquery option to specify database to use +# # databases = ["app_production", "testing"] # # ## Whether to use prepared statements when connecting to the database. # ## This should be set to false when connecting through a PgBouncer instance # ## with pool_mode set to transaction. -# # prepared_statements = true +# prepared_statements = true # -# ## A list of databases to pull metrics about. If not specified, metrics for all -# ## databases are gathered. -# ## databases = ["app_production", "testing"] +# # Define the toml config where the sql queries are stored +# # The script option can be used to specify the .sql file path. +# # If script and sqlquery options specified at same time, sqlquery will be used # # -# ## A custom name for the database that will be used as the "server" tag in the -# ## measurement output. If not specified, a default one generated from -# ## the connection address is used. -# # outputaddress = "db01" +# # the tagvalue field is used to define custom tags (separated by comas). +# # the query is expected to return columns which match the names of the +# # defined tags. The values in these columns must be of a string-type, +# # a number-type or a blob-type. # # -# ## Define the toml config where the sql queries are stored -# ## New queries can be added, if the withdbname is set to true and there is no -# ## databases defined in the 'databases field', the sql query is ended by a -# ## 'is not null' in order to make the query succeed. -# ## Example : -# ## The sqlquery : "SELECT * FROM pg_stat_database where datname" become -# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')" -# ## because the databases variable was set to ['postgres', 'pgbench' ] and the -# ## withdbname was true. Be careful that if the withdbname is set to false you -# ## don't have to define the where clause (aka with the dbname) the tagvalue -# ## field is used to define custom tags (separated by commas) -# ## The optional "measurement" value can be used to override the default -# ## output measurement name ("postgresql"). -# ## -# ## The script option can be used to specify the .sql file path. -# ## If script and sqlquery options specified at same time, sqlquery will be used -# ## -# ## the tagvalue field is used to define custom tags (separated by comas). -# ## the query is expected to return columns which match the names of the -# ## defined tags. The values in these columns must be of a string-type, -# ## a number-type or a blob-type. 
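
As a sketch of the prepared_statements guidance above, a hypothetical inputs.postgresql block pointed at a PgBouncer instance running in transaction pooling mode would disable prepared statements; the host and database names here are assumptions:

[[inputs.postgresql]]
  ## PgBouncer endpoint with pool_mode = transaction (hypothetical)
  address = "host=pgbouncer.example.com port=6432 user=telegraf sslmode=disable dbname=app_production"
  prepared_statements = false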
-# ## -# ## The timestamp field is used to override the data points timestamp value. By -# ## default, all rows inserted with current time. By setting a timestamp column, -# ## the row will be inserted with that column's value. -# ## -# ## Structure : -# ## [[inputs.postgresql_extensible.query]] -# ## sqlquery string -# ## version string -# ## withdbname boolean -# ## tagvalue string (comma separated) -# ## measurement string -# ## timestamp string +# # The timestamp field is used to override the data points timestamp value. By +# # default, all rows inserted with current time. By setting a timestamp column, +# # the row will be inserted with that column's value. +# # +# # Structure : +# # [[inputs.postgresql_extensible.query]] +# # sqlquery string +# # version string +# # withdbname boolean +# # tagvalue string (coma separated) +# # timestamp string # [[inputs.postgresql_extensible.query]] -# sqlquery="SELECT * FROM pg_stat_database" +# sqlquery="SELECT * FROM pg_stat_database where datname" # version=901 # withdbname=false # tagvalue="" -# measurement="" # [[inputs.postgresql_extensible.query]] -# sqlquery="SELECT * FROM pg_stat_bgwriter" +# script="your_sql-filepath.sql" # version=901 # withdbname=false -# tagvalue="postgresql.stats" +# tagvalue="" # # Read metrics from one or many prometheus clients @@ -8863,13 +9015,9 @@ # ## An array of urls to scrape metrics from. # urls = ["http://localhost:9100/metrics"] # -# ## Metric version controls the mapping from Prometheus metrics into -# ## Telegraf metrics. When using the prometheus_client output, use the same -# ## value in both plugins to ensure metrics are round-tripped without -# ## modification. -# ## -# ## example: metric_version = 1; -# ## metric_version = 2; recommended version +# ## Metric version controls the mapping from Prometheus metrics into Telegraf metrics. +# ## See "Metric Format Configuration" in plugins/inputs/prometheus/README.md for details. +# ## Valid options: 1, 2 # # metric_version = 1 # # ## Url tag name (tag containing scrapped url. optional, default is "url") @@ -8892,16 +9040,20 @@ # ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. # ## - prometheus.io/port: If port is not 9102 use this annotation # # monitor_kubernetes_pods = true +# # ## Get the list of pods to scrape with either the scope of # ## - cluster: the kubernetes watch api (default, no need to specify) # ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP. # # pod_scrape_scope = "cluster" +# # ## Only for node scrape scope: node IP of the node that telegraf is running on. # ## Either this config or the environment variable NODE_IP must be set. # # node_ip = "10.180.1.1" -# ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping. -# ## Default is 60 seconds. -# # pod_scrape_interval = 60 +# +# ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping. +# ## Default is 60 seconds. +# # pod_scrape_interval = 60 +# # ## Restricts Kubernetes monitoring to a single namespace # ## ex: monitor_kubernetes_pods_namespace = "default" # # monitor_kubernetes_pods_namespace = "" @@ -8911,6 +9063,10 @@ # # eg. To scrape pods on a specific node # # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" # +# # cache refresh interval to set the interval for re-sync of pods list. +# # Default is 60 minutes. 
+# # cache_refresh_interval = 60 +# # ## Scrape Services available in Consul Catalog # # [inputs.prometheus.consul] # # enabled = true @@ -8941,6 +9097,7 @@ # # tls_ca = /path/to/cafile # # tls_cert = /path/to/certfile # # tls_key = /path/to/keyfile +# # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false @@ -8952,33 +9109,67 @@ # # db_path = "" -# # Riemann protobuff listener. -# [[inputs.riemann_listener]] -# ## URL to listen on. +# # Read metrics from one or many redis servers +# [[inputs.redis]] +# ## specify servers via a url matching: +# ## [protocol://][:password]@address[:port] +# ## e.g. +# ## tcp://localhost:6379 +# ## tcp://:password@192.168.99.100 +# ## unix:///var/run/redis.sock +# ## +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 6379 is used +# servers = ["tcp://localhost:6379"] +# +# ## Optional. Specify redis commands to retrieve values +# # [[inputs.redis.commands]] +# # # The command to run where each argument is a separate element +# # command = ["get", "sample-key"] +# # # The field to store the result in +# # field = "sample-key-value" +# # # The type of the result +# # # Can be "string", "integer", or "float" +# # type = "string" +# +# ## specify server password +# # password = "s#cr@t%" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true + + +# # Riemann protobuff listener +# [[inputs.rimann_listener]] +# ## URL to listen on # ## Default is "tcp://:5555" -# # service_address = "tcp://:8094" -# # service_address = "tcp://127.0.0.1:http" -# # service_address = "tcp4://:8094" -# # service_address = "tcp6://:8094" -# # service_address = "tcp6://[2001:db8::1]:8094" +# # service_address = "tcp://:8094" +# # service_address = "tcp://127.0.0.1:http" +# # service_address = "tcp4://:8094" +# # service_address = "tcp6://:8094" +# # service_address = "tcp6://[2001:db8::1]:8094" # # ## Maximum number of concurrent connections. # ## 0 (default) is unlimited. -# # max_connections = 1024 +# # max_connections = 1024 # ## Read timeout. # ## 0 (default) is unlimited. -# # read_timeout = "30s" +# # read_timeout = "30s" # ## Optional TLS configuration. -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" # ## Enables client authentication if set. -# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] # ## Maximum socket buffer size (in bytes when no unit specified). -# # read_buffer_size = "64KiB" +# # read_buffer_size = "64KiB" # ## Period between keep alive probes. # ## 0 disables keep alive probes. # ## Defaults to the OS configuration. -# # keep_alive_period = "5m" +# # keep_alive_period = "5m" # # SFlow V5 Protocol Listener @@ -9006,9 +9197,14 @@ # # service_address = "udp://:162" # ## # ## Path to mib files +# ## Used by the gosmi translator. +# ## To add paths when translating with netsnmp, use the MIBDIRS environment variable # # path = ["/usr/share/snmp/mibs"] # ## -# ## Snmp version, defaults to 2c +# ## Deprecated in 1.20.0; no longer running snmptranslate +# ## Timeout running snmptranslate command +# # timeout = "5s" +# ## Snmp version # # version = "2c" # ## SNMPv3 authentication and encryption options. 
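
To illustrate the translator note above: the path option is only read by the gosmi translator, while the netsnmp translator takes MIB locations from the MIBDIRS environment variable. A minimal sketch, assuming the stock MIB directory:

[[inputs.snmp_trap]]
  service_address = "udp://:162"
  ## read by the gosmi translator; when using netsnmp, export MIBDIRS instead
  path = ["/usr/share/snmp/mibs"]
  version = "2c"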
# ## @@ -9148,15 +9344,15 @@ # # tag_columns_include = [] # # tag_columns_exclude = [] # -# ## Column names containing fields (explicit types) +# ## Column names containing fields (explicit types) # ## Convert the given columns to the corresponding type. Explicit type conversions take precedence over -# ## the automatic (driver-based) conversion below. -# ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined. +# ## the automatic (driver-based) conversion below. +# ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined. # # field_columns_float = [] # # field_columns_int = [] -# # field_columns_uint = [] -# # field_columns_bool = [] -# # field_columns_string = [] +# # field_columns_uint = [] +# # field_columns_bool = [] +# # field_columns_string = [] # # ## Column names containing fields (automatic types) # ## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty @@ -9168,53 +9364,148 @@ # # Read metrics from Microsoft SQL Server # [[inputs.sqlserver]] -# ## Specify instances to monitor with a list of connection strings. -# ## All connection parameters are optional. -# ## By default, the host is localhost, listening on default port, TCP 1433. -# ## for Windows, the user is the currently running AD user (SSO). -# ## See https://github.com/denisenkom/go-mssqldb for detailed connection -# ## parameters, in particular, tls connections can be created like so: -# ## "encrypt=true;certificate=;hostNameInCertificate=" -# servers = [ -# "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", -# ] +# ## Specify instances to monitor with a list of connection strings. +# ## All connection parameters are optional. +# ## By default, the host is localhost, listening on default port, TCP 1433. +# ## for Windows, the user is the currently running AD user (SSO). +# ## See https://github.com/denisenkom/go-mssqldb for detailed connection +# ## parameters, in particular, tls connections can be created like so: +# ## "encrypt=true;certificate=;hostNameInCertificate=" +# servers = [ +# "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", +# ] # -# ## Authentication method -# ## valid methods: "connection_string", "AAD" -# # auth_method = "connection_string" +# ## Authentication method +# ## valid methods: "connection_string", "AAD" +# # auth_method = "connection_string" # -# ## "database_type" enables a specific set of queries depending on the database type. -# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. -# ## Possible values for database_type are - "SQLServer" or "AzureSQLDB" or "AzureSQLManagedInstance" or "AzureSQLPool" +# ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 +# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. +# ## Possible values for database_type are - "SQLServer" or "AzureSQLDB" or "AzureSQLManagedInstance" or "AzureSQLPool" # -# database_type = "SQLServer" +# database_type = "SQLServer" # -# ## A list of queries to include. If not specified, all the below listed queries are used. -# include_query = [] +# ## A list of queries to include. If not specified, all the below listed queries are used. +# include_query = [] # -# ## A list of queries to explicitly ignore. 
-# exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"]
+# ## A list of queries to explicitly ignore.
+# exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"]
#
-# ## Queries enabled by default for database_type = "SQLServer" are -
-# ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks,
-# ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu, SQLServerAvailabilityReplicaStates, SQLServerDatabaseReplicaStates
+# ## Queries enabled by default for database_type = "SQLServer" are -
+# ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks,
+# ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu, SQLServerAvailabilityReplicaStates, SQLServerDatabaseReplicaStates,
+# ## SQLServerRecentBackups
#
-# ## Queries enabled by default for database_type = "AzureSQLDB" are -
-# ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties,
-# ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers
+# ## Queries enabled by default for database_type = "AzureSQLDB" are -
+# ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties,
+# ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers
#
-# ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are -
-# ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats,
-# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers
+# ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are -
+# ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats,
+# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers
#
-# ## Queries enabled by default for database_type = "AzureSQLPool" are -
-# ## AzureSQLPoolResourceStats, AzureSQLPoolResourceGovernance, AzureSQLPoolDatabaseIO, AzureSQLPoolWaitStats,
-# ## AzureSQLPoolMemoryClerks, AzureSQLPoolPerformanceCounters, AzureSQLPoolSchedulers
+# ## Queries enabled by default for database_type = "AzureSQLPool" are -
+# ## AzureSQLPoolResourceStats, AzureSQLPoolResourceGovernance, AzureSQLPoolDatabaseIO, AzureSQLPoolWaitStats,
+# ## AzureSQLPoolMemoryClerks, AzureSQLPoolPerformanceCounters, AzureSQLPoolSchedulers
+#
+# ## The following are old config settings.
+# ## You may use them only if you are using the earlier flavor of queries; however, it is recommended to use
+# ## the new mechanism of identifying the database_type and thereby use its corresponding queries.
+#
+# ## Optional parameter, setting this to 2 will use a new version
+# ## of the collection queries that break compatibility with the original
+# ## dashboards.
+# ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB +# # query_version = 2 +# +# ## If you are using AzureDB, setting this to true will gather resource utilization metrics +# # azuredb = false +# +# ## Toggling this to true will emit an additional metric called "sqlserver_telegraf_health". +# ## This metric tracks the count of attempted queries and successful queries for each SQL instance specified in "servers". +# ## The purpose of this metric is to assist with identifying and diagnosing any connectivity or query issues. +# ## This setting/metric is optional and is disabled by default. +# # health_metric = false +# +# ## Possible queries accross different versions of the collectors +# ## Queries enabled by default for specific Database Type +# +# ## database_type = AzureSQLDB by default collects the following queries +# ## - AzureSQLDBWaitStats +# ## - AzureSQLDBResourceStats +# ## - AzureSQLDBResourceGovernance +# ## - AzureSQLDBDatabaseIO +# ## - AzureSQLDBServerProperties +# ## - AzureSQLDBOsWaitstats +# ## - AzureSQLDBMemoryClerks +# ## - AzureSQLDBPerformanceCounters +# ## - AzureSQLDBRequests +# ## - AzureSQLDBSchedulers +# +# ## database_type = AzureSQLManagedInstance by default collects the following queries +# ## - AzureSQLMIResourceStats +# ## - AzureSQLMIResourceGovernance +# ## - AzureSQLMIDatabaseIO +# ## - AzureSQLMIServerProperties +# ## - AzureSQLMIOsWaitstats +# ## - AzureSQLMIMemoryClerks +# ## - AzureSQLMIPerformanceCounters +# ## - AzureSQLMIRequests +# ## - AzureSQLMISchedulers +# +# ## database_type = AzureSQLPool by default collects the following queries +# ## - AzureSQLPoolResourceStats +# ## - AzureSQLPoolResourceGovernance +# ## - AzureSQLPoolDatabaseIO +# ## - AzureSQLPoolOsWaitStats, +# ## - AzureSQLPoolMemoryClerks +# ## - AzureSQLPoolPerformanceCounters +# ## - AzureSQLPoolSchedulers +# +# ## database_type = SQLServer by default collects the following queries +# ## - SQLServerPerformanceCounters +# ## - SQLServerWaitStatsCategorized +# ## - SQLServerDatabaseIO +# ## - SQLServerProperties +# ## - SQLServerMemoryClerks +# ## - SQLServerSchedulers +# ## - SQLServerRequests +# ## - SQLServerVolumeSpace +# ## - SQLServerCpu +# ## - SQLServerRecentBackups +# ## and following as optional (if mentioned in the include_query list) +# ## - SQLServerAvailabilityReplicaStates +# ## - SQLServerDatabaseReplicaStates +# +# ## Version 2 by default collects the following queries +# ## Version 2 is being deprecated, please consider using database_type. +# ## - PerformanceCounters +# ## - WaitStatsCategorized +# ## - DatabaseIO +# ## - ServerProperties +# ## - MemoryClerk +# ## - Schedulers +# ## - SqlRequests +# ## - VolumeSpace +# ## - Cpu +# +# ## Version 1 by default collects the following queries +# ## Version 1 is deprecated, please consider using database_type. 
+# ## - PerformanceCounters +# ## - WaitStatsCategorized +# ## - CPUHistory +# ## - DatabaseIO +# ## - DatabaseSize +# ## - DatabaseStats +# ## - DatabaseProperties +# ## - MemoryClerk +# ## - VolumeSpace +# ## - PerformanceMetrics -# # Statsd UDP/TCP Server +# # Statsd Server # [[inputs.statsd]] -# ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp) +# ## Protocol, must be "tcp", "udp4", "udp6" or "udp" (default=udp) # protocol = "udp" # # ## MaxTCPConnection - applicable when protocol is set to tcp (default=250) @@ -9243,7 +9534,7 @@ # ## Reset timings & histograms every interval (default=true) # delete_timings = true # -# ## Percentiles to calculate for timing & histogram stats +# ## Percentiles to calculate for timing & histogram stats. # percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0] # # ## separator to use between elements of a statsd metric @@ -9251,9 +9542,12 @@ # # ## Parses tags in the datadog statsd format # ## http://docs.datadoghq.com/guides/dogstatsd/ +# ## deprecated in 1.10; use datadog_extensions option instead # parse_data_dog_tags = false # -# ## Parses datadog extensions to the statsd format +# ## Parses extensions to statsd in the datadog statsd format +# ## currently supports metrics and datadog tags. +# ## http://docs.datadoghq.com/guides/dogstatsd/ # datadog_extensions = false # # ## Parses distributions metric as specified in the datadog statsd format @@ -9275,8 +9569,12 @@ # ## of percentiles but also increases the memory usage and cpu time. # percentile_limit = 1000 # +# ## Maximum socket buffer size in bytes, once the buffer fills up, metrics +# ## will start dropping. Defaults to the OS default. +# # read_buffer_size = 65535 +# # ## Max duration (TTL) for each metric to stay cached/reported without being updated. -# #max_ttl = "1000h" +# # max_ttl = "10h" # # ## Sanitize name method # ## By default, telegraf will pass names directly as they are received. @@ -9289,7 +9587,7 @@ # # Suricata stats and alerts plugin # [[inputs.suricata]] -# ## Data sink for Suricata stats and alerts logs +# ## Data sink for Suricata stats log. # # This is expected to be a filename of a # # unix socket to be created for listening. # source = "/var/run/suricata-stats.sock" @@ -9298,16 +9596,17 @@ # # becomes "detect_alert" when delimiter is "_". # delimiter = "_" # -# ## Detect alert logs -# # alerts = false +# # Detect alert logs +# alerts = false -# # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587 # [[inputs.syslog]] -# ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514 # ## Protocol, address and port to host the syslog receiver. # ## If no host is specified, then localhost is used. # ## If no port is specified, 6514 is used (RFC5425#section-4.1). +# ## ex: server = "tcp://localhost:6514" +# ## server = "udp://:6514" +# ## server = "unix:///var/run/telegraf-syslog.sock" # server = "tcp://:6514" # # ## TLS Config @@ -9333,7 +9632,7 @@ # ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). # ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), # ## or the non-transparent framing technique (RFC6587#section-3.4.2). -# ## Must be one of "octet-counting", "non-transparent". +# ## Must be one of "octect-counting", "non-transparent". # # framing = "octet-counting" # # ## The trailer to be expected in case of non-transparent framing (default = "LF"). 
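
The syslog server URL examples and framing options above can be combined; a minimal sketch of a TCP receiver using octet-counting framing (values chosen here for illustration) might be:

[[inputs.syslog]]
  server = "tcp://:6514"
  ## framing applies only to stream (TCP / unix socket) transports
  framing = "octet-counting"
  best_effort = true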
@@ -9403,232 +9702,42 @@ # ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string. # # path_tag = "path" # +# ## Filters to apply to files before generating metrics +# ## "ansi_color" removes ANSI colors +# # filters = [] +# # ## multiline parser/codec # ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html # #[inputs.tail.multiline] -# ## The pattern should be a regexp which matches what you believe to be an -# ## indicator that the field is part of an event consisting of multiple lines of log data. +# ## The pattern should be a regexp which matches what you believe to be an indicator that the field is part of an event consisting of multiple lines of log data. # #pattern = "^\s" # -# ## This field must be either "previous" or "next". -# ## If a line matches the pattern, "previous" indicates that it belongs to the previous line, -# ## whereas "next" indicates that the line belongs to the next one. +# ## The field's value must be previous or next and indicates the relation to the +# ## multi-line event. # #match_which_line = "previous" # -# ## The invert_match field can be true or false (defaults to false). -# ## If true, a message not matching the pattern will constitute a match of the multiline -# ## filter and the what will be applied. (vice-versa is also true) +# ## The invert_match can be true or false (defaults to false). +# ## If true, a message not matching the pattern will constitute a match of the multiline filter and the what will be applied. (vice-versa is also true) # #invert_match = false # -# ## After the specified timeout, this plugin sends a multiline event even if no new pattern -# ## is found to start a new event. The default timeout is 5s. +# #After the specified timeout, this plugin sends the multiline event even if no new pattern is found to start a new event. The default is 5s. # #timeout = 5s +# ## DEPRECATED: The 'tcp_listener' plugin is deprecated in version 1.3.0, use 'inputs.socket_listener' instead. # # Generic TCP listener # [[inputs.tcp_listener]] -# ## DEPRECATED: The 'tcp_listener' plugin is deprecated in version 1.3.0, use 'inputs.socket_listener' instead. -# # DEPRECATED: the TCP listener plugin has been deprecated in favor of the # # socket_listener plugin # # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener +# ## DEPRECATED: The 'udp_listener' plugin is deprecated in version 1.3.0, use 'inputs.socket_listener' instead. # # Generic UDP listener # [[inputs.udp_listener]] -# ## DEPRECATED: The 'udp_listener' plugin is deprecated in version 1.3.0, use 'inputs.socket_listener' instead. -# # DEPRECATED: the TCP listener plugin has been deprecated in favor of the -# # socket_listener plugin # # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener -# # Read metrics from VMware vCenter -# [[inputs.vsphere]] -# ## List of vCenter URLs to be monitored. These three lines must be uncommented -# ## and edited for the plugin to work. 
-# vcenters = [ "https://vcenter.local/sdk" ] -# username = "user@corp.local" -# password = "secret" -# -# ## VMs -# ## Typical VM metrics (if omitted or empty, all metrics are collected) -# # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected) -# # vm_exclude = [] # Inventory paths to exclude -# vm_metric_include = [ -# "cpu.demand.average", -# "cpu.idle.summation", -# "cpu.latency.average", -# "cpu.readiness.average", -# "cpu.ready.summation", -# "cpu.run.summation", -# "cpu.usagemhz.average", -# "cpu.used.summation", -# "cpu.wait.summation", -# "mem.active.average", -# "mem.granted.average", -# "mem.latency.average", -# "mem.swapin.average", -# "mem.swapinRate.average", -# "mem.swapout.average", -# "mem.swapoutRate.average", -# "mem.usage.average", -# "mem.vmmemctl.average", -# "net.bytesRx.average", -# "net.bytesTx.average", -# "net.droppedRx.summation", -# "net.droppedTx.summation", -# "net.usage.average", -# "power.power.average", -# "virtualDisk.numberReadAveraged.average", -# "virtualDisk.numberWriteAveraged.average", -# "virtualDisk.read.average", -# "virtualDisk.readOIO.latest", -# "virtualDisk.throughput.usage.average", -# "virtualDisk.totalReadLatency.average", -# "virtualDisk.totalWriteLatency.average", -# "virtualDisk.write.average", -# "virtualDisk.writeOIO.latest", -# "sys.uptime.latest", -# ] -# # vm_metric_exclude = [] ## Nothing is excluded by default -# # vm_instances = true ## true by default -# -# ## Hosts -# ## Typical host metrics (if omitted or empty, all metrics are collected) -# # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected) -# # host_exclude [] # Inventory paths to exclude -# host_metric_include = [ -# "cpu.coreUtilization.average", -# "cpu.costop.summation", -# "cpu.demand.average", -# "cpu.idle.summation", -# "cpu.latency.average", -# "cpu.readiness.average", -# "cpu.ready.summation", -# "cpu.swapwait.summation", -# "cpu.usage.average", -# "cpu.usagemhz.average", -# "cpu.used.summation", -# "cpu.utilization.average", -# "cpu.wait.summation", -# "disk.deviceReadLatency.average", -# "disk.deviceWriteLatency.average", -# "disk.kernelReadLatency.average", -# "disk.kernelWriteLatency.average", -# "disk.numberReadAveraged.average", -# "disk.numberWriteAveraged.average", -# "disk.read.average", -# "disk.totalReadLatency.average", -# "disk.totalWriteLatency.average", -# "disk.write.average", -# "mem.active.average", -# "mem.latency.average", -# "mem.state.latest", -# "mem.swapin.average", -# "mem.swapinRate.average", -# "mem.swapout.average", -# "mem.swapoutRate.average", -# "mem.totalCapacity.average", -# "mem.usage.average", -# "mem.vmmemctl.average", -# "net.bytesRx.average", -# "net.bytesTx.average", -# "net.droppedRx.summation", -# "net.droppedTx.summation", -# "net.errorsRx.summation", -# "net.errorsTx.summation", -# "net.usage.average", -# "power.power.average", -# "storageAdapter.numberReadAveraged.average", -# "storageAdapter.numberWriteAveraged.average", -# "storageAdapter.read.average", -# "storageAdapter.write.average", -# "sys.uptime.latest", -# ] -# ## Collect IP addresses? 
Valid values are "ipv4" and "ipv6" -# # ip_addresses = ["ipv6", "ipv4" ] -# -# # host_metric_exclude = [] ## Nothing excluded by default -# # host_instances = true ## true by default -# -# -# ## Clusters -# # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) -# # cluster_exclude = [] # Inventory paths to exclude -# # cluster_metric_include = [] ## if omitted or empty, all metrics are collected -# # cluster_metric_exclude = [] ## Nothing excluded by default -# # cluster_instances = false ## false by default -# -# ## Datastores -# # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected) -# # datastore_exclude = [] # Inventory paths to exclude -# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected -# # datastore_metric_exclude = [] ## Nothing excluded by default -# # datastore_instances = false ## false by default -# -# ## Datacenters -# # datacenter_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) -# # datacenter_exclude = [] # Inventory paths to exclude -# datacenter_metric_include = [] ## if omitted or empty, all metrics are collected -# datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default. -# # datacenter_instances = false ## false by default -# -# ## Plugin Settings -# ## separator character to use for measurement and field names (default: "_") -# # separator = "_" -# -# ## number of objects to retrieve per query for realtime resources (vms and hosts) -# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) -# # max_query_objects = 256 -# -# ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores) -# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) -# # max_query_metrics = 256 -# -# ## number of go routines to use for collection and discovery of objects and metrics -# # collect_concurrency = 1 -# # discover_concurrency = 1 -# -# ## the interval before (re)discovering objects subject to metrics collection (default: 300s) -# # object_discovery_interval = "300s" -# -# ## timeout applies to any of the api request made to vcenter -# # timeout = "60s" -# -# ## When set to true, all samples are sent as integers. This makes the output -# ## data types backwards compatible with Telegraf 1.9 or lower. Normally all -# ## samples from vCenter, with the exception of percentages, are integer -# ## values, but under some conditions, some averaging takes place internally in -# ## the plugin. Setting this flag to "false" will send values as floats to -# ## preserve the full precision when averaging takes place. -# # use_int_samples = true -# -# ## Custom attributes from vCenter can be very useful for queries in order to slice the -# ## metrics along different dimension and for forming ad-hoc relationships. They are disabled -# ## by default, since they can add a considerable amount of tags to the resulting metrics. To -# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include -# ## to select the attributes you want to include. -# ## By default, since they can add a considerable amount of tags to the resulting metrics. To -# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include -# ## to select the attributes you want to include. 
-# # custom_attribute_include = [] -# # custom_attribute_exclude = ["*"] -# -# ## The number of vSphere 5 minute metric collection cycles to look back for non-realtime metrics. In -# ## some versions (6.7, 7.0 and possible more), certain metrics, such as cluster metrics, may be reported -# ## with a significant delay (>30min). If this happens, try increasing this number. Please note that increasing -# ## it too much may cause performance issues. -# # metric_lookback = 3 -# -# ## Optional SSL Config -# # ssl_ca = "/path/to/cafile" -# # ssl_cert = "/path/to/certfile" -# # ssl_key = "/path/to/keyfile" -# ## Use SSL but skip chain & host verification -# # insecure_skip_verify = false -# -# ## The Historical Interval value must match EXACTLY the interval in the daily -# # "Interval Duration" found on the VCenter server under Configure > General > Statistics > Statistic intervals -# # historical_interval = "5m" +# vm_metric_exclude = [ "*" ] # # A Webhooks Event collector @@ -9639,49 +9748,52 @@ # [inputs.webhooks.filestack] # path = "/filestack" # -# ## HTTP basic auth -# #username = "" -# #password = "" +# ## HTTP basic auth +# #username = "" +# #password = "" # # [inputs.webhooks.github] # path = "/github" # # secret = "" # -# ## HTTP basic auth -# #username = "" -# #password = "" +# ## HTTP basic auth +# #username = "" +# #password = "" # # [inputs.webhooks.mandrill] # path = "/mandrill" # -# ## HTTP basic auth -# #username = "" -# #password = "" +# ## HTTP basic auth +# #username = "" +# #password = "" # # [inputs.webhooks.rollbar] # path = "/rollbar" # -# ## HTTP basic auth -# #username = "" -# #password = "" +# ## HTTP basic auth +# #username = "" +# #password = "" # # [inputs.webhooks.papertrail] # path = "/papertrail" # -# ## HTTP basic auth -# #username = "" -# #password = "" +# ## HTTP basic auth +# #username = "" +# #password = "" # # [inputs.webhooks.particle] # path = "/particle" # -# ## HTTP basic auth -# #username = "" -# #password = "" +# ## HTTP basic auth +# #username = "" +# #password = "" +# +# [inputs.webhooks.artifactory] +# path = "/artifactory" # # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures. # [[inputs.zipkin]] # # path = "/api/v1/spans" # URL path for span data -# # port = 9411 # Port on which Telegraf listens +# # port = 9411 # Port on which Telegraf listens diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf index db671c218..0975eef74 100644 --- a/etc/telegraf_windows.conf +++ b/etc/telegraf_windows.conf @@ -89,7 +89,7 @@ ## The logfile will be rotated after the time interval specified. When set ## to 0 no time based rotation is performed. Logs are rotated only when ## written to, if there is no log activity rotation may be delayed. - # logfile_rotation_interval = "0d" + # logfile_rotation_interval = "0h" ## The logfile will be rotated when it becomes larger than the specified ## size. When set to 0 no size based rotation is performed. @@ -211,6 +211,10 @@ # # Publishes metrics to an AMQP broker # [[outputs.amqp]] +# ## Broker to publish to. +# ## deprecated in 1.7; use the brokers option +# # url = "amqp://localhost:5672/influxdb" +# # ## Brokers to publish to. If multiple brokers are specified a random broker # ## will be selected anytime a connection is established. This can be # ## helpful for load balancing when not using a dedicated load balancer. @@ -259,6 +263,14 @@ # ## One of "transient" or "persistent". 
# # delivery_mode = "transient" # +# ## InfluxDB database added as a message header. +# ## deprecated in 1.7; use the headers option +# # database = "telegraf" +# +# ## InfluxDB retention policy added as a message header +# ## deprecated in 1.7; use the headers option +# # retention_policy = "default" +# # ## Static headers added to each published message. # # headers = { } # # headers = {"database" = "telegraf", "retention_policy" = "default"} @@ -319,8 +331,8 @@ # # Sends metrics to Azure Data Explorer # [[outputs.azure_data_explorer]] -# ## Azure Data Explorer cluster endpoint -# ## ex: endpoint_url = "https://clustername.australiasoutheast.kusto.windows.net" +# ## The URI property of the Azure Data Explorer resource on Azure +# ## ex: endpoint_url = https://myadxresource.australiasoutheast.kusto.windows.net # endpoint_url = "" # # ## The Azure Data Explorer database that the metrics will be ingested into. @@ -369,8 +381,8 @@ # ## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/" # # resource_id = "" # -# ## Optionally, if in Azure US Government, China or other sovereign -# ## cloud environment, set appropriate REST endpoint for receiving +# ## Optionally, if in Azure US Government, China, or other sovereign +# ## cloud environment, set the appropriate REST endpoint for receiving # ## metrics. (Note: region may be unused in this context) # # endpoint_url = "https://monitoring.core.usgovcloudapi.net" @@ -490,62 +502,62 @@ # # Configuration for AWS CloudWatchLogs output. # [[outputs.cloudwatch_logs]] -# ## The region is the Amazon region that you wish to connect to. -# ## Examples include but are not limited to: -# ## - us-west-1 -# ## - us-west-2 -# ## - us-east-1 -# ## - ap-southeast-1 -# ## - ap-southeast-2 -# ## ... -# region = "us-east-1" +# ## The region is the Amazon region that you wish to connect to. +# ## Examples include but are not limited to: +# ## - us-west-1 +# ## - us-west-2 +# ## - us-east-1 +# ## - ap-southeast-1 +# ## - ap-southeast-2 +# ## ... +# region = "us-east-1" # -# ## Amazon Credentials -# ## Credentials are loaded in the following order -# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified -# ## 2) Assumed credentials via STS if role_arn is specified -# ## 3) explicit credentials from 'access_key' and 'secret_key' -# ## 4) shared profile from 'profile' -# ## 5) environment variables -# ## 6) shared credentials file -# ## 7) EC2 Instance Profile -# #access_key = "" -# #secret_key = "" -# #token = "" -# #role_arn = "" -# #web_identity_token_file = "" -# #role_session_name = "" -# #profile = "" -# #shared_credential_file = "" +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" # -# ## Endpoint to make request against, the correct endpoint is automatically -# ## determined and this option should only be set if you wish to override the -# ## default. 
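
Tying together the outputs.amqp deprecations shown earlier in this file (url, database, and retention_policy), a sketch of the non-deprecated equivalents, reusing the example values from the comments, might look like:

[[outputs.amqp]]
  ## replaces the deprecated url option
  brokers = ["amqp://localhost:5672/influxdb"]
  ## replaces the deprecated database / retention_policy options
  headers = {"database" = "telegraf", "retention_policy" = "default"}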
-# ## ex: endpoint_url = "http://localhost:8000" -# # endpoint_url = "" +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" # -# ## Cloud watch log group. Must be created in AWS cloudwatch logs upfront! -# ## For example, you can specify the name of the k8s cluster here to group logs from all cluster in oine place -# log_group = "my-group-name" +# ## Cloud watch log group. Must be created in AWS cloudwatch logs upfront! +# ## For example, you can specify the name of the k8s cluster here to group logs from all cluster in oine place +# log_group = "my-group-name" # -# ## Log stream in log group -# ## Either log group name or reference to metric attribute, from which it can be parsed: -# ## tag: or field:. If log stream is not exist, it will be created. -# ## Since AWS is not automatically delete logs streams with expired logs entries (i.e. empty log stream) -# ## you need to put in place appropriate house-keeping (https://forums.aws.amazon.com/thread.jspa?threadID=178855) -# log_stream = "tag:location" +# ## Log stream in log group +# ## Either log group name or reference to metric attribute, from which it can be parsed: +# ## tag: or field:. If log stream is not exist, it will be created. +# ## Since AWS is not automatically delete logs streams with expired logs entries (i.e. empty log stream) +# ## you need to put in place appropriate house-keeping (https://forums.aws.amazon.com/thread.jspa?threadID=178855) +# log_stream = "tag:location" # -# ## Source of log data - metric name -# ## specify the name of the metric, from which the log data should be retrieved. -# ## I.e., if you are using docker_log plugin to stream logs from container, then -# ## specify log_data_metric_name = "docker_log" -# log_data_metric_name = "docker_log" +# ## Source of log data - metric name +# ## specify the name of the metric, from which the log data should be retrieved. +# ## I.e., if you are using docker_log plugin to stream logs from container, then +# ## specify log_data_metric_name = "docker_log" +# log_data_metric_name = "docker_log" # -# ## Specify from which metric attribute the log data should be retrieved: -# ## tag: or field:. -# ## I.e., if you are using docker_log plugin to stream logs from container, then -# ## specify log_data_source = "field:message" -# log_data_source = "field:message" +# ## Specify from which metric attribute the log data should be retrieved: +# ## tag: or field:. +# ## I.e., if you are using docker_log plugin to stream logs from container, then +# ## specify log_data_source = "field:message" +# log_data_source = "field:message" # # Configuration for CrateDB to send metrics to. @@ -611,11 +623,9 @@ # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" -# # ## Optional flag for ignoring tls certificate check # # insecure_skip_verify = false # -# # ## Connection timeout, defaults to "5s" if not set. # timeout = "5s" # @@ -631,18 +641,21 @@ # [[outputs.elasticsearch]] # ## The full HTTP endpoint URL for your Elasticsearch instance # ## Multiple urls can be specified as part of the same cluster, -# ## this means that only ONE of the urls will be written to each interval. +# ## this means that only ONE of the urls will be written to each interval # urls = [ "http://node1.es.example.com:9200" ] # required. 
# ## Elasticsearch client timeout, defaults to "5s" if not set. # timeout = "5s" # ## Set to true to ask Elasticsearch a list of all cluster nodes, -# ## thus it is not necessary to list all nodes in the urls config option. +# ## thus it is not necessary to list all nodes in the urls config option # enable_sniffer = false # ## Set to true to enable gzip compression # enable_gzip = false # ## Set the interval to check if the Elasticsearch nodes are available # ## Setting to "0s" will disable the health check (not recommended in production) # health_check_interval = "10s" +# ## Set the timeout for periodic health checks. +# # health_check_timeout = "1s" +# ## HTTP basic authentication details. # ## HTTP basic authentication details # # username = "telegraf" # # password = "mypassword" @@ -714,6 +727,12 @@ # ## Client timeout (defaults to 30s) # # timeout = "30s" # +# ## Partition key +# ## Metric tag or field name to use for the event partition key. The value of +# ## this tag or field is set as the key for events if it exists. If both, tag +# ## and field, exist the tag is preferred. +# # partition_key = "" +# # ## Data format to output. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -726,6 +745,12 @@ # ## Command to ingest metrics via stdin. # command = ["tee", "-a", "/dev/null"] # +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. "KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] +# # ## Timeout for command to complete. # # timeout = "5s" # @@ -738,9 +763,16 @@ # # Run executable as long-running output plugin # [[outputs.execd]] -# ## Program to run as daemon +# ## One program to run as daemon. +# ## NOTE: process and each argument should each be their own string # command = ["my-telegraf-output", "--some-flag", "value"] # +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. "KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] +# # ## Delay before the process is restarted after an unexpected termination # restart_delay = "10s" # @@ -758,12 +790,12 @@ # # ## Use batch serialization format instead of line based delimiting. The # ## batch format allows for the production of non line based output formats and -# ## may more efficiently encode metric groups. +# ## may more efficiently encode and write metrics. # # use_batch_format = false # # ## The file will be rotated after the time interval specified. When set # ## to 0 no time based rotation is performed. -# # rotation_interval = "0d" +# # rotation_interval = "0h" # # ## The logfile will be rotated when it becomes larger than the specified # ## size. When set to 0 no size based rotation is performed. @@ -783,7 +815,7 @@ # # Configuration for Graphite server to send metrics to # [[outputs.graphite]] # ## TCP endpoint for your graphite instance. -# ## If multiple endpoints are configured, output will be load balanced. +# ## If multiple endpoints are configured, the output will be load balanced. # ## Only one of the endpoints will be written to with each iteration. 
# servers = ["localhost:2003"] # ## Prefix metrics name @@ -936,6 +968,9 @@ # # token_url = "https://indentityprovider/oauth2/v1/token" # # scopes = ["urn:opc:idm:__myscopes__"] # +# ## Goole API Auth +# # google_application_credentials = "/etc/telegraf/example_secret.json" +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" @@ -973,6 +1008,15 @@ # # # Should be set manually to "application/json" for json data_format # # Content-Type = "text/plain; charset=utf-8" # +# ## MaxIdleConns controls the maximum number of idle (keep-alive) +# ## connections across all hosts. Zero means no limit. +# # max_idle_conn = 0 +# +# ## MaxIdleConnsPerHost, if non-zero, controls the maximum idle +# ## (keep-alive) connections to keep per-host. If zero, +# ## DefaultMaxIdleConnsPerHost is used(2). +# # max_idle_conn_per_host = 2 +# # ## Idle (keep-alive) connection timeout. # ## Maximum amount of time before idle connection is closed. # ## Zero means no limit. @@ -998,9 +1042,12 @@ # #role_session_name = "" # #profile = "" # #shared_credential_file = "" +# +# ## Optional list of statuscodes (<200 or >300) upon which requests should not be retried +# # non_retryable_statuscodes = [409, 413] -# # Configuration for sending metrics to InfluxDB +# # Configuration for sending metrics to InfluxDB 2.0 # [[outputs.influxdb_v2]] # ## The URLs of the InfluxDB cluster nodes. # ## @@ -1012,7 +1059,7 @@ # ## Token for authentication. # token = "" # -# ## Organization is the name of the organization you wish to write to; must exist. +# ## Organization is the name of the organization you wish to write to. # organization = "" # # ## Destination bucket to write into. @@ -1056,7 +1103,7 @@ # # Configuration for sending metrics to an Instrumental project # [[outputs.instrumental]] # ## Project API Token (required) -# api_token = "API Token" # required +# api_token = "API Token" # required # ## Prefix the metrics with a given name # prefix = "" # ## Stats output template (Graphite formatting) @@ -1064,7 +1111,7 @@ # template = "host.tags.measurement.field" # ## Timeout in seconds to connect # timeout = "2s" -# ## Display Communication to Instrumental +# ## Debug true - Print communication to Instrumental # debug = false @@ -1086,7 +1133,7 @@ # # client_id = "Telegraf" # # ## Set the minimal supported Kafka version. Setting this enables the use of new -# ## Kafka features and APIs. Of particular interest, lz4 compression +# ## Kafka features and APIs. Of particular interested, lz4 compression # ## requires at least version 0.10.0.0. # ## ex: version = "1.1.0" # # version = "" @@ -1144,7 +1191,7 @@ # ## 2 : Snappy # ## 3 : LZ4 # ## 4 : ZSTD -# # compression_codec = 0 +# # compression_codec = 0 # # ## Idempotent Writes # ## If enabled, exactly one copy of each message is written. @@ -1302,27 +1349,34 @@ # ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite # ## This template is used in librato's source (not metric's name) # template = "host" -# -# # Send aggregate metrics to Logz.io +# # A plugin that can send metrics over HTTPs to Logz.io # [[outputs.logzio]] -# ## Connection timeout, defaults to "5s" if not set. -# timeout = "5s" +# ## Set to true if Logz.io sender checks the disk space before adding metrics to the disk queue. 
+# # check_disk_space = true # -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" +# ## The percent of used file system space at which the sender will stop queueing. +# ## When we will reach that percentage, the file system in which the queue is stored will drop +# ## all new logs until the percentage of used space drops below that threshold. +# # disk_threshold = 98 +# +# ## How often Logz.io sender should drain the queue. +# ## Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". +# # drain_duration = "3s" +# +# ## Where Logz.io sender should store the queue +# ## queue_dir = Sprintf("%s%s%s%s%d", os.TempDir(), string(os.PathSeparator), +# ## "logzio-buffer", string(os.PathSeparator), time.Now().UnixNano()) # # ## Logz.io account token -# token = "your logz.io token" # required +# token = "your Logz.io token" # required # # ## Use your listener URL for your Logz.io account region. # # url = "https://listener.logz.io:8071" -# # Send logs to Loki +# # A plugin that can transmit logs to Loki # [[outputs.loki]] # ## The domain of Loki # domain = "https://loki.domain.tld" @@ -1349,7 +1403,7 @@ # # tls_key = "/etc/telegraf/key.pem" -# # Sends metrics to MongoDB +# # A plugin that can transmit logs to mongodb # [[outputs.mongodb]] # # connection string examples for mongodb # dsn = "mongodb://localhost:27017" @@ -1587,15 +1641,12 @@ # # Configuration for the Prometheus client to spawn # [[outputs.prometheus_client]] -# ## Address to listen on +# ## Address to listen on. # listen = ":9273" # -# ## Metric version controls the mapping from Telegraf metrics into -# ## Prometheus format. When using the prometheus input, use the same value in -# ## both plugins to ensure metrics are round-tripped without modification. -# ## -# ## example: metric_version = 1; -# ## metric_version = 2; recommended version +# ## Metric version controls the mapping from Prometheus metrics into Telegraf metrics. +# ## See "Metric Format Configuration" in plugins/inputs/prometheus/README.md for details. +# ## Valid options: 1, 2 # # metric_version = 1 # # ## Use HTTP Basic Authentication. @@ -1632,7 +1683,7 @@ # # export_timestamp = false -# # Configuration for the Riemann server to send metrics to +# # Configuration for Riemann to send metrics to # [[outputs.riemann]] # ## The full TCP or UDP URL of the Riemann server # url = "tcp://localhost:5555" @@ -1666,9 +1717,9 @@ # # timeout = "5s" +# ## DEPRECATED: The 'riemann_legacy' plugin is deprecated in version 1.3.0, use 'outputs.riemann' instead (see https://github.com/influxdata/telegraf/issues/1878). # # Configuration for the Riemann server to send metrics to # [[outputs.riemann_legacy]] -# ## DEPRECATED: The 'riemann_legacy' plugin is deprecated in version 1.3.0, use 'outputs.riemann' instead (see https://github.com/influxdata/telegraf/issues/1878). # ## URL of server # url = "localhost:5555" # ## transport protocol to use either tcp or udp @@ -1744,7 +1795,7 @@ # ## The check name is the name to give the Sensu check associated with the event # ## created. This maps to check.metatadata.name in the event. # [outputs.sensu.check] -# name = "telegraf" +# name = "telegraf" # # ## Entity specification # ## Configure the entity name and namespace, if necessary. 
This will be part of @@ -1770,21 +1821,21 @@ # # Send metrics and events to SignalFx # [[outputs.signalfx]] -# ## SignalFx Org Access Token -# access_token = "my-secret-token" +# ## SignalFx Org Access Token +# access_token = "my-secret-token" # -# ## The SignalFx realm that your organization resides in -# signalfx_realm = "us9" # Required if ingest_url is not set +# ## The SignalFx realm that your organization resides in +# signalfx_realm = "us9" # Required if ingest_url is not set # -# ## You can optionally provide a custom ingest url instead of the -# ## signalfx_realm option above if you are using a gateway or proxy -# ## instance. This option takes precident over signalfx_realm. -# ingest_url = "https://my-custom-ingest/" +# ## You can optionally provide a custom ingest url instead of the +# ## signalfx_realm option above if you are using a gateway or proxy +# ## instance. This option takes precident over signalfx_realm. +# ingest_url = "https://my-custom-ingest/" # -# ## Event typed metrics are omitted by default, -# ## If you require an event typed metric you must specify the -# ## metric name in the following list. -# included_event_names = ["plugin.metric_name"] +# ## Event typed metrics are omitted by default, +# ## If you require an event typed metric you must specify the +# ## metric name in the following list. +# included_event_names = ["plugin.metric_name"] # # Generic socket writer capable of handling multiple socket types. @@ -1814,19 +1865,19 @@ # ## Defaults to the OS configuration. # # keep_alive_period = "5m" # -# ## Content encoding for packet-based connections (i.e. UDP, unixgram). -# ## Can be set to "gzip" or to "identity" to apply no encoding. +# ## Content encoding for message payloads, can be set to "gzip" or to +# ## "identity" to apply no encoding. # ## # # content_encoding = "identity" # # ## Data format to generate. # ## Each data format has its own unique set of configuration options, read # ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md # # data_format = "influx" -# # Send metrics to SQL Database +# # Save metrics to an SQL Database # [[outputs.sql]] # ## Database driver # ## Valid options: mssql (Microsoft SQL Server), mysql (MySQL), pgx (Postgres), @@ -1899,7 +1950,7 @@ # # location = "eu-north0" -# # A plugin that can transmit metrics to Sumo Logic HTTP Source +# # A plugin that can send metrics to Sumo Logic HTTP metric collector. # [[outputs.sumologic]] # ## Unique URL generated for your HTTP Metrics Source. # ## This is the address to send metrics to. @@ -2030,13 +2081,13 @@ # # default_appname = "Telegraf" -# # Configuration for Amazon Timestream output. +# # Configuration for sending metrics to Amazon Timestream. # [[outputs.timestream]] # ## Amazon Region # region = "us-east-1" # # ## Amazon Credentials -# ## Credentials are loaded in the following order: +# ## Credentials are loaded in the following order # ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified # ## 2) Assumed credentials via STS if role_arn is specified # ## 3) explicit credentials from 'access_key' and 'secret_key' @@ -2180,7 +2231,7 @@ # # Configuration for Wavefront server to send metrics to # [[outputs.wavefront]] # ## Url for Wavefront Direct Ingestion. For Wavefront Proxy Ingestion, see -# ## the 'host' and 'port' optioins below. +# ## the 'host' and 'port' options below. 
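+# ## Sketch of the proxy alternative (hostname is a placeholder and 2878 is
+# ## the commonly used proxy metrics port; adjust to your proxy setup):
+# # host = "wavefront-proxy.example.com"
+# # port = 2878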
# url = "https://metrics.wavefront.com" # # ## Authentication Token for Wavefront. Only required if using Direct Ingestion @@ -2230,10 +2281,10 @@ # #immediate_flush = true -# # Generic WebSocket output writer. +# # A plugin that can transmit metrics over WebSocket. # [[outputs.websocket]] # ## URL is the address to send metrics to. Make sure ws or wss scheme is used. -# url = "ws://127.0.0.1:8080/telegraf" +# url = "ws://127.0.0.1:3000/telegraf" # # ## Timeouts (make sure read_timeout is larger than server ping interval or set to zero). # # connect_timeout = "30s" @@ -2333,7 +2384,7 @@ # max_parallel_calls = 10 -# # Clone metrics and apply modifications. +# # Apply metric modifications using override semantics. # [[processors.clone]] # ## All modifications on inputs and aggregators can be overridden: # # name_override = "new_name" @@ -2377,28 +2428,28 @@ # # Dates measurements, tags, and fields that pass through this filter. # [[processors.date]] -# ## New tag to create -# tag_key = "month" +# ## New tag to create +# tag_key = "month" # -# ## New field to create (cannot set both field_key and tag_key) -# # field_key = "month" +# ## New field to create (cannot set both field_key and tag_key) +# # field_key = "month" # -# ## Date format string, must be a representation of the Go "reference time" -# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006". -# date_format = "Jan" +# ## Date format string, must be a representation of the Go "reference time" +# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006". +# date_format = "Jan" # -# ## If destination is a field, date format can also be one of -# ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field. -# # date_format = "unix" +# ## If destination is a field, date format can also be one of +# ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field. +# # date_format = "unix" # -# ## Offset duration added to the date string when writing the new tag. -# # date_offset = "0s" +# ## Offset duration added to the date string when writing the new tag. +# # date_offset = "0s" # -# ## Timezone to use when creating the tag or field using a reference time -# ## string. This can be set to one of "UTC", "Local", or to a location name -# ## in the IANA Time Zone database. -# ## example: timezone = "America/Los_Angeles" -# # timezone = "UTC" +# ## Timezone to use when creating the tag or field using a reference time +# ## string. This can be set to one of "UTC", "Local", or to a location name +# ## in the IANA Time Zone database. +# ## example: timezone = "America/Los_Angeles" +# # timezone = "UTC" # # Filter metrics with repeating field values @@ -2407,7 +2458,7 @@ # dedup_interval = "600s" -# # Defaults sets default value(s) for specified fields that are not set on incoming metrics. +# ## Set default fields on your metric(s) when they are nil or empty # [[processors.defaults]] # ## Ensures a set of fields always exists on your metric(s) with their # ## respective default value. @@ -2418,10 +2469,10 @@ # ## or it is not nil but its value is an empty string or is a string # ## of one or more spaces. # ## = -# # [processors.defaults.fields] -# # field_1 = "bar" -# # time_idle = 0 -# # is_error = true +# [processors.defaults.fields] +# field_1 = "bar" +# time_idle = 0 +# is_error = true # # Map enum values according to given table. @@ -2438,8 +2489,8 @@ # dest = "status_code" # # ## Default value to be used for all values not contained in the mapping -# ## table. 
When unset, the unmodified value for the field will be used if no -# ## match is found. +# ## table. When unset and no match is found, the original field will remain +# ## unmodified and the destination tag or field will not be created. # # default = 0 # # ## Table of mappings @@ -2451,12 +2502,19 @@ # # Run executable as long-running processor plugin # [[processors.execd]] -# ## Program to run as daemon -# ## eg: command = ["/path/to/your_program", "arg1", "arg2"] -# command = ["cat"] +# ## One program to run as daemon. +# ## NOTE: process and each argument should each be their own string +# ## eg: command = ["/path/to/your_program", "arg1", "arg2"] +# command = ["cat"] +# +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. "KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] # # ## Delay before the process is restarted after an unexpected termination -# restart_delay = "10s" +# # restart_delay = "10s" # # Performs file path manipulations on tags and fields @@ -2553,25 +2611,25 @@ # # Adds noise to numerical fields # [[processors.noise]] -# ## Specified the type of the random distribution. -# ## Can be "laplacian", "gaussian" or "uniform". -# # type = "laplacian +# ## Specified the type of the random distribution. +# ## Can be "laplacian", "gaussian" or "uniform". +# # type = "laplacian # -# ## Center of the distribution. -# ## Only used for Laplacian and Gaussian distributions. -# # mu = 0.0 +# ## Center of the distribution. +# ## Only used for Laplacian and Gaussian distributions. +# # mu = 0.0 # -# ## Scale parameter for the Laplacian or Gaussian distribution -# # scale = 1.0 +# ## Scale parameter for the Laplacian or Gaussian distribution +# # scale = 1.0 # -# ## Upper and lower bound of the Uniform distribution -# # min = -1.0 -# # max = 1.0 +# ## Upper and lower bound of the Uniform distribution +# # min = -1.0 +# # max = 1.0 # -# ## Apply the noise only to numeric fields matching the filter criteria below. -# ## Excludes takes precedence over includes. -# # include_fields = [] -# # exclude_fields = [] +# ## Apply the noise only to numeric fields matching the filter criteria below. +# ## Excludes takes precedence over includes. +# # include_fields = [] +# # exclude_fields = [] # # Apply metric modifications using override semantics. @@ -2589,7 +2647,7 @@ # # Parse a value in a specified field/tag(s) and add the result in a new metric # [[processors.parser]] # ## The name of the fields whose value will be parsed. -# parse_fields = [] +# parse_fields = ["message"] # # ## If true, incoming metrics are not emitted. # drop_original = false @@ -2615,7 +2673,6 @@ # # Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file # [[processors.port_name]] -# [[processors.port_name]] # ## Name of tag holding the port number # # tag = "port" # ## Or name of the field holding the port number @@ -2640,48 +2697,50 @@ # # Transforms tag and field values as well as measurement, tag and field names with regex pattern # [[processors.regex]] -# ## Tag and field conversions defined in a separate sub-tables -# # [[processors.regex.tags]] -# # ## Tag to change -# # key = "resp_code" -# # ## Regular expression to match on a tag value -# # pattern = "^(\\d)\\d\\d$" -# # ## Matches of the pattern will be replaced with this string. Use ${1} -# # ## notation to use the text of the first submatch. 
-# # replacement = "${1}xx" +# namepass = ["nginx_requests"] # -# # [[processors.regex.fields]] -# # ## Field to change -# # key = "request" -# # ## All the power of the Go regular expressions available here -# # ## For example, named subgroups -# # pattern = "^/api(?P/[\\w/]+)\\S*" -# # replacement = "${method}" -# # ## If result_key is present, a new field will be created -# # ## instead of changing existing field -# # result_key = "method" +# # Tag and field conversions defined in a separate sub-tables +# [[processors.regex.tags]] +# ## Tag to change, "*" will change every tag +# key = "resp_code" +# ## Regular expression to match on a tag value +# pattern = "^(\\d)\\d\\d$" +# ## Matches of the pattern will be replaced with this string. Use ${1} +# ## notation to use the text of the first submatch. +# replacement = "${1}xx" # -# ## Multiple conversions may be applied for one field sequentially -# ## Let's extract one more value -# # [[processors.regex.fields]] -# # key = "request" -# # pattern = ".*category=(\\w+).*" -# # replacement = "${1}" -# # result_key = "search_category" +# [[processors.regex.fields]] +# ## Field to change +# key = "request" +# ## All the power of the Go regular expressions available here +# ## For example, named subgroups +# pattern = "^/api(?P/[\\w/]+)\\S*" +# replacement = "${method}" +# ## If result_key is present, a new field will be created +# ## instead of changing existing field +# result_key = "method" # -# ## Rename metric fields -# # [[processors.regex.field_rename]] -# # ## Regular expression to match on a field name -# # pattern = "^search_(\\w+)d$" -# # ## Matches of the pattern will be replaced with this string. Use ${1} -# # ## notation to use the text of the first submatch. -# # replacement = "${1}" -# # ## If the new field name already exists, you can either "overwrite" the -# # ## existing one with the value of the renamed field OR you can "keep" -# # ## both the existing and source field. -# # # result_key = "keep" +# # Multiple conversions may be applied for one field sequentially +# # Let's extract one more value +# [[processors.regex.fields]] +# key = "request" +# pattern = ".*category=(\\w+).*" +# replacement = "${1}" +# result_key = "search_category" # -# ## Rename metric tags +# # Rename metric fields +# [[processors.regex.field_rename]] +# ## Regular expression to match on a field name +# pattern = "^search_(\\w+)d$" +# ## Matches of the pattern will be replaced with this string. Use ${1} +# ## notation to use the text of the first submatch. +# replacement = "${1}" +# ## If the new field name already exists, you can either "overwrite" the +# ## existing one with the value of the renamed field OR you can "keep" +# ## both the existing and source field. +# # result_key = "keep" +# +# # Rename metric tags # # [[processors.regex.tag_rename]] # # ## Regular expression to match on a tag name # # pattern = "^search_(\\w+)d$" @@ -2693,7 +2752,7 @@ # # ## both the existing and source tag. # # # result_key = "keep" # -# ## Rename metrics +# # Rename metrics # # [[processors.regex.metric_rename]] # # ## Regular expression to match on an metric name # # pattern = "^search_(\\w+)d$" @@ -2704,6 +2763,22 @@ # # Rename measurements, tags, and fields that pass through this filter. # [[processors.rename]] +# ## Specify one sub-table per rename operation. 
+# [[processors.rename.replace]] +# measurement = "network_interface_throughput" +# dest = "throughput" +# +# [[processors.rename.replace]] +# tag = "hostname" +# dest = "host" +# +# [[processors.rename.replace]] +# field = "lower" +# dest = "min" +# +# [[processors.rename.replace]] +# field = "upper" +# dest = "max" # # ReverseDNS does a reverse lookup on IP addresses to retrieve the DNS name @@ -2773,11 +2848,11 @@ # ## The Starlark source can be set as a string in this configuration file, or # ## by referencing a file containing the script. Only one source or script # ## should be set at once. -# ## +# # ## Source of the Starlark script. # source = ''' # def apply(metric): -# return metric +# return metric # ''' # # ## File containing a Starlark script. @@ -2793,15 +2868,15 @@ # # Perform string processing on tags, fields, and measurements # [[processors.strings]] -# ## Convert a tag value to uppercase -# # [[processors.strings.uppercase]] -# # tag = "method" -# # ## Convert a field value to lowercase and store in a new field # # [[processors.strings.lowercase]] # # field = "uri_stem" # # dest = "uri_stem_normalised" # +# ## Convert a tag value to uppercase +# # [[processors.strings.uppercase]] +# # tag = "method" +# # ## Convert a field value to titlecase # # [[processors.strings.titlecase]] # # field = "status" @@ -2855,10 +2930,10 @@ # # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit. # [[processors.tag_limit]] # ## Maximum number of tags to preserve -# limit = 10 +# limit = 3 # # ## List of tags to preferentially preserve -# keep = ["foo", "bar", "baz"] +# keep = ["environment", "region"] # # Uses a Go template to create a new tag @@ -2895,7 +2970,7 @@ # ## the defaults processor plugin to ensure fields are set if required. # # fields = ["value"] # -# ## What aggregation to use. Options: sum, mean, min, max +# ## What aggregation function to use. Options: sum, mean, min, max # # aggregation = "mean" # # ## Instead of the top k largest metrics, return the bottom k lowest metrics @@ -2952,50 +3027,26 @@ # drop_original = false # # ## Configures which basic stats to push as fields -# # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"] +# # stats = ["count","diff","rate","min","max","mean","non_negative_diff","non_negative_rate","stdev","s2","sum","interval"] # # Calculates a derivative for every field. # [[aggregators.derivative]] -# ## The period in which to flush the aggregator. -# period = "30s" -# ## -# ## If true, the original metric will be dropped by the -# ## aggregator and will not get sent to the output plugins. -# drop_original = false -# ## -# ## This aggregator will estimate a derivative for each field, which is -# ## contained in both the first and last metric of the aggregation interval. -# ## Without further configuration the derivative will be calculated with -# ## respect to the time difference between these two measurements in seconds. -# ## The formula applied is for every field: -# ## -# ## value_last - value_first -# ## derivative = -------------------------- -# ## time_difference_in_seconds -# ## -# ## The resulting derivative will be named *fieldname_rate*. The suffix -# ## "_rate" can be configured by the *suffix* parameter. When using a -# ## derivation variable you can include its name for more clarity. 
-# # suffix = "_rate" -# ## -# ## As an abstraction the derivative can be calculated not only by the time -# ## difference but by the difference of a field, which is contained in the -# ## measurement. This field is assumed to be monotonously increasing. This -# ## feature is used by specifying a *variable*. -# ## Make sure the specified variable is not filtered and exists in the metrics -# ## passed to this aggregator! -# # variable = "" -# ## -# ## When using a field as the derivation parameter the name of that field will -# ## be used for the resulting derivative, e.g. *fieldname_by_parameter*. -# ## -# ## Note, that the calculation is based on the actual timestamp of the -# ## measurements. When there is only one measurement during that period, the -# ## measurement will be rolled over to the next period. The maximum number of -# ## such roll-overs can be configured with a default of 10. -# # max_roll_over = 10 -# ## +# ## Specific Derivative Aggregator Arguments: +# +# ## Configure a custom derivation variable. Timestamp is used if none is given. +# # variable = "" +# +# ## Suffix to add to the field name for the derivative name. +# # suffix = "_rate" +# +# ## Roll-Over last measurement to first measurement of next period +# # max_roll_over = 10 +# +# ## General Aggregator Arguments: +# +# ## calculate derivative every 30 seconds +# period = "30s" # # Report the final metric of a series @@ -3010,7 +3061,7 @@ # series_timeout = "5m" -# # Create aggregate histograms. +# # Configuration for aggregate histogram metrics # [[aggregators.histogram]] # ## The period in which to flush the aggregator. # period = "30s" @@ -3137,7 +3188,7 @@ # ## aggregator and will not get sent to the output plugins. # drop_original = false # ## The fields for which the values will be counted -# fields = [] +# fields = ["status"] ############################################################################### @@ -3155,6 +3206,8 @@ collect_cpu_time = false ## If true, compute and report the sum of all non-idle CPU states report_active = false + ## If true and the info is available then add core_id and physical_id tags + core_tags = false # Read metrics about disk usage by mount point @@ -3166,6 +3219,11 @@ ## Ignore mount points by filesystem type. ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] + ## Ignore mount points by mount options. + ## The 'mount' command reports options of all mounts in parathesis. + ## Bind mounts can be ignored with the special 'bind' option. + # ignore_mount_opts = [] + # Read metrics about disk IO by device [[inputs.diskio]] @@ -3196,8 +3254,7 @@ # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] -# Get kernel statistics from /proc/stat -[[inputs.kernel]] +#[[inputs.kernel]] # no configuration @@ -3218,8 +3275,7 @@ # Read metrics about system load & uptime [[inputs.system]] - ## Uncomment to remove deprecated metrics. 
- # fielddrop = ["uptime_format"] + # no configuration # # Gather ActiveMQ metrics @@ -3227,6 +3283,11 @@ # ## ActiveMQ WebConsole URL # url = "http://127.0.0.1:8161" # +# ## Required ActiveMQ Endpoint +# ## deprecated in 1.11; use the url option +# # server = "192.168.50.10" +# # port = 8161 +# # ## Credentials for basic HTTP authentication # # username = "admin" # # password = "admin" @@ -3290,11 +3351,11 @@ # # Query statistics from AMD Graphics cards using rocm-smi binary # [[inputs.amd_rocm_smi]] -# ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath -# # bin_path = "/opt/rocm/bin/rocm-smi" +# ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath +# # bin_path = "/opt/rocm/bin/rocm-smi" # -# ## Optional: timeout for GPU polling -# # timeout = "5s" +# ## Optional: timeout for GPU polling +# # timeout = "5s" # # Read Apache status information (mod_status) @@ -3442,7 +3503,6 @@ # ## Tries to collect additional bond details from /sys/class/net/{bond} # ## currently only useful for LACP (mode 4) bonds # # collect_sys_details = false -# # # Collect Kafka topics and consumers status from Burrow HTTP API. @@ -3660,9 +3720,10 @@ # # scheme = "http" # # ## Metric version controls the mapping from Consul metrics into -# ## Telegraf metrics. +# ## Telegraf metrics. Version 2 moved all fields with string values +# ## to tags. # ## -# ## example: metric_version = 1; deprecated in 1.15 +# ## example: metric_version = 1; deprecated in 1.16 # ## metric_version = 2; recommended version # # metric_version = 1 # @@ -3695,7 +3756,8 @@ # # url = "http://127.0.0.1:8500" # # ## Use auth token for authorization. -# ## Only one of the options can be set. Leave empty to not use any token. +# ## If both are set, an error is thrown. +# ## If both are empty, no token will be used. # # token_file = "/path/to/auth/token" # ## OR # # token = "a1234567-40c7-9048-7bae-378687048181" @@ -3760,7 +3822,7 @@ # # Input plugin for DC/OS metrics # [[inputs.dcos]] # ## The DC/OS cluster URL. -# cluster_url = "https://dcos-ee-master-1" +# cluster_url = "https://dcos-master-1" # # ## The ID of the service account. # service_account_id = "telegraf" @@ -3846,13 +3908,19 @@ # endpoint = "unix:///var/run/docker.sock" # # ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) +# ## Note: configure this in one of the manager nodes in a Swarm cluster. +# ## configuring in multiple Swarm managers results in duplication of metrics. # gather_services = false # +# ## Only collect metrics for these containers. Values will be appended to +# ## container_name_include. +# ## Deprecated (1.4.0), use container_name_include +# container_names = [] +# # ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars # source_tag = false # -# ## Containers to include and exclude. Globs accepted. -# ## Note that an empty array for both will include all containers +# ## Containers to include and exclude. Collect all if empty. Globs accepted. # container_name_include = [] # container_name_exclude = [] # @@ -3866,25 +3934,38 @@ # ## Timeout for docker list, info, and stats commands # timeout = "5s" # +# ## Whether to report for each container per-device blkio (8:0, 8:1...), +# ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not. +# ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'. 
+# ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting +# ## is honored. +# perdevice = true +# # ## Specifies for which classes a per-device metric should be issued # ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...) # ## Please note that this setting has no effect if 'perdevice' is set to 'true' # # perdevice_include = ["cpu"] # +# ## Whether to report for each container total blkio and network stats or not. +# ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'. +# ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting +# ## is honored. +# total = false +# # ## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values. # ## Possible values are 'cpu', 'blkio' and 'network' # ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin. # ## Please note that this setting has no effect if 'total' is set to 'false' # # total_include = ["cpu", "blkio", "network"] # -# ## Which environment variables should we use as a tag -# ##tag_env = ["JAVA_HOME", "HEAP_SIZE"] -# # ## docker labels to include and exclude as tags. Globs accepted. # ## Note that an empty array for both will include all labels as tags # docker_label_include = [] # docker_label_exclude = [] # +# ## Which environment variables should we use as a tag +# tag_env = ["JAVA_HOME", "HEAP_SIZE"] +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" @@ -3893,11 +3974,14 @@ # # insecure_skip_verify = false -# # Read statistics from one or many dovecot servers +# # Read metrics about dovecot servers # [[inputs.dovecot]] # ## specify dovecot servers via an address:port list # ## e.g. # ## localhost:24242 +# ## or as an UDS socket +# ## e.g. +# ## /var/run/dovecot/old-stats # ## # ## If no servers are specified, then localhost is used as the host. # servers = ["localhost:24242"] @@ -3910,7 +3994,7 @@ # filters = [""] -# # Read metrics about docker containers from Fargate/ECS v2, v3 meta endpoints. +# # Read metrics about ECS containers # [[inputs.ecs]] # ## ECS metadata url. # ## Metadata v2 API is used if set explicitly. Otherwise, @@ -3941,8 +4025,8 @@ # # Read stats from one or more Elasticsearch servers or clusters # [[inputs.elasticsearch]] # ## specify a list of one or more Elasticsearch servers -# # you can add username and password to your url to use basic authentication: -# # servers = ["http://user:pass@localhost:9200"] +# ## you can add username and password to your url to use basic authentication: +# ## servers = ["http://user:pass@localhost:9200"] # servers = ["http://localhost:9200"] # # ## Timeout for HTTP requests to the elastic search server(s) @@ -3953,16 +4037,16 @@ # ## of the cluster. # local = true # -# ## Set cluster_health to true when you want to also obtain cluster health stats +# ## Set cluster_health to true when you want to obtain cluster health stats # cluster_health = false # -# ## Adjust cluster_health_level when you want to also obtain detailed health stats +# ## Adjust cluster_health_level when you want to obtain detailed health stats # ## The options are # ## - indices (default) # ## - cluster # # cluster_health_level = "indices" # -# ## Set cluster_stats to true when you want to also obtain cluster stats. 
+# ## Set cluster_stats to true when you want to obtain cluster stats. # cluster_stats = false # # ## Only gather cluster_stats from the master node. To work this require local = true @@ -3973,6 +4057,7 @@ # indices_include = ["_all"] # # ## One of "shards", "cluster", "indices" +# ## Currently only "shards" is implemented # indices_level = "shards" # # ## node_stats is a list of sub-stats that you want to have gathered. Valid options @@ -3992,8 +4077,9 @@ # # insecure_skip_verify = false # # ## Sets the number of most recent indices to return for indices that are configured with a date-stamped suffix. -# ## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all indices that match it, and sort them -# ## by the date or number after the wildcard. Metrics then are gathered for only the 'num_most_recent_indices' amount of most recent indices. +# ## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all indices that match it, and +# ## sort them by the date or number after the wildcard. Metrics then are gathered for only the 'num_most_recent_indices' amount of most +# ## recent indices. # # num_most_recent_indices = 0 @@ -4097,6 +4183,12 @@ # "/tmp/collect_*.sh" # ] # +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. "KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] +# # ## Timeout for each command to complete. # timeout = "5s" # @@ -4136,14 +4228,6 @@ # ## as well as ** to match recursive files and directories. # files = ["/tmp/metrics.out"] # -# -# ## Name a tag containing the name of the file the data was parsed from. Leave empty -# ## to disable. Cautious when file name variation is high, this can increase the cardinality -# ## significantly. Read more about cardinality here: -# ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality -# # file_tag = "" -# # -# # ## Character encoding to use when interpreting the file contents. Invalid # ## characters are replaced using the unicode replacement character. When set # ## to the empty string the data is not decoded to text. @@ -4153,11 +4237,18 @@ # ## character_encoding = "" # # character_encoding = "" # -# ## The dataformat to be read from files +# ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # data_format = "influx" +# +# +# ## Name a tag containing the name of the file the data was parsed from. Leave empty +# ## to disable. Cautious when file name variation is high, this can increase the cardinality +# ## significantly. Read more about cardinality here: +# ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality +# # file_tag = "" # # Count files in a directory @@ -4168,13 +4259,13 @@ # ## /var/log/** -> recursively find all directories in /var/log and count files in each directories # ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directories # ## /var/log -> count all files in /var/log and all of its subdirectories -# directories = ["/var/cache/apt/archives"] +# directories = ["/var/cache/apt", "/tmp"] # # ## Only count files that match the name pattern. Defaults to "*". -# name = "*.deb" +# name = "*" # # ## Count files in subdirectories. Defaults to true. 
-# recursive = false +# recursive = true # # ## Only count regular files. Defaults to true. # regular_only = true @@ -4198,14 +4289,8 @@ # [[inputs.filestat]] # ## Files to gather stats about. # ## These accept standard unix glob matching rules, but with the addition of -# ## ** as a "super asterisk". ie: -# ## "/var/log/**.log" -> recursively find all .log files in /var/log -# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log -# ## "/var/log/apache.log" -> just tail the apache log file -# ## -# ## See https://github.com/gobwas/glob for more examples -# ## -# files = ["/var/log/**.log"] +# ## ** as a "super asterisk". See https://github.com/gobwas/glob. +# files = ["/etc/telegraf/telegraf.conf", "/var/log/**.log"] # # ## If true, read the entire file and calculate an md5 checksum. # md5 = false @@ -4234,17 +4319,17 @@ # # ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent) # exclude = [ -# "monitor_agent", -# "dummy", +# "monitor_agent", +# "dummy", # ] # # Gather repository information from GitHub hosted repositories. # [[inputs.github]] -# ## List of repositories to monitor. +# ## List of repositories to monitor # repositories = [ -# "influxdata/telegraf", -# "influxdata/influxdb" +# "influxdata/telegraf", +# "influxdata/influxdb" # ] # # ## Github API access token. Unauthenticated requests are limited to 60 per hour. @@ -4257,11 +4342,11 @@ # # http_timeout = "5s" # # ## List of additional fields to query. -# ## NOTE: Getting those fields might involve issuing additional API-calls, so please -# ## make sure you do not exceed the rate-limit of GitHub. -# ## -# ## Available fields are: -# ## - pull-requests -- number of open and closed pull requests (2 API-calls per repository) +# ## NOTE: Getting those fields might involve issuing additional API-calls, so please +# ## make sure you do not exceed the rate-limit of GitHub. +# ## +# ## Available fields are: +# ## - pull-requests -- number of open and closed pull requests (2 API-calls per repository) # # additional_fields = [] @@ -4305,20 +4390,20 @@ # # insecure_skip_verify = false -# # Read metrics of haproxy, via socket or csv stats page +# # Read metrics of HAProxy, via socket or HTTP stats page # [[inputs.haproxy]] # ## An array of address to gather stats about. Specify an ip on hostname # ## with optional port. ie localhost, 10.10.3.33:1936, etc. # ## Make sure you specify the complete path to the stats endpoint # ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats # -# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats -# servers = ["http://myhaproxy.com:1936/haproxy?stats"] -# # ## Credentials for basic HTTP authentication # # username = "admin" # # password = "admin" # +# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats +# servers = ["http://myhaproxy.com:1936/haproxy?stats"] +# # ## You can also use local socket with standard wildcard globbing. # ## Server address not starting with 'http' will be treated as a possible # ## socket, so both examples below are valid. @@ -4363,6 +4448,13 @@ # ## Optional HTTP headers # # headers = {"X-Special-Header" = "Special-Value"} # +# ## HTTP entity-body to send with POST/PUT requests. +# # body = "" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. 
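+# ## For example, to gzip-compress request bodies (sketch, not the default):
+# # content_encoding = "gzip"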
+# # content_encoding = "identity" +# # ## Optional file with Bearer token # ## file content is added as an Authorization header # # bearer_token = "/path/to/file" @@ -4371,22 +4463,15 @@ # # username = "username" # # password = "pa$$word" # -# ## HTTP entity-body to send with POST/PUT requests. -# # body = "" -# -# ## HTTP Content-Encoding for write request body, can be set to "gzip" to -# ## compress body or "identity" to apply no encoding. -# # content_encoding = "identity" -# -# ## HTTP Proxy support -# # http_proxy_url = "" -# -# ## OAuth2 Client Credentials Grant +# ## OAuth2 Client Credentials. The options 'client_id', 'client_secret', and 'token_url' are required to use OAuth2. # # client_id = "clientid" # # client_secret = "secret" # # token_url = "https://indentityprovider/oauth2/v1/token" # # scopes = ["urn:opc:idm:__myscopes__"] # +# ## HTTP Proxy support +# # http_proxy_url = "" +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" @@ -4415,6 +4500,7 @@ # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # # data_format = "influx" +# # # HTTP/HTTPS request given an address a method and a timeout @@ -4473,6 +4559,8 @@ # # tls_key = "/etc/telegraf/key.pem" # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false +# ## Use the given name as the SNI server name on each URL +# # tls_server_name = "" # # ## HTTP Request Headers (all values must be strings) # # [inputs.http_response.headers] @@ -4487,12 +4575,18 @@ # # interface = "eth0" +# ## DEPRECATED: The 'httpjson' plugin is deprecated in version 1.6.0, use 'inputs.http' instead. # # Read flattened metrics from one or more JSON HTTP endpoints # [[inputs.httpjson]] -# ## DEPRECATED: The 'httpjson' plugin is deprecated in version 1.6.0, use 'inputs.http' instead. # ## NOTE This plugin only reads numerical measurements, strings and booleans # ## will be ignored. # +# ## Name for the service being polled. Will be appended to the name of the +# ## measurement e.g. "httpjson_webserver_stats". +# ## +# ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead. +# name = "webserver_stats" +# # ## URL of each server in the service's cluster # servers = [ # "http://localhost:9999/stats/", @@ -4504,7 +4598,7 @@ # ## HTTP method to use: GET or POST (case-sensitive) # method = "GET" # -# ## List of tag names to extract from top-level of JSON server response +# ## Tags to extract from top-level of JSON server response. # # tag_keys = [ # # "my_tag_1", # # "my_tag_2" @@ -4517,14 +4611,14 @@ # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # -# ## HTTP parameters (all values must be strings). For "GET" requests, data +# ## HTTP Request Parameters (all values must be strings). For "GET" requests, data # ## will be included in the query. For "POST" requests, data will be included # ## in the request body as "x-www-form-urlencoded". # # [inputs.httpjson.parameters] # # event_type = "cpu_spike" # # threshold = "0.75" # -# ## HTTP Headers (all values must be strings) +# ## HTTP Request Headers (all values must be strings). # # [inputs.httpjson.headers] # # X-Auth-Token = "my-xauth-token" # # apiVersion = "v1" @@ -4593,6 +4687,11 @@ # # Monitors internet speed using speedtest.net service # [[inputs.internet_speed]] +# ## This plugin downloads many MB of data each time it is run. 
As such +# ## consider setting a higher interval for this plugin to reduce the +# ## demand on your internet connection. +# # interval = "60m" +# # ## Sets if runs file download test # # enable_file_download = false # @@ -4641,7 +4740,7 @@ # ## gaps or overlap in pulled data # interval = "30s" # -# ## Timeout for the ipmitool command to complete +# ## Timeout for the ipmitool command to complete. Default is 20 seconds. # timeout = "20s" # # ## Schema Version: (Optional, defaults to version 1) @@ -4661,14 +4760,17 @@ # # Gather packets and bytes counters from Linux ipsets -# [[inputs.ipset]] -# ## By default, we only show sets which have already matched at least 1 packet. -# ## set include_unmatched_sets = true to gather them all. -# include_unmatched_sets = false -# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save") -# use_sudo = false -# ## The default timeout of 1s for ipset execution can be overridden here: -# # timeout = "1s" +# [[inputs.ipset]] +# ## By default, we only show sets which have already matched at least 1 packet. +# ## set include_unmatched_sets = true to gather them all. +# include_unmatched_sets = false +# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save") +# ## You can avoid using sudo or root, by setting appropriate privileges for +# ## the telegraf.service systemd service. +# use_sudo = false +# ## The default timeout of 1s for ipset execution can be overridden here: +# # timeout = "1s" +# # # Read jobs and cluster metrics from Jenkins instances @@ -4720,16 +4822,11 @@ # # max_connections = 5 +# ## DEPRECATED: The 'jolokia' plugin is deprecated in version 1.5.0, use 'inputs.jolokia2' instead. # # Read JMX metrics through Jolokia # [[inputs.jolokia]] -# ## DEPRECATED: The 'jolokia' plugin is deprecated in version 1.5.0, use 'inputs.jolokia2' instead. -# # DEPRECATED: the jolokia plugin has been deprecated in favor of the -# # jolokia2 plugin -# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2 -# # ## This is the context root used to compose the jolokia url # ## NOTE that Jolokia requires a trailing slash at the end of the context root -# ## NOTE that your jolokia security policy must allow for POST requests. # context = "/jolokia/" # # ## This specifies the mode used @@ -4752,13 +4849,6 @@ # ## Includes connection time, any redirects, and reading the response body. # # client_timeout = "4s" # -# ## Attribute delimiter -# ## -# ## When multiple attributes are returned for a single -# ## [inputs.jolokia.metrics], the field name is a concatenation of the metric -# ## name, and the attribute name, separated by the given delimiter. -# # delimiter = "_" -# # ## List of servers exposing jolokia read service # [[inputs.jolokia.servers]] # name = "as-server-01" @@ -4921,12 +5011,19 @@ # # selector_exclude = ["*"] # # ## Optional TLS Config +# ## Trusted root certificates for server # # tls_ca = "/path/to/cafile" +# ## Used for TLS client certificate authentication # # tls_cert = "/path/to/certfile" +# ## Used for TLS client certificate authentication # # tls_key = "/path/to/keyfile" +# ## Send the specified TLS server name via SNI # # tls_server_name = "kubernetes.example.com" # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false +# +# ## Uncomment to remove deprecated metrics. 
+# # fielddrop = ["terminated_reason"] # # Read metrics from the kubernetes kubelet api @@ -4961,7 +5058,7 @@ # [[inputs.leofs]] # ## An array of URLs of the form: # ## host [ ":" port] -# servers = ["127.0.0.1:4020"] +# servers = ["127.0.0.1:4010"] # # Provides Linux sysctl fs metrics @@ -5004,8 +5101,8 @@ # # Read metrics about LVM physical volumes, volume groups, logical volumes. # [[inputs.lvm]] -# ## Use sudo to run LVM commands -# use_sudo = false +# ## Use sudo to run LVM commands +# use_sudo = false # # Gathers metrics from the /3.0/reports MailChimp API @@ -5013,9 +5110,11 @@ # ## MailChimp API key # ## get from https://admin.mailchimp.com/account/api/ # api_key = "" # required +# # ## Reports for campaigns sent more than days_old ago will not be collected. -# ## 0 means collect all. +# ## 0 means collect all and is the default value. # days_old = 0 +# # ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old # # campaign_id = "" @@ -5040,21 +5139,22 @@ # # insecure_skip_verify = false -# # Read metrics from one or many mcrouter servers +# # Read metrics from one or many mcrouter servers. # [[inputs.mcrouter]] # ## An array of address to gather stats about. Specify an ip or hostname # ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc. -# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"] +# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"] # -# ## Timeout for metric collections from all servers. Minimum timeout is "1s". +# ## Timeout for metric collections from all servers. Minimum timeout is "1s". # # timeout = "5s" -# # Read metrics from one or many memcached servers +# # Read metrics from one or many memcached servers. # [[inputs.memcached]] -# ## An array of address to gather stats about. Specify an ip on hostname -# ## with optional port. ie localhost, 10.0.0.1:11211, etc. +# # An array of address to gather stats about. Specify an ip on hostname +# # with optional port. ie localhost, 10.0.0.1:11211, etc. # servers = ["localhost:11211"] +# # An array of unix memcached sockets to gather stats about. # # unix_sockets = ["/var/run/memcached.sock"] # # ## Optional TLS Config @@ -5136,6 +5236,9 @@ # # ## One or more mock data fields *must* be defined. # ## +# ## [[inputs.mock.constant]] +# ## name = "constant" +# ## value = value_of_any_type # ## [[inputs.mock.random]] # ## name = "rand" # ## min = 1.0 @@ -5186,21 +5289,23 @@ # # parity = "N" # # stop_bits = 1 # -# ## Trace the connection to the modbus device as debug messages -# ## Note: You have to enable telegraf's debug mode to see those messages! -# # debug_connection = false -# # ## For Modbus over TCP you can choose between "TCP", "RTUoverTCP" and "ASCIIoverTCP" # ## default behaviour is "TCP" if the controller is TCP # ## For Serial you can choose between "RTU" and "ASCII" # # transmission_mode = "RTU" # -# ## Define the configuration schema +# ## Trace the connection to the modbus device as debug messages +# ## Note: You have to enable telegraf's debug mode to see those messages! 
+# # debug_connection = false +# +# ## Define the configuration schema # ## |---register -- define fields per register type in the original style (only supports one slave ID) # ## |---request -- define fields on a requests base # configuration_type = "register" # -# ## Per register definition +# ## --- "register" configuration style --- +# +# ## Measurements # ## # # ## Digital Variables, Discrete Inputs and Coils @@ -5228,11 +5333,11 @@ # ## |---BA, DCBA - Little Endian # ## |---BADC - Mid-Big Endian # ## |---CDAB - Mid-Little Endian -# ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, -# ## FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation) -# ## FLOAT32, FIXED, UFIXED (fixed-point representation on input) -# ## scale - the final numeric variable representation -# ## address - variable address +# ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, +# ## FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation) +# ## FLOAT32, FIXED, UFIXED (fixed-point representation on input) +# ## scale - the final numeric variable representation +# ## address - variable address # # holding_registers = [ # { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]}, @@ -5249,26 +5354,28 @@ # ] # # +# ## --- "request" configuration style --- +# # ## Per request definition # ## # # ## Define a request sent to the device # ## Multiple of those requests can be defined. Data will be collated into metrics at the end of data collection. -# # [[inputs.modbus.request]] +# [[inputs.modbus.request]] # ## ID of the modbus slave device to query. # ## If you need to query multiple slave-devices, create several "request" definitions. -# # slave_id = 0 +# slave_id = 1 # # ## Byte order of the data. -# ## |---ABCD or MSW-BE -- Big Endian (Motorola) -# ## |---DCBA or LSW-LE -- Little Endian (Intel) -# ## |---BADC or MSW-LE -- Big Endian with byte swap -# ## |---CDAB or LSW-BE -- Little Endian with byte swap -# # byte_order = "ABCD" +# ## |---ABCD -- Big Endian (Motorola) +# ## |---DCBA -- Little Endian (Intel) +# ## |---BADC -- Big Endian with byte swap +# ## |---CDAB -- Little Endian with byte swap +# byte_order = "ABCD" # # ## Type of the register for the request # ## Can be "coil", "discrete", "holding" or "input" -# # register = "holding" +# register = "coil" # # ## Name of the measurement. # ## Can be overriden by the individual field definitions. Defaults to "modbus" @@ -5293,41 +5400,51 @@ # ## the fields are output as zero or one in UINT64 format by default. # # ## Coil / discrete input example -# # fields = [ -# # { address=0, name="motor1_run"}, -# # { address=1, name="jog", measurement="motor"}, -# # { address=2, name="motor1_stop", omit=true}, -# # { address=3, name="motor1_overheating"}, -# # ] +# fields = [ +# { address=0, name="motor1_run"}, +# { address=1, name="jog", measurement="motor"}, +# { address=2, name="motor1_stop", omit=true}, +# { address=3, name="motor1_overheating"}, +# ] # -# ## Per-request tags -# ## These tags take precedence over predefined tags. 
-# # [[inputs.modbus.request.tags]] -# # name = "value" +# [[inputs.modbus.request.tags]] +# machine = "impresser" +# location = "main building" # -# ## Holding / input example +# [[inputs.modbus.request]] +# ## Holding example # ## All of those examples will result in FLOAT64 field outputs -# # fields = [ -# # { address=0, name="voltage", type="INT16", scale=0.1 }, -# # { address=1, name="current", type="INT32", scale=0.001 }, -# # { address=3, name="power", type="UINT32", omit=true }, -# # { address=5, name="energy", type="FLOAT32", scale=0.001, measurement="W" }, -# # { address=7, name="frequency", type="UINT32", scale=0.1 }, -# # { address=8, name="power_factor", type="INT64", scale=0.01 }, -# # ] +# slave_id = 1 +# byte_order = "DCBA" +# register = "holding" +# fields = [ +# { address=0, name="voltage", type="INT16", scale=0.1 }, +# { address=1, name="current", type="INT32", scale=0.001 }, +# { address=3, name="power", type="UINT32", omit=true }, +# { address=5, name="energy", type="FLOAT32", scale=0.001, measurement="W" }, +# { address=7, name="frequency", type="UINT32", scale=0.1 }, +# { address=8, name="power_factor", type="INT64", scale=0.01 }, +# ] # -# ## Holding / input example with type conversions -# # fields = [ -# # { address=0, name="rpm", type="INT16" }, # will result in INT64 field -# # { address=1, name="temperature", type="INT16", scale=0.1 }, # will result in FLOAT64 field -# # { address=2, name="force", type="INT32", output="FLOAT64" }, # will result in FLOAT64 field -# # { address=4, name="hours", type="UINT32" }, # will result in UIN64 field -# # ] +# [[inputs.modbus.request.tags]] +# machine = "impresser" +# location = "main building" # -# ## Per-request tags -# ## These tags take precedence over predefined tags. -# # [[inputs.modbus.request.tags]] -# # name = "value" +# [[inputs.modbus.request]] +# ## Input example with type conversions +# slave_id = 1 +# byte_order = "ABCD" +# register = "input" +# fields = [ +# { address=0, name="rpm", type="INT16" }, # will result in INT64 field +# { address=1, name="temperature", type="INT16", scale=0.1 }, # will result in FLOAT64 field +# { address=2, name="force", type="INT32", output="FLOAT64" }, # will result in FLOAT64 field +# { address=4, name="hours", type="UINT32" }, # will result in UIN64 field +# ] +# +# [[inputs.modbus.request.tags]] +# machine = "impresser" +# location = "main building" # # # @@ -5348,9 +5465,13 @@ # ## For example: # ## mongodb://user:auth_key@10.10.3.30:27017, # ## mongodb://10.10.3.33:18832, -# servers = ["mongodb://127.0.0.1:27017?connect=direct"] +# ## +# ## If connecting to a cluster, users must include the "?connect=direct" in +# ## the URL to ensure that the connection goes directly to the specified node +# ## and not have all connections passed to the master node. +# servers = ["mongodb://127.0.0.1:27017/?connect=direct"] # -# ## When true, collect cluster status +# ## When true, collect cluster status. # ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which # ## may have an impact on performance. # # gather_cluster_status = true @@ -5403,7 +5524,7 @@ # ## Omit this option to use absolute paths. # base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0" # -# ## If true, Telegraf discard all data when a single file can't be read. +# ## If true discard all data when a single file can't be read. # ## Else, Telegraf omits the field generated from this file. 
# # fail_early = true # @@ -5465,19 +5586,19 @@ # ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS # # gather_innodb_metrics = false # -# ## gather metrics from SHOW SLAVE STATUS command output -# # gather_slave_status = false -# # ## gather metrics from all channels from SHOW SLAVE STATUS command output # # gather_all_slave_channels = false # -# ## use MariaDB dialect for all channels SHOW SLAVE STATUS +# ## gather metrics from SHOW SLAVE STATUS command output +# # gather_slave_status = false +# +# ## use SHOW ALL SLAVES STATUS command output for MariaDB # # mariadb_dialect = false # # ## gather metrics from SHOW BINARY LOGS command output # # gather_binary_logs = false # -# ## gather metrics from PERFORMANCE_SCHEMA.GLOBAL_VARIABLES +# ## gather metrics from SHOW GLOBAL VARIABLES command output # # gather_global_variables = true # # ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE @@ -5496,6 +5617,15 @@ # # gather_file_events_stats = false # # ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST +# # gather_perf_events_statements = false +# # +# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME +# # gather_perf_sum_per_acc_per_event = false +# # +# ## list of events to be gathered for gather_perf_sum_per_acc_per_event +# ## in case of empty list all events will be gathered +# # perf_summary_events = [] +# # # # gather_perf_events_statements = false # # ## the limits for metrics form perf_events_statements @@ -5503,13 +5633,6 @@ # # perf_events_statements_limit = 250 # # perf_events_statements_time_limit = 86400 # -# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME -# # gather_perf_sum_per_acc_per_event = false -# -# ## list of events to be gathered for gather_perf_sum_per_acc_per_event -# ## in case of empty list all events will be gathered -# # perf_summary_events = [] -# # ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES) # ## example: interval_slow = "30m" # # interval_slow = "" @@ -5544,15 +5667,17 @@ # # ## The response_timeout specifies how long to wait for a reply from the Apex. # #response_timeout = "5s" +# -# # Read metrics about network interface usage +# # Gather metrics about network interfaces # [[inputs.net]] # ## By default, telegraf gathers stats from any up interface (excluding loopback) # ## Setting interfaces will tell it to gather these explicit interfaces, -# ## regardless of status. +# ## regardless of status. When specifying an interface, glob-style +# ## patterns are also supported. # ## -# # interfaces = ["eth0"] +# # interfaces = ["eth*", "enp0s[0-1]", "lo"] # ## # ## On linux systems telegraf also collects protocol stats. # ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics. @@ -5584,7 +5709,7 @@ # ## expected string in answer # # expect = "ssh" # -# ## Uncomment to remove deprecated fields +# ## Uncomment to remove deprecated fields; recommended for new deploys # # fielddrop = ["result_type", "string_found"] @@ -5624,23 +5749,23 @@ # # Read Nginx's basic status information (ngx_http_stub_status_module) # [[inputs.nginx]] -# # An array of Nginx stub_status URI to gather stats. +# ## An array of Nginx stub_status URI to gather stats. 
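+# ## Example with several instances (hostnames below are placeholders):
+# # urls = ["http://web-1.example.com/server_status", "http://web-2.example.com/server_status"]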
# urls = ["http://localhost/server_status"] # # ## Optional TLS Config -# tls_ca = "/etc/telegraf/ca.pem" -# tls_cert = "/etc/telegraf/cert.cer" -# tls_key = "/etc/telegraf/key.key" +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" # ## Use TLS but skip chain & host verification -# insecure_skip_verify = false +# # insecure_skip_verify = false # -# # HTTP response timeout (default: 5s) +# ## HTTP response timeout (default: 5s) # response_timeout = "5s" -# # Read Nginx Plus' full status information (ngx_http_status_module) +# # Read Nginx Plus' advanced status information # [[inputs.nginx_plus]] -# ## An array of ngx_http_status_module or status URI to gather stats. +# ## An array of Nginx status URIs to gather stats. # urls = ["http://localhost/status"] # # # HTTP response timeout (default: 5s) @@ -5654,11 +5779,10 @@ # # insecure_skip_verify = false -# # Read Nginx Plus Api documentation +# # Read Nginx Plus API advanced status information # [[inputs.nginx_plus_api]] -# ## An array of API URI to gather stats. +# ## An array of Nginx API URIs to gather stats. # urls = ["http://localhost/api"] -# # # Nginx API version, default: 3 # # api_version = 3 # @@ -5749,7 +5873,7 @@ # # tls_key = /path/to/keyfile -# # A plugin to collect stats from the NSD authoritative DNS name server +# # A plugin to collect stats from the NSD DNS resolver # [[inputs.nsd]] # ## Address of server to connect to, optionally ':port'. Defaults to the # ## address in the nsd config file. @@ -5860,8 +5984,9 @@ # ## namespace - OPC UA namespace of the node (integer value 0 thru 3) # ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque) # ## identifier - OPC UA ID (tag as shown in opcua browser) +# ## tags - extra tags to be added to the output metric (optional) # ## Example: -# ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262"} +# ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", tags=[["tag1","value1"],["tag2","value2]]} # # nodes = [ # # {name="", namespace="", identifier_type="", identifier=""}, # # {name="", namespace="", identifier_type="", identifier=""}, @@ -5918,8 +6043,8 @@ # bind_dn = "" # bind_password = "" # -# # Reverse metric names so they sort more naturally. Recommended. 
-# # This defaults to false if unset, but is set to true when generating a new config +# # reverse metric names so they sort more naturally +# # Defaults to false if unset, but is set to true when generating a new config # reverse_metric_names = true @@ -5935,16 +6060,16 @@ # # timeout = "5ms" -# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver -# [[inputs.opensmtpd]] -# ## If running as a restricted user you can prepend sudo for additional access: -# #use_sudo = false +# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver +# [[inputs.opensmtpd]] +# ## If running as a restricted user you can prepend sudo for additional access: +# #use_sudo = false # -# ## The default location of the smtpctl binary can be overridden with: -# binary = "/usr/sbin/smtpctl" +# ## The default location of the smtpctl binary can be overridden with: +# binary = "/usr/sbin/smtpctl" # -# ## The default timeout of 1000ms can be overridden with (in milliseconds): -# timeout = 1000 +# # The default timeout of 1s can be overridden with: +# #timeout = "1s" # # Collects performance metrics from OpenStack services @@ -6027,7 +6152,7 @@ # ## "metric", "imperial", or "standard". # # units = "metric" # -# ## Query interval; OpenWeatherMap updates their weather data every 10 +# ## Query interval; OpenWeatherMap weather data is updated every 10 # ## minutes. # interval = "10m" @@ -6068,6 +6193,8 @@ # ## "/var/run/php5-fpm.sock" # ## or using a custom fpm status path: # ## "/var/run/php5-fpm.sock:fpm-custom-status-path" +# ## glob patterns are also supported: +# ## "/var/run/php*.sock" # ## # ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie: # ## "fcgi://10.0.0.12:9000/status" @@ -6143,8 +6270,10 @@ # # Read metrics from one or many PowerDNS servers # [[inputs.powerdns]] -# ## An array of sockets to gather stats about. -# ## Specify a path to unix socket. +# # An array of sockets to gather stats about. +# # Specify a path to unix socket. +# # +# # If no servers are specified, then '/var/run/pdns.controlsocket' is used as the path. # unix_sockets = ["/var/run/pdns.controlsocket"] @@ -6236,6 +6365,8 @@ # [[inputs.rabbitmq]] # ## Management Plugin url. (default: http://localhost:15672) # # url = "http://localhost:15672" +# ## Tag added to rabbitmq_overview series; deprecated: use tags +# # name = "rmq-server-1" # ## Credentials # # username = "guest" # # password = "guest" @@ -6261,6 +6392,11 @@ # ## specified, metrics for all nodes are gathered. # # nodes = ["rabbit@node1", "rabbit@node2"] # +# ## A list of queues to gather as the rabbitmq_queue measurement. If not +# ## specified, metrics for all queues are gathered. +# ## Deprecated in 1.6: Use queue_name_include instead. +# # queues = ["telegraf"] +# # ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not # ## specified, metrics for all exchanges are gathered. # # exchanges = ["telegraf"] @@ -6273,16 +6409,13 @@ # # ## Queues to include and exclude. Globs accepted. # ## Note that an empty array for both will include all queues -# queue_name_include = [] -# queue_name_exclude = [] +# # queue_name_include = [] +# # queue_name_exclude = [] # -# ## Federation upstreams include and exclude when gathering the rabbitmq_federation measurement. -# ## If neither are specified, metrics for all federation upstreams are gathered. 
-# ## Federation link metrics will only be gathered for queues and exchanges -# ## whose non-federation metrics will be collected (e.g a queue excluded -# ## by the 'queue_name_exclude' option will also be excluded from federation). -# ## Globs accepted. -# # federation_upstream_include = ["dataCentre-*"] +# ## Federation upstreams to include and exclude specified as an array of glob +# ## pattern strings. Federation links can also be limited by the queue and +# ## exchange filters. +# # federation_upstream_include = [] # # federation_upstream_exclude = [] @@ -6332,15 +6465,15 @@ # # Read CPU, Fans, Powersupply and Voltage metrics of hardware server through redfish APIs # [[inputs.redfish]] -# ## Server url +# ## Redfish API Base URL. # address = "https://127.0.0.1:5000" # -# ## Username, Password for hardware server +# ## Credentials for the Redfish API. # username = "root" # password = "password123456" # -# ## ComputerSystemId -# computer_system_id="2M220100SL" +# ## System Id to collect data for in Redfish APIs. +# computer_system_id="System.Embedded.1" # # ## Amount of time allowed to complete the HTTP request # # timeout = "5s" @@ -6353,40 +6486,6 @@ # # insecure_skip_verify = false -# # Read metrics from one or many redis servers -# [[inputs.redis]] -# ## specify servers via a url matching: -# ## [protocol://][:password]@address[:port] -# ## e.g. -# ## tcp://localhost:6379 -# ## tcp://:password@192.168.99.100 -# ## unix:///var/run/redis.sock -# ## -# ## If no servers are specified, then localhost is used as the host. -# ## If no port is specified, 6379 is used -# servers = ["tcp://localhost:6379"] -# -# ## Optional. Specify redis commands to retrieve values -# # [[inputs.redis.commands]] -# # # The command to run where each argument is a separate element -# # command = ["get", "sample-key"] -# # # The field to store the result in -# # field = "sample-key-value" -# # # The type of the result -# # # Can be "string", "integer", or "float" -# # type = "string" -# -# ## specify server password -# # password = "s#cr@t%" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = true - - # # Read metrics from one or many redis-sentinel servers # [[inputs.redis_sentinel]] # ## specify servers via a url matching: @@ -6416,11 +6515,11 @@ # ## rethinkdb://10.10.3.33:18832, # ## 10.0.0.1:10000, etc. # servers = ["127.0.0.1:28015"] -# ## +# # ## If you use actual rethinkdb of > 2.3.0 with username/password authorization, # ## protocol have to be named "rethinkdb2" - it will use 1_0 H. # # servers = ["rethinkdb2://username:password@127.0.0.1:28015"] -# ## +# # ## If you use older versions of rethinkdb (<2.2) with auth_key, protocol # ## have to be named "rethinkdb". # # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"] @@ -6454,53 +6553,53 @@ # # Read metrics from storage devices supporting S.M.A.R.T. 
# [[inputs.smart]] -# ## Optionally specify the path to the smartctl executable -# # path_smartctl = "/usr/bin/smartctl" +# ## Optionally specify the path to the smartctl executable +# # path_smartctl = "/usr/bin/smartctl" # -# ## Optionally specify the path to the nvme-cli executable -# # path_nvme = "/usr/bin/nvme" +# ## Optionally specify the path to the nvme-cli executable +# # path_nvme = "/usr/bin/nvme" # -# ## Optionally specify if vendor specific attributes should be propagated for NVMe disk case -# ## ["auto-on"] - automatically find and enable additional vendor specific disk info -# ## ["vendor1", "vendor2", ...] - e.g. "Intel" enable additional Intel specific disk info -# # enable_extensions = ["auto-on"] +# ## Optionally specify if vendor specific attributes should be propagated for NVMe disk case +# ## ["auto-on"] - automatically find and enable additional vendor specific disk info +# ## ["vendor1", "vendor2", ...] - e.g. "Intel" enable additional Intel specific disk info +# # enable_extensions = ["auto-on"] # -# ## On most platforms used cli utilities requires root access. -# ## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli. -# ## Sudo must be configured to allow the telegraf user to run smartctl or nvme-cli -# ## without a password. -# # use_sudo = false +# ## On most platforms used cli utilities requires root access. +# ## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli. +# ## Sudo must be configured to allow the telegraf user to run smartctl or nvme-cli +# ## without a password. +# # use_sudo = false # -# ## Skip checking disks in this power mode. Defaults to -# ## "standby" to not wake up disks that have stopped rotating. -# ## See --nocheck in the man pages for smartctl. -# ## smartctl version 5.41 and 5.42 have faulty detection of -# ## power mode and might require changing this value to -# ## "never" depending on your disks. -# # nocheck = "standby" +# ## Skip checking disks in this power mode. Defaults to +# ## "standby" to not wake up disks that have stopped rotating. +# ## See --nocheck in the man pages for smartctl. +# ## smartctl version 5.41 and 5.42 have faulty detection of +# ## power mode and might require changing this value to +# ## "never" depending on your disks. +# # nocheck = "standby" # -# ## Gather all returned S.M.A.R.T. attribute metrics and the detailed -# ## information from each drive into the 'smart_attribute' measurement. -# # attributes = false +# ## Gather all returned S.M.A.R.T. attribute metrics and the detailed +# ## information from each drive into the 'smart_attribute' measurement. +# # attributes = false # -# ## Optionally specify devices to exclude from reporting if disks auto-discovery is performed. -# # excludes = [ "/dev/pass6" ] +# ## Optionally specify devices to exclude from reporting if disks auto-discovery is performed. +# # excludes = [ "/dev/pass6" ] # -# ## Optionally specify devices and device type, if unset -# ## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done -# ## and all found will be included except for the excluded in excludes. -# # devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"] +# ## Optionally specify devices and device type, if unset +# ## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done +# ## and all found will be included except for the excluded in excludes. +# # devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"] # -# ## Timeout for the cli command to complete. 
-# # timeout = "30s" +# ## Timeout for the cli command to complete. +# # timeout = "30s" # -# ## Optionally call smartctl and nvme-cli with a specific concurrency policy. -# ## By default, smartctl and nvme-cli are called in separate threads (goroutines) to gather disk attributes. -# ## Some devices (e.g. disks in RAID arrays) may have access limitations that require sequential reading of -# ## SMART data - one individual array drive at the time. In such case please set this configuration option -# ## to "sequential" to get readings for all drives. -# ## valid options: concurrent, sequential -# # read_method = "concurrent" +# ## Optionally call smartctl and nvme-cli with a specific concurrency policy. +# ## By default, smartctl and nvme-cli are called in separate threads (goroutines) to gather disk attributes. +# ## Some devices (e.g. disks in RAID arrays) may have access limitations that require sequential reading of +# ## SMART data - one individual array drive at the time. In such case please set this configuration option +# ## to "sequential" to get readings for all drives. +# ## valid options: concurrent, sequential +# # read_method = "concurrent" # # Retrieves SNMP values from remote agents @@ -6526,12 +6625,12 @@ # ## To add paths when translating with netsnmp, use the MIBDIRS environment variable # # path = ["/usr/share/snmp/mibs"] # -# ## Agent host tag; the tag used to reference the source host -# # agent_host_tag = "agent_host" -# # ## SNMP community string. # # community = "public" # +# ## Agent host tag +# # agent_host_tag = "agent_host" +# # ## Number of retries to attempt. # # retries = 3 # @@ -6550,7 +6649,9 @@ # # sec_level = "authNoPriv" # ## Context Name. # # context_name = "" -# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". +# ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C", or "". +# ### Protocols "AES192", "AES192", "AES256", and "AES256C" require the underlying net-snmp tools +# ### to be compiled with --enable-blumenthal-aes (http://www.net-snmp.org/docs/INSTALL.html) # # priv_protocol = "" # ## Privacy password used for encrypted messages. # # priv_password = "" @@ -6558,11 +6659,29 @@ # ## Add fields and tables defining the variables you wish to collect. This # ## example collects the system uptime and interface variables. Reference the # ## full plugin documentation for configuration details. +# [[inputs.snmp.field]] +# oid = "RFC1213-MIB::sysUpTime.0" +# name = "uptime" +# +# [[inputs.snmp.field]] +# oid = "RFC1213-MIB::sysName.0" +# name = "source" +# is_tag = true +# +# [[inputs.snmp.table]] +# oid = "IF-MIB::ifTable" +# name = "interface" +# inherit_tags = ["source"] +# +# [[inputs.snmp.table.field]] +# oid = "IF-MIB::ifDescr" +# name = "ifDescr" +# is_tag = true +# ## DEPRECATED: The 'snmp_legacy' plugin is deprecated in version 1.0.0, use 'inputs.snmp' instead. # # DEPRECATED! PLEASE USE inputs.snmp INSTEAD. # [[inputs.snmp_legacy]] -# ## DEPRECATED: The 'snmp_legacy' plugin is deprecated in version 1.0.0, use 'inputs.snmp' instead. 
# ## Use 'oids.txt' file to translate oids to names # ## To generate 'oids.txt' you need to run: # ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt @@ -6584,7 +6703,6 @@ # collect = ["mybulk", "sysservices", "sysdescr"] # # Simple list of OIDs to get, in addition to "collect" # get_oids = [] -# # [[inputs.snmp.host]] # address = "192.168.2.3:161" # community = "public" @@ -6596,31 +6714,25 @@ # "ifNumber", # ".1.3.6.1.2.1.1.3.0", # ] -# # [[inputs.snmp.get]] # name = "ifnumber" # oid = "ifNumber" -# # [[inputs.snmp.get]] # name = "interface_speed" # oid = "ifSpeed" # instance = "0" -# # [[inputs.snmp.get]] # name = "sysuptime" # oid = ".1.3.6.1.2.1.1.3.0" # unit = "second" -# # [[inputs.snmp.bulk]] # name = "mybulk" # max_repetition = 127 # oid = ".1.3.6.1.2.1.1" -# # [[inputs.snmp.bulk]] # name = "ifoutoctets" # max_repetition = 127 # oid = "ifOutOctets" -# # [[inputs.snmp.host]] # address = "192.168.2.13:161" # #address = "127.0.0.1:161" @@ -6633,19 +6745,16 @@ # [[inputs.snmp.host.table]] # name = "iftable3" # include_instances = ["enp5s0", "eth1"] -# # # SNMP TABLEs # # table without mapping neither subtables # [[inputs.snmp.table]] # name = "iftable1" # oid = ".1.3.6.1.2.1.31.1.1.1" -# # # table without mapping but with subtables # [[inputs.snmp.table]] # name = "iftable2" # oid = ".1.3.6.1.2.1.31.1.1.1" # sub_tables = [".1.3.6.1.2.1.2.2.1.13"] -# # # table with mapping but without subtables # [[inputs.snmp.table]] # name = "iftable3" @@ -6653,7 +6762,6 @@ # # if empty. get all instances # mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" # # if empty, get all subtables -# # # table with both mapping and subtables # [[inputs.snmp.table]] # name = "iftable4" @@ -6669,10 +6777,10 @@ # [[inputs.solr]] # ## specify a list of one or more Solr servers # servers = ["http://localhost:8983"] -# +# ## # ## specify a list of one or more Solr cores (default - all) # # cores = ["main"] -# +# ## # ## Optional HTTP Basic Auth Credentials # # username = "username" # # password = "pa$$word" @@ -6691,8 +6799,8 @@ # ## Exclude timeseries that start with the given metric type. # # metric_type_prefix_exclude = [] # -# ## Many metrics are updated once per minute; it is recommended to override -# ## the agent level interval with a value of 1m or greater. +# ## Most metrics are updated no more than once per minute; it is recommended +# ## to override the agent level interval with a value of 1m or greater. # interval = "1m" # # ## Maximum number of API calls to make per second. The quota for accounts @@ -6728,9 +6836,9 @@ # ## For a list of aligner strings see: # ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner # # distribution_aggregation_aligners = [ -# # "ALIGN_PERCENTILE_99", -# # "ALIGN_PERCENTILE_95", -# # "ALIGN_PERCENTILE_50", +# # "ALIGN_PERCENTILE_99", +# # "ALIGN_PERCENTILE_95", +# # "ALIGN_PERCENTILE_50", # # ] # # ## Filters can be added to reduce the number of time series matched. All @@ -6754,8 +6862,8 @@ # ## Metric labels refine the time series selection with the following expression: # ## metric.labels. 
= # # [[inputs.stackdriver.filter.metric_labels]] -# # key = "device_name" -# # value = 'one_of("sda", "sdb")' +# # key = "device_name" +# # value = 'one_of("sda", "sdb")' # # Get synproxy counter statistics from procfs @@ -6763,6 +6871,24 @@ # # no configuration +# # Gather systemd units state +# [[inputs.systemd_units]] +# ## Set timeout for systemctl execution +# # timeout = "1s" +# # +# ## Filter for a specific unit type, default is "service", other possible +# ## values are "socket", "target", "device", "mount", "automount", "swap", +# ## "timer", "path", "slice" and "scope ": +# # unittype = "service" +# # +# ## Filter for a specific pattern, default is "" (i.e. all), other possible +# ## values are valid pattern for systemctl, e.g. "a*" for all units with +# ## names starting with "a" +# # pattern = "" +# ## pattern = "telegraf* influxdb*" +# ## pattern = "a*" + + # # Reads metrics from a Teamspeak 3 Server via ServerQuery # [[inputs.teamspeak]] # ## Server address for Teamspeak 3 ServerQuery @@ -6771,6 +6897,8 @@ # username = "serverqueryuser" # ## Password for ServerQuery # password = "secret" +# ## Nickname of the ServerQuery client +# nickname = "telegraf" # ## Array of virtual servers # # virtual_servers = [1] @@ -6782,16 +6910,16 @@ # # Read Tengine's basic status information (ngx_http_reqstat_module) # [[inputs.tengine]] -# # An array of Tengine reqstat module URI to gather stats. +# ## An array of Tengine reqstat module URI to gather stats. # urls = ["http://127.0.0.1/us"] # -# # HTTP response timeout (default: 5s) +# ## HTTP response timeout (default: 5s) # # response_timeout = "5s" # # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.cer" -# # tls_key = "/etc/telegraf/key.key" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false @@ -6858,7 +6986,7 @@ # # Read uWSGI metrics. # [[inputs.uwsgi]] -# ## List with urls of uWSGI Stats servers. URL must match pattern: +# ## List with urls of uWSGI Stats servers. Url must match pattern: # ## scheme://address[:port] # ## # ## For example: @@ -6969,153 +7097,153 @@ # exclude_empty = ["*ActivityID", "UserID"] -# # Input plugin to counterPath Performance Counters on Windows operating systems -# [[inputs.win_perf_counters]] -# ## By default this plugin returns basic CPU and Disk statistics. -# ## See the README file for more examples. -# ## Uncomment examples below or write your own as you see fit. If the system -# ## being polled for data does not have the Object at startup of the Telegraf -# ## agent, it will not be gathered. -# ## Settings: -# # PrintValid = false # Print All matching performance counters -# # Whether request a timestamp along with the PerfCounter data or just use current time -# # UsePerfCounterTime=true -# # If UseWildcardsExpansion params is set to true, wildcards (partial wildcards in instance names and wildcards in counters names) in configured counter paths will be expanded -# # and in case of localized Windows, counter paths will be also localized. It also returns instance indexes in instance names. -# # If false, wildcards (not partial) in instance names will still be expanded, but instance indexes will not be returned in instance names. -# #UseWildcardsExpansion = false -# # When running on a localized version of Windows and with UseWildcardsExpansion = true, Windows will -# # localize object and counter names. 
When LocalizeWildcardsExpansion = false, use the names in object.Counters instead -# # of the localized names. Only Instances can have wildcards in this case. ObjectName and Counters must not have wildcards when this -# # setting is false. -# #LocalizeWildcardsExpansion = true -# # Period after which counters will be reread from configuration and wildcards in counter paths expanded -# CountersRefreshInterval="1m" -# ## Accepts a list of PDH error codes which are defined in pdh.go, if this error is encountered it will be ignored -# ## For example, you can provide "PDH_NO_DATA" to ignore performance counters with no instances -# ## By default no errors are ignored -# ## You can find the list here: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/win_perf_counters/pdh.go -# ## e.g.: IgnoredErrors = ["PDH_NO_DATA"] -# # IgnoredErrors = [] -# -# [[inputs.win_perf_counters.object]] -# # Processor usage, alternative to native, reports on a per core. -# ObjectName = "Processor" -# Instances = ["*"] -# Counters = [ -# "% Idle Time", -# "% Interrupt Time", -# "% Privileged Time", -# "% User Time", -# "% Processor Time", -# "% DPC Time", -# ] -# Measurement = "win_cpu" -# # Set to true to include _Total instance when querying for all (*). -# # IncludeTotal=false -# # Print out when the performance counter is missing from object, counter or instance. -# # WarnOnMissing = false -# # Gather raw values instead of formatted. Raw value is stored in the field name with the "_Raw" suffix, e.g. "Disk_Read_Bytes_sec_Raw". -# # UseRawValues = true -# -# [[inputs.win_perf_counters.object]] -# # Disk times and queues -# ObjectName = "LogicalDisk" -# Instances = ["*"] -# Counters = [ -# "% Idle Time", -# "% Disk Time", -# "% Disk Read Time", -# "% Disk Write Time", -# "% User Time", -# "% Free Space", -# "Current Disk Queue Length", -# "Free Megabytes", -# ] -# Measurement = "win_disk" -# -# [[inputs.win_perf_counters.object]] -# ObjectName = "PhysicalDisk" -# Instances = ["*"] -# Counters = [ -# "Disk Read Bytes/sec", -# "Disk Write Bytes/sec", -# "Current Disk Queue Length", -# "Disk Reads/sec", -# "Disk Writes/sec", -# "% Disk Time", -# "% Disk Read Time", -# "% Disk Write Time", -# ] -# Measurement = "win_diskio" -# -# [[inputs.win_perf_counters.object]] -# ObjectName = "Network Interface" -# Instances = ["*"] -# Counters = [ -# "Bytes Received/sec", -# "Bytes Sent/sec", -# "Packets Received/sec", -# "Packets Sent/sec", -# "Packets Received Discarded", -# "Packets Outbound Discarded", -# "Packets Received Errors", -# "Packets Outbound Errors", -# ] -# Measurement = "win_net" -# -# -# [[inputs.win_perf_counters.object]] -# ObjectName = "System" -# Counters = [ -# "Context Switches/sec", -# "System Calls/sec", -# "Processor Queue Length", -# "System Up Time", -# ] -# Instances = ["------"] -# Measurement = "win_system" -# -# [[inputs.win_perf_counters.object]] -# # Example counterPath where the Instance portion must be removed to get data back, -# # such as from the Memory object. -# ObjectName = "Memory" -# Counters = [ -# "Available Bytes", -# "Cache Faults/sec", -# "Demand Zero Faults/sec", -# "Page Faults/sec", -# "Pages/sec", -# "Transition Faults/sec", -# "Pool Nonpaged Bytes", -# "Pool Paged Bytes", -# "Standby Cache Reserve Bytes", -# "Standby Cache Normal Priority Bytes", -# "Standby Cache Core Bytes", -# ] -# Instances = ["------"] # Use 6 x - to remove the Instance bit from the counterPath. 
-# Measurement = "win_mem" -# -# [[inputs.win_perf_counters.object]] -# # Example query where the Instance portion must be removed to get data back, -# # such as from the Paging File object. -# ObjectName = "Paging File" -# Counters = [ -# "% Usage", -# ] -# Instances = ["_Total"] -# Measurement = "win_swap" +# # # Input plugin to counterPath Performance Counters on Windows operating systems +# # [[inputs.win_perf_counters]] +# # ## By default this plugin returns basic CPU and Disk statistics. +# # ## See the README file for more examples. +# # ## Uncomment examples below or write your own as you see fit. If the system +# # ## being polled for data does not have the Object at startup of the Telegraf +# # ## agent, it will not be gathered. +# # ## Settings: +# # # PrintValid = false # Print All matching performance counters +# # # Whether request a timestamp along with the PerfCounter data or just use current time +# # # UsePerfCounterTime=true +# # # If UseWildcardsExpansion params is set to true, wildcards (partial wildcards in instance names and wildcards in counters names) in configured counter paths will be expanded +# # # and in case of localized Windows, counter paths will be also localized. It also returns instance indexes in instance names. +# # # If false, wildcards (not partial) in instance names will still be expanded, but instance indexes will not be returned in instance names. +# # #UseWildcardsExpansion = false +# # # When running on a localized version of Windows and with UseWildcardsExpansion = true, Windows will +# # # localize object and counter names. When LocalizeWildcardsExpansion = false, use the names in object.Counters instead +# # # of the localized names. Only Instances can have wildcards in this case. ObjectName and Counters must not have wildcards when this +# # # setting is false. +# # #LocalizeWildcardsExpansion = true +# # # Period after which counters will be reread from configuration and wildcards in counter paths expanded +# # CountersRefreshInterval="1m" +# # ## Accepts a list of PDH error codes which are defined in pdh.go, if this error is encountered it will be ignored +# # ## For example, you can provide "PDH_NO_DATA" to ignore performance counters with no instances +# # ## By default no errors are ignored +# # ## You can find the list here: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/win_perf_counters/pdh.go +# # ## e.g.: IgnoredErrors = ["PDH_NO_DATA"] +# # # IgnoredErrors = [] +# # +# # [[inputs.win_perf_counters.object]] +# # # Processor usage, alternative to native, reports on a per core. +# # ObjectName = "Processor" +# # Instances = ["*"] +# # Counters = [ +# # "% Idle Time", +# # "% Interrupt Time", +# # "% Privileged Time", +# # "% User Time", +# # "% Processor Time", +# # "% DPC Time", +# # ] +# # Measurement = "win_cpu" +# # # Set to true to include _Total instance when querying for all (*). +# # # IncludeTotal=false +# # # Print out when the performance counter is missing from object, counter or instance. +# # # WarnOnMissing = false +# # # Gather raw values instead of formatted. Raw value is stored in the field name with the "_Raw" suffix, e.g. "Disk_Read_Bytes_sec_Raw". 
+# # # UseRawValues = true +# # +# # [[inputs.win_perf_counters.object]] +# # # Disk times and queues +# # ObjectName = "LogicalDisk" +# # Instances = ["*"] +# # Counters = [ +# # "% Idle Time", +# # "% Disk Time", +# # "% Disk Read Time", +# # "% Disk Write Time", +# # "% User Time", +# # "% Free Space", +# # "Current Disk Queue Length", +# # "Free Megabytes", +# # ] +# # Measurement = "win_disk" +# # +# # [[inputs.win_perf_counters.object]] +# # ObjectName = "PhysicalDisk" +# # Instances = ["*"] +# # Counters = [ +# # "Disk Read Bytes/sec", +# # "Disk Write Bytes/sec", +# # "Current Disk Queue Length", +# # "Disk Reads/sec", +# # "Disk Writes/sec", +# # "% Disk Time", +# # "% Disk Read Time", +# # "% Disk Write Time", +# # ] +# # Measurement = "win_diskio" +# # +# # [[inputs.win_perf_counters.object]] +# # ObjectName = "Network Interface" +# # Instances = ["*"] +# # Counters = [ +# # "Bytes Received/sec", +# # "Bytes Sent/sec", +# # "Packets Received/sec", +# # "Packets Sent/sec", +# # "Packets Received Discarded", +# # "Packets Outbound Discarded", +# # "Packets Received Errors", +# # "Packets Outbound Errors", +# # ] +# # Measurement = "win_net" +# # +# # +# # [[inputs.win_perf_counters.object]] +# # ObjectName = "System" +# # Counters = [ +# # "Context Switches/sec", +# # "System Calls/sec", +# # "Processor Queue Length", +# # "System Up Time", +# # ] +# # Instances = ["------"] +# # Measurement = "win_system" +# # +# # [[inputs.win_perf_counters.object]] +# # # Example counterPath where the Instance portion must be removed to get data back, +# # # such as from the Memory object. +# # ObjectName = "Memory" +# # Counters = [ +# # "Available Bytes", +# # "Cache Faults/sec", +# # "Demand Zero Faults/sec", +# # "Page Faults/sec", +# # "Pages/sec", +# # "Transition Faults/sec", +# # "Pool Nonpaged Bytes", +# # "Pool Paged Bytes", +# # "Standby Cache Reserve Bytes", +# # "Standby Cache Normal Priority Bytes", +# # "Standby Cache Core Bytes", +# # ] +# # Instances = ["------"] # Use 6 x - to remove the Instance bit from the counterPath. +# # Measurement = "win_mem" +# # +# # [[inputs.win_perf_counters.object]] +# # # Example query where the Instance portion must be removed to get data back, +# # # such as from the Paging File object. +# # ObjectName = "Paging File" +# # Counters = [ +# # "% Usage", +# # ] +# # Instances = ["_Total"] +# # Measurement = "win_swap" # # Input plugin to report Windows services info. # [[inputs.win_services]] -# ## Names of the services to monitor. Leave empty to monitor all the available services on the host. Globs accepted. +# ## Names of the services to monitor. Leave empty to monitor all the available services on the host. Globs accepted. Case sensitive. 
# service_names = [ # "LanmanServer", -# "TermService", -# "Win*", +# "TermService", +# "Win*", # ] -# #excluded_service_names = [] # optional, list of service names to exclude +# excluded_service_names = ['WinRM'] # optional, list of service names to exclude # # Collect Wireguard server interface and peer statistics @@ -7134,29 +7262,30 @@ # # Reads metrics from a SSL certificate # [[inputs.x509_cert]] -# ## List certificate sources +# ## List certificate sources, support wildcard expands for files # ## Prefix your entry with 'file://' if you intend to use relative paths # sources = ["tcp://example.org:443", "https://influxdata.com:443", -# "udp://127.0.0.1:4433", "/etc/ssl/certs/ssl-cert-snakeoil.pem", +# "smtp://mail.localhost:25", "udp://127.0.0.1:4433", +# "/etc/ssl/certs/ssl-cert-snakeoil.pem", # "/etc/mycerts/*.mydomain.org.pem", "file:///path/to/*.pem"] # # ## Timeout for SSL connection # # timeout = "5s" # -# ## Pass a different name into the TLS request (Server Name Indication) +# ## Pass a different name into the TLS request (Server Name Indication). +# ## This is synonymous with tls_server_name, and only one of the two +# ## options may be specified at one time. # ## example: server_name = "myhost.example.org" -# # server_name = "" -# -# ## Don't include root or intermediate certificates in output -# # exclude_root_certs = false +# # server_name = "myhost.example.org" # # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" +# # tls_server_name = "myhost.example.org" -# # Gathers Metrics From a Dell EMC XtremIO Storage Array's V3 API +# # Gathers Metrics From a Dell EMC XtremIO Storage Array's V3 API # [[inputs.xtremio]] # ## XtremIO User Interface Endpoint # url = "https://xtremio.example.com/" # required @@ -7183,14 +7312,19 @@ # # kstatPath = "/proc/spl/kstat/zfs" # # ## By default, telegraf gather all zfs stats -# ## If not specified, then default is: +# ## Override the stats list using the kstatMetrics array: +# ## For FreeBSD, the default is: # # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"] # ## For Linux, the default is: # # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats", -# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"] +# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"] +# # ## By default, don't gather zpool stats # # poolMetrics = false -# ## By default, don't gather zdataset stats +# +# ## By default, don't gather dataset stats +# ## On FreeBSD, if the user has enabled listsnapshots in the pool property, +# ## telegraf may not be able to correctly parse the output. # # datasetMetrics = false @@ -7398,9 +7532,9 @@ # data_format = "influx" +# ## DEPRECATED: The 'cassandra' plugin is deprecated in version 1.7.0, use 'inputs.jolokia2' with the 'cassandra.conf' example configuration instead. # # Read Cassandra metrics through Jolokia # [[inputs.cassandra]] -# ## DEPRECATED: The 'cassandra' plugin is deprecated in version 1.7.0, use 'inputs.jolokia2' with the 'cassandra.conf' example configuration instead. # ## DEPRECATED: The cassandra plugin has been deprecated. Please use the # ## jolokia2 plugin instead. # ## @@ -7430,6 +7564,9 @@ # ## Address and port to host telemetry listener # service_address = ":57000" # +# ## Grpc Maximum Message Size, default is 4MB, increase the size. +# max_msg_size = 4000000 +# # ## Enable TLS; grpc transport only. 
# # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" @@ -7444,10 +7581,20 @@ # ## Define aliases to map telemetry encoding paths to simple measurement names # [inputs.cisco_telemetry_mdt.aliases] # ifstats = "ietf-interfaces:interfaces-state/interface/statistics" -# ##Define Property Xformation, please refer README and https://pubhub.devnetcloud.com/media/dme-docs-9-3-3/docs/appendix/ for Model details. +# ## Define Property Xformation, please refer README and https://pubhub.devnetcloud.com/media/dme-docs-9-3-3/docs/appendix/ for Model details. # [inputs.cisco_telemetry_mdt.dmes] -# ModTs = "ignore" -# CreateTs = "ignore" +# # Global Property Xformation. +# # prop1 = "uint64 to int" +# # prop2 = "uint64 to string" +# # prop3 = "string to uint64" +# # prop4 = "string to int64" +# # prop5 = "string to float64" +# # auto-prop-xfrom = "auto-float-xfrom" #Xform any property which is string, and has float number to type float64 +# # Per Path property xformation, Name is telemetry configuration under sensor-group, path configuration "WORD Distinguished Name" +# # Per Path configuration is better as it avoid property collision issue of types. +# # dnpath = '{"Name": "show ip route summary","prop": [{"Key": "routes","Value": "string"}, {"Key": "best-paths","Value": "string"}]}' +# # dnpath2 = '{"Name": "show processes cpu","prop": [{"Key": "kernel_percent","Value": "float"}, {"Key": "idle_percent","Value": "float"}, {"Key": "process","Value": "string"}, {"Key": "user_percent","Value": "float"}, {"Key": "onesec","Value": "float"}]}' +# # dnpath3 = '{"Name": "show processes memory physical","prop": [{"Key": "processname","Value": "string"}]}' # # Read metrics from one or many ClickHouse servers @@ -7549,7 +7696,7 @@ # # max_message_len = 1000000 # # ## Optional. Maximum messages to read from PubSub that have not been written -# ## to an output. Defaults to 1000. +# ## to an output. Defaults to %d. # ## For best throughput set based on the number of metrics within # ## each message and the size of the output's metric_batch_size. # ## @@ -7588,7 +7735,8 @@ # # max_receiver_go_routines = 0 # # ## Optional. If true, Telegraf will attempt to base64 decode the -# ## PubSub message data before parsing +# ## PubSub message data before parsing. Many GCP services that +# ## output JSON to Google PubSub base64-encode the JSON payload. # # base64_data = false @@ -7673,7 +7821,7 @@ # # # ## The maximum amount of file paths to queue up for processing at once, before waiting until files are processed to find more files. # ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary. -# # file_queue_size = 100000 +# # file_queue_size = 100000 # # # ## Name a tag containing the name of the file the data was parsed from. Leave empty # ## to disable. Cautious when file name variation is high, this can increase the cardinality @@ -7681,11 +7829,14 @@ # ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality # # file_tag = "" # # +# ## Specify if the file can be read completely at once or if it needs to be read line by line (default). +# ## Possible values: "line-by-line", "at-once" +# # parse_method = "line-by-line" +# # # ## The dataformat to be read from the files. 
# ## Each data format has its own unique set of configuration options, read # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# ## NOTE: We currently only support parsing newline-delimited JSON. See the format here: https://github.com/ndjson/ndjson-spec # data_format = "influx" @@ -7817,15 +7968,22 @@ # # Run executable as long-running input plugin # [[inputs.execd]] -# ## Program to run as daemon +# ## One program to run as daemon. +# ## NOTE: process and each argument should each be their own string # command = ["telegraf-smartctl", "-d", "/dev/sda"] # +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. "KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] +# # ## Define how the process is signaled on each collection interval. # ## Valid values are: -# ## "none" : Do not signal anything. -# ## The process must output metrics by itself. -# ## "STDIN" : Send a newline on STDIN. -# ## "SIGHUP" : Send a HUP signal. Not available on Windows. +# ## "none" : Do not signal anything. (Recommended for service inputs) +# ## The process must output metrics by itself. +# ## "STDIN" : Send a newline on STDIN. (Recommended for gather inputs) +# ## "SIGHUP" : Send a HUP signal. Not available on Windows. (not recommended) # ## "SIGUSR1" : Send a USR1 signal. Not available on Windows. # ## "SIGUSR2" : Send a USR2 signal. Not available on Windows. # signal = "none" @@ -7842,60 +8000,60 @@ # # gNMI telemetry input plugin # [[inputs.gnmi]] -# ## Address and port of the gNMI GRPC server -# addresses = ["10.49.234.114:57777"] +# ## Address and port of the gNMI GRPC server +# addresses = ["10.49.234.114:57777"] # -# ## define credentials -# username = "cisco" -# password = "cisco" +# ## define credentials +# username = "cisco" +# password = "cisco" # -# ## gNMI encoding requested (one of: "proto", "json", "json_ietf", "bytes") -# # encoding = "proto" +# ## gNMI encoding requested (one of: "proto", "json", "json_ietf", "bytes") +# # encoding = "proto" # -# ## redial in case of failures after -# redial = "10s" +# ## redial in case of failures after +# redial = "10s" # -# ## enable client-side TLS and define CA to authenticate the device -# # enable_tls = true -# # tls_ca = "/etc/telegraf/ca.pem" -# # insecure_skip_verify = true +# ## enable client-side TLS and define CA to authenticate the device +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # insecure_skip_verify = true # -# ## define client-side TLS certificate & key to authenticate to the device -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" +# ## define client-side TLS certificate & key to authenticate to the device +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" # -# ## gNMI subscription prefix (optional, can usually be left empty) -# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths -# # origin = "" -# # prefix = "" -# # target = "" -# -# ## Define additional aliases to map telemetry encoding paths to simple measurement names -# #[inputs.gnmi.aliases] -# # ifcounters = "openconfig:/interfaces/interface/state/counters" -# -# [[inputs.gnmi.subscription]] -# ## Name of the measurement that will be emitted -# name = "ifcounters" -# -# ## Origin and path of the subscription +# ## gNMI subscription prefix (optional, can usually be left empty) # ## See: 
https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths -# ## -# ## origin usually refers to a (YANG) data model implemented by the device -# ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath) -# ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr -# origin = "openconfig-interfaces" -# path = "/interfaces/interface/state/counters" +# # origin = "" +# # prefix = "" +# # target = "" # -# # Subscription mode (one of: "target_defined", "sample", "on_change") and interval -# subscription_mode = "sample" -# sample_interval = "10s" +# ## Define additional aliases to map telemetry encoding paths to simple measurement names +# # [inputs.gnmi.aliases] +# # ifcounters = "openconfig:/interfaces/interface/state/counters" # -# ## Suppress redundant transmissions when measured values are unchanged -# # suppress_redundant = false +# [[inputs.gnmi.subscription]] +# ## Name of the measurement that will be emitted +# name = "ifcounters" # -# ## If suppression is enabled, send updates at least every X seconds anyway -# # heartbeat_interval = "60s" +# ## Origin and path of the subscription +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# ## +# ## origin usually refers to a (YANG) data model implemented by the device +# ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath) +# ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr +# origin = "openconfig-interfaces" +# path = "/interfaces/interface/state/counters" +# +# # Subscription mode (one of: "target_defined", "sample", "on_change") and interval +# subscription_mode = "sample" +# sample_interval = "10s" +# +# ## Suppress redundant transmissions when measured values are unchanged +# # suppress_redundant = false +# +# ## If suppression is enabled, send updates at least every X seconds anyway +# # heartbeat_interval = "60s" # # #[[inputs.gnmi.subscription]] # # name = "descr" @@ -7910,10 +8068,10 @@ # # tag_only = true +# ## DEPRECATED: The 'http_listener' plugin is deprecated in version 1.9.0, has been renamed to 'influxdb_listener', use 'inputs.influxdb_listener' or 'inputs.http_listener_v2' instead. # # Accept metrics over InfluxDB 1.x HTTP API -# [[inputs.http_listener]] -# ## DEPRECATED: The 'http_listener' plugin is deprecated in version 1.9.0, has been renamed to 'influxdb_listener', use 'inputs.influxdb_listener' or 'inputs.http_listener_v2' instead. -# ## Address and port to host InfluxDB listener on +# [[inputs.influxdb_listener]] +# ## Address and port to host HTTP listener on # service_address = ":8186" # # ## maximum duration before timing out read of the request @@ -7923,17 +8081,11 @@ # # ## Maximum allowed HTTP request body size in bytes. # ## 0 means to use the default of 32MiB. -# max_body_size = "32MiB" +# max_body_size = 0 # -# ## Optional tag name used to store the database. -# ## If the write has a database in the query string then it will be kept in this tag name. -# ## This tag can be used in downstream outputs. -# ## The default value of nothing means it will be off and the database will not be recorded. -# # database_tag = "" -# -# ## If set the retention policy specified in the write query will be added as -# ## the value of this tag name. -# # retention_policy_tag = "" +# ## Maximum line size allowed to be sent in bytes. 
+# ## deprecated in 1.14; parser now handles lines of unlimited length and option is ignored +# # max_line_size = 0 # # ## Set one or more allowed client CA certificate file names to # ## enable mutually authenticated TLS connections @@ -7943,6 +8095,18 @@ # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" # +# ## Optional tag name used to store the database name. +# ## If the write has a database in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# ## If you have a tag that is the same as the one specified below, and supply a database, +# ## the tag will be overwritten with the database supplied. +# # database_tag = "" +# +# ## If set the retention policy specified in the write query will be added as +# ## the value of this tag name. +# # retention_policy_tag = "" +# # ## Optional username and password to accept for HTTP basic authentication. # ## You probably want to make sure you have TLS configured above for this. # # basic_username = "foobar" @@ -8008,7 +8172,7 @@ # # Accept metrics over InfluxDB 1.x HTTP API # [[inputs.influxdb_listener]] -# ## Address and port to host InfluxDB listener on +# ## Address and port to host HTTP listener on # service_address = ":8186" # # ## maximum duration before timing out read of the request @@ -8018,17 +8182,11 @@ # # ## Maximum allowed HTTP request body size in bytes. # ## 0 means to use the default of 32MiB. -# max_body_size = "32MiB" +# max_body_size = 0 # -# ## Optional tag name used to store the database. -# ## If the write has a database in the query string then it will be kept in this tag name. -# ## This tag can be used in downstream outputs. -# ## The default value of nothing means it will be off and the database will not be recorded. -# # database_tag = "" -# -# ## If set the retention policy specified in the write query will be added as -# ## the value of this tag name. -# # retention_policy_tag = "" +# ## Maximum line size allowed to be sent in bytes. +# ## deprecated in 1.14; parser now handles lines of unlimited length and option is ignored +# # max_line_size = 0 # # ## Set one or more allowed client CA certificate file names to # ## enable mutually authenticated TLS connections @@ -8038,6 +8196,18 @@ # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" # +# ## Optional tag name used to store the database name. +# ## If the write has a database in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# ## If you have a tag that is the same as the one specified below, and supply a database, +# ## the tag will be overwritten with the database supplied. +# # database_tag = "" +# +# ## If set the retention policy specified in the write query will be added as +# ## the value of this tag name. +# # retention_policy_tag = "" +# # ## Optional username and password to accept for HTTP basic authentication. # ## You probably want to make sure you have TLS configured above for this. 
# # basic_username = "foobar" @@ -8083,7 +8253,7 @@ # # parser_type = "internal" -# # Read JTI OpenConfig Telemetry from listed sensors +# # Subscribe and receive OpenConfig Telemetry data using JTI # [[inputs.jti_openconfig_telemetry]] # ## List of device addresses to collect telemetry from # servers = ["localhost:1883"] @@ -8198,8 +8368,7 @@ # ## 2 : Snappy # ## 3 : LZ4 # ## 4 : ZSTD -# # compression_codec = 0 -# +# # compression_codec = 0 # ## Initial offset position; one of "oldest" or "newest". # # offset = "oldest" # @@ -8236,9 +8405,10 @@ # data_format = "influx" +# ## DEPRECATED: The 'kafka_consumer_legacy' plugin is deprecated in version 1.4.0, use 'inputs.kafka_consumer' instead, NOTE: 'kafka_consumer' only supports Kafka v0.8+. +# ## DEPRECATED: The 'kafka_consumer_legacy' plugin is deprecated in version 1.4.0, use 'inputs.kafka_consumer' instead, NOTE: 'kafka_consumer' only supports Kafka v0.8+. # # Read metrics from Kafka topic(s) # [[inputs.kafka_consumer_legacy]] -# ## DEPRECATED: The 'kafka_consumer_legacy' plugin is deprecated in version 1.4.0, use 'inputs.kafka_consumer' instead, NOTE: 'kafka_consumer' only supports Kafka v0.8+. # ## topic(s) to consume # topics = ["telegraf"] # @@ -8328,9 +8498,9 @@ # ## Optional # ## Configuration for a dynamodb checkpoint # [inputs.kinesis_consumer.checkpoint_dynamodb] -# ## unique name for this consumer -# app_name = "default" -# table_name = "default" +# ## unique name for this consumer +# app_name = "default" +# table_name = "default" # # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. @@ -8361,13 +8531,15 @@ # [[inputs.lanz]] # ## URL to Arista LANZ endpoint # servers = [ -# "tcp://127.0.0.1:50001" +# "tcp://switch1.int.example.com:50001", +# "tcp://switch2.int.example.com:50001", # ] -# # Stream and parse log file(s). +# ## DEPRECATED: The 'logparser' plugin is deprecated in version 1.15.0, use 'inputs.tail' with 'grok' data format instead. +# ## DEPRECATED: The 'logparser' plugin is deprecated in version 1.15.0, use 'inputs.tail' with 'grok' data format instead. +# # Read metrics off Arista LANZ, via socket # [[inputs.logparser]] -# ## DEPRECATED: The 'logparser' plugin is deprecated in version 1.15.0, use 'inputs.tail' with 'grok' data format instead. # ## Log files to parse. # ## These accept standard unix glob matching rules, but with the addition of # ## ** as a "super asterisk". ie: @@ -8415,8 +8587,8 @@ # ## 3. UTC -- or blank/unspecified, will return timestamp in UTC # # timezone = "Canada/Eastern" # -# ## When set to "disable", timestamp will not incremented if there is a -# ## duplicate. +# ## When set to "disable", timestamp will not incremented if there is a +# ## duplicate. # # unique_timestamp = "auto" @@ -8428,16 +8600,18 @@ # ## servers = ["ssl://localhost:1883"] # ## servers = ["ws://localhost:1883"] # servers = ["tcp://127.0.0.1:1883"] +# # ## Topics that will be subscribed to. # topics = [ # "telegraf/host01/cpu", # "telegraf/+/mem", # "sensors/#", # ] -# # topic_fields = "_/_/_/temperature" +# # ## The message topic will be stored in a tag specified by this value. If set # ## to the empty string no topic tag will be created. # # topic_tag = "topic" +# # ## QoS policy for messages # ## 0 = at most once # ## 1 = at least once @@ -8446,8 +8620,10 @@ # ## When using a QoS of 1 or 2, you should enable persistent_session to allow # ## resuming unacknowledged messages. 
# # qos = 0 +# # ## Connection timeout for initial connection in seconds # # connection_timeout = "30s" +# # ## Maximum messages to read from the broker that have not been written by an # ## output. For best throughput set based on the number of metrics within # ## each message and the size of the output's metric_batch_size. @@ -8457,37 +8633,44 @@ # ## full batch is collected and the write is triggered immediately without # ## waiting until the next flush_interval. # # max_undelivered_messages = 1000 +# # ## Persistent session disables clearing of the client session on connection. # ## In order for this option to work you must also set client_id to identify # ## the client. To receive messages that arrived while the client is offline, # ## also set the qos option to 1 or 2 and don't forget to also set the QoS when # ## publishing. # # persistent_session = false +# # ## If unset, a random client ID will be generated. # # client_id = "" +# # ## Username and password to connect MQTT server. # # username = "telegraf" # # password = "metricsmetricsmetricsmetrics" +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false +# # ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # data_format = "influx" +# # ## Enable extracting tag values from MQTT topics # ## _ denotes an ignored entry in the topic path -# ## [[inputs.mqtt_consumer.topic_parsing]] -# ## topic = "" -# ## measurement = "" -# ## tags = "" -# ## fields = "" -# ## [inputs.mqtt_consumer.topic_parsing.types] -# ## +# # [[inputs.mqtt_consumer.topic_parsing]] +# # topic = "" +# # measurement = "" +# # tags = "" +# # fields = "" +# ## Value supported is int, float, unit +# # [[inputs.mqtt_consumer.topic.types]] +# # key = type # # Read metrics from NATS subject(s) @@ -8540,8 +8723,11 @@ # data_format = "influx" -# # Read NSQ topic for metrics. +# # Read metrics from NSQD topic(s) # [[inputs.nsq_consumer]] +# ## Server option still works but is deprecated, we just prepend it to the nsqd array. +# # server = "localhost:4150" +# # ## An array representing the NSQD TCP HTTP Endpoints # nsqd = ["localhost:4150"] # @@ -8597,10 +8783,10 @@ # # Read metrics from one or many pgbouncer servers # [[inputs.pgbouncer]] # ## specify address via a url matching: -# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## postgres://[pqgotest[:password]]@host:port[/dbname]\ # ## ?sslmode=[disable|verify-ca|verify-full] # ## or a simple string: -# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production +# ## host=localhost port=5432 user=pqgotest password=... sslmode=... dbname=app_production # ## # ## All connection parameters are optional. # ## @@ -8610,8 +8796,7 @@ # # Read metrics from one or many postgresql servers # [[inputs.postgresql]] # ## specify address via a url matching: -# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ -# ## ?sslmode=[disable|verify-ca|verify-full] +# ## postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] # ## or a simple string: # ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production # ## @@ -8631,7 +8816,7 @@ # ## connection configuration. # ## maxlifetime - specify the maximum lifetime of a connection. 
# ## default is forever (0s) -# max_lifetime = "0s" +# # max_lifetime = "0s" # # ## A list of databases to explicitly ignore. If not specified, metrics for all # ## databases are gathered. Do NOT use with the 'databases' option. @@ -8644,89 +8829,63 @@ # ## Whether to use prepared statements when connecting to the database. # ## This should be set to false when connecting through a PgBouncer instance # ## with pool_mode set to transaction. -# # prepared_statements = true +# prepared_statements = true # # Read metrics from one or many postgresql servers # [[inputs.postgresql_extensible]] -# ## specify address via a url matching: -# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ -# ## ?sslmode=[disable|verify-ca|verify-full] -# ## or a simple string: -# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production +# # specify address via a url matching: +# # postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=... +# # or a simple string: +# # host=localhost port=5432 user=pqgotest password=... sslmode=... dbname=app_production # # -# ## All connection parameters are optional. # -# ## Without the dbname parameter, the driver will default to a database -# ## with the same name as the user. This dbname is just for instantiating a -# ## connection with the server and doesn't restrict the databases we are trying -# ## to grab metrics for. +# # All connection parameters are optional. +# # Without the dbname parameter, the driver will default to a database +# # with the same name as the user. This dbname is just for instantiating a +# # connection with the server and doesn't restrict the databases we are trying +# # to grab metrics for. # # # address = "host=localhost user=postgres sslmode=disable" # -# ## connection configuration. -# ## maxlifetime - specify the maximum lifetime of a connection. -# ## default is forever (0s) -# max_lifetime = "0s" +# ## A list of databases to pull metrics about. +# ## deprecated in 1.22.3; use the sqlquery option to specify database to use +# # databases = ["app_production", "testing"] # # ## Whether to use prepared statements when connecting to the database. # ## This should be set to false when connecting through a PgBouncer instance # ## with pool_mode set to transaction. -# # prepared_statements = true +# prepared_statements = true # -# ## A list of databases to pull metrics about. If not specified, metrics for all -# ## databases are gathered. -# ## databases = ["app_production", "testing"] +# # Define the toml config where the sql queries are stored +# # The script option can be used to specify the .sql file path. +# # If script and sqlquery options specified at same time, sqlquery will be used # # -# ## A custom name for the database that will be used as the "server" tag in the -# ## measurement output. If not specified, a default one generated from -# ## the connection address is used. -# # outputaddress = "db01" +# # the tagvalue field is used to define custom tags (separated by comas). +# # the query is expected to return columns which match the names of the +# # defined tags. The values in these columns must be of a string-type, +# # a number-type or a blob-type. # # -# ## Define the toml config where the sql queries are stored -# ## New queries can be added, if the withdbname is set to true and there is no -# ## databases defined in the 'databases field', the sql query is ended by a -# ## 'is not null' in order to make the query succeed. 
# # Read metrics from one or many prometheus clients
@@ -8734,13 +8893,9 @@
# ## An array of urls to scrape metrics from.
# urls = ["http://localhost:9100/metrics"]
#
-# ## Metric version controls the mapping from Prometheus metrics into
-# ## Telegraf metrics. When using the prometheus_client output, use the same
-# ## value in both plugins to ensure metrics are round-tripped without
-# ## modification.
-# ##
-# ## example: metric_version = 1;
-# ## metric_version = 2; recommended version
+# ## Metric version controls the mapping from Prometheus metrics into Telegraf metrics.
+# ## See "Metric Format Configuration" in plugins/inputs/prometheus/README.md for details.
+# ## Valid options: 1, 2
# # metric_version = 1
#
# ## Url tag name (tag containing scraped url. optional, default is "url")
@@ -8763,16 +8918,20 @@
# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
# ## - prometheus.io/port: If port is not 9102 use this annotation
# # monitor_kubernetes_pods = true
+#
# ## Get the list of pods to scrape with either the scope of
# ## - cluster: the kubernetes watch api (default, no need to specify)
# ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP.
# # pod_scrape_scope = "cluster"
+#
# ## Only for node scrape scope: node IP of the node that telegraf is running on.
# ## Either this config or the environment variable NODE_IP must be set.
# # node_ip = "10.180.1.1"
-# ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping.
-# ## Default is 60 seconds.
-# # pod_scrape_interval = 60
+#
+# ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping.
+# ## Default is 60 seconds.
+# # pod_scrape_interval = 60
+#
# ## Restricts Kubernetes monitoring to a single namespace
# ## ex: monitor_kubernetes_pods_namespace = "default"
# # monitor_kubernetes_pods_namespace = ""
@@ -8782,6 +8941,10 @@
# # eg. To scrape pods on a specific node
# # kubernetes_field_selector = "spec.nodeName=$HOSTNAME"
#
+# # Cache refresh interval determines how often the pod list is re-synced.
+# # Default is 60 minutes.
+# # cache_refresh_interval = 60
+#
# ## Scrape Services available in Consul Catalog
# # [inputs.prometheus.consul]
# # enabled = true
@@ -8812,37 +8975,72 @@
# # tls_ca = /path/to/cafile
# # tls_cert = /path/to/certfile
# # tls_key = /path/to/keyfile
+#
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
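+#
+# ## Illustrative only - a node-scoped Kubernetes scrape built from the
+# ## options documented above (IP, interval and namespace are hypothetical):
+# # monitor_kubernetes_pods = true
+# # pod_scrape_scope = "node"
+# # node_ip = "192.0.2.10"
+# # pod_scrape_interval = 30
+# # monitor_kubernetes_pods_namespace = "monitoring"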
-# # Riemann protobuff listener.
-# [[inputs.riemann_listener]]
-# ## URL to listen on.
+# # Read metrics from one or many redis servers
+# [[inputs.redis]]
+# ## specify servers via a url matching:
+# ## [protocol://][:password]@address[:port]
+# ## e.g.
+# ## tcp://localhost:6379
+# ## tcp://:password@192.168.99.100
+# ## unix:///var/run/redis.sock
+# ##
+# ## If no servers are specified, then localhost is used as the host.
+# ## If no port is specified, 6379 is used
+# servers = ["tcp://localhost:6379"]
+#
+# ## Optional. Specify redis commands to retrieve values
+# # [[inputs.redis.commands]]
+# # # The command to run where each argument is a separate element
+# # command = ["get", "sample-key"]
+# # # The field to store the result in
+# # field = "sample-key-value"
+# # # The type of the result
+# # # Can be "string", "integer", or "float"
+# # type = "string"
+#
+# ## specify server password
+# # password = "s#cr@t%"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = true
+
+
+# # Riemann protobuf listener
+# [[inputs.riemann_listener]]
+# ## URL to listen on
# ## Default is "tcp://:5555"
-# # service_address = "tcp://:8094"
-# # service_address = "tcp://127.0.0.1:http"
-# # service_address = "tcp4://:8094"
-# # service_address = "tcp6://:8094"
-# # service_address = "tcp6://[2001:db8::1]:8094"
+# # service_address = "tcp://:8094"
+# # service_address = "tcp://127.0.0.1:http"
+# # service_address = "tcp4://:8094"
+# # service_address = "tcp6://:8094"
+# # service_address = "tcp6://[2001:db8::1]:8094"
#
# ## Maximum number of concurrent connections.
# ## 0 (default) is unlimited.
-# # max_connections = 1024
+# # max_connections = 1024
# ## Read timeout.
# ## 0 (default) is unlimited.
-# # read_timeout = "30s"
+# # read_timeout = "30s"
# ## Optional TLS configuration.
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
# ## Enables client authentication if set.
-# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] # ## Maximum socket buffer size (in bytes when no unit specified). -# # read_buffer_size = "64KiB" +# # read_buffer_size = "64KiB" # ## Period between keep alive probes. # ## 0 disables keep alive probes. # ## Defaults to the OS configuration. -# # keep_alive_period = "5m" +# # keep_alive_period = "5m" # # SFlow V5 Protocol Listener @@ -8870,9 +9068,14 @@ # # service_address = "udp://:162" # ## # ## Path to mib files +# ## Used by the gosmi translator. +# ## To add paths when translating with netsnmp, use the MIBDIRS environment variable # # path = ["/usr/share/snmp/mibs"] # ## -# ## Snmp version, defaults to 2c +# ## Deprecated in 1.20.0; no longer running snmptranslate +# ## Timeout running snmptranslate command +# # timeout = "5s" +# ## Snmp version # # version = "2c" # ## SNMPv3 authentication and encryption options. # ## @@ -9012,15 +9215,15 @@ # # tag_columns_include = [] # # tag_columns_exclude = [] # -# ## Column names containing fields (explicit types) +# ## Column names containing fields (explicit types) # ## Convert the given columns to the corresponding type. Explicit type conversions take precedence over -# ## the automatic (driver-based) conversion below. -# ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined. +# ## the automatic (driver-based) conversion below. +# ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined. # # field_columns_float = [] # # field_columns_int = [] -# # field_columns_uint = [] -# # field_columns_bool = [] -# # field_columns_string = [] +# # field_columns_uint = [] +# # field_columns_bool = [] +# # field_columns_string = [] # # ## Column names containing fields (automatic types) # ## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty @@ -9032,53 +9235,148 @@ # # Read metrics from Microsoft SQL Server # [[inputs.sqlserver]] -# ## Specify instances to monitor with a list of connection strings. -# ## All connection parameters are optional. -# ## By default, the host is localhost, listening on default port, TCP 1433. -# ## for Windows, the user is the currently running AD user (SSO). -# ## See https://github.com/denisenkom/go-mssqldb for detailed connection -# ## parameters, in particular, tls connections can be created like so: -# ## "encrypt=true;certificate=;hostNameInCertificate=" -# servers = [ -# "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", -# ] +# ## Specify instances to monitor with a list of connection strings. +# ## All connection parameters are optional. +# ## By default, the host is localhost, listening on default port, TCP 1433. +# ## for Windows, the user is the currently running AD user (SSO). +# ## See https://github.com/denisenkom/go-mssqldb for detailed connection +# ## parameters, in particular, tls connections can be created like so: +# ## "encrypt=true;certificate=;hostNameInCertificate=" +# servers = [ +# "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", +# ] # -# ## Authentication method -# ## valid methods: "connection_string", "AAD" -# # auth_method = "connection_string" +# ## Authentication method +# ## valid methods: "connection_string", "AAD" +# # auth_method = "connection_string" # -# ## "database_type" enables a specific set of queries depending on the database type. 
-# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. -# ## Possible values for database_type are - "SQLServer" or "AzureSQLDB" or "AzureSQLManagedInstance" or "AzureSQLPool" +# ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 +# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. +# ## Possible values for database_type are - "SQLServer" or "AzureSQLDB" or "AzureSQLManagedInstance" or "AzureSQLPool" # -# database_type = "SQLServer" +# database_type = "SQLServer" # -# ## A list of queries to include. If not specified, all the below listed queries are used. -# include_query = [] +# ## A list of queries to include. If not specified, all the below listed queries are used. +# include_query = [] # -# ## A list of queries to explicitly ignore. -# exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] +# ## A list of queries to explicitly ignore. +# exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] # -# ## Queries enabled by default for database_type = "SQLServer" are - -# ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks, -# ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu, SQLServerAvailabilityReplicaStates, SQLServerDatabaseReplicaStates +# ## Queries enabled by default for database_type = "SQLServer" are - +# ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks, +# ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu, SQLServerAvailabilityReplicaStates, SQLServerDatabaseReplicaStates, +# ## SQLServerRecentBackups # -# ## Queries enabled by default for database_type = "AzureSQLDB" are - -# ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, -# ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers +# ## Queries enabled by default for database_type = "AzureSQLDB" are - +# ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, +# ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers # -# ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - -# ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, -# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers +# ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - +# ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, +# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers # -# ## Queries enabled by default for database_type = "AzureSQLPool" are - -# ## AzureSQLPoolResourceStats, AzureSQLPoolResourceGovernance, AzureSQLPoolDatabaseIO, AzureSQLPoolWaitStats, -# ## AzureSQLPoolMemoryClerks, AzureSQLPoolPerformanceCounters, AzureSQLPoolSchedulers 
+# ## Queries enabled by default for database_type = "AzureSQLPool" are -
+# ## AzureSQLPoolResourceStats, AzureSQLPoolResourceGovernance, AzureSQLPoolDatabaseIO, AzureSQLPoolWaitStats,
+# ## AzureSQLPoolMemoryClerks, AzureSQLPoolPerformanceCounters, AzureSQLPoolSchedulers
+#
+# ## Following are old config settings
+# ## You may use them only if you are using the earlier flavor of queries; however, it is recommended to use
+# ## the new mechanism of identifying the database_type and thereby use its corresponding queries
+#
+# ## Optional parameter, setting this to 2 will use a new version
+# ## of the collection queries that break compatibility with the original
+# ## dashboards.
+# ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB
+# # query_version = 2
+#
+# ## If you are using AzureDB, setting this to true will gather resource utilization metrics
+# # azuredb = false
+#
+# ## Toggling this to true will emit an additional metric called "sqlserver_telegraf_health".
+# ## This metric tracks the count of attempted queries and successful queries for each SQL instance specified in "servers".
+# ## The purpose of this metric is to assist with identifying and diagnosing any connectivity or query issues.
+# ## This setting/metric is optional and is disabled by default.
+# # health_metric = false
+#
+# ## Possible queries across different versions of the collectors
+# ## Queries enabled by default for specific Database Type
+#
+# ## database_type = AzureSQLDB by default collects the following queries
+# ## - AzureSQLDBWaitStats
+# ## - AzureSQLDBResourceStats
+# ## - AzureSQLDBResourceGovernance
+# ## - AzureSQLDBDatabaseIO
+# ## - AzureSQLDBServerProperties
+# ## - AzureSQLDBOsWaitstats
+# ## - AzureSQLDBMemoryClerks
+# ## - AzureSQLDBPerformanceCounters
+# ## - AzureSQLDBRequests
+# ## - AzureSQLDBSchedulers
+#
+# ## database_type = AzureSQLManagedInstance by default collects the following queries
+# ## - AzureSQLMIResourceStats
+# ## - AzureSQLMIResourceGovernance
+# ## - AzureSQLMIDatabaseIO
+# ## - AzureSQLMIServerProperties
+# ## - AzureSQLMIOsWaitstats
+# ## - AzureSQLMIMemoryClerks
+# ## - AzureSQLMIPerformanceCounters
+# ## - AzureSQLMIRequests
+# ## - AzureSQLMISchedulers
+#
+# ## database_type = AzureSQLPool by default collects the following queries
+# ## - AzureSQLPoolResourceStats
+# ## - AzureSQLPoolResourceGovernance
+# ## - AzureSQLPoolDatabaseIO
+# ## - AzureSQLPoolOsWaitStats
+# ## - AzureSQLPoolMemoryClerks
+# ## - AzureSQLPoolPerformanceCounters
+# ## - AzureSQLPoolSchedulers
+#
+# ## database_type = SQLServer by default collects the following queries
+# ## - SQLServerPerformanceCounters
+# ## - SQLServerWaitStatsCategorized
+# ## - SQLServerDatabaseIO
+# ## - SQLServerProperties
+# ## - SQLServerMemoryClerks
+# ## - SQLServerSchedulers
+# ## - SQLServerRequests
+# ## - SQLServerVolumeSpace
+# ## - SQLServerCpu
+# ## - SQLServerRecentBackups
+# ## and following as optional (if mentioned in the include_query list)
+# ## - SQLServerAvailabilityReplicaStates
+# ## - SQLServerDatabaseReplicaStates
+#
+# ## Version 2 by default collects the following queries
+# ## Version 2 is being deprecated, please consider using database_type.
+# ## - PerformanceCounters
+# ## - WaitStatsCategorized
+# ## - DatabaseIO
+# ## - ServerProperties
+# ## - MemoryClerk
+# ## - Schedulers
+# ## - SqlRequests
+# ## - VolumeSpace
+# ## - Cpu
+#
+# ## Version 1 by default collects the following queries
+# ## Version 1 is deprecated, please consider using database_type.
+# ## - PerformanceCounters
+# ## - WaitStatsCategorized
+# ## - CPUHistory
+# ## - DatabaseIO
+# ## - DatabaseSize
+# ## - DatabaseStats
+# ## - DatabaseProperties
+# ## - MemoryClerk
+# ## - VolumeSpace
+# ## - PerformanceMetrics
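+#
+# ## Illustrative only - a minimal SQLServer monitoring block built from the
+# ## options documented above (the connection string values are placeholders):
+# # servers = ["Server=192.0.2.20;Port=1433;User Id=telegraf;Password=example;app name=telegraf;log=1;"]
+# # database_type = "SQLServer"
+# # include_query = []
+# # exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"]
+# # health_metric = true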
-# # Statsd UDP/TCP Server
+# # Statsd Server
# [[inputs.statsd]]
-# ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp)
+# ## Protocol, must be "tcp", "udp4", "udp6" or "udp" (default=udp)
# protocol = "udp"
#
# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
@@ -9107,7 +9405,7 @@
# ## Reset timings & histograms every interval (default=true)
# delete_timings = true
#
-# ## Percentiles to calculate for timing & histogram stats
+# ## Percentiles to calculate for timing & histogram stats.
# percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0]
#
# ## separator to use between elements of a statsd metric
@@ -9115,9 +9413,12 @@
#
# ## Parses tags in the datadog statsd format
# ## http://docs.datadoghq.com/guides/dogstatsd/
+# ## deprecated in 1.10; use datadog_extensions option instead
# parse_data_dog_tags = false
#
-# ## Parses datadog extensions to the statsd format
+# ## Parses extensions to statsd in the datadog statsd format
+# ## currently supports metrics and datadog tags.
+# ## http://docs.datadoghq.com/guides/dogstatsd/
# datadog_extensions = false
#
# ## Parses distributions metric as specified in the datadog statsd format
@@ -9139,8 +9440,12 @@
# ## of percentiles but also increases the memory usage and cpu time.
# percentile_limit = 1000
#
+# ## Maximum socket buffer size in bytes; once the buffer fills up, metrics
+# ## will start dropping. Defaults to the OS default.
+# # read_buffer_size = 65535
+#
# ## Max duration (TTL) for each metric to stay cached/reported without being updated.
-# #max_ttl = "1000h"
+# # max_ttl = "10h"
#
# ## Sanitize name method
# ## By default, telegraf will pass names directly as they are received.
@@ -9153,7 +9458,7 @@
# # Suricata stats and alerts plugin
# [[inputs.suricata]]
-# ## Data sink for Suricata stats and alerts logs
+# ## Data sink for Suricata stats log.
# # This is expected to be a filename of a
# # unix socket to be created for listening.
# source = "/var/run/suricata-stats.sock"
@@ -9162,16 +9467,17 @@
# # becomes "detect_alert" when delimiter is "_".
# delimiter = "_"
#
-# ## Detect alert logs
-# # alerts = false
+# # Detect alert logs
+# alerts = false
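+#
+# ## Illustrative only - a Suricata block with alert collection enabled,
+# ## using a hypothetical socket path and the options documented above:
+# # [[inputs.suricata]]
+# #   source = "/var/run/suricata-command.socket"
+# #   delimiter = "_"
+# #   alerts = true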
-# # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587
# [[inputs.syslog]]
-# ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514
# ## Protocol, address and port to host the syslog receiver.
# ## If no host is specified, then localhost is used.
# ## If no port is specified, 6514 is used (RFC5425#section-4.1).
+# ## ex: server = "tcp://localhost:6514"
+# ## server = "udp://:6514"
+# ## server = "unix:///var/run/telegraf-syslog.sock"
# server = "tcp://:6514"
#
# ## TLS Config
@@ -9197,7 +9503,7 @@
# ## The framing technique with which it is expected that messages are transported (default = "octet-counting").
# ## Whether the messages come using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
# ## or the non-transparent framing technique (RFC6587#section-3.4.2).
-# ## Must be one of "octet-counting", "non-transparent".
+# ## Must be one of "octet-counting", "non-transparent".
# # framing = "octet-counting"
#
# ## The trailer to be expected in case of non-transparent framing (default = "LF").
@@ -9267,232 +9573,42 @@
# ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string.
# # path_tag = "path"
#
+# ## Filters to apply to files before generating metrics
+# ## "ansi_color" removes ANSI colors
+# # filters = []
+#
# ## multiline parser/codec
# ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html
# #[inputs.tail.multiline]
-# ## The pattern should be a regexp which matches what you believe to be an
-# ## indicator that the field is part of an event consisting of multiple lines of log data.
+# ## The pattern should be a regexp which matches what you believe to be an indicator that the field is part of an event consisting of multiple lines of log data.
# #pattern = "^\s"
#
-# ## This field must be either "previous" or "next".
-# ## If a line matches the pattern, "previous" indicates that it belongs to the previous line,
-# ## whereas "next" indicates that the line belongs to the next one.
+# ## The field's value must be previous or next and indicates the relation to the
+# ## multi-line event.
# #match_which_line = "previous"
#
-# ## The invert_match field can be true or false (defaults to false).
-# ## If true, a message not matching the pattern will constitute a match of the multiline
-# ## filter and the what will be applied. (vice-versa is also true)
+# ## The invert_match can be true or false (defaults to false).
+# ## If true, a message not matching the pattern will constitute a match of the multiline filter and the what will be applied. (vice-versa is also true)
# #invert_match = false
#
-# ## After the specified timeout, this plugin sends a multiline event even if no new pattern
-# ## is found to start a new event. The default timeout is 5s.
+# ## After the specified timeout, this plugin sends the multiline event even if no new pattern is found to start a new event. The default is 5s.
# #timeout = 5s
+# ## DEPRECATED: The 'tcp_listener' plugin is deprecated in version 1.3.0, use 'inputs.socket_listener' instead.
# # Generic TCP listener
# [[inputs.tcp_listener]]
-# ## DEPRECATED: The 'tcp_listener' plugin is deprecated in version 1.3.0, use 'inputs.socket_listener' instead.
-# # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
# # socket_listener plugin
# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
+# ## DEPRECATED: The 'udp_listener' plugin is deprecated in version 1.3.0, use 'inputs.socket_listener' instead.
# # Generic UDP listener
# [[inputs.udp_listener]]
-# ## DEPRECATED: The 'udp_listener' plugin is deprecated in version 1.3.0, use 'inputs.socket_listener' instead.
-# # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
-# # socket_listener plugin
# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
-# # Read metrics from VMware vCenter
-# [[inputs.vsphere]]
-# ## List of vCenter URLs to be monitored. These three lines must be uncommented
-# ## and edited for the plugin to work.
-# vcenters = [ "https://vcenter.local/sdk" ] -# username = "user@corp.local" -# password = "secret" -# -# ## VMs -# ## Typical VM metrics (if omitted or empty, all metrics are collected) -# # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected) -# # vm_exclude = [] # Inventory paths to exclude -# vm_metric_include = [ -# "cpu.demand.average", -# "cpu.idle.summation", -# "cpu.latency.average", -# "cpu.readiness.average", -# "cpu.ready.summation", -# "cpu.run.summation", -# "cpu.usagemhz.average", -# "cpu.used.summation", -# "cpu.wait.summation", -# "mem.active.average", -# "mem.granted.average", -# "mem.latency.average", -# "mem.swapin.average", -# "mem.swapinRate.average", -# "mem.swapout.average", -# "mem.swapoutRate.average", -# "mem.usage.average", -# "mem.vmmemctl.average", -# "net.bytesRx.average", -# "net.bytesTx.average", -# "net.droppedRx.summation", -# "net.droppedTx.summation", -# "net.usage.average", -# "power.power.average", -# "virtualDisk.numberReadAveraged.average", -# "virtualDisk.numberWriteAveraged.average", -# "virtualDisk.read.average", -# "virtualDisk.readOIO.latest", -# "virtualDisk.throughput.usage.average", -# "virtualDisk.totalReadLatency.average", -# "virtualDisk.totalWriteLatency.average", -# "virtualDisk.write.average", -# "virtualDisk.writeOIO.latest", -# "sys.uptime.latest", -# ] -# # vm_metric_exclude = [] ## Nothing is excluded by default -# # vm_instances = true ## true by default -# -# ## Hosts -# ## Typical host metrics (if omitted or empty, all metrics are collected) -# # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected) -# # host_exclude [] # Inventory paths to exclude -# host_metric_include = [ -# "cpu.coreUtilization.average", -# "cpu.costop.summation", -# "cpu.demand.average", -# "cpu.idle.summation", -# "cpu.latency.average", -# "cpu.readiness.average", -# "cpu.ready.summation", -# "cpu.swapwait.summation", -# "cpu.usage.average", -# "cpu.usagemhz.average", -# "cpu.used.summation", -# "cpu.utilization.average", -# "cpu.wait.summation", -# "disk.deviceReadLatency.average", -# "disk.deviceWriteLatency.average", -# "disk.kernelReadLatency.average", -# "disk.kernelWriteLatency.average", -# "disk.numberReadAveraged.average", -# "disk.numberWriteAveraged.average", -# "disk.read.average", -# "disk.totalReadLatency.average", -# "disk.totalWriteLatency.average", -# "disk.write.average", -# "mem.active.average", -# "mem.latency.average", -# "mem.state.latest", -# "mem.swapin.average", -# "mem.swapinRate.average", -# "mem.swapout.average", -# "mem.swapoutRate.average", -# "mem.totalCapacity.average", -# "mem.usage.average", -# "mem.vmmemctl.average", -# "net.bytesRx.average", -# "net.bytesTx.average", -# "net.droppedRx.summation", -# "net.droppedTx.summation", -# "net.errorsRx.summation", -# "net.errorsTx.summation", -# "net.usage.average", -# "power.power.average", -# "storageAdapter.numberReadAveraged.average", -# "storageAdapter.numberWriteAveraged.average", -# "storageAdapter.read.average", -# "storageAdapter.write.average", -# "sys.uptime.latest", -# ] -# ## Collect IP addresses? 
Valid values are "ipv4" and "ipv6" -# # ip_addresses = ["ipv6", "ipv4" ] -# -# # host_metric_exclude = [] ## Nothing excluded by default -# # host_instances = true ## true by default -# -# -# ## Clusters -# # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) -# # cluster_exclude = [] # Inventory paths to exclude -# # cluster_metric_include = [] ## if omitted or empty, all metrics are collected -# # cluster_metric_exclude = [] ## Nothing excluded by default -# # cluster_instances = false ## false by default -# -# ## Datastores -# # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected) -# # datastore_exclude = [] # Inventory paths to exclude -# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected -# # datastore_metric_exclude = [] ## Nothing excluded by default -# # datastore_instances = false ## false by default -# -# ## Datacenters -# # datacenter_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) -# # datacenter_exclude = [] # Inventory paths to exclude -# datacenter_metric_include = [] ## if omitted or empty, all metrics are collected -# datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default. -# # datacenter_instances = false ## false by default -# -# ## Plugin Settings -# ## separator character to use for measurement and field names (default: "_") -# # separator = "_" -# -# ## number of objects to retrieve per query for realtime resources (vms and hosts) -# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) -# # max_query_objects = 256 -# -# ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores) -# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) -# # max_query_metrics = 256 -# -# ## number of go routines to use for collection and discovery of objects and metrics -# # collect_concurrency = 1 -# # discover_concurrency = 1 -# -# ## the interval before (re)discovering objects subject to metrics collection (default: 300s) -# # object_discovery_interval = "300s" -# -# ## timeout applies to any of the api request made to vcenter -# # timeout = "60s" -# -# ## When set to true, all samples are sent as integers. This makes the output -# ## data types backwards compatible with Telegraf 1.9 or lower. Normally all -# ## samples from vCenter, with the exception of percentages, are integer -# ## values, but under some conditions, some averaging takes place internally in -# ## the plugin. Setting this flag to "false" will send values as floats to -# ## preserve the full precision when averaging takes place. -# # use_int_samples = true -# -# ## Custom attributes from vCenter can be very useful for queries in order to slice the -# ## metrics along different dimension and for forming ad-hoc relationships. They are disabled -# ## by default, since they can add a considerable amount of tags to the resulting metrics. To -# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include -# ## to select the attributes you want to include. -# ## By default, since they can add a considerable amount of tags to the resulting metrics. To -# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include -# ## to select the attributes you want to include. 
-# # custom_attribute_include = [] -# # custom_attribute_exclude = ["*"] -# -# ## The number of vSphere 5 minute metric collection cycles to look back for non-realtime metrics. In -# ## some versions (6.7, 7.0 and possible more), certain metrics, such as cluster metrics, may be reported -# ## with a significant delay (>30min). If this happens, try increasing this number. Please note that increasing -# ## it too much may cause performance issues. -# # metric_lookback = 3 -# -# ## Optional SSL Config -# # ssl_ca = "/path/to/cafile" -# # ssl_cert = "/path/to/certfile" -# # ssl_key = "/path/to/keyfile" -# ## Use SSL but skip chain & host verification -# # insecure_skip_verify = false -# -# ## The Historical Interval value must match EXACTLY the interval in the daily -# # "Interval Duration" found on the VCenter server under Configure > General > Statistics > Statistic intervals -# # historical_interval = "5m" +# vm_metric_exclude = [ "*" ] # # A Webhooks Event collector @@ -9503,49 +9619,52 @@ # [inputs.webhooks.filestack] # path = "/filestack" # -# ## HTTP basic auth -# #username = "" -# #password = "" +# ## HTTP basic auth +# #username = "" +# #password = "" # # [inputs.webhooks.github] # path = "/github" # # secret = "" # -# ## HTTP basic auth -# #username = "" -# #password = "" +# ## HTTP basic auth +# #username = "" +# #password = "" # # [inputs.webhooks.mandrill] # path = "/mandrill" # -# ## HTTP basic auth -# #username = "" -# #password = "" +# ## HTTP basic auth +# #username = "" +# #password = "" # # [inputs.webhooks.rollbar] # path = "/rollbar" # -# ## HTTP basic auth -# #username = "" -# #password = "" +# ## HTTP basic auth +# #username = "" +# #password = "" # # [inputs.webhooks.papertrail] # path = "/papertrail" # -# ## HTTP basic auth -# #username = "" -# #password = "" +# ## HTTP basic auth +# #username = "" +# #password = "" # # [inputs.webhooks.particle] # path = "/particle" # -# ## HTTP basic auth -# #username = "" -# #password = "" +# ## HTTP basic auth +# #username = "" +# #password = "" +# +# [inputs.webhooks.artifactory] +# path = "/artifactory" # # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures. # [[inputs.zipkin]] # # path = "/api/v1/spans" # URL path for span data -# # port = 9411 # Port on which Telegraf listens +# # port = 9411 # Port on which Telegraf listens
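+#
+# ## Illustrative only - an active Zipkin listener together with the webhooks
+# ## collector and the new artifactory endpoint documented above (the listen
+# ## address and paths are example values, not upstream defaults):
+# # [[inputs.zipkin]]
+# #   path = "/api/v1/spans"
+# #   port = 9411
+#
+# # [[inputs.webhooks]]
+# #   service_address = ":1619"
+# #   [inputs.webhooks.artifactory]
+# #     path = "/artifactory"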