From dc29a7785d55f979357a0ea97a2461270e383680 Mon Sep 17 00:00:00 2001 From: "telegraf-tiger[bot]" <76974415+telegraf-tiger[bot]@users.noreply.github.com> Date: Tue, 13 Sep 2022 09:22:45 -0600 Subject: [PATCH] docs: Update etc/telegraf.conf and etc/telegraf_windows.conf (#11792) --- etc/telegraf.conf | 747 ++++++++++++++++++++++++++---------- etc/telegraf_windows.conf | 779 +++++++++++++++++++++++++++----------- 2 files changed, 1121 insertions(+), 405 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index cea87da97..fee7c84a3 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -285,6 +285,10 @@ # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # +# ## Optional Proxy Configuration +# # use_proxy = false +# # proxy_url = "localhost:8888" +# # ## If true use batch serialization format instead of line based delimiting. # ## Only applies to data formats which are not line based such as JSON. # ## Recommended to set to true. @@ -484,6 +488,10 @@ # ## ex: endpoint_url = "http://localhost:8000" # # endpoint_url = "" # +# ## Set http_proxy +# # use_system_proxy = false +# # http_proxy_url = "http://localhost:8888" +# # ## Namespace for the CloudWatch MetricDatums # namespace = "InfluxData/Telegraf" # @@ -776,6 +784,11 @@ # ## Delay before the process is restarted after an unexpected termination # restart_delay = "10s" # +# ## Flag to determine whether execd should throw error when part of metrics is unserializable +# ## Setting this to true will skip the unserializable metrics and process the rest of metrics +# ## Setting this to false will throw error when encountering unserializable metrics and none will be processed +# # ignore_serialization_error = false +# # ## Data format to export. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -1118,6 +1131,53 @@ # debug = false +# # Save metrics to an IoTDB Database +# [[outputs.iotdb]] +# ## Configuration of IoTDB server connection +# host = "127.0.0.1" +# # port = "6667" +# +# ## Configuration of authentication +# # user = "root" +# # password = "root" +# +# ## Timeout to open a new session. +# ## A value of zero means no timeout. +# # timeout = "5s" +# +# ## Configuration of type conversion for 64-bit unsigned int +# ## IoTDB currently DOES NOT support unsigned integers (version 13.x). +# ## 32-bit unsigned integers are safely converted into 64-bit signed integers by the plugin, +# ## however, this is not true for 64-bit values in general as overflows may occur. +# ## The following setting allows to specify the handling of 64-bit unsigned integers. +# ## Available values are: +# ## - "int64" -- convert to 64-bit signed integers and accept overflows +# ## - "int64_clip" -- convert to 64-bit signed integers and clip the values on overflow to 9,223,372,036,854,775,807 +# ## - "text" -- convert to the string representation of the value +# # uint64_conversion = "int64_clip" +# +# ## Configuration of TimeStamp +# ## TimeStamp is always saved in 64bits int. timestamp_precision specifies the unit of timestamp. +# ## Available value: +# ## "second", "millisecond", "microsecond", "nanosecond"(default) +# # timestamp_precision = "nanosecond" +# +# ## Handling of tags +# ## Tags are not fully supported by IoTDB. 
+# ## A guide with suggestions on how to handle tags can be found here: +# ## https://iotdb.apache.org/UserGuide/Master/API/InfluxDB-Protocol.html +# ## +# ## Available values are: +# ## - "fields" -- convert tags to fields in the measurement +# ## - "device_id" -- attach tags to the device ID +# ## +# ## For Example, a metric named "root.sg.device" with the tags `tag1: "private"` and `tag2: "working"` and +# ## fields `s1: 100` and `s2: "hello"` will result in the following representations in IoTDB +# ## - "fields" -- root.sg.device, s1=100, s2="hello", tag1="private", tag2="working" +# ## - "device_id" -- root.sg.device.private.working, s1=100, s2="hello" +# # convert_tags_to = "device_id" + + # # Configuration for the Kafka server to send metrics to # [[outputs.kafka]] # ## URLs of kafka brokers @@ -1438,9 +1498,14 @@ # [[outputs.mqtt]] # ## MQTT Brokers # ## The list of brokers should only include the hostname or IP address and the -# ## port to the broker. This should follow the format '{host}:{port}'. For -# ## example, "localhost:1883" or "127.0.0.1:8883". -# servers = ["localhost:1883"] +# ## port to the broker. This should follow the format `[{scheme}://]{host}:{port}`. For +# ## example, `localhost:1883` or `mqtt://localhost:1883`. +# ## Scheme can be any of the following: tcp://, mqtt://, tls://, mqtts:// +# ## non-TLS and TLS servers can not be mix-and-matched. +# servers = ["localhost:1883", ] # or ["mqtts://tls.example.com:1883"] +# +# ## Protocol can be `3.1.1` or `5`. Default is `3.1.1` +# # procotol = "3.1.1" # # ## MQTT Topic for Producer Messages # ## MQTT outputs send metrics to this topic format: @@ -1597,6 +1662,17 @@ # ## Supports: "gzip", "none" # # compression = "gzip" # +# ## Configuration options for the Coralogix dialect +# ## Enable the following section of you use this plugin with a Coralogix endpoint +# # [outputs.opentelemetry.coralogix] +# # ## Your Coralogix private key (required). +# # ## Please note that this is sensitive data! +# # private_key = "your_coralogix_key" +# # +# # ## Application and subsystem names for the metrics (required) +# # application = "$NAMESPACE" +# # subsystem = "$HOSTNAME" +# # ## Additional OpenTelemetry resource attributes # # [outputs.opentelemetry.attributes] # # "service.name" = "demo" @@ -1634,6 +1710,88 @@ # separator = "_" +# # Publishes metrics to a postgresql database +# [[outputs.postgresql]] +# ## Specify connection address via the standard libpq connection string: +# ## host=... user=... password=... sslmode=... dbname=... +# ## Or a URL: +# ## postgres://[user[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] +# ## See https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING +# ## +# ## All connection parameters are optional. Environment vars are also supported. +# ## e.g. PGPASSWORD, PGHOST, PGUSER, PGDATABASE +# ## All supported vars can be found here: +# ## https://www.postgresql.org/docs/current/libpq-envars.html +# ## +# ## Non-standard parameters: +# ## pool_max_conns (default: 1) - Maximum size of connection pool for parallel (per-batch per-table) inserts. +# ## pool_min_conns (default: 0) - Minimum size of connection pool. +# ## pool_max_conn_lifetime (default: 0s) - Maximum age of a connection before closing. +# ## pool_max_conn_idle_time (default: 0s) - Maximum idle time of a connection before closing. +# ## pool_health_check_period (default: 0s) - Duration between health checks on idle connections. +# # connection = "" +# +# ## Postgres schema to use. 
+# # schema = "public" +# +# ## Store tags as foreign keys in the metrics table. Default is false. +# # tags_as_foreign_keys = false +# +# ## Suffix to append to table name (measurement name) for the foreign tag table. +# # tag_table_suffix = "_tag" +# +# ## Deny inserting metrics if the foreign tag can't be inserted. +# # foreign_tag_constraint = false +# +# ## Store all tags as a JSONB object in a single 'tags' column. +# # tags_as_jsonb = false +# +# ## Store all fields as a JSONB object in a single 'fields' column. +# # fields_as_jsonb = false +# +# ## Templated statements to execute when creating a new table. +# # create_templates = [ +# # '''CREATE TABLE {{ .table }} ({{ .columns }})''', +# # ] +# +# ## Templated statements to execute when adding columns to a table. +# ## Set to an empty list to disable. Points containing tags for which there is no column will be skipped. Points +# ## containing fields for which there is no column will have the field omitted. +# # add_column_templates = [ +# # '''ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}''', +# # ] +# +# ## Templated statements to execute when creating a new tag table. +# # tag_table_create_templates = [ +# # '''CREATE TABLE {{ .table }} ({{ .columns }}, PRIMARY KEY (tag_id))''', +# # ] +# +# ## Templated statements to execute when adding columns to a tag table. +# ## Set to an empty list to disable. Points containing tags for which there is no column will be skipped. +# # tag_table_add_column_templates = [ +# # '''ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}''', +# # ] +# +# ## The postgres data type to use for storing unsigned 64-bit integer values (Postgres does not have a native +# ## unsigned 64-bit integer type). +# ## The value can be one of: +# ## numeric - Uses the PostgreSQL "numeric" data type. +# ## uint8 - Requires pguint extension (https://github.com/petere/pguint) +# # uint64_type = "numeric" +# +# ## When using pool_max_conns>1, and a temporary error occurs, the query is retried with an incremental backoff. This +# ## controls the maximum backoff duration. +# # retry_max_backoff = "15s" +# +# ## Approximate number of tag IDs to store in in-memory cache (when using tags_as_foreign_keys). +# ## This is an optimization to skip inserting known tag IDs. +# ## Each entry consumes approximately 34 bytes of memory. +# # tag_cache_size = 100000 +# +# ## Enable & set the log level for the Postgres driver. +# # log_level = "warn" # trace, debug, info, warn, error, none + + # # Configuration for the Prometheus client to spawn # [[outputs.prometheus_client]] # ## Address to listen on. @@ -1677,23 +1835,22 @@ # ## Export metric collection time. # # export_timestamp = false -# # Configuration for the RedisTimeSeries server to send metrics to -# [[outputs.redistimeseries]] + +# # ## The address of the RedisTimeSeries server. 
# address = "127.0.0.1:6379" -# ## password to login Redis -# password = "" # -# ## username (optional) -# username = "" -# # redis database number (optional, must be an integer) -# database = 0 +# ## Redis ACL credentials +# # username = "" +# # password = "" +# # database = 0 # -# ## optional TLS configurations -# tls_ca = "/etc/telegraf/ca.pem -# tls_cert = "/etc/telegraf/cert.pem" -# tls_key = "/etc/telegraf/key.pem" -# insecure_skip_verify = false +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# # insecure_skip_verify = false + # # Configuration for Riemann to send metrics to # [[outputs.riemann]] @@ -1942,6 +2099,20 @@ # ## the unsigned option. This is useful for a database like ClickHouse where # ## the unsigned value should use a value like "uint64". # # conversion_style = "unsigned_suffix" +# +# ## Maximum amount of time a connection may be idle. "0s" means connections are +# ## never closed due to idle time. +# # connection_max_idle_time = "0s" +# +# ## Maximum amount of time a connection may be reused. "0s" means connections +# ## are never closed due to age. +# # connection_max_lifetime = "0s" +# +# ## Maximum number of connections in the idle connection pool. 0 means unlimited. +# # connection_max_idle = 2 +# +# ## Maximum number of open connections to the database. 0 means unlimited. +# # connection_max_open = 0 # # Configuration for Google Cloud Stackdriver to send metrics to @@ -1962,6 +2133,26 @@ # # location = "eu-north0" +# # Configuration for active mq with stomp protocol to send metrics to +# [[outputs.stomp]] +# host = "localhost:61613" +# +# ## Queue name for producer messages +# queueName = "telegraf" +# +# ## Username and password if required by the Active MQ server. +# # username = "" +# # password = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Data format to output. +# data_format = "json" + + # # A plugin that can send metrics to Sumo Logic HTTP metric collector. # [[outputs.sumologic]] # ## Unique URL generated for your HTTP Metrics Source. @@ -2240,22 +2431,20 @@ # # insecure_skip_verify = false -# # Configuration for Wavefront server to send metrics to # [[outputs.wavefront]] -# ## Url for Wavefront Direct Ingestion. For Wavefront Proxy Ingestion, see -# ## the 'host' and 'port' options below. +# ## Url for Wavefront API or Wavefront Proxy instance. # url = "https://metrics.wavefront.com" # -# ## Authentication Token for Wavefront. Only required if using Direct Ingestion +# ## Authentication Token for Wavefront. Required if using Direct Ingestion. Not required if using a Wavefront Proxy. # #token = "DUMMY_TOKEN" # -# ## Maximum number of metrics to send per batch for Direct Ingestion. Ignored unless 'url' is set. This value should be higher than the `metric_batch_size`. Default is 10,000. Values higher than 40,000 are not recommended. +# ## Maximum number of metrics to send per HTTP request. This value should be higher than the `metric_batch_size`. Default is 10,000. Values higher than 40,000 are not recommended. # # http_maximum_batch_size = 10000 # -# ## DNS name of the wavefront proxy server. Do not use if url is specified +# ## Deprecated. DNS name of the Wavefront server or Wavefront Proxy. Use the `url` field instead. # #host = "wavefront.example.com" # -# ## Port that the Wavefront proxy server listens on. 
Do not use if url is specified +# ## Deprecated. Wavefront proxy port. Use the `url` field instead. # #port = 2878 # # ## prefix for metrics keys @@ -2663,16 +2852,20 @@ # # additional_tag = "tag_value" -# # Parse a value in a specified field/tag(s) and add the result in a new metric +# # Parse a value in a specified field(s)/tag(s) and add the result in a new metric # [[processors.parser]] # ## The name of the fields whose value will be parsed. # parse_fields = ["message"] # +# ## The name of the tags whose value will be parsed. +# # parse_tags = [] +# # ## If true, incoming metrics are not emitted. -# drop_original = false +# # drop_original = false # # ## If set to override, emitted metrics will be merged by overriding the # ## original metric using the newly parsed metrics. +# ## Only has effect when drop_original is set to false. # merge = "override" # # ## The dataformat to be read from files @@ -3579,10 +3772,11 @@ # # insecure_skip_verify = false -# # Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster. +# # Collects performance metrics from the MON, OSD, MDS and RGW nodes +# # in a Ceph storage cluster. # [[inputs.ceph]] -# ## This is the recommended interval to poll. Too frequent and you will lose -# ## data points due to timeouts during rebalancing and recovery +# ## This is the recommended interval to poll. Too frequent and you +# ## will lose data points due to timeouts during rebalancing and recovery # interval = '1m' # # ## All configuration values are optional, defaults are shown below @@ -3602,9 +3796,9 @@ # ## suffix used to identify socket files # socket_suffix = "asok" # -# ## Ceph user to authenticate as, ceph will search for the corresponding keyring -# ## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the -# ## client section of ceph.conf for example: +# ## Ceph user to authenticate as, ceph will search for the corresponding +# ## keyring e.g. client.admin.keyring in /etc/ceph, or the explicit path +# ## defined in the client section of ceph.conf for example: # ## # ## [client.telegraf] # ## keyring = /etc/ceph/client.telegraf.keyring @@ -3618,8 +3812,8 @@ # ## Whether to gather statistics via the admin socket # gather_admin_socket_stats = true # -# ## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config -# ## to be specified +# ## Whether to gather statistics via ceph commands, requires ceph_user +# ## and ceph_config to be specified # gather_cluster_stats = false @@ -3652,7 +3846,8 @@ # # ## Amazon Credentials # ## Credentials are loaded in the following order -# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 1) Web identity provider credentials via STS if role_arn and +# ## web_identity_token_file are specified # ## 2) Assumed credentials via STS if role_arn is specified # ## 3) explicit credentials from 'access_key' and 'secret_key' # ## 4) shared profile from 'profile' @@ -3678,27 +3873,33 @@ # # use_system_proxy = false # # http_proxy_url = "http://localhost:8888" # -# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all -# # metrics are made available to the 1 minute period. Some are collected at -# # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring. 
-# # Note that if a period is configured that is smaller than the minimum for a -# # particular metric, that metric will not be returned by the Cloudwatch API -# # and will not be collected by Telegraf. +# ## The minimum period for Cloudwatch metrics is 1 minute (60s). However not +# ## all metrics are made available to the 1 minute period. Some are collected +# ## at 3 minute, 5 minute, or larger intervals. +# ## See https://aws.amazon.com/cloudwatch/faqs/#monitoring. +# ## Note that if a period is configured that is smaller than the minimum for a +# ## particular metric, that metric will not be returned by the Cloudwatch API +# ## and will not be collected by Telegraf. # # -# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s) +# ## Requested CloudWatch aggregation Period (required) +# ## Must be a multiple of 60s. # period = "5m" # -# ## Collection Delay (required - must account for metrics availability via CloudWatch API) +# ## Collection Delay (required) +# ## Must account for metrics availability via CloudWatch API # delay = "5m" # # ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid # ## gaps or overlap in pulled data # interval = "5m" # -# ## Recommended if "delay" and "period" are both within 3 hours of request time. Invalid values will be ignored. -# ## Recently Active feature will only poll for CloudWatch ListMetrics values that occurred within the last 3 Hours. -# ## If enabled, it will reduce total API usage of the CloudWatch ListMetrics API and require less memory to retain. -# ## Do not enable if "period" or "delay" is longer than 3 hours, as it will not return data more than 3 hours old. +# ## Recommended if "delay" and "period" are both within 3 hours of request +# ## time. Invalid values will be ignored. Recently Active feature will only +# ## poll for CloudWatch ListMetrics values that occurred within the last 3h. +# ## If enabled, it will reduce total API usage of the CloudWatch ListMetrics +# ## API and require less memory to retain. +# ## Do not enable if "period" or "delay" is longer than 3 hours, as it will +# ## not return data more than 3 hours old. # ## See https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html # #recently_active = "PT3H" # @@ -3707,21 +3908,27 @@ # # ## Metric Statistic Namespaces (required) # namespaces = ["AWS/ELB"] -# # A single metric statistic namespace that will be appended to namespaces on startup +# ## Single metric statistic namespace appended to namespaces on startup # # namespace = "AWS/ELB" # -# ## Maximum requests per second. Note that the global default AWS rate limit is -# ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a -# ## maximum of 50. +# ## Maximum requests per second. Note that the global default AWS rate limit +# ## is 50 reqs/sec, so if you define multiple namespaces, these should add up +# ## to a maximum of 50. # ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html # # ratelimit = 25 # # ## Timeout for http requests made by the cloudwatch client. # # timeout = "5s" # +# ## Batch Size +# ## The size of each batch to send requests to Cloudwatch. 500 is the +# ## suggested largest size. If a request gets to large (413 errors), consider +# ## reducing this amount. +# # batch_size = 500 +# # ## Namespace-wide statistic filters. These allow fewer queries to be made to # ## cloudwatch. 
-# # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ] +# # statistic_include = ["average", "sum", "minimum", "maximum", sample_count"] # # statistic_exclude = [] # # ## Metrics to Pull @@ -3732,11 +3939,12 @@ # # # # ## Statistic filters for Metric. These allow for retrieving specific # # ## statistics for an individual metric. -# # # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ] +# # # statistic_include = ["average", "sum", "minimum", "maximum", sample_count"] # # # statistic_exclude = [] # # -# # ## Dimension filters for Metric. All dimensions defined for the metric names -# # ## must be specified in order to retrieve the metric statistics. +# # ## Dimension filters for Metric. +# # ## All dimensions defined for the metric names must be specified in order +# # ## to retrieve the metric statistics. # # ## 'value' has wildcard / 'glob' matching support such as 'p-*'. # # [[inputs.cloudwatch.metrics.dimensions]] # # name = "LoadBalancerName" @@ -4052,34 +4260,42 @@ # # Reads metrics from DPDK applications using v2 telemetry interface. # [[inputs.dpdk]] -# ## Path to DPDK telemetry socket. This shall point to v2 version of DPDK telemetry interface. +# ## Path to DPDK telemetry socket. This shall point to v2 version of DPDK +# ## telemetry interface. # # socket_path = "/var/run/dpdk/rte/dpdk_telemetry.v2" # -# ## Duration that defines how long the connected socket client will wait for a response before terminating connection. -# ## This includes both writing to and reading from socket. Since it's local socket access -# ## to a fast packet processing application, the timeout should be sufficient for most users. +# ## Duration that defines how long the connected socket client will wait for +# ## a response before terminating connection. +# ## This includes both writing to and reading from socket. Since it's local +# ## socket access to a fast packet processing application, the timeout should +# ## be sufficient for most users. # ## Setting the value to 0 disables the timeout (not recommended) # # socket_access_timeout = "200ms" # # ## Enables telemetry data collection for selected device types. -# ## Adding "ethdev" enables collection of telemetry from DPDK NICs (stats, xstats, link_status). -# ## Adding "rawdev" enables collection of telemetry from DPDK Raw Devices (xstats). +# ## Adding "ethdev" enables collection of telemetry from DPDK NICs +# ## (stats, xstats, link_status). +# ## Adding "rawdev" enables collection of telemetry from DPDK Raw Devices +# ## (xstats). # # device_types = ["ethdev"] # # ## List of custom, application-specific telemetry commands to query -# ## The list of available commands depend on the application deployed. Applications can register their own commands -# ## via telemetry library API http://doc.dpdk.org/guides/prog_guide/telemetry_lib.html#registering-commands -# ## For e.g. L3 Forwarding with Power Management Sample Application this could be: +# ## The list of available commands depend on the application deployed. +# ## Applications can register their own commands via telemetry library API +# ## http://doc.dpdk.org/guides/prog_guide/telemetry_lib.html#registering-commands +# ## For L3 Forwarding with Power Management Sample Application this could be: # ## additional_commands = ["/l3fwd-power/stats"] # # additional_commands = [] # # ## Allows turning off collecting data for individual "ethdev" commands. -# ## Remove "/ethdev/link_status" from list to start getting link status metrics. 
+# ## Remove "/ethdev/link_status" from list to gather link status metrics. # [inputs.dpdk.ethdev] # exclude_commands = ["/ethdev/link_status"] # -# ## When running multiple instances of the plugin it's recommended to add a unique tag to each instance to identify -# ## metrics exposed by an instance of DPDK application. This is useful when multiple DPDK apps run on a single host. +# ## When running multiple instances of the plugin it's recommended to add a +# ## unique tag to each instance to identify metrics exposed by an instance +# ## of DPDK application. This is useful when multiple DPDK apps run on a +# ## single host. # ## [inputs.dpdk.tags] # ## dpdk_instance = "my-fwd-app" @@ -4139,20 +4355,22 @@ # ## Set cluster_stats to true when you want to obtain cluster stats. # cluster_stats = false # -# ## Only gather cluster_stats from the master node. To work this require local = true +# ## Only gather cluster_stats from the master node. +# ## To work this require local = true # cluster_stats_only_from_master = true # # ## Indices to collect; can be one or more indices names or _all -# ## Use of wildcards is allowed. Use a wildcard at the end to retrieve index names that end with a changing value, like a date. +# ## Use of wildcards is allowed. Use a wildcard at the end to retrieve index +# ## names that end with a changing value, like a date. # indices_include = ["_all"] # # ## One of "shards", "cluster", "indices" # ## Currently only "shards" is implemented # indices_level = "shards" # -# ## node_stats is a list of sub-stats that you want to have gathered. Valid options -# ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http", -# ## "breaker". Per default, all stats are gathered. +# ## node_stats is a list of sub-stats that you want to have gathered. +# ## Valid options are "indices", "os", "process", "jvm", "thread_pool", +# ## "fs", "transport", "http", "breaker". Per default, all stats are gathered. # # node_stats = ["jvm", "http"] # # ## HTTP Basic Authentication username and password. @@ -4166,10 +4384,12 @@ # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # -# ## Sets the number of most recent indices to return for indices that are configured with a date-stamped suffix. -# ## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all indices that match it, and -# ## sort them by the date or number after the wildcard. Metrics then are gathered for only the 'num_most_recent_indices' amount of most -# ## recent indices. +# ## Sets the number of most recent indices to return for indices that are +# ## configured with a date-stamped suffix. Each 'indices_include' entry +# ## ending with a wildcard (*) or glob matching pattern will group together +# ## all indices that match it, and sort them by the date or number after +# ## the wildcard. Metrics then are gathered for only the +# ## 'num_most_recent_indices' amount of most recent indices. # # num_most_recent_indices = 0 @@ -4235,7 +4455,8 @@ # # metric_function = "avg" # # ## Fields to be used as tags -# ## Must be text, non-analyzed fields. Metric aggregations are performed per tag +# ## Must be text, non-analyzed fields. 
Metric aggregations are performed +# ## per tag # # tags = ["field.keyword", "field2.keyword"] # # ## Set to true to not ignore documents when the tag(s) above are missing @@ -4566,6 +4787,8 @@ # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" +# ## Minimal TLS version to accept by the client +# # tls_min_version = "TLS12" # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # @@ -4598,7 +4821,8 @@ # ## List of urls to query. # # urls = ["http://localhost"] # -# ## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set) +# ## Set http_proxy. +# ## Telegraf uses the system wide proxy settings if it's is not set. # # http_proxy = "http://localhost:8888" # # ## Set response_timeout (default 5 seconds) @@ -4624,12 +4848,14 @@ # # ''' # # ## Optional name of the field that will contain the body of the response. -# ## By default it is set to an empty String indicating that the body's content won't be added +# ## By default it is set to an empty String indicating that the body's +# ## content won't be added # # response_body_field = '' # # ## Maximum allowed HTTP response body size in bytes. # ## 0 means to use the default of 32MiB. -# ## If the response body size exceeds this limit a "body_read_error" will be raised +# ## If the response body size exceeds this limit a "body_read_error" will +# ## be raised. # # response_body_max_size = "32MiB" # # ## Optional substring or regex match in body of the response (case sensitive) @@ -4638,9 +4864,10 @@ # # response_string_match = "\".*_status\".?:.?\"up\"" # # ## Expected response status code. -# ## The status code of the response is compared to this value. If they match, the field -# ## "response_status_code_match" will be 1, otherwise it will be 0. If the -# ## expected status code is 0, the check is disabled and the field won't be added. +# ## The status code of the response is compared to this value. If they match, +# ## the field "response_status_code_match" will be 1, otherwise it will be 0. +# ## If the expected status code is 0, the check is disabled and the field +# ## won't be added. # # response_status_code = 0 # # ## Optional TLS Config @@ -4657,8 +4884,9 @@ # # Host = "github.com" # # ## Optional setting to map response http headers into tags -# ## If the http header is not present on the request, no corresponding tag will be added -# ## If multiple instances of the http header are present, only the first value will be used +# ## If the http header is not present on the request, no corresponding tag will +# ## be added. If multiple instances of the http header are present, only the +# ## first value will be used. # # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} # # ## Interface to use when dialing an address @@ -4717,9 +4945,11 @@ # # Gathers huge pages measurements. 
# [[inputs.hugepages]] # ## Supported huge page types: -# ## - "root" - based on root huge page control directory: /sys/kernel/mm/hugepages -# ## - "per_node" - based on per NUMA node directories: /sys/devices/system/node/node[0-9]*/hugepages -# ## - "meminfo" - based on /proc/meminfo file +# ## - "root" - based on root huge page control directory: +# ## /sys/kernel/mm/hugepages +# ## - "per_node" - based on per NUMA node directories: +# ## /sys/devices/system/node/node[0-9]*/hugepages +# ## - "meminfo" - based on /proc/meminfo file # # types = ["root", "per_node"] @@ -4778,21 +5008,30 @@ # timeout = "5s" -# # Intel PowerStat plugin enables monitoring of platform metrics (power, TDP) and per-CPU metrics like temperature, power and utilization. +# # Intel PowerStat plugin enables monitoring of platform metrics (power, TDP) +# # and per-CPU metrics like temperature, power and utilization. # [[inputs.intel_powerstat]] -# ## The user can choose which package metrics are monitored by the plugin with the package_metrics setting: -# ## - The default, will collect "current_power_consumption", "current_dram_power_consumption" and "thermal_design_power" -# ## - Setting this value to an empty array means no package metrics will be collected -# ## - Finally, a user can specify individual metrics to capture from the supported options list +# ## The user can choose which package metrics are monitored by the plugin with +# ## the package_metrics setting: +# ## - The default, will collect "current_power_consumption", +# ## "current_dram_power_consumption" and "thermal_design_power" +# ## - Leaving this setting empty means no package metrics will be collected +# ## - Finally, a user can specify individual metrics to capture from the +# ## supported options list # ## Supported options: -# ## "current_power_consumption", "current_dram_power_consumption", "thermal_design_power", "max_turbo_frequency", "uncore_frequency" +# ## "current_power_consumption", "current_dram_power_consumption", +# ## "thermal_design_power", "max_turbo_frequency", "uncore_frequency" # # package_metrics = ["current_power_consumption", "current_dram_power_consumption", "thermal_design_power"] # -# ## The user can choose which per-CPU metrics are monitored by the plugin in cpu_metrics array. -# ## Empty or missing array means no per-CPU specific metrics will be collected by the plugin. +# ## The user can choose which per-CPU metrics are monitored by the plugin in +# ## cpu_metrics array. +# ## Empty or missing array means no per-CPU specific metrics will be collected +# ## by the plugin. # ## Supported options: -# ## "cpu_frequency", "cpu_c0_state_residency", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles", "cpu_temperature", "cpu_busy_frequency" -# ## ATTENTION: cpu_busy_cycles option is DEPRECATED - superseded by cpu_c0_state_residency +# ## "cpu_frequency", "cpu_c0_state_residency", "cpu_c1_state_residency", +# ## "cpu_c6_state_residency", "cpu_busy_cycles", "cpu_temperature", +# ## "cpu_busy_frequency" +# ## ATTENTION: cpu_busy_cycles is DEPRECATED - use cpu_c0_state_residency # # cpu_metrics = [] @@ -4894,11 +5133,13 @@ # [[inputs.iptables]] # ## iptables require root access on most systems. # ## Setting 'use_sudo' to true will make use of sudo to run iptables. -# ## Users must configure sudo to allow telegraf user to run iptables with no password. +# ## Users must configure sudo to allow telegraf user to run iptables with +# ## no password. # ## iptables can be restricted to only list command "iptables -nvL". 
# use_sudo = false # ## Setting 'use_lock' to true runs iptables with the "-w" option. -# ## Adjust your sudo settings appropriately if using this option ("iptables -w 5 -nvl") +# ## Adjust your sudo settings appropriately if using this option +# ## ("iptables -w 5 -nvl") # use_lock = false # ## Define an alternate executable, such as "ip6tables". Default is "iptables". # # binary = "ip6tables" @@ -5033,6 +5274,10 @@ # # password = "" # # response_timeout = "5s" # +# ## Optional origin URL to include as a header in the request. Some endpoints +# ## may reject an empty origin. +# # origin = "" +# # ## Optional TLS config # # tls_ca = "/var/private/ca.pem" # # tls_cert = "/var/private/client.pem" @@ -5058,6 +5303,10 @@ # # password = "" # # response_timeout = "5s" # +# ## Optional origin URL to include as a header in the request. Some endpoints +# ## may reject an empty origin. +# # origin = "" +# # ## Optional TLS config # # tls_ca = "/var/private/ca.pem" # # tls_cert = "/var/private/client.pem" @@ -5132,10 +5381,15 @@ # # namespace = "default" # # ## Use bearer token for authorization. ('bearer_token' takes priority) +# ## # ## If both of these are empty, we'll use the default serviceaccount: # ## at: /run/secrets/kubernetes.io/serviceaccount/token -# # bearer_token = "/path/to/bearer/token" +# ## +# ## To auto-refresh the token, please use a file with the bearer_token option. +# ## If given a string, Telegraf cannot refresh the token periodically. +# # bearer_token = "/run/secrets/kubernetes.io/serviceaccount/token" # ## OR +# ## deprecated in 1.24.0; use bearer_token with a file # # bearer_token_string = "abc_123" # # ## Set response_timeout (default 5 seconds) @@ -5143,8 +5397,9 @@ # # ## Optional Resources to exclude from gathering # ## Leave them with blank with try to gather everything available. -# ## Values can be - "daemonsets", deployments", "endpoints", "ingress", "nodes", -# ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets" +# ## Values can be - "daemonsets", deployments", "endpoints", "ingress", +# ## "nodes", "persistentvolumes", "persistentvolumeclaims", "pods", "services", +# ## "statefulsets" # # resource_exclude = [ "deployments", "nodes", "statefulsets" ] # # ## Optional Resources to include when gathering @@ -5181,7 +5436,11 @@ # ## Use bearer token for authorization. ('bearer_token' takes priority) # ## If both of these are empty, we'll use the default serviceaccount: # ## at: /run/secrets/kubernetes.io/serviceaccount/token -# # bearer_token = "/path/to/bearer/token" +# ## +# ## To re-read the token at each interval, please use a file with the +# ## bearer_token option. If given a string, Telegraf will always use that +# ## token. +# # bearer_token = "/run/secrets/kubernetes.io/serviceaccount/token" # ## OR # # bearer_token_string = "abc_123" # @@ -5208,6 +5467,20 @@ # servers = ["127.0.0.1:4010"] +# # Provides Linux CPU metrics +# [[inputs.linux_cpu]] +# ## Path for sysfs filesystem. +# ## See https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt +# ## Defaults: +# # host_sys = "/sys" +# +# ## CPU metrics collected by the plugin. 
+# ## Supported options: +# ## "cpufreq", "thermal" +# ## Defaults: +# # metrics = ["cpufreq"] + + # # Provides Linux sysctl fs metrics # [[inputs.linux_sysctl_fs]] # # no configuration @@ -5579,7 +5852,7 @@ # { address=3, name="motor1_overheating"}, # ] # -# [[inputs.modbus.request.tags]] +# [inputs.modbus.request.tags] # machine = "impresser" # location = "main building" # @@ -5598,7 +5871,7 @@ # { address=8, name="power_factor", type="INT64", scale=0.01 }, # ] # -# [[inputs.modbus.request.tags]] +# [inputs.modbus.request.tags] # machine = "impresser" # location = "main building" # @@ -5614,7 +5887,7 @@ # { address=4, name="hours", type="UINT32" }, # will result in UIN64 field # ] # -# [[inputs.modbus.request.tags]] +# [inputs.modbus.request.tags] # machine = "impresser" # location = "main building" # @@ -5668,6 +5941,12 @@ # # tls_key = "/etc/telegraf/key.pem" # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false +# +# ## Specifies plugin behavior regarding disconnected servers +# ## Available choices : +# ## - error: telegraf will return an error on startup if one the servers is unreachable +# ## - skip: telegraf will skip unreachable servers on both startup and gather +# # disconnected_servers_behavior = "error" # # Read metrics and status information about processes managed by Monit @@ -5743,7 +6022,8 @@ # ## if the list is empty, then metrics are gathered from all database tables # # table_schema_databases = [] # -# ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list +# ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided +# ## in the list above # # gather_table_schema = false # # ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST @@ -5809,7 +6089,7 @@ # ## example: interval_slow = "30m" # # interval_slow = "" # -# ## Optional TLS Config (will be used if tls=custom parameter specified in server uri) +# ## Optional TLS Config (used if tls=custom parameter specified in server uri) # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" @@ -6091,8 +6371,25 @@ # # Get standard NTP query metrics, requires ntpq executable. # [[inputs.ntpq]] +# ## Servers to query with ntpq. +# ## If no server is given, the local machine is queried. +# # servers = [] +# # ## If false, set the -n ntpq flag. Can reduce metric gather time. -# dns_lookup = true +# ## DEPRECATED since 1.24.0: add '-n' to 'options' instead to skip DNS lookup +# # dns_lookup = true +# +# ## Options to pass to the ntpq command. +# # options = "-p" +# +# ## Output format for the 'reach' field. +# ## Available values are +# ## octal -- output as is in octal representation e.g. 377 (default) +# ## decimal -- convert value to decimal representation e.g. 371 -> 249 +# ## count -- count the number of bits in the value. This represents +# ## the number of successful reaches, e.g. 37 -> 5 +# ## ratio -- output the ratio of successful attempts e.g. 
37 -> 5/8 = 0.625 +# # reach_format = "octal" # # Pulls statistics from nvidia GPUs attached to the host @@ -6233,9 +6530,9 @@ # # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver -# [[inputs.opensmtpd]] -# ## If running as a restricted user you can prepend sudo for additional access: -# #use_sudo = false +# [[inputs.opensmtpd]] +# ## If running as a restricted user you can prepend sudo for additional access: +# #use_sudo = false # # ## The default location of the smtpctl binary can be overridden with: # binary = "/usr/sbin/smtpctl" @@ -6520,7 +6817,11 @@ # ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /. # base_url = "https://localhost:8006/api2/json" # api_token = "USER@REALM!TOKENID=UUID" +# # ## Node name, defaults to OS hostname +# ## Unless Telegraf is on the same host as Proxmox, setting this is required +# ## for Telegraf to successfully connect to Proxmox. If not on the same host, +# ## leaving this empty will often lead to a "search domain is not set" error. # # node_name = "" # # ## Optional TLS Config @@ -7075,6 +7376,17 @@ # # value = 'one_of("sda", "sdb")' +# # Gathers information about processes that running under supervisor using XML-RPC API +# [[inputs.supervisor]] +# ## Url of supervisor's XML-RPC endpoint if basic auth enabled in supervisor http server, +# ## than you have to add credentials to url (ex. http://login:pass@localhost:9001/RPC2) +# # url="http://localhost:9001/RPC2" +# ## With settings below you can manage gathering additional information about processes +# ## If both of them empty, then all additional information will be collected. +# ## Currently supported supported additional metrics are: pid, rc +# # metrics_include = [] + + # # Get synproxy counter statistics from procfs # [[inputs.synproxy]] # # no configuration @@ -7463,18 +7775,21 @@ # # public_key_id = "" # # role_name = "" # -# ## Specify the ali cloud region list to be queried for metrics and objects discovery -# ## If not set, all supported regions (see below) would be covered, it can provide a significant load on API, so the recommendation here -# ## is to limit the list as much as possible. Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm +# ## Specify ali cloud regions to be queried for metric and object discovery +# ## If not set, all supported regions (see below) would be covered, it can +# ## provide a significant load on API, so the recommendation here is to +# ## limit the list as much as possible. +# ## Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm # ## Default supported regions are: -# ## 21 items: cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,cn-shanghai,cn-shenzhen, -# ## cn-heyuan,cn-chengdu,cn-hongkong,ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5, -# ## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,eu-west-1,me-east-1 +# ## cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou, +# ## cn-shanghai, cn-shenzhen, cn-heyuan,cn-chengdu,cn-hongkong, +# ## ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5, +# ## ap-south-1,ap-northeast-1, us-west-1,us-east-1,eu-central-1, +# ## eu-west-1,me-east-1 # ## -# ## From discovery perspective it set the scope for object discovery, the discovered info can be used to enrich -# ## the metrics with objects attributes/tags. 
Discovery is supported not for all projects (if not supported, then -# ## it will be reported on the start - for example for 'acs_cdn' project: -# ## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' ) +# ## From discovery perspective it set the scope for object discovery, +# ## the discovered info can be used to enrich the metrics with objects +# ## attributes/tags. Discovery is not supported for all projects. # ## Currently, discovery supported for the following projects: # ## - acs_ecs_dashboard # ## - acs_rds_dashboard @@ -7482,22 +7797,23 @@ # ## - acs_vpc_eip # regions = ["cn-hongkong"] # -# # The minimum period for AliyunCMS metrics is 1 minute (60s). However not all -# # metrics are made available to the 1 minute period. Some are collected at -# # 3 minute, 5 minute, or larger intervals. -# # See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv -# # Note that if a period is configured that is smaller than the minimum for a -# # particular metric, that metric will not be returned by the Aliyun OpenAPI -# # and will not be collected by Telegraf. -# # -# ## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s) +# ## Requested AliyunCMS aggregation Period (required) +# ## The period must be multiples of 60s and the minimum for AliyunCMS metrics +# ## is 1 minute (60s). However not all metrics are made available to the +# ## one minute period. Some are collected at 3 minute, 5 minute, or larger +# ## intervals. +# ## See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv +# ## Note that if a period is configured that is smaller than the minimum for +# ## a particular metric, that metric will not be returned by Aliyun's +# ## OpenAPI and will not be collected by Telegraf. # period = "5m" # -# ## Collection Delay (required - must account for metrics availability via AliyunCMS API) +# ## Collection Delay (required) +# ## The delay must account for metrics availability via AliyunCMS API. # delay = "1m" # -# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid -# ## gaps or overlap in pulled data +# ## Recommended: use metric 'interval' that is a multiple of 'period' +# ## to avoid gaps or overlap in pulled data # interval = "5m" # # ## Metric Statistic Project (required) @@ -7512,15 +7828,19 @@ # ## Metrics to Pull (Required) # [[inputs.aliyuncms.metrics]] # ## Metrics names to be requested, -# ## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq +# ## Description can be found here (per project): +# ## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq # names = ["InstanceActiveConnection", "InstanceNewConnection"] # -# ## Dimension filters for Metric (these are optional). -# ## This allows to get additional metric dimension. If dimension is not specified it can be returned or -# ## the data can be aggregated - it depends on particular metric, you can find details here: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq +# ## Dimension filters for Metric (optional). +# ## This allows to get additional metric dimension. 
If dimension is not +# ## specified it can be returned or the data can be aggregated - it depends +# ## on particular metric, you can find details here: +# ## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq # ## -# ## Note, that by default dimension filter includes the list of discovered objects in scope (if discovery is enabled) -# ## Values specified here would be added into the list of discovered objects. +# ## Note, that by default dimension filter includes the list of discovered +# ## objects in scope (if discovery is enabled). +# # Values specified here would be added into the list of discovered objects. # ## You can specify either single dimension: # #dimensions = '{"instanceId": "p-example"}' # @@ -7529,19 +7849,22 @@ # # ## Enrichment tags, can be added from discovery (if supported) # ## Notation is : -# ## To figure out which fields are available, consult the Describe API per project. +# ## To figure out which fields are available, consult the Describe +# ## API per project. # ## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers¶ms={}&tab=MOCK&lang=GO # #tag_query_path = [ # # "address:Address", # # "name:LoadBalancerName", # # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]" # # ] -# ## The following tags added by default: regionId (if discovery enabled), userId, instanceId. +# ## The following tags added by default: +# ## regionId (if discovery enabled), userId, instanceId. # -# ## Allow metrics without discovery data, if discovery is enabled. If set to true, then metric without discovery -# ## data would be emitted, otherwise dropped. This cane be of help, in case debugging dimension filters, or partial coverage -# ## of discovery scope vs monitoring scope -# #allow_dps_without_discovery = false +# ## Allow metrics without discovery data, if discovery is enabled. +# ## If set to true, then metric without discovery data would be emitted, otherwise dropped. +# ## This cane be of help, in case debugging dimension filters, or partial coverage of +# ## discovery scope vs monitoring scope +# # allow_dps_without_discovery = false # # AMQP consumer plugin @@ -7678,43 +8001,52 @@ # # dnpath = '{"Name": "show ip route summary","prop": [{"Key": "routes","Value": "string"}, {"Key": "best-paths","Value": "string"}]}' # # dnpath2 = '{"Name": "show processes cpu","prop": [{"Key": "kernel_percent","Value": "float"}, {"Key": "idle_percent","Value": "float"}, {"Key": "process","Value": "string"}, {"Key": "user_percent","Value": "float"}, {"Key": "onesec","Value": "float"}]}' # # dnpath3 = '{"Name": "show processes memory physical","prop": [{"Key": "processname","Value": "string"}]}' +# +# ## Additional GRPC connection settings. +# [inputs.cisco_telemetry_mdt.grpc_enforcement_policy] +# ## GRPC permit keepalives without calls, set to true if your clients are +# ## sending pings without calls in-flight. This can sometimes happen on IOS-XE +# ## devices where the GRPC connection is left open but subscriptions have been +# ## removed, and adding subsequent subscriptions does not keep a stable session. +# # permit_keepalive_without_calls = false +# +# ## GRPC minimum timeout between successive pings, decreasing this value may +# ## help if this plugin is closing connections with ENHANCE_YOUR_CALM (too_many_pings). 
+# # keepalive_minimum_time = "5m" # # Read metrics from one or many ClickHouse servers # [[inputs.clickhouse]] # ## Username for authorization on ClickHouse server -# ## example: username = "default" # username = "default" # # ## Password for authorization on ClickHouse server -# ## example: password = "super_secret" +# # password = "" # # ## HTTP(s) timeout while getting metrics values -# ## The timeout includes connection time, any redirects, and reading the response body. -# ## example: timeout = 1s +# ## The timeout includes connection time, any redirects, and reading the +# ## response body. # # timeout = 5s # # ## List of servers for metrics scraping # ## metrics scrape via HTTP(s) clickhouse interface # ## https://clickhouse.tech/docs/en/interfaces/http/ -# ## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"] -# servers = ["http://127.0.0.1:8123"] +# servers = ["http://127.0.0.1:8123"] # -# ## If "auto_discovery"" is "true" plugin tries to connect to all servers available in the cluster -# ## with using same "user:password" described in "user" and "password" parameters -# ## and get this server hostname list from "system.clusters" table -# ## see +# ## If "auto_discovery"" is "true" plugin tries to connect to all servers +# ## available in the cluster with using same "user:password" described in +# ## "user" and "password" parameters and get this server hostname list from +# ## "system.clusters" table. See # ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters # ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers # ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/ # ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables -# ## example: auto_discovery = false # # auto_discovery = true # # ## Filter cluster names in "system.clusters" when "auto_discovery" is "true" # ## when this filter present then "WHERE cluster IN (...)" filter will apply -# ## please use only full cluster names here, regexp and glob filters is not allowed -# ## for "/etc/clickhouse-server/config.d/remote.xml" +# ## please use only full cluster names here, regexp and glob filters is not +# ## allowed for "/etc/clickhouse-server/config.d/remote.xml" # ## # ## # ## @@ -7734,8 +8066,9 @@ # ## example: cluster_include = ["my-own-cluster"] # # cluster_include = [] # -# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true" -# ## when this filter present then "WHERE cluster NOT IN (...)" filter will apply +# ## Filter cluster names in "system.clusters" when "auto_discovery" is +# ## "true" when this filter present then "WHERE cluster NOT IN (...)" +# ## filter will apply # ## example: cluster_exclude = ["my-internal-not-discovered-cluster"] # # cluster_exclude = [] # @@ -7836,16 +8169,17 @@ # # ## Maximum duration before timing out read of the request # # read_timeout = "10s" -# ## Maximum duration before timing out write of the response. This should be set to a value -# ## large enough that you can send at least 'metric_batch_size' number of messages within the -# ## duration. +# ## Maximum duration before timing out write of the response. This should be +# ## set to a value large enough that you can send at least 'metric_batch_size' +# ## number of messages within the duration. # # write_timeout = "10s" # # ## Maximum allowed http request body size in bytes. 
# ## 0 means to use the default of 524,288,00 bytes (500 mebibytes) # # max_body_size = "500MB" # -# ## Whether to add the pubsub metadata, such as message attributes and subscription as a tag. +# ## Whether to add the pubsub metadata, such as message attributes and +# ## subscription as a tag. # # add_meta = false # # ## Optional. Maximum messages to read from PubSub that have not been written @@ -7895,7 +8229,8 @@ # ## Optional access key for Firehose security. # # access_key = "test-key" # -# ## An optional flag to keep Metric Streams metrics compatible with CloudWatch's API naming +# ## An optional flag to keep Metric Streams metrics compatible with +# ## CloudWatch's API naming # # api_compatability = false # # ## Set one or more allowed client CA certificate file names to @@ -7909,12 +8244,15 @@ # # Ingests files in a directory and then moves them to a target directory. # [[inputs.directory_monitor]] -# ## The directory to monitor and read files from. +# ## The directory to monitor and read files from (including sub-directories if "recursive" is true). # directory = "" # # -# ## The directory to move finished files to. +# ## The directory to move finished files to (maintaining directory hierachy from source). # finished_directory = "" # # +# ## Setting recursive to true will make the plugin recursively walk the directory and process all sub-directories. +# # recursive = false +# # # ## The directory to move files to upon file error. # ## If not provided, erroring files will stay in the monitored directory. # # error_directory = "" @@ -8132,6 +8470,9 @@ # ## enable client-side TLS and define CA to authenticate the device # # enable_tls = true # # tls_ca = "/etc/telegraf/ca.pem" +# ## Minimal TLS version to accept by the client +# # tls_min_version = "TLS12" +# ## Use TLS but skip chain & host verification # # insecure_skip_verify = true # # ## define client-side TLS certificate & key to authenticate to the device @@ -8269,6 +8610,9 @@ # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" # +# ## Minimal TLS version accepted by the server +# # tls_min_version = "TLS12" +# # ## Optional username and password to accept for HTTP basic authentication. # ## You probably want to make sure you have TLS configured above for this. # # basic_username = "foobar" @@ -8420,35 +8764,35 @@ # # Read Intel RDT metrics # [[inputs.intel_rdt]] -# ## Optionally set sampling interval to Nx100ms. -# ## This value is propagated to pqos tool. Interval format is defined by pqos itself. -# ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. -# # sampling_interval = "10" +# ## Optionally set sampling interval to Nx100ms. +# ## This value is propagated to pqos tool. Interval format is defined by pqos itself. +# ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. +# # sampling_interval = "10" # -# ## Optionally specify the path to pqos executable. -# ## If not provided, auto discovery will be performed. -# # pqos_path = "/usr/local/bin/pqos" +# ## Optionally specify the path to pqos executable. +# ## If not provided, auto discovery will be performed. +# # pqos_path = "/usr/local/bin/pqos" # -# ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. -# ## If not provided, default value is false. -# # shortened_metrics = false +# ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. +# ## If not provided, default value is false. 
+# # shortened_metrics = false # -# ## Specify the list of groups of CPU core(s) to be provided as pqos input. -# ## Mandatory if processes aren't set and forbidden if processes are specified. -# ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] -# # cores = ["0-3"] +# ## Specify the list of groups of CPU core(s) to be provided as pqos input. +# ## Mandatory if processes aren't set and forbidden if processes are specified. +# ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] +# # cores = ["0-3"] # -# ## Specify the list of processes for which Metrics will be collected. -# ## Mandatory if cores aren't set and forbidden if cores are specified. -# ## e.g. ["qemu", "pmd"] -# # processes = ["process"] +# ## Specify the list of processes for which Metrics will be collected. +# ## Mandatory if cores aren't set and forbidden if cores are specified. +# ## e.g. ["qemu", "pmd"] +# # processes = ["process"] # -# ## Specify if the pqos process should be called with sudo. -# ## Mandatory if the telegraf process does not run as root. -# # use_sudo = false +# ## Specify if the pqos process should be called with sudo. +# ## Mandatory if the telegraf process does not run as root. +# # use_sudo = false -# # Read JTI OpenConfig Telemetry from listed sensors +# # Subscribe and receive OpenConfig Telemetry data using JTI # [[inputs.jti_openconfig_telemetry]] # ## List of device addresses to collect telemetry from # servers = ["localhost:1883"] @@ -8488,6 +8832,8 @@ # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" +# ## Minimal TLS version to accept by the client +# # tls_min_version = "TLS12" # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # @@ -8593,6 +8939,13 @@ # ## '2 * max_processing_time'. # # max_processing_time = "100ms" # +# ## The default number of message bytes to fetch from the broker in each +# ## request (default 1MB). This should be larger than the majority of +# ## your messages, or else the consumer will spend a lot of time +# ## negotiating sizes and not actually consuming. Similar to the JVM's +# ## `fetch.message.max.bytes`. +# # consumer_fetch_default = "1MB" +# # ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -8872,8 +9225,19 @@ # servers = ["nats://localhost:4222"] # # ## subject(s) to consume +# ## If you use jetstream you need to set the subjects +# ## in jetstream_subjects # subjects = ["telegraf"] # +# ## jetstream subjects +# ## jetstream is a streaming technology inside of nats. +# ## With jetstream the nats-server persists messages and +# ## a consumer can consume historical messages. This is +# ## useful when telegraf needs to restart it don't miss a +# ## message. You need to configure the nats-server. +# ## https://docs.nats.io/nats-concepts/jetstream. +# jetstream_subjects = ["js_telegraf"] +# # ## name a queue group # queue_group = "telegraf_consumers" # @@ -9128,6 +9492,9 @@ # ## Restricts Kubernetes monitoring to a single namespace # ## ex: monitor_kubernetes_pods_namespace = "default" # # monitor_kubernetes_pods_namespace = "" +# ## The name of the label for the pod that is being scraped. 
+# ## Default is 'namespace' but this can conflict with metrics that have the label 'namespace' +# # pod_namespace_label_name = "namespace" # # label selector to target pods which have the label # # kubernetes_label_selector = "env=dev,app=nginx" # # field selector to target pods diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf index 1d8f81265..f0ceda0a5 100644 --- a/etc/telegraf_windows.conf +++ b/etc/telegraf_windows.conf @@ -21,96 +21,96 @@ ## Environment variables can be used as tags, and throughout the config file # user = "$USER" -# Configuration for telegraf agent -[agent] - ## Default data collection interval for all inputs - interval = "10s" - ## Rounds collection interval to 'interval' - ## ie, if interval="10s" then always collect on :00, :10, :20, etc. - round_interval = true - - ## Telegraf will send metrics to outputs in batches of at most - ## metric_batch_size metrics. - ## This controls the size of writes that Telegraf sends to output plugins. - metric_batch_size = 1000 - - ## Maximum number of unwritten metrics per output. Increasing this value - ## allows for longer periods of output downtime without dropping metrics at the - ## cost of higher maximum memory usage. - metric_buffer_limit = 10000 - - ## Collection jitter is used to jitter the collection by a random amount. - ## Each plugin will sleep for a random time within jitter before collecting. - ## This can be used to avoid many plugins querying things like sysfs at the - ## same time, which can have a measurable effect on the system. - collection_jitter = "0s" - - ## Collection offset is used to shift the collection by the given amount. - ## This can be be used to avoid many plugins querying constraint devices - ## at the same time by manually scheduling them in time. - # collection_offset = "0s" - - ## Default flushing interval for all outputs. Maximum flush_interval will be - ## flush_interval + flush_jitter - flush_interval = "10s" - ## Jitter the flush interval by a random amount. This is primarily to avoid - ## large write spikes for users running a large number of telegraf instances. - ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - flush_jitter = "0s" - - ## Collected metrics are rounded to the precision specified. Precision is - ## specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s). - ## Valid time units are "ns", "us" (or "µs"), "ms", "s". - ## - ## By default or when set to "0s", precision will be set to the same - ## timestamp order as the collection interval, with the maximum being 1s: - ## ie, when interval = "10s", precision will be "1s" - ## when interval = "250ms", precision will be "1ms" - ## - ## Precision will NOT be used for service inputs. It is up to each individual - ## service input to set the timestamp at the appropriate precision. - precision = "0s" - - ## Log at debug level. - # debug = false - ## Log only error level messages. - # quiet = false - - ## Log target controls the destination for logs and can be one of "file", - ## "stderr" or, on Windows, "eventlog". When set to "file", the output file - ## is determined by the "logfile" setting. - # logtarget = "file" - - ## Name of the file to be logged to when using the "file" logtarget. If set to - ## the empty string then logs are written to stderr. - # logfile = "" - - ## The logfile will be rotated after the time interval specified. When set - ## to 0 no time based rotation is performed. 
Logs are rotated only when - ## written to, if there is no log activity rotation may be delayed. - # logfile_rotation_interval = "0h" - - ## The logfile will be rotated when it becomes larger than the specified - ## size. When set to 0 no size based rotation is performed. - # logfile_rotation_max_size = "0MB" - - ## Maximum number of rotated archives to keep, any older logs are deleted. - ## If set to -1, no archives are removed. - # logfile_rotation_max_archives = 5 - - ## Pick a timezone to use when logging or type 'local' for local time. - ## Example: America/Chicago - # log_with_timezone = "" - - ## Override default hostname, if empty use os.Hostname() - hostname = "" - ## If set to true, do no set the "host" tag in the telegraf agent. - omit_hostname = false - - ## Method of translating SNMP objects. Can be "netsnmp" which - ## translates by calling external programs snmptranslate and snmptable, - ## or "gosmi" which translates using the built-in gosmi library. - # snmp_translator = "netsnmp" +# Configuration for telegraf agent +[agent] + ## Default data collection interval for all inputs + interval = "10s" + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. + round_interval = true + + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. + metric_batch_size = 1000 + + ## Maximum number of unwritten metrics per output. Increasing this value + ## allows for longer periods of output downtime without dropping metrics at the + ## cost of higher maximum memory usage. + metric_buffer_limit = 10000 + + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. + collection_jitter = "0s" + + ## Collection offset is used to shift the collection by the given amount. + ## This can be be used to avoid many plugins querying constraint devices + ## at the same time by manually scheduling them in time. + # collection_offset = "0s" + + ## Default flushing interval for all outputs. Maximum flush_interval will be + ## flush_interval + flush_jitter + flush_interval = "10s" + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. + ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + flush_jitter = "0s" + + ## Collected metrics are rounded to the precision specified. Precision is + ## specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s). + ## Valid time units are "ns", "us" (or "µs"), "ms", "s". + ## + ## By default or when set to "0s", precision will be set to the same + ## timestamp order as the collection interval, with the maximum being 1s: + ## ie, when interval = "10s", precision will be "1s" + ## when interval = "250ms", precision will be "1ms" + ## + ## Precision will NOT be used for service inputs. It is up to each individual + ## service input to set the timestamp at the appropriate precision. + precision = "0s" + + ## Log at debug level. + # debug = false + ## Log only error level messages. + # quiet = false + + ## Log target controls the destination for logs and can be one of "file", + ## "stderr" or, on Windows, "eventlog". 
When set to "file", the output file + ## is determined by the "logfile" setting. + # logtarget = "file" + + ## Name of the file to be logged to when using the "file" logtarget. If set to + ## the empty string then logs are written to stderr. + # logfile = "" + + ## The logfile will be rotated after the time interval specified. When set + ## to 0 no time based rotation is performed. Logs are rotated only when + ## written to, if there is no log activity rotation may be delayed. + # logfile_rotation_interval = "0h" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # logfile_rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # logfile_rotation_max_archives = 5 + + ## Pick a timezone to use when logging or type 'local' for local time. + ## Example: America/Chicago + # log_with_timezone = "" + + ## Override default hostname, if empty use os.Hostname() + hostname = "" + ## If set to true, do no set the "host" tag in the telegraf agent. + omit_hostname = false + + ## Method of translating SNMP objects. Can be "netsnmp" which + ## translates by calling external programs snmptranslate and snmptable, + ## or "gosmi" which translates using the built-in gosmi library. + # snmp_translator = "netsnmp" ############################################################################### # OUTPUT PLUGINS # @@ -285,6 +285,10 @@ # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # +# ## Optional Proxy Configuration +# # use_proxy = false +# # proxy_url = "localhost:8888" +# # ## If true use batch serialization format instead of line based delimiting. # ## Only applies to data formats which are not line based such as JSON. # ## Recommended to set to true. @@ -484,6 +488,10 @@ # ## ex: endpoint_url = "http://localhost:8000" # # endpoint_url = "" # +# ## Set http_proxy +# # use_system_proxy = false +# # http_proxy_url = "http://localhost:8888" +# # ## Namespace for the CloudWatch MetricDatums # namespace = "InfluxData/Telegraf" # @@ -776,6 +784,11 @@ # ## Delay before the process is restarted after an unexpected termination # restart_delay = "10s" # +# ## Flag to determine whether execd should throw error when part of metrics is unserializable +# ## Setting this to true will skip the unserializable metrics and process the rest of metrics +# ## Setting this to false will throw error when encountering unserializable metrics and none will be processed +# # ignore_serialization_error = false +# # ## Data format to export. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -1118,6 +1131,53 @@ # debug = false +# # Save metrics to an IoTDB Database +# [[outputs.iotdb]] +# ## Configuration of IoTDB server connection +# host = "127.0.0.1" +# # port = "6667" +# +# ## Configuration of authentication +# # user = "root" +# # password = "root" +# +# ## Timeout to open a new session. +# ## A value of zero means no timeout. +# # timeout = "5s" +# +# ## Configuration of type conversion for 64-bit unsigned int +# ## IoTDB currently DOES NOT support unsigned integers (version 13.x). +# ## 32-bit unsigned integers are safely converted into 64-bit signed integers by the plugin, +# ## however, this is not true for 64-bit values in general as overflows may occur. +# ## The following setting allows to specify the handling of 64-bit unsigned integers. 
+# ## Available values are: +# ## - "int64" -- convert to 64-bit signed integers and accept overflows +# ## - "int64_clip" -- convert to 64-bit signed integers and clip the values on overflow to 9,223,372,036,854,775,807 +# ## - "text" -- convert to the string representation of the value +# # uint64_conversion = "int64_clip" +# +# ## Configuration of TimeStamp +# ## TimeStamp is always saved in 64bits int. timestamp_precision specifies the unit of timestamp. +# ## Available value: +# ## "second", "millisecond", "microsecond", "nanosecond"(default) +# # timestamp_precision = "nanosecond" +# +# ## Handling of tags +# ## Tags are not fully supported by IoTDB. +# ## A guide with suggestions on how to handle tags can be found here: +# ## https://iotdb.apache.org/UserGuide/Master/API/InfluxDB-Protocol.html +# ## +# ## Available values are: +# ## - "fields" -- convert tags to fields in the measurement +# ## - "device_id" -- attach tags to the device ID +# ## +# ## For Example, a metric named "root.sg.device" with the tags `tag1: "private"` and `tag2: "working"` and +# ## fields `s1: 100` and `s2: "hello"` will result in the following representations in IoTDB +# ## - "fields" -- root.sg.device, s1=100, s2="hello", tag1="private", tag2="working" +# ## - "device_id" -- root.sg.device.private.working, s1=100, s2="hello" +# # convert_tags_to = "device_id" + + # # Configuration for the Kafka server to send metrics to # [[outputs.kafka]] # ## URLs of kafka brokers @@ -1438,9 +1498,14 @@ # [[outputs.mqtt]] # ## MQTT Brokers # ## The list of brokers should only include the hostname or IP address and the -# ## port to the broker. This should follow the format '{host}:{port}'. For -# ## example, "localhost:1883" or "127.0.0.1:8883". -# servers = ["localhost:1883"] +# ## port to the broker. This should follow the format `[{scheme}://]{host}:{port}`. For +# ## example, `localhost:1883` or `mqtt://localhost:1883`. +# ## Scheme can be any of the following: tcp://, mqtt://, tls://, mqtts:// +# ## non-TLS and TLS servers can not be mix-and-matched. +# servers = ["localhost:1883", ] # or ["mqtts://tls.example.com:1883"] +# +# ## Protocol can be `3.1.1` or `5`. Default is `3.1.1` +# # procotol = "3.1.1" # # ## MQTT Topic for Producer Messages # ## MQTT outputs send metrics to this topic format: @@ -1597,6 +1662,17 @@ # ## Supports: "gzip", "none" # # compression = "gzip" # +# ## Configuration options for the Coralogix dialect +# ## Enable the following section of you use this plugin with a Coralogix endpoint +# # [outputs.opentelemetry.coralogix] +# # ## Your Coralogix private key (required). +# # ## Please note that this is sensitive data! +# # private_key = "your_coralogix_key" +# # +# # ## Application and subsystem names for the metrics (required) +# # application = "$NAMESPACE" +# # subsystem = "$HOSTNAME" +# # ## Additional OpenTelemetry resource attributes # # [outputs.opentelemetry.attributes] # # "service.name" = "demo" @@ -1634,6 +1710,88 @@ # separator = "_" +# # Publishes metrics to a postgresql database +# [[outputs.postgresql]] +# ## Specify connection address via the standard libpq connection string: +# ## host=... user=... password=... sslmode=... dbname=... +# ## Or a URL: +# ## postgres://[user[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] +# ## See https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING +# ## +# ## All connection parameters are optional. Environment vars are also supported. +# ## e.g. 
PGPASSWORD, PGHOST, PGUSER, PGDATABASE +# ## All supported vars can be found here: +# ## https://www.postgresql.org/docs/current/libpq-envars.html +# ## +# ## Non-standard parameters: +# ## pool_max_conns (default: 1) - Maximum size of connection pool for parallel (per-batch per-table) inserts. +# ## pool_min_conns (default: 0) - Minimum size of connection pool. +# ## pool_max_conn_lifetime (default: 0s) - Maximum age of a connection before closing. +# ## pool_max_conn_idle_time (default: 0s) - Maximum idle time of a connection before closing. +# ## pool_health_check_period (default: 0s) - Duration between health checks on idle connections. +# # connection = "" +# +# ## Postgres schema to use. +# # schema = "public" +# +# ## Store tags as foreign keys in the metrics table. Default is false. +# # tags_as_foreign_keys = false +# +# ## Suffix to append to table name (measurement name) for the foreign tag table. +# # tag_table_suffix = "_tag" +# +# ## Deny inserting metrics if the foreign tag can't be inserted. +# # foreign_tag_constraint = false +# +# ## Store all tags as a JSONB object in a single 'tags' column. +# # tags_as_jsonb = false +# +# ## Store all fields as a JSONB object in a single 'fields' column. +# # fields_as_jsonb = false +# +# ## Templated statements to execute when creating a new table. +# # create_templates = [ +# # '''CREATE TABLE {{ .table }} ({{ .columns }})''', +# # ] +# +# ## Templated statements to execute when adding columns to a table. +# ## Set to an empty list to disable. Points containing tags for which there is no column will be skipped. Points +# ## containing fields for which there is no column will have the field omitted. +# # add_column_templates = [ +# # '''ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}''', +# # ] +# +# ## Templated statements to execute when creating a new tag table. +# # tag_table_create_templates = [ +# # '''CREATE TABLE {{ .table }} ({{ .columns }}, PRIMARY KEY (tag_id))''', +# # ] +# +# ## Templated statements to execute when adding columns to a tag table. +# ## Set to an empty list to disable. Points containing tags for which there is no column will be skipped. +# # tag_table_add_column_templates = [ +# # '''ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}''', +# # ] +# +# ## The postgres data type to use for storing unsigned 64-bit integer values (Postgres does not have a native +# ## unsigned 64-bit integer type). +# ## The value can be one of: +# ## numeric - Uses the PostgreSQL "numeric" data type. +# ## uint8 - Requires pguint extension (https://github.com/petere/pguint) +# # uint64_type = "numeric" +# +# ## When using pool_max_conns>1, and a temporary error occurs, the query is retried with an incremental backoff. This +# ## controls the maximum backoff duration. +# # retry_max_backoff = "15s" +# +# ## Approximate number of tag IDs to store in in-memory cache (when using tags_as_foreign_keys). +# ## This is an optimization to skip inserting known tag IDs. +# ## Each entry consumes approximately 34 bytes of memory. +# # tag_cache_size = 100000 +# +# ## Enable & set the log level for the Postgres driver. +# # log_level = "warn" # trace, debug, info, warn, error, none + + # # Configuration for the Prometheus client to spawn # [[outputs.prometheus_client]] # ## Address to listen on. @@ -1678,6 +1836,22 @@ # # export_timestamp = false +# +# ## The address of the RedisTimeSeries server. 
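# ## Illustrative examples of the two connection forms accepted by the
# ## [[outputs.postgresql]] plugin described above (host, credentials and
# ## database name below are placeholders, not defaults):
# # connection = "host=localhost user=telegraf password=secret dbname=metrics sslmode=disable"
# ## or, equivalently, as a URL:
# # connection = "postgres://telegraf:secret@localhost/metrics?sslmode=disable"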
+# address = "127.0.0.1:6379" +# +# ## Redis ACL credentials +# # username = "" +# # password = "" +# # database = 0 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# # insecure_skip_verify = false + + # # Configuration for Riemann to send metrics to # [[outputs.riemann]] # ## The full TCP or UDP URL of the Riemann server @@ -1925,6 +2099,20 @@ # ## the unsigned option. This is useful for a database like ClickHouse where # ## the unsigned value should use a value like "uint64". # # conversion_style = "unsigned_suffix" +# +# ## Maximum amount of time a connection may be idle. "0s" means connections are +# ## never closed due to idle time. +# # connection_max_idle_time = "0s" +# +# ## Maximum amount of time a connection may be reused. "0s" means connections +# ## are never closed due to age. +# # connection_max_lifetime = "0s" +# +# ## Maximum number of connections in the idle connection pool. 0 means unlimited. +# # connection_max_idle = 2 +# +# ## Maximum number of open connections to the database. 0 means unlimited. +# # connection_max_open = 0 # # Configuration for Google Cloud Stackdriver to send metrics to @@ -1945,6 +2133,26 @@ # # location = "eu-north0" +# # Configuration for active mq with stomp protocol to send metrics to +# [[outputs.stomp]] +# host = "localhost:61613" +# +# ## Queue name for producer messages +# queueName = "telegraf" +# +# ## Username and password if required by the Active MQ server. +# # username = "" +# # password = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Data format to output. +# data_format = "json" + + # # A plugin that can send metrics to Sumo Logic HTTP metric collector. # [[outputs.sumologic]] # ## Unique URL generated for your HTTP Metrics Source. @@ -2223,22 +2431,20 @@ # # insecure_skip_verify = false -# # Configuration for Wavefront server to send metrics to # [[outputs.wavefront]] -# ## Url for Wavefront Direct Ingestion. For Wavefront Proxy Ingestion, see -# ## the 'host' and 'port' options below. +# ## Url for Wavefront API or Wavefront Proxy instance. # url = "https://metrics.wavefront.com" # -# ## Authentication Token for Wavefront. Only required if using Direct Ingestion +# ## Authentication Token for Wavefront. Required if using Direct Ingestion. Not required if using a Wavefront Proxy. # #token = "DUMMY_TOKEN" # -# ## Maximum number of metrics to send per batch for Direct Ingestion. Ignored unless 'url' is set. This value should be higher than the `metric_batch_size`. Default is 10,000. Values higher than 40,000 are not recommended. +# ## Maximum number of metrics to send per HTTP request. This value should be higher than the `metric_batch_size`. Default is 10,000. Values higher than 40,000 are not recommended. # # http_maximum_batch_size = 10000 # -# ## DNS name of the wavefront proxy server. Do not use if url is specified +# ## Deprecated. DNS name of the Wavefront server or Wavefront Proxy. Use the `url` field instead. # #host = "wavefront.example.com" # -# ## Port that the Wavefront proxy server listens on. Do not use if url is specified +# ## Deprecated. Wavefront proxy port. Use the `url` field instead. 
# #port = 2878 # # ## prefix for metrics keys @@ -2646,16 +2852,20 @@ # # additional_tag = "tag_value" -# # Parse a value in a specified field/tag(s) and add the result in a new metric +# # Parse a value in a specified field(s)/tag(s) and add the result in a new metric # [[processors.parser]] # ## The name of the fields whose value will be parsed. # parse_fields = ["message"] # +# ## The name of the tags whose value will be parsed. +# # parse_tags = [] +# # ## If true, incoming metrics are not emitted. -# drop_original = false +# # drop_original = false # # ## If set to override, emitted metrics will be merged by overriding the # ## original metric using the newly parsed metrics. +# ## Only has effect when drop_original is set to false. # merge = "override" # # ## The dataformat to be read from files @@ -3549,10 +3759,11 @@ # # insecure_skip_verify = false -# # Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster. +# # Collects performance metrics from the MON, OSD, MDS and RGW nodes +# # in a Ceph storage cluster. # [[inputs.ceph]] -# ## This is the recommended interval to poll. Too frequent and you will lose -# ## data points due to timeouts during rebalancing and recovery +# ## This is the recommended interval to poll. Too frequent and you +# ## will lose data points due to timeouts during rebalancing and recovery # interval = '1m' # # ## All configuration values are optional, defaults are shown below @@ -3572,9 +3783,9 @@ # ## suffix used to identify socket files # socket_suffix = "asok" # -# ## Ceph user to authenticate as, ceph will search for the corresponding keyring -# ## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the -# ## client section of ceph.conf for example: +# ## Ceph user to authenticate as, ceph will search for the corresponding +# ## keyring e.g. client.admin.keyring in /etc/ceph, or the explicit path +# ## defined in the client section of ceph.conf for example: # ## # ## [client.telegraf] # ## keyring = /etc/ceph/client.telegraf.keyring @@ -3588,8 +3799,8 @@ # ## Whether to gather statistics via the admin socket # gather_admin_socket_stats = true # -# ## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config -# ## to be specified +# ## Whether to gather statistics via ceph commands, requires ceph_user +# ## and ceph_config to be specified # gather_cluster_stats = false @@ -3622,7 +3833,8 @@ # # ## Amazon Credentials # ## Credentials are loaded in the following order -# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 1) Web identity provider credentials via STS if role_arn and +# ## web_identity_token_file are specified # ## 2) Assumed credentials via STS if role_arn is specified # ## 3) explicit credentials from 'access_key' and 'secret_key' # ## 4) shared profile from 'profile' @@ -3648,27 +3860,33 @@ # # use_system_proxy = false # # http_proxy_url = "http://localhost:8888" # -# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all -# # metrics are made available to the 1 minute period. Some are collected at -# # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring. -# # Note that if a period is configured that is smaller than the minimum for a -# # particular metric, that metric will not be returned by the Cloudwatch API -# # and will not be collected by Telegraf. +# ## The minimum period for Cloudwatch metrics is 1 minute (60s). 
However not +# ## all metrics are made available to the 1 minute period. Some are collected +# ## at 3 minute, 5 minute, or larger intervals. +# ## See https://aws.amazon.com/cloudwatch/faqs/#monitoring. +# ## Note that if a period is configured that is smaller than the minimum for a +# ## particular metric, that metric will not be returned by the Cloudwatch API +# ## and will not be collected by Telegraf. # # -# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s) +# ## Requested CloudWatch aggregation Period (required) +# ## Must be a multiple of 60s. # period = "5m" # -# ## Collection Delay (required - must account for metrics availability via CloudWatch API) +# ## Collection Delay (required) +# ## Must account for metrics availability via CloudWatch API # delay = "5m" # # ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid # ## gaps or overlap in pulled data # interval = "5m" # -# ## Recommended if "delay" and "period" are both within 3 hours of request time. Invalid values will be ignored. -# ## Recently Active feature will only poll for CloudWatch ListMetrics values that occurred within the last 3 Hours. -# ## If enabled, it will reduce total API usage of the CloudWatch ListMetrics API and require less memory to retain. -# ## Do not enable if "period" or "delay" is longer than 3 hours, as it will not return data more than 3 hours old. +# ## Recommended if "delay" and "period" are both within 3 hours of request +# ## time. Invalid values will be ignored. Recently Active feature will only +# ## poll for CloudWatch ListMetrics values that occurred within the last 3h. +# ## If enabled, it will reduce total API usage of the CloudWatch ListMetrics +# ## API and require less memory to retain. +# ## Do not enable if "period" or "delay" is longer than 3 hours, as it will +# ## not return data more than 3 hours old. # ## See https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html # #recently_active = "PT3H" # @@ -3677,21 +3895,27 @@ # # ## Metric Statistic Namespaces (required) # namespaces = ["AWS/ELB"] -# # A single metric statistic namespace that will be appended to namespaces on startup +# ## Single metric statistic namespace appended to namespaces on startup # # namespace = "AWS/ELB" # -# ## Maximum requests per second. Note that the global default AWS rate limit is -# ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a -# ## maximum of 50. +# ## Maximum requests per second. Note that the global default AWS rate limit +# ## is 50 reqs/sec, so if you define multiple namespaces, these should add up +# ## to a maximum of 50. # ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html # # ratelimit = 25 # # ## Timeout for http requests made by the cloudwatch client. # # timeout = "5s" # +# ## Batch Size +# ## The size of each batch to send requests to Cloudwatch. 500 is the +# ## suggested largest size. If a request gets to large (413 errors), consider +# ## reducing this amount. +# # batch_size = 500 +# # ## Namespace-wide statistic filters. These allow fewer queries to be made to # ## cloudwatch. -# # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ] +# # statistic_include = ["average", "sum", "minimum", "maximum", sample_count"] # # statistic_exclude = [] # # ## Metrics to Pull @@ -3702,11 +3926,12 @@ # # # # ## Statistic filters for Metric. 
These allow for retrieving specific # # ## statistics for an individual metric. -# # # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ] +# # # statistic_include = ["average", "sum", "minimum", "maximum", sample_count"] # # # statistic_exclude = [] # # -# # ## Dimension filters for Metric. All dimensions defined for the metric names -# # ## must be specified in order to retrieve the metric statistics. +# # ## Dimension filters for Metric. +# # ## All dimensions defined for the metric names must be specified in order +# # ## to retrieve the metric statistics. # # ## 'value' has wildcard / 'glob' matching support such as 'p-*'. # # [[inputs.cloudwatch.metrics.dimensions]] # # name = "LoadBalancerName" @@ -4059,20 +4284,22 @@ # ## Set cluster_stats to true when you want to obtain cluster stats. # cluster_stats = false # -# ## Only gather cluster_stats from the master node. To work this require local = true +# ## Only gather cluster_stats from the master node. +# ## To work this require local = true # cluster_stats_only_from_master = true # # ## Indices to collect; can be one or more indices names or _all -# ## Use of wildcards is allowed. Use a wildcard at the end to retrieve index names that end with a changing value, like a date. +# ## Use of wildcards is allowed. Use a wildcard at the end to retrieve index +# ## names that end with a changing value, like a date. # indices_include = ["_all"] # # ## One of "shards", "cluster", "indices" # ## Currently only "shards" is implemented # indices_level = "shards" # -# ## node_stats is a list of sub-stats that you want to have gathered. Valid options -# ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http", -# ## "breaker". Per default, all stats are gathered. +# ## node_stats is a list of sub-stats that you want to have gathered. +# ## Valid options are "indices", "os", "process", "jvm", "thread_pool", +# ## "fs", "transport", "http", "breaker". Per default, all stats are gathered. # # node_stats = ["jvm", "http"] # # ## HTTP Basic Authentication username and password. @@ -4086,10 +4313,12 @@ # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # -# ## Sets the number of most recent indices to return for indices that are configured with a date-stamped suffix. -# ## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all indices that match it, and -# ## sort them by the date or number after the wildcard. Metrics then are gathered for only the 'num_most_recent_indices' amount of most -# ## recent indices. +# ## Sets the number of most recent indices to return for indices that are +# ## configured with a date-stamped suffix. Each 'indices_include' entry +# ## ending with a wildcard (*) or glob matching pattern will group together +# ## all indices that match it, and sort them by the date or number after +# ## the wildcard. Metrics then are gathered for only the +# ## 'num_most_recent_indices' amount of most recent indices. # # num_most_recent_indices = 0 @@ -4155,7 +4384,8 @@ # # metric_function = "avg" # # ## Fields to be used as tags -# ## Must be text, non-analyzed fields. Metric aggregations are performed per tag +# ## Must be text, non-analyzed fields. 
Metric aggregations are performed +# ## per tag # # tags = ["field.keyword", "field2.keyword"] # # ## Set to true to not ignore documents when the tag(s) above are missing @@ -4486,6 +4716,8 @@ # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" +# ## Minimal TLS version to accept by the client +# # tls_min_version = "TLS12" # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # @@ -4518,7 +4750,8 @@ # ## List of urls to query. # # urls = ["http://localhost"] # -# ## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set) +# ## Set http_proxy. +# ## Telegraf uses the system wide proxy settings if it's is not set. # # http_proxy = "http://localhost:8888" # # ## Set response_timeout (default 5 seconds) @@ -4544,12 +4777,14 @@ # # ''' # # ## Optional name of the field that will contain the body of the response. -# ## By default it is set to an empty String indicating that the body's content won't be added +# ## By default it is set to an empty String indicating that the body's +# ## content won't be added # # response_body_field = '' # # ## Maximum allowed HTTP response body size in bytes. # ## 0 means to use the default of 32MiB. -# ## If the response body size exceeds this limit a "body_read_error" will be raised +# ## If the response body size exceeds this limit a "body_read_error" will +# ## be raised. # # response_body_max_size = "32MiB" # # ## Optional substring or regex match in body of the response (case sensitive) @@ -4558,9 +4793,10 @@ # # response_string_match = "\".*_status\".?:.?\"up\"" # # ## Expected response status code. -# ## The status code of the response is compared to this value. If they match, the field -# ## "response_status_code_match" will be 1, otherwise it will be 0. If the -# ## expected status code is 0, the check is disabled and the field won't be added. +# ## The status code of the response is compared to this value. If they match, +# ## the field "response_status_code_match" will be 1, otherwise it will be 0. +# ## If the expected status code is 0, the check is disabled and the field +# ## won't be added. # # response_status_code = 0 # # ## Optional TLS Config @@ -4577,8 +4813,9 @@ # # Host = "github.com" # # ## Optional setting to map response http headers into tags -# ## If the http header is not present on the request, no corresponding tag will be added -# ## If multiple instances of the http header are present, only the first value will be used +# ## If the http header is not present on the request, no corresponding tag will +# ## be added. If multiple instances of the http header are present, only the +# ## first value will be used. # # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} # # ## Interface to use when dialing an address @@ -4901,6 +5138,10 @@ # # password = "" # # response_timeout = "5s" # +# ## Optional origin URL to include as a header in the request. Some endpoints +# ## may reject an empty origin. +# # origin = "" +# # ## Optional TLS config # # tls_ca = "/var/private/ca.pem" # # tls_cert = "/var/private/client.pem" @@ -4926,6 +5167,10 @@ # # password = "" # # response_timeout = "5s" # +# ## Optional origin URL to include as a header in the request. Some endpoints +# ## may reject an empty origin. +# # origin = "" +# # ## Optional TLS config # # tls_ca = "/var/private/ca.pem" # # tls_cert = "/var/private/client.pem" @@ -4995,10 +5240,15 @@ # # namespace = "default" # # ## Use bearer token for authorization. 
('bearer_token' takes priority) +# ## # ## If both of these are empty, we'll use the default serviceaccount: # ## at: /run/secrets/kubernetes.io/serviceaccount/token -# # bearer_token = "/path/to/bearer/token" +# ## +# ## To auto-refresh the token, please use a file with the bearer_token option. +# ## If given a string, Telegraf cannot refresh the token periodically. +# # bearer_token = "/run/secrets/kubernetes.io/serviceaccount/token" # ## OR +# ## deprecated in 1.24.0; use bearer_token with a file # # bearer_token_string = "abc_123" # # ## Set response_timeout (default 5 seconds) @@ -5006,8 +5256,9 @@ # # ## Optional Resources to exclude from gathering # ## Leave them with blank with try to gather everything available. -# ## Values can be - "daemonsets", deployments", "endpoints", "ingress", "nodes", -# ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets" +# ## Values can be - "daemonsets", deployments", "endpoints", "ingress", +# ## "nodes", "persistentvolumes", "persistentvolumeclaims", "pods", "services", +# ## "statefulsets" # # resource_exclude = [ "deployments", "nodes", "statefulsets" ] # # ## Optional Resources to include when gathering @@ -5044,7 +5295,11 @@ # ## Use bearer token for authorization. ('bearer_token' takes priority) # ## If both of these are empty, we'll use the default serviceaccount: # ## at: /run/secrets/kubernetes.io/serviceaccount/token -# # bearer_token = "/path/to/bearer/token" +# ## +# ## To re-read the token at each interval, please use a file with the +# ## bearer_token option. If given a string, Telegraf will always use that +# ## token. +# # bearer_token = "/run/secrets/kubernetes.io/serviceaccount/token" # ## OR # # bearer_token_string = "abc_123" # @@ -5417,7 +5672,7 @@ # { address=3, name="motor1_overheating"}, # ] # -# [[inputs.modbus.request.tags]] +# [inputs.modbus.request.tags] # machine = "impresser" # location = "main building" # @@ -5436,7 +5691,7 @@ # { address=8, name="power_factor", type="INT64", scale=0.01 }, # ] # -# [[inputs.modbus.request.tags]] +# [inputs.modbus.request.tags] # machine = "impresser" # location = "main building" # @@ -5452,7 +5707,7 @@ # { address=4, name="hours", type="UINT32" }, # will result in UIN64 field # ] # -# [[inputs.modbus.request.tags]] +# [inputs.modbus.request.tags] # machine = "impresser" # location = "main building" # @@ -5506,6 +5761,12 @@ # # tls_key = "/etc/telegraf/key.pem" # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false +# +# ## Specifies plugin behavior regarding disconnected servers +# ## Available choices : +# ## - error: telegraf will return an error on startup if one the servers is unreachable +# ## - skip: telegraf will skip unreachable servers on both startup and gather +# # disconnected_servers_behavior = "error" # # Read metrics and status information about processes managed by Monit @@ -5581,7 +5842,8 @@ # ## if the list is empty, then metrics are gathered from all database tables # # table_schema_databases = [] # -# ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list +# ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided +# ## in the list above # # gather_table_schema = false # # ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST @@ -5647,7 +5909,7 @@ # ## example: interval_slow = "30m" # # interval_slow = "" # -# ## Optional TLS Config (will be used if tls=custom parameter specified in server uri) +# ## Optional TLS Config (used if tls=custom parameter 
specified in server uri) # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" @@ -5929,8 +6191,25 @@ # # Get standard NTP query metrics, requires ntpq executable. # [[inputs.ntpq]] +# ## Servers to query with ntpq. +# ## If no server is given, the local machine is queried. +# # servers = [] +# # ## If false, set the -n ntpq flag. Can reduce metric gather time. -# dns_lookup = true +# ## DEPRECATED since 1.24.0: add '-n' to 'options' instead to skip DNS lookup +# # dns_lookup = true +# +# ## Options to pass to the ntpq command. +# # options = "-p" +# +# ## Output format for the 'reach' field. +# ## Available values are +# ## octal -- output as is in octal representation e.g. 377 (default) +# ## decimal -- convert value to decimal representation e.g. 371 -> 249 +# ## count -- count the number of bits in the value. This represents +# ## the number of successful reaches, e.g. 37 -> 5 +# ## ratio -- output the ratio of successful attempts e.g. 37 -> 5/8 = 0.625 +# # reach_format = "octal" # # Pulls statistics from nvidia GPUs attached to the host @@ -6351,7 +6630,11 @@ # ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /. # base_url = "https://localhost:8006/api2/json" # api_token = "USER@REALM!TOKENID=UUID" +# # ## Node name, defaults to OS hostname +# ## Unless Telegraf is on the same host as Proxmox, setting this is required +# ## for Telegraf to successfully connect to Proxmox. If not on the same host, +# ## leaving this empty will often lead to a "search domain is not set" error. # # node_name = "" # # ## Optional TLS Config @@ -6881,6 +7164,17 @@ # # value = 'one_of("sda", "sdb")' +# # Gathers information about processes that running under supervisor using XML-RPC API +# [[inputs.supervisor]] +# ## Url of supervisor's XML-RPC endpoint if basic auth enabled in supervisor http server, +# ## than you have to add credentials to url (ex. http://login:pass@localhost:9001/RPC2) +# # url="http://localhost:9001/RPC2" +# ## With settings below you can manage gathering additional information about processes +# ## If both of them empty, then all additional information will be collected. +# ## Currently supported supported additional metrics are: pid, rc +# # metrics_include = [] + + # # Get synproxy counter statistics from procfs # [[inputs.synproxy]] # # no configuration @@ -7403,18 +7697,21 @@ # # public_key_id = "" # # role_name = "" # -# ## Specify the ali cloud region list to be queried for metrics and objects discovery -# ## If not set, all supported regions (see below) would be covered, it can provide a significant load on API, so the recommendation here -# ## is to limit the list as much as possible. Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm +# ## Specify ali cloud regions to be queried for metric and object discovery +# ## If not set, all supported regions (see below) would be covered, it can +# ## provide a significant load on API, so the recommendation here is to +# ## limit the list as much as possible. 
+# ## Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm # ## Default supported regions are: -# ## 21 items: cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,cn-shanghai,cn-shenzhen, -# ## cn-heyuan,cn-chengdu,cn-hongkong,ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5, -# ## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,eu-west-1,me-east-1 +# ## cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou, +# ## cn-shanghai, cn-shenzhen, cn-heyuan,cn-chengdu,cn-hongkong, +# ## ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5, +# ## ap-south-1,ap-northeast-1, us-west-1,us-east-1,eu-central-1, +# ## eu-west-1,me-east-1 # ## -# ## From discovery perspective it set the scope for object discovery, the discovered info can be used to enrich -# ## the metrics with objects attributes/tags. Discovery is supported not for all projects (if not supported, then -# ## it will be reported on the start - for example for 'acs_cdn' project: -# ## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' ) +# ## From discovery perspective it set the scope for object discovery, +# ## the discovered info can be used to enrich the metrics with objects +# ## attributes/tags. Discovery is not supported for all projects. # ## Currently, discovery supported for the following projects: # ## - acs_ecs_dashboard # ## - acs_rds_dashboard @@ -7422,22 +7719,23 @@ # ## - acs_vpc_eip # regions = ["cn-hongkong"] # -# # The minimum period for AliyunCMS metrics is 1 minute (60s). However not all -# # metrics are made available to the 1 minute period. Some are collected at -# # 3 minute, 5 minute, or larger intervals. -# # See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv -# # Note that if a period is configured that is smaller than the minimum for a -# # particular metric, that metric will not be returned by the Aliyun OpenAPI -# # and will not be collected by Telegraf. -# # -# ## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s) +# ## Requested AliyunCMS aggregation Period (required) +# ## The period must be multiples of 60s and the minimum for AliyunCMS metrics +# ## is 1 minute (60s). However not all metrics are made available to the +# ## one minute period. Some are collected at 3 minute, 5 minute, or larger +# ## intervals. +# ## See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv +# ## Note that if a period is configured that is smaller than the minimum for +# ## a particular metric, that metric will not be returned by Aliyun's +# ## OpenAPI and will not be collected by Telegraf. # period = "5m" # -# ## Collection Delay (required - must account for metrics availability via AliyunCMS API) +# ## Collection Delay (required) +# ## The delay must account for metrics availability via AliyunCMS API. 
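# ## Illustrative timing pairing (example values only, not defaults):
# ## period = "1m", delay = "1m", interval = "5m" keeps the gather interval
# ## a whole multiple of the aggregation period, so pulled windows neither
# ## gap nor overlap.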
# delay = "1m" # -# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid -# ## gaps or overlap in pulled data +# ## Recommended: use metric 'interval' that is a multiple of 'period' +# ## to avoid gaps or overlap in pulled data # interval = "5m" # # ## Metric Statistic Project (required) @@ -7452,15 +7750,19 @@ # ## Metrics to Pull (Required) # [[inputs.aliyuncms.metrics]] # ## Metrics names to be requested, -# ## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq +# ## Description can be found here (per project): +# ## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq # names = ["InstanceActiveConnection", "InstanceNewConnection"] # -# ## Dimension filters for Metric (these are optional). -# ## This allows to get additional metric dimension. If dimension is not specified it can be returned or -# ## the data can be aggregated - it depends on particular metric, you can find details here: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq +# ## Dimension filters for Metric (optional). +# ## This allows to get additional metric dimension. If dimension is not +# ## specified it can be returned or the data can be aggregated - it depends +# ## on particular metric, you can find details here: +# ## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq # ## -# ## Note, that by default dimension filter includes the list of discovered objects in scope (if discovery is enabled) -# ## Values specified here would be added into the list of discovered objects. +# ## Note, that by default dimension filter includes the list of discovered +# ## objects in scope (if discovery is enabled). +# # Values specified here would be added into the list of discovered objects. # ## You can specify either single dimension: # #dimensions = '{"instanceId": "p-example"}' # @@ -7469,19 +7771,22 @@ # # ## Enrichment tags, can be added from discovery (if supported) # ## Notation is : -# ## To figure out which fields are available, consult the Describe API per project. +# ## To figure out which fields are available, consult the Describe +# ## API per project. # ## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers¶ms={}&tab=MOCK&lang=GO # #tag_query_path = [ # # "address:Address", # # "name:LoadBalancerName", # # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]" # # ] -# ## The following tags added by default: regionId (if discovery enabled), userId, instanceId. +# ## The following tags added by default: +# ## regionId (if discovery enabled), userId, instanceId. # -# ## Allow metrics without discovery data, if discovery is enabled. If set to true, then metric without discovery -# ## data would be emitted, otherwise dropped. This cane be of help, in case debugging dimension filters, or partial coverage -# ## of discovery scope vs monitoring scope -# #allow_dps_without_discovery = false +# ## Allow metrics without discovery data, if discovery is enabled. +# ## If set to true, then metric without discovery data would be emitted, otherwise dropped. 
+# ## This cane be of help, in case debugging dimension filters, or partial coverage of +# ## discovery scope vs monitoring scope +# # allow_dps_without_discovery = false # # AMQP consumer plugin @@ -7618,43 +7923,52 @@ # # dnpath = '{"Name": "show ip route summary","prop": [{"Key": "routes","Value": "string"}, {"Key": "best-paths","Value": "string"}]}' # # dnpath2 = '{"Name": "show processes cpu","prop": [{"Key": "kernel_percent","Value": "float"}, {"Key": "idle_percent","Value": "float"}, {"Key": "process","Value": "string"}, {"Key": "user_percent","Value": "float"}, {"Key": "onesec","Value": "float"}]}' # # dnpath3 = '{"Name": "show processes memory physical","prop": [{"Key": "processname","Value": "string"}]}' +# +# ## Additional GRPC connection settings. +# [inputs.cisco_telemetry_mdt.grpc_enforcement_policy] +# ## GRPC permit keepalives without calls, set to true if your clients are +# ## sending pings without calls in-flight. This can sometimes happen on IOS-XE +# ## devices where the GRPC connection is left open but subscriptions have been +# ## removed, and adding subsequent subscriptions does not keep a stable session. +# # permit_keepalive_without_calls = false +# +# ## GRPC minimum timeout between successive pings, decreasing this value may +# ## help if this plugin is closing connections with ENHANCE_YOUR_CALM (too_many_pings). +# # keepalive_minimum_time = "5m" # # Read metrics from one or many ClickHouse servers # [[inputs.clickhouse]] # ## Username for authorization on ClickHouse server -# ## example: username = "default" # username = "default" # # ## Password for authorization on ClickHouse server -# ## example: password = "super_secret" +# # password = "" # # ## HTTP(s) timeout while getting metrics values -# ## The timeout includes connection time, any redirects, and reading the response body. -# ## example: timeout = 1s +# ## The timeout includes connection time, any redirects, and reading the +# ## response body. # # timeout = 5s # # ## List of servers for metrics scraping # ## metrics scrape via HTTP(s) clickhouse interface # ## https://clickhouse.tech/docs/en/interfaces/http/ -# ## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"] -# servers = ["http://127.0.0.1:8123"] +# servers = ["http://127.0.0.1:8123"] # -# ## If "auto_discovery"" is "true" plugin tries to connect to all servers available in the cluster -# ## with using same "user:password" described in "user" and "password" parameters -# ## and get this server hostname list from "system.clusters" table -# ## see +# ## If "auto_discovery"" is "true" plugin tries to connect to all servers +# ## available in the cluster with using same "user:password" described in +# ## "user" and "password" parameters and get this server hostname list from +# ## "system.clusters" table. 
See # ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters # ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers # ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/ # ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables -# ## example: auto_discovery = false # # auto_discovery = true # # ## Filter cluster names in "system.clusters" when "auto_discovery" is "true" # ## when this filter present then "WHERE cluster IN (...)" filter will apply -# ## please use only full cluster names here, regexp and glob filters is not allowed -# ## for "/etc/clickhouse-server/config.d/remote.xml" +# ## please use only full cluster names here, regexp and glob filters is not +# ## allowed for "/etc/clickhouse-server/config.d/remote.xml" # ## # ## # ## @@ -7674,8 +7988,9 @@ # ## example: cluster_include = ["my-own-cluster"] # # cluster_include = [] # -# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true" -# ## when this filter present then "WHERE cluster NOT IN (...)" filter will apply +# ## Filter cluster names in "system.clusters" when "auto_discovery" is +# ## "true" when this filter present then "WHERE cluster NOT IN (...)" +# ## filter will apply # ## example: cluster_exclude = ["my-internal-not-discovered-cluster"] # # cluster_exclude = [] # @@ -7776,16 +8091,17 @@ # # ## Maximum duration before timing out read of the request # # read_timeout = "10s" -# ## Maximum duration before timing out write of the response. This should be set to a value -# ## large enough that you can send at least 'metric_batch_size' number of messages within the -# ## duration. +# ## Maximum duration before timing out write of the response. This should be +# ## set to a value large enough that you can send at least 'metric_batch_size' +# ## number of messages within the duration. # # write_timeout = "10s" # # ## Maximum allowed http request body size in bytes. # ## 0 means to use the default of 524,288,00 bytes (500 mebibytes) # # max_body_size = "500MB" # -# ## Whether to add the pubsub metadata, such as message attributes and subscription as a tag. +# ## Whether to add the pubsub metadata, such as message attributes and +# ## subscription as a tag. # # add_meta = false # # ## Optional. Maximum messages to read from PubSub that have not been written @@ -7835,7 +8151,8 @@ # ## Optional access key for Firehose security. # # access_key = "test-key" # -# ## An optional flag to keep Metric Streams metrics compatible with CloudWatch's API naming +# ## An optional flag to keep Metric Streams metrics compatible with +# ## CloudWatch's API naming # # api_compatability = false # # ## Set one or more allowed client CA certificate file names to @@ -7849,12 +8166,15 @@ # # Ingests files in a directory and then moves them to a target directory. # [[inputs.directory_monitor]] -# ## The directory to monitor and read files from. +# ## The directory to monitor and read files from (including sub-directories if "recursive" is true). # directory = "" # # -# ## The directory to move finished files to. +# ## The directory to move finished files to (maintaining directory hierachy from source). # finished_directory = "" # # +# ## Setting recursive to true will make the plugin recursively walk the directory and process all sub-directories. +# # recursive = false +# # # ## The directory to move files to upon file error. 
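# ## Illustrative sketch of the recursive mode described above
# ## (all paths below are placeholders):
# # directory = "/var/spool/telegraf/incoming"
# # finished_directory = "/var/spool/telegraf/done"
# # recursive = true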
# ## If not provided, erroring files will stay in the monitored directory. # # error_directory = "" @@ -8072,6 +8392,9 @@ # ## enable client-side TLS and define CA to authenticate the device # # enable_tls = true # # tls_ca = "/etc/telegraf/ca.pem" +# ## Minimal TLS version to accept by the client +# # tls_min_version = "TLS12" +# ## Use TLS but skip chain & host verification # # insecure_skip_verify = true # # ## define client-side TLS certificate & key to authenticate to the device @@ -8209,6 +8532,9 @@ # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" # +# ## Minimal TLS version accepted by the server +# # tls_min_version = "TLS12" +# # ## Optional username and password to accept for HTTP basic authentication. # ## You probably want to make sure you have TLS configured above for this. # # basic_username = "foobar" @@ -8349,6 +8675,8 @@ # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" +# ## Minimal TLS version to accept by the client +# # tls_min_version = "TLS12" # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # @@ -8454,6 +8782,13 @@ # ## '2 * max_processing_time'. # # max_processing_time = "100ms" # +# ## The default number of message bytes to fetch from the broker in each +# ## request (default 1MB). This should be larger than the majority of +# ## your messages, or else the consumer will spend a lot of time +# ## negotiating sizes and not actually consuming. Similar to the JVM's +# ## `fetch.message.max.bytes`. +# # consumer_fetch_default = "1MB" +# # ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -8733,8 +9068,19 @@ # servers = ["nats://localhost:4222"] # # ## subject(s) to consume +# ## If you use jetstream you need to set the subjects +# ## in jetstream_subjects # subjects = ["telegraf"] # +# ## jetstream subjects +# ## jetstream is a streaming technology inside of nats. +# ## With jetstream the nats-server persists messages and +# ## a consumer can consume historical messages. This is +# ## useful when telegraf needs to restart it don't miss a +# ## message. You need to configure the nats-server. +# ## https://docs.nats.io/nats-concepts/jetstream. +# jetstream_subjects = ["js_telegraf"] +# # ## name a queue group # queue_group = "telegraf_consumers" # @@ -8989,6 +9335,9 @@ # ## Restricts Kubernetes monitoring to a single namespace # ## ex: monitor_kubernetes_pods_namespace = "default" # # monitor_kubernetes_pods_namespace = "" +# ## The name of the label for the pod that is being scraped. +# ## Default is 'namespace' but this can conflict with metrics that have the label 'namespace' +# # pod_namespace_label_name = "namespace" # # label selector to target pods which have the label # # kubernetes_label_selector = "env=dev,app=nginx" # # field selector to target pods
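# ## Illustrative sketch of the Kubernetes scraping options above: restrict
# ## scraping to a single namespace and rename the namespace label so it does
# ## not clash with metrics that already carry a 'namespace' label
# ## (values below are placeholders):
# # monitor_kubernetes_pods_namespace = "monitoring"
# # pod_namespace_label_name = "pod_namespace"
# # kubernetes_label_selector = "app=nginx"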