Update changelog

(cherry picked from commit 6cdf98fb58afc1263c522d7d42ee524024026494)
David Reimschussel 2021-03-03 14:02:36 -07:00
parent 431d06acc0
commit dd9e924832
2 changed files with 500 additions and 37 deletions

CHANGELOG.md

@@ -1,3 +1,66 @@
## v1.18.0-rc0 [2021-03-03]
#### Bugfixes
- [#7312](https://github.com/influxdata/telegraf/pull/7312) `inputs.docker` Make perdevice also affect CPU and add class granularity through perdevice_include/total_include
- [#8397](https://github.com/influxdata/telegraf/pull/8397) `outputs.dynatrace` Make conversion to counters possible and change large-bulk handling
- [#8655](https://github.com/influxdata/telegraf/pull/8655) `inputs.sqlserver` Fix default server list
- [#8703](https://github.com/influxdata/telegraf/pull/8703) `inputs.docker` Use consistent container name in docker input plugin
- [#8902](https://github.com/influxdata/telegraf/pull/8902) `inputs.snmp` Fix max_repetitions signedness issues
- [#8817](https://github.com/influxdata/telegraf/pull/8817) `outputs.kinesis` Log record error count
- [#8833](https://github.com/influxdata/telegraf/pull/8833) `inputs.sqlserver` Fix SQL Server HADR queries across SQL Server versions
- [#8628](https://github.com/influxdata/telegraf/pull/8628) `inputs.modbus` Fix reading multiple holding registers
- [#8885](https://github.com/influxdata/telegraf/pull/8885) `inputs.statsd` Fix statsd concurrency bug
- [#8393](https://github.com/influxdata/telegraf/pull/8393) `inputs.sqlserver` SQL Perfmon counters - synced queries from v2 to all db types
- [#8873](https://github.com/influxdata/telegraf/pull/8873) `processors.ifname` Fix mutex locking around ifname cache
- [#8720](https://github.com/influxdata/telegraf/pull/8720) `parsers.influx` Remove ambiguity on '\v' in the line-protocol parser
- [#8678](https://github.com/influxdata/telegraf/pull/8678) `inputs.redis` Fix Redis output field type inconsistencies
#### Features
- [#8887](https://github.com/influxdata/telegraf/pull/8887) `inputs.procstat` Add PPID field to procstat input plugin
- [#8852](https://github.com/influxdata/telegraf/pull/8852) `processors.starlark` Add Starlark script for estimating Line Protocol cardinality
- [#8828](https://github.com/influxdata/telegraf/pull/8828) `serializers.msgpack` Add MessagePack output data format
- [#8915](https://github.com/influxdata/telegraf/pull/8915) `inputs.cloudwatch` Add HTTP proxy support
- [#8910](https://github.com/influxdata/telegraf/pull/8910) Display an error message on a badly formatted config string array (e.g. namepass)
- [#8785](https://github.com/influxdata/telegraf/pull/8785) `inputs.diskio` Add non-systemd support with unit tests
- [#8850](https://github.com/influxdata/telegraf/pull/8850) `inputs.snmp` Support more snmpv3 authentication protocols
- [#8813](https://github.com/influxdata/telegraf/pull/8813) `inputs.redfish` Add member_id as a tag (it is a unique value) and include the server address when the status is other than 200, for better debugging
- [#8613](https://github.com/influxdata/telegraf/pull/8613) `inputs.phpfpm` Support exclamation mark to create a non-matching list in the tail plugin
- [#8179](https://github.com/influxdata/telegraf/pull/8179) `inputs.statsd` Add support for Datadog distribution metrics
- [#8803](https://github.com/influxdata/telegraf/pull/8803) Add default retry for loading config via URL
- [#8816](https://github.com/influxdata/telegraf/pull/8816) Code Signing for Windows
- [#8772](https://github.com/influxdata/telegraf/pull/8772) `processors.starlark` Allow providing constants to a Starlark script
- [#8749](https://github.com/influxdata/telegraf/pull/8749) `outputs.newrelic` Add HTTP proxy setting to New Relic output plugin
- [#8543](https://github.com/influxdata/telegraf/pull/8543) `inputs.elasticsearch` Add configurable number of 'most recent' date-stamped indices to gather in Elasticsearch input
#### New Input Plugins
- [#8834](https://github.com/influxdata/telegraf/pull/8834) Add RavenDB input plugin
- [#8525](https://github.com/influxdata/telegraf/pull/8525) Add CSGO SRCDS input plugin
- [#8751](https://github.com/influxdata/telegraf/pull/8751) Add directory monitor input plugin
- [#6653](https://github.com/influxdata/telegraf/pull/6653) Add Beat input plugin
#### New Output Plugins
- [#8398](https://github.com/influxdata/telegraf/pull/8398) Add Sensu Go output plugin
- [#8450](https://github.com/influxdata/telegraf/pull/8450) Add Loki output plugin
- [#6714](https://github.com/influxdata/telegraf/pull/6714) Add SignalFx output plugin
#### New Aggregator Plugins
- [#3762](https://github.com/influxdata/telegraf/pull/3762) Add derivative aggregator plugin
- [#8594](https://github.com/influxdata/telegraf/pull/8594) Add quantile aggregator plugin
#### New Processor Plugins
- [#8707](https://github.com/influxdata/telegraf/pull/8707) Add AWS EC2 metadata processor (using StreamingProcessor)
#### New External Plugins
- [#8897](https://github.com/influxdata/telegraf/pull/8897) Add SMCIPMITool input to external plugin list
## v1.17.3 [2021-02-17]
#### Bugfixes

etc/telegraf.conf

@@ -496,6 +496,9 @@
#
# ## Connection timeout, defaults to "5s" if not set.
# timeout = "5s"
#
# ## If you want to convert values represented as gauges to counters, add the metric names here
# additional_counters = [ ]
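# ##
# ## For example (the metric names below are illustrative, not defaults):
# ##   additional_counters = ["service.requests", "service.errors"]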
# # Configuration for Elasticsearch to send metrics to.
@@ -1066,6 +1069,33 @@
# # url = "https://listener.logz.io:8071"
# # Send logs to Loki
# [[outputs.loki]]
# ## The domain of Loki
# domain = "https://loki.domain.tld"
#
# ## Endpoint for the write API
# # endpoint = "/loki/api/v1/push"
#
# ## Connection timeout, defaults to "5s" if not set.
# # timeout = "5s"
#
# ## Basic auth credential
# # username = "loki"
# # password = "pass"
#
# ## Additional HTTP headers
# # http_headers = {"X-Scope-OrgID" = "1"}
#
# ## If the request must be gzip encoded
# # gzip_request = false
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# # Configuration for MQTT server to send metrics to
# [[outputs.mqtt]]
# servers = ["localhost:1883"] # required.
@@ -1158,6 +1188,10 @@
#
# ## Timeout for writes to the New Relic API.
# # timeout = "15s"
#
# ## HTTP Proxy override. If unset use values from the standard
# ## proxy environment variables to determine proxy, if any.
# # http_proxy = "http://corporate.proxy:3128"
# # Send telegraf measurements to NSQD
@@ -1293,6 +1327,116 @@
# separator = " "
# # Send aggregate metrics to Sensu Monitor
# [[outputs.sensu-go]]
# ## BACKEND API URL is the Sensu Backend API root URL to send metrics to
# ## (protocol, host, and port only). The output plugin will automatically
# ## append the corresponding backend API path
# ## (/api/core/v2/namespaces/:entity_namespace/events/:entity_name/:check_name).
# ##
# ## Backend Events API reference:
# ## https://docs.sensu.io/sensu-go/latest/api/events/
# ##
# ## AGENT API URL is the Sensu Agent API root URL to send metrics to
# ## (protocol, host, and port only). The output plugin will automatically
# ## append the corresponding agent API path (/events).
# ##
# ## Agent API Events API reference:
# ## https://docs.sensu.io/sensu-go/latest/api/events/
# ##
# ## NOTE: if backend_api_url and agent_api_url and api_key are set, the output
# ## plugin will use backend_api_url. If backend_api_url and agent_api_url are
# ## not provided, the output plugin will default to use an agent_api_url of
# ## http://127.0.0.1:3031
# ##
# # backend_api_url = "http://127.0.0.1:8080"
# # agent_api_url = "http://127.0.0.1:3031"
#
# ## API KEY is the Sensu Backend API token
# ## Generate a new API token via:
# ##
# ## $ sensuctl cluster-role create telegraf --verb create --resource events,entities
# ## $ sensuctl cluster-role-binding create telegraf --cluster-role telegraf --group telegraf
# ## $ sensuctl user create telegraf --group telegraf --password REDACTED
# ## $ sensuctl api-key grant telegraf
# ##
# ## For more information on Sensu RBAC profiles & API tokens, please visit:
# ## - https://docs.sensu.io/sensu-go/latest/reference/rbac/
# ## - https://docs.sensu.io/sensu-go/latest/reference/apikeys/
# ##
# # api_key = "${SENSU_API_KEY}"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Timeout for HTTP message
# # timeout = "5s"
#
# ## HTTP Content-Encoding for write request body, can be set to "gzip" to
# ## compress body or "identity" to apply no encoding.
# # content_encoding = "identity"
#
# ## Sensu Event details
# ##
# ## Below are the event details to be sent to Sensu. The main portions of the
# ## event are the check, entity, and metrics specifications. For more information
# ## on Sensu events and their components, please visit:
# ## - Events - https://docs.sensu.io/sensu-go/latest/reference/events
# ## - Checks - https://docs.sensu.io/sensu-go/latest/reference/checks
# ## - Entities - https://docs.sensu.io/sensu-go/latest/reference/entities
# ## - Metrics - https://docs.sensu.io/sensu-go/latest/reference/events#metrics
# ##
# ## Check specification
# ## The check name is the name to give the Sensu check associated with the event
# ## created. This maps to check.metadata.name in the event.
# [outputs.sensu-go.check]
# name = "telegraf"
#
# ## Entity specification
# ## Configure the entity name and namespace, if necessary. This will be part of
# ## the entity.metadata in the event.
# ##
# ## NOTE: if the output plugin is configured to send events to a
# ## backend_api_url and entity_name is not set, the value returned by
# ## os.Hostname() will be used; if the output plugin is configured to send
# ## events to an agent_api_url, entity_name and entity_namespace are not used.
# # [outputs.sensu-go.entity]
# # name = "server-01"
# # namespace = "default"
#
# ## Metrics specification
# ## Configure the tags for the metrics that are sent as part of the Sensu event
# # [outputs.sensu-go.tags]
# # source = "telegraf"
#
# ## Configure the handler(s) for processing the provided metrics
# # [outputs.sensu-go.metrics]
# # handlers = ["influxdb","elasticsearch"]
# # Send metrics and events to SignalFx
# [[outputs.signalfx]]
# ## SignalFx Org Access Token
# access_token = "my-secret-token"
#
# ## The SignalFx realm that your organization resides in
# signalfx_realm = "us9" # Required if ingest_url is not set
#
# ## You can optionally provide a custom ingest url instead of the
# ## signalfx_realm option above if you are using a gateway or proxy
# ## instance. This option takes precedence over signalfx_realm.
# ingest_url = "https://my-custom-ingest/"
#
# ## Event typed metrics are omitted by default.
# ## If you require an event typed metric you must specify the
# ## metric name in the following list.
# included_event_names = ["plugin.metric_name"]
# # Generic socket writer capable of handling multiple socket types.
# [[outputs.socket_writer]]
# ## URL to connect to
@@ -1698,6 +1842,55 @@
###############################################################################
# # Attach AWS EC2 metadata to metrics
# [[processors.aws_ec2]]
# ## Instance identity document tags to attach to metrics.
# ## For more information see:
# ## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html
# ##
# ## Available tags:
# ## * accountId
# ## * architecture
# ## * availabilityZone
# ## * billingProducts
# ## * imageId
# ## * instanceId
# ## * instanceType
# ## * kernelId
# ## * pendingTime
# ## * privateIp
# ## * ramdiskId
# ## * region
# ## * version
# imds_tags = []
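# ##
# ## For example, to tag metrics with only the instance id and region
# ## (both names taken from the list above):
# ##   imds_tags = ["instanceId", "region"]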
#
# ## EC2 instance tags retrieved with DescribeTags action.
# ## If a tag is empty upon retrieval, it is omitted when tagging metrics.
# ## Note that for this to work, the role attached to the EC2 instance or the
# ## AWS credentials available from the environment must have a policy
# ## attached that allows ec2:DescribeTags.
# ##
# ## For more information see:
# ## https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTags.html
# ec2_tags = []
#
# ## Timeout for HTTP requests made against the AWS EC2 metadata endpoint.
# timeout = "10s"
#
# ## ordered controls whether or not the metrics need to stay in the same order
# ## this plugin received them in. If false, this plugin may change the order,
# ## with requests hitting cached results moving through immediately and not
# ## waiting on slower lookups. This may cause issues if you depend on the
# ## order of metrics staying the same. If so, set this to true.
# ## Keeping the metrics ordered may be slightly slower.
# ordered = false
#
# ## max_parallel_calls is the maximum number of AWS API calls to be in flight
# ## at the same time.
# ## It's probably best to keep this number fairly low.
# max_parallel_calls = 10
# # Clone metrics and apply modifications.
# [[processors.clone]]
# ## All modifications on inputs and aggregators can be overridden:
@@ -2092,6 +2285,13 @@
#
# ## File containing a Starlark script.
# # script = "/usr/local/bin/myscript.star"
#
# ## The constants of the Starlark script.
# # [processors.starlark.constants]
# # max_size = 10
# # threshold = 0.75
# # default_name = "Julia"
# # debug_mode = true
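#
# ## The keys above are exposed to the script as global variables; a minimal
# ## illustrative sketch of a script using them (assuming the constants
# ## shown above) could be:
# ##   def apply(metric):
# ##       if debug_mode:
# ##           metric.tags["name"] = default_name
# ##       return metric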
# # Perform string processing on tags, fields, and measurements
@@ -2245,6 +2445,49 @@
# # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"]
# # Calculates a derivative for every field.
# [[aggregators.derivative]]
# ## The period in which to flush the aggregator.
# period = "30s"
# ##
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
# ##
# ## This aggregator will estimate a derivative for each field, which is
# ## contained in both the first and last metric of the aggregation interval.
# ## Without further configuration the derivative will be calculated with
# ## respect to the time difference between these two measurements in seconds.
# ## The formula applied to every field is:
# ##
# ## value_last - value_first
# ## derivative = --------------------------
# ## time_difference_in_seconds
# ##
# ## The resulting derivative will be named *fieldname_rate*. The suffix
# ## "_rate" can be configured by the *suffix* parameter. When using a
# ## derivation variable you can include its name for more clarity.
# # suffix = "_rate"
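# ##
# ## Worked example (field name illustrative): a field "requests" moving
# ## from 1000 to 1300 between the first and last metric of a 30s period
# ## is emitted as requests_rate = (1300 - 1000) / 30 = 10.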
# ##
# ## Alternatively, the derivative can be calculated not only by the time
# ## difference but by the difference of a field contained in the
# ## measurement. This field is assumed to be monotonically increasing. This
# ## feature is enabled by specifying a *variable*.
# ## Make sure the specified variable is not filtered and exists in the metrics
# ## passed to this aggregator!
# # variable = ""
# ##
# ## When using a field as the derivation parameter the name of that field will
# ## be used for the resulting derivative, e.g. *fieldname_by_parameter*.
# ##
# ## Note that the calculation is based on the actual timestamp of the
# ## measurements. When there is only one measurement during that period, the
# ## measurement will be rolled over to the next period. The maximum number of
# ## such roll-overs can be configured with a default of 10.
# # max_roll_over = 10
# ##
# # Report the final metric of a series
# [[aggregators.final]]
# ## The period on which to flush & clear the aggregator.
@@ -2308,6 +2551,34 @@
# drop_original = false
# # Keep the aggregate quantiles of each metric passing through.
# [[aggregators.quantile]]
# ## General Aggregator Arguments:
# ## The period on which to flush & clear the aggregator.
# period = "30s"
#
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
#
# ## Quantiles to output in the range [0,1]
# # quantiles = [0.25, 0.5, 0.75]
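# ##
# ## Illustrative example: for the nine samples 1..9 of a field in one
# ## period and quantiles = [0.25, 0.5, 0.75], the exact R7 algorithm
# ## yields 3, 5 and 7; "t-digest" approximates these values.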
#
# ## Type of aggregation algorithm
# ## Supported are:
# ## "t-digest" -- approximation using centroids, can cope with large number of samples
# ## "exact R7" -- exact computation also used by Excel or NumPy (Hyndman & Fan 1996 R7)
# ## "exact R8" -- exact computation (Hyndman & Fan 1996 R8)
# ## NOTE: Do not use "exact" algorithms with large number of samples
# ## to not impair performance or memory consumption!
# # algorithm = "t-digest"
#
# ## Compression for the approximation (t-digest). The value needs to be
# ## greater than or equal to 1.0. Smaller values give better performance
# ## but less accuracy.
# # compression = 100.0
# # Count the occurrence of values in fields.
# [[aggregators.valuecounter]]
# ## General Aggregator Arguments:
@@ -2562,6 +2833,41 @@
# tubes = ["notifications"]
# # Read metrics exposed by Beat
# [[inputs.beat]]
# ## A URL from which to read Beat-formatted JSON
# ## Default is "http://127.0.0.1:5066".
# url = "http://127.0.0.1:5066"
#
# ## Enable collection of the listed stats
# ## An empty list means collect all. Available options are currently
# ## "beat", "libbeat", "system" and "filebeat".
# # include = ["beat", "libbeat", "filebeat"]
#
# ## HTTP method
# # method = "GET"
#
# ## Optional HTTP headers
# # headers = {"X-Special-Header" = "Special-Value"}
#
# ## Override HTTP "Host" header
# # host_header = "logstash.example.com"
#
# ## Timeout for HTTP requests
# # timeout = "5s"
#
# ## Optional HTTP Basic Auth credentials
# # username = "username"
# # password = "pa$$word"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
# # Read BIND nameserver XML statistics
# [[inputs.bind]]
# ## An array of BIND XML statistics URI to gather stats.
@@ -2713,6 +3019,9 @@
# ## ex: endpoint_url = "http://localhost:8000"
# # endpoint_url = ""
#
# ## Set http_proxy (telegraf uses the system-wide proxy settings if it is not set)
# # http_proxy_url = "http://localhost:8888"
#
# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
# # metrics are made available to the 1 minute period. Some are collected at
# # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
@@ -2854,6 +3163,18 @@
# # basic_password = "p@ssw0rd"
# # Fetch metrics from a CSGO SRCDS
# [[inputs.csgo]]
# ## Specify servers using the following format:
# ## servers = [
# ## ["ip1:port1", "rcon_password1"],
# ## ["ip2:port2", "rcon_password2"],
# ## ]
# #
# ## If no servers are specified, no data will be collected
# servers = []
# # Input plugin for DC/OS metrics
# [[inputs.dcos]]
# ## The DC/OS cluster URL.
@@ -2966,13 +3287,30 @@
# ## Timeout for docker list, info, and stats commands
# timeout = "5s"
#
# ## Whether to report for each container per-device blkio (8:0, 8:1...),
# ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not.
# ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'.
# ## Default value is 'true' for backwards compatibility; please set it to 'false' so that the 'perdevice_include' setting
# ## is honored.
# perdevice = true
#
# ## Specifies for which classes a per-device metric should be issued
# ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...)
# ## Please note that this setting has no effect if 'perdevice' is set to 'true'
# # perdevice_include = ["cpu"]
#
# ## Whether to report for each container total blkio and network stats or not.
# ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'.
# ## Default value is 'false' for backwards compatibility; please set it to 'true' so that the 'total_include' setting
# ## is honored.
# total = false
#
# ## Specifies for which classes a total metric should be issued. Total is an aggregate of the 'perdevice' values.
# ## Possible values are 'cpu', 'blkio' and 'network'
# ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin.
# ## Please note that this setting has no effect if 'total' is set to 'false'
# # total_include = ["cpu", "blkio", "network"]
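#
# ## For example, to skip per-device stats and only report aggregated totals
# ## for all classes (an illustrative combination of the settings above):
# ##   perdevice = false
# ##   perdevice_include = []
# ##   total = true
# ##   total_include = ["cpu", "blkio", "network"]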
#
# ## Which environment variables should we use as a tag
# ##tag_env = ["JAVA_HOME", "HEAP_SIZE"]
#
@@ -3065,6 +3403,7 @@
# cluster_stats_only_from_master = true
#
# ## Indices to collect; can be one or more indices names or _all
# ## Use of wildcards is allowed. Use a wildcard at the end to retrieve index names that end with a changing value, like a date.
# indices_include = ["_all"]
#
# ## One of "shards", "cluster", "indices"
@@ -3085,6 +3424,11 @@
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Sets the number of most recent indices to return for indices that are configured with a date-stamped suffix.
# ## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all indices that match it, and sort them
# ## by the date or number after the wildcard. Metrics are then gathered for only the 'num_most_recent_indices' most recent indices.
# # num_most_recent_indices = 0
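# ##
# ## For example (hypothetical index names): with
# ##   indices_include = ["telegraf-*"]
# ##   num_most_recent_indices = 2
# ## only the two most recent date-stamped indices, e.g.
# ## telegraf-2021.03.02 and telegraf-2021.03.03, are gathered.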
# # Returns ethtool statistics for given interfaces
@@ -3583,6 +3927,17 @@
# timeout = "5s"
# # Intel PowerStat plugin enables monitoring of platform metrics (power, TDP) and Core metrics like temperature, power and utilization.
# [[inputs.intel_powerstat]]
# ## All global metrics are always collected by Intel PowerStat plugin.
# ## Users can choose which per-CPU metrics are monitored by the plugin via the cpu_metrics array.
# ## An empty array means no per-CPU specific metrics will be collected - in this case only platform-level
# ## telemetry will be exposed by the Intel PowerStat plugin.
# ## Supported options:
# ## "cpu_frequency", "cpu_busy_frequency", "cpu_temperature", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles"
# # cpu_metrics = []
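# ##
# ## For example, to monitor only frequency and temperature per CPU
# ## (both options come from the supported list above):
# ##   cpu_metrics = ["cpu_frequency", "cpu_temperature"]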
# # Collect statistics about itself
# [[inputs.internal]]
# ## If true, collect telegraf memory stats.
@@ -5059,31 +5414,26 @@
# urls = ["http://localhost:8080/_raindrops"]
# # Reads metrics from RavenDB servers via the Monitoring Endpoints
# [[inputs.ravendb]]
# ## Node URL and port that RavenDB is listening on
# url = "https://localhost:8080"
# #
# ## RavenDB X509 client certificate setup
# # tls_cert = "/etc/telegraf/raven.crt"
# # tls_key = "/etc/telegraf/raven.key"
# #
# ## Optional request timeout
# ##
# ## Timeout, specifies the amount of time to wait
# ## for a server's response headers after fully writing the request and
# ## time limit for requests made by this client
# # timeout = "5s"
# #
# ## List of statistics which are collected
# # At least one is required
# # Allowed values: server, databases, indexes, collections
# #
# # stats_include = ["server", "databases", "indexes", "collections"]
# #
# ## List of db where database stats are collected
@@ -5098,6 +5448,7 @@
# ## If empty, all collections from all db are concerned
# # collection_stats_dbs = []
# # Read CPU, Fans, Powersupply and Voltage metrics of hardware server through redfish APIs
# [[inputs.redfish]]
# ## Server url
@@ -5282,7 +5633,7 @@
# ##
# ## Security Name.
# # sec_name = "myuser"
# ## Authentication protocol; one of "MD5", "SHA", "SHA224", "SHA256", "SHA384", "SHA512" or "".
# # auth_protocol = "MD5"
# ## Authentication password.
# # auth_password = "pass"
@@ -6183,6 +6534,46 @@
# data_format = "influx"
# # Ingests files in a directory and then moves them to a target directory.
# [[inputs.directory_monitor]]
# ## The directory to monitor and read files from.
# directory = ""
# #
# ## The directory to move finished files to.
# finished_directory = ""
# #
# ## The directory to move files to upon file error.
# ## If not provided, erroring files will stay in the monitored directory.
# # error_directory = ""
# #
# ## The amount of time a file is allowed to sit in the directory before it is picked up.
# ## This time can generally be low, but if large files are written to the directory slowly,
# ## set this higher so that the plugin waits until each file is fully copied into the directory.
# # directory_duration_threshold = "50ms"
# #
# ## An allow-list of file names to monitor, if necessary. Supports regex. If left blank, all files are ingested.
# # files_to_monitor = ["^.*\.csv"]
# #
# ## A list of files to ignore, if necessary. Supports regex.
# # files_to_ignore = [".DS_Store"]
# #
# ## Maximum lines of the file to process that have not yet been written by the
# ## output. For best throughput set to the size of the output's metric_buffer_limit.
# ## Warning: setting this number higher than the output's metric_buffer_limit can cause dropped metrics.
# # max_buffered_metrics = 10000
# #
# ## The maximum number of file paths to queue up for processing at once, before waiting until files are processed to find more files.
# ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed, if absolutely necessary.
# # file_queue_size = 100000
# #
# ## The data format to be read from the files.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# ## NOTE: We currently only support parsing newline-delimited JSON. See the format here: https://github.com/ndjson/ndjson-spec
# data_format = "influx"
# # Read logging output from the Docker engine
# [[inputs.docker_log]]
# ## Docker Endpoint
@@ -7188,6 +7579,13 @@
# # insecure_skip_verify = false
# # RAS plugin exposes counter metrics for Machine Check Errors provided by RASDaemon (sqlite3 output is required).
# [[inputs.ras]]
# ## Optional path to RASDaemon sqlite3 database.
# ## Default: /var/lib/rasdaemon/ras-mc_event.db
# # db_path = ""
# # Riemann protobuf listener.
# [[inputs.riemann_listener]]
# ## URL to listen on.
@@ -7365,7 +7763,8 @@
# ## Parses datadog extensions to the statsd format
# datadog_extensions = false
#
# ## Parses distributions metric as specified in the datadog statsd format
# ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition
# datadog_distributions = false
#
# ## Statsd data translation templates, more info can be read here:
@@ -7457,7 +7856,8 @@
# ## "/var/log/**.log" -> recursively find all .log files in /var/log
# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
# ## "/var/log/apache.log" -> just tail the apache log file
# ## "/var/log/log[!1-2]*" -> tail files without 1-2
# ## "/var/log/log[^1-2]*" -> identical behavior as above
# ## See https://github.com/gobwas/glob for more examples
# ##
# files = ["/var/mymetrics.out"]