diff --git a/CHANGELOG.md b/CHANGELOG.md
index 053e9ee59..42ca26772 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,56 @@
+## v1.20.0-rc0 [2021-09-02]
+
+#### Release Notes
+
+  - [#9642](https://github.com/influxdata/telegraf/pull/9642) Build with Golang 1.17
+
+#### Bugfixes
+
+  - [#9700](https://github.com/influxdata/telegraf/pull/9700) Update thrift module to 0.14.2 and zipkin-go-opentracing to 0.4.5
+  - [#9587](https://github.com/influxdata/telegraf/pull/9587) `outputs.opentelemetry` Use headers config in gRPC requests
+  - [#9713](https://github.com/influxdata/telegraf/pull/9713) Update runc module to v1.0.0-rc95 to address CVE-2021-30465
+  - [#9699](https://github.com/influxdata/telegraf/pull/9699) Migrate dgrijalva/jwt-go to golang-jwt/jwt/v4
+  - [#9139](https://github.com/influxdata/telegraf/pull/9139) `serializers.prometheus` Update timestamps and expiration time as new data arrives
+  - [#9625](https://github.com/influxdata/telegraf/pull/9625) `outputs.graylog` Output timestamp with fractional seconds
+  - [#9655](https://github.com/influxdata/telegraf/pull/9655) Update cloud.google.com/go/pubsub module from 1.2.0 to 1.15.0
+  - [#9674](https://github.com/influxdata/telegraf/pull/9674) `inputs.mongodb` Change command based on server version
+  - [#9676](https://github.com/influxdata/telegraf/pull/9676) `outputs.dynatrace` Remove hardcoded int value
+  - [#9619](https://github.com/influxdata/telegraf/pull/9619) `outputs.influxdb_v2` Increase accepted retry-after header values
+  - [#9652](https://github.com/influxdata/telegraf/pull/9652) Update github.com/tinylib/msgp module from 1.1.5 to 1.1.6
+  - [#9471](https://github.com/influxdata/telegraf/pull/9471) `inputs.sql` Make timeout apply to each single query
+
+#### Features
+
+  - [#9665](https://github.com/influxdata/telegraf/pull/9665) `inputs.systemd_units` Add pattern support
+  - [#9598](https://github.com/influxdata/telegraf/pull/9598) `outputs.sql` Add bool datatype
+  - [#9386](https://github.com/influxdata/telegraf/pull/9386) `inputs.cloudwatch` Pull metrics from multiple AWS CloudWatch namespaces
+  - [#9411](https://github.com/influxdata/telegraf/pull/9411) `inputs.cloudwatch` Support AWS Web Identity Provider
+  - [#9570](https://github.com/influxdata/telegraf/pull/9570) `inputs.modbus` Add support for RTU over TCP
+  - [#9488](https://github.com/influxdata/telegraf/pull/9488) `inputs.procstat` Support cgroup globs and include systemd unit children
+  - [#9322](https://github.com/influxdata/telegraf/pull/9322) `inputs.suricata` Support alert event type
+  - [#5464](https://github.com/influxdata/telegraf/pull/5464) `inputs.prometheus` Add ability to query Consul Service catalog
+  - [#8641](https://github.com/influxdata/telegraf/pull/8641) `outputs.prometheus_client` Add landing page
+  - [#9529](https://github.com/influxdata/telegraf/pull/9529) `inputs.http_listener_v2` Allow multiple paths and add path_tag
+  - [#9395](https://github.com/influxdata/telegraf/pull/9395) Add cookie authentication to HTTP input and output plugins
+  - [#8454](https://github.com/influxdata/telegraf/pull/8454) `inputs.syslog` Add RFC3164 support
+  - [#9351](https://github.com/influxdata/telegraf/pull/9351) `inputs.jenkins` Add option to include nodes by name
+  - [#9277](https://github.com/influxdata/telegraf/pull/9277) Add JSON, MessagePack, and Protocol-buffers format support to the XPath parser
+  - [#9343](https://github.com/influxdata/telegraf/pull/9343) `inputs.snmp_trap` Improve MIB lookup performance
+  - [#9342](https://github.com/influxdata/telegraf/pull/9342) `outputs.newrelic` Add option to override metric_url
+  - [#9306](https://github.com/influxdata/telegraf/pull/9306) `inputs.smart` Add power mode status
+
+#### New Input Plugins
+
+  - [#9602](https://github.com/influxdata/telegraf/pull/9602) Add rocm_smi input to monitor AMD GPUs
+  - [#9101](https://github.com/influxdata/telegraf/pull/9101) Add mdstat input to gather statistics from /proc/mdstat
+  - [#3536](https://github.com/influxdata/telegraf/pull/3536) Add Elasticsearch query input
+
+#### New Output Plugins
+
+  - [#9228](https://github.com/influxdata/telegraf/pull/9228) Add OpenTelemetry output
+  - [#9426](https://github.com/influxdata/telegraf/pull/9426) Add Azure Data Explorer (ADX) output
+
+
 ## v1.19.3 [2021-08-18]
 
 #### Bugfixes
diff --git a/etc/telegraf.conf b/etc/telegraf.conf
index 43b1f8f3a..fabd26161 100644
--- a/etc/telegraf.conf
+++ b/etc/telegraf.conf
@@ -315,6 +315,30 @@
 #     "ai.cloud.roleInstance" = "kubernetes_pod_name"
 
+# # Sends metrics to Azure Data Explorer
+# [[outputs.azure_data_explorer]]
+#   ## Azure Data Explorer cluster endpoint
+#   ## ex: endpoint_url = "https://clustername.australiasoutheast.kusto.windows.net"
+#   endpoint_url = ""
+#
+#   ## The Azure Data Explorer database that the metrics will be ingested into.
+#   ## The plugin will NOT create this database automatically; it is expected to exist before ingestion.
+#   ## ex: "exampledatabase"
+#   database = ""
+#
+#   ## Timeout for Azure Data Explorer operations
+#   # timeout = "20s"
+#
+#   ## Type of metrics grouping used when pushing to Azure Data Explorer.
+#   ## Default is "TablePerMetric" for one table per different metric.
+#   ## For more information, please check the plugin README.
+#   # metrics_grouping_type = "TablePerMetric"
+#
+#   ## Name of the single table to store all the metrics (only needed if metrics_grouping_type is "SingleTable").
+#   # table_name = ""
+#
+
 
 # # Send aggregate metrics to Azure Monitor
 # [[outputs.azure_monitor]]
 #   ## Timeout for HTTP writes.
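The new `outputs.azure_data_explorer` section above only requires `endpoint_url` and `database`. A minimal working sketch, assuming a hypothetical cluster URL and a pre-existing `telegrafdb` database (both placeholders):

```toml
[[outputs.azure_data_explorer]]
  ## Placeholder endpoint; substitute your own cluster's URL.
  endpoint_url = "https://mycluster.australiasoutheast.kusto.windows.net"

  ## Placeholder database name; it must already exist,
  ## since the plugin will not create it.
  database = "telegrafdb"

  ## Optional: store all metrics in one table rather than one table per metric.
  # metrics_grouping_type = "SingleTable"
  # table_name = "telegraf_metrics"
```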
@@ -404,16 +428,19 @@
 #
 #   ## Amazon Credentials
 #   ## Credentials are loaded in the following order
-#   ## 1) Assumed credentials via STS if role_arn is specified
-#   ## 2) explicit credentials from 'access_key' and 'secret_key'
-#   ## 3) shared profile from 'profile'
-#   ## 4) environment variables
-#   ## 5) shared credentials file
-#   ## 6) EC2 Instance Profile
+#   ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+#   ## 2) Assumed credentials via STS if role_arn is specified
+#   ## 3) explicit credentials from 'access_key' and 'secret_key'
+#   ## 4) shared profile from 'profile'
+#   ## 5) environment variables
+#   ## 6) shared credentials file
+#   ## 7) EC2 Instance Profile
 #   #access_key = ""
 #   #secret_key = ""
 #   #token = ""
 #   #role_arn = ""
+#   #web_identity_token_file = ""
+#   #role_session_name = ""
 #   #profile = ""
 #   #shared_credential_file = ""
 #
@@ -452,16 +479,19 @@
 #
 #   ## Amazon Credentials
 #   ## Credentials are loaded in the following order
-#   ## 1) Assumed credentials via STS if role_arn is specified
-#   ## 2) explicit credentials from 'access_key' and 'secret_key'
-#   ## 3) shared profile from 'profile'
-#   ## 4) environment variables
-#   ## 5) shared credentials file
-#   ## 6) EC2 Instance Profile
+#   ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+#   ## 2) Assumed credentials via STS if role_arn is specified
+#   ## 3) explicit credentials from 'access_key' and 'secret_key'
+#   ## 4) shared profile from 'profile'
+#   ## 5) environment variables
+#   ## 6) shared credentials file
+#   ## 7) EC2 Instance Profile
 #   #access_key = ""
 #   #secret_key = ""
 #   #token = ""
 #   #role_arn = ""
+#   #web_identity_token_file = ""
+#   #role_session_name = ""
 #   #profile = ""
 #   #shared_credential_file = ""
 #
@@ -809,6 +839,15 @@
 #   ## Use TLS but skip chain & host verification
 #   # insecure_skip_verify = false
 #
+#   ## Optional Cookie authentication
+#   # cookie_auth_url = "https://localhost/authMe"
+#   # cookie_auth_method = "POST"
+#   # cookie_auth_username = "username"
+#   # cookie_auth_password = "pa$$word"
+#   # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}'
+#   ## If cookie_auth_renewal is not set or is set to "0", the cookie is obtained once and never renewed
+#   # cookie_auth_renewal = "5m"
+#
 #   ## Data format to output.
 #   ## Each data format has its own unique set of configuration options, read
 #   ## more about them here:
@@ -1049,16 +1088,19 @@
 #
 #   ## Amazon Credentials
 #   ## Credentials are loaded in the following order
-#   ## 1) Assumed credentials via STS if role_arn is specified
-#   ## 2) explicit credentials from 'access_key' and 'secret_key'
-#   ## 3) shared profile from 'profile'
-#   ## 4) environment variables
-#   ## 5) shared credentials file
-#   ## 6) EC2 Instance Profile
+#   ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+#   ## 2) Assumed credentials via STS if role_arn is specified
+#   ## 3) explicit credentials from 'access_key' and 'secret_key'
+#   ## 4) shared profile from 'profile'
+#   ## 5) environment variables
+#   ## 6) shared credentials file
+#   ## 7) EC2 Instance Profile
 #   #access_key = ""
 #   #secret_key = ""
 #   #token = ""
 #   #role_arn = ""
+#   #web_identity_token_file = ""
+#   #role_session_name = ""
 #   #profile = ""
 #   #shared_credential_file = ""
 #
@@ -1268,6 +1310,10 @@
 #   ## HTTP Proxy override. If unset use values from the standard
 #   ## proxy environment variables to determine proxy, if any.
 #   # http_proxy = "http://corporate.proxy:3128"
+#
+#   ## Metric URL override to enable geographic location endpoints.
+#   ## If not set, the default metric API endpoint below is used.
+#   # metric_url = "https://metric-api.newrelic.com/metric/v1"
 
 
 # # Send telegraf measurements to NSQD
@@ -1284,6 +1330,41 @@
 #   data_format = "influx"
 
 
+# # Send OpenTelemetry metrics over gRPC
+# [[outputs.opentelemetry]]
+#   ## Override the default (localhost:4317) OpenTelemetry gRPC service
+#   ## address:port
+#   # service_address = "localhost:4317"
+#
+#   ## Override the default (5s) request timeout
+#   # timeout = "5s"
+#
+#   ## Optional TLS Config.
+#   ##
+#   ## Root certificates for verifying server certificates encoded in PEM format.
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   ## The public and private keypairs for the client encoded in PEM format.
+#   ## May contain intermediate certificates.
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## Use TLS, but skip TLS chain and host verification.
+#   # insecure_skip_verify = false
+#   ## Send the specified TLS server name via SNI.
+#   # tls_server_name = "foo.example.com"
+#
+#   ## Override the default (gzip) compression used to send data.
+#   ## Supports: "gzip", "none"
+#   # compression = "gzip"
+#
+#   ## Additional OpenTelemetry resource attributes
+#   # [outputs.opentelemetry.attributes]
+#   # "service.name" = "demo"
+#
+#   ## Additional gRPC request metadata
+#   # [outputs.opentelemetry.headers]
+#   # key1 = "value1"
+
+
 # # Configuration for OpenTSDB server to send metrics to
 # [[outputs.opentsdb]]
 #   ## prefix for metrics keys
@@ -1748,16 +1829,19 @@
 #
 #   ## Amazon Credentials
 #   ## Credentials are loaded in the following order:
-#   ## 1) Assumed credentials via STS if role_arn is specified
-#   ## 2) Explicit credentials from 'access_key' and 'secret_key'
-#   ## 3) Shared profile from 'profile'
-#   ## 4) Environment variables
-#   ## 5) Shared credentials file
-#   ## 6) EC2 Instance Profile
+#   ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+#   ## 2) Assumed credentials via STS if role_arn is specified
+#   ## 3) explicit credentials from 'access_key' and 'secret_key'
+#   ## 4) shared profile from 'profile'
+#   ## 5) environment variables
+#   ## 6) shared credentials file
+#   ## 7) EC2 Instance Profile
 #   #access_key = ""
 #   #secret_key = ""
 #   #token = ""
 #   #role_arn = ""
+#   #web_identity_token_file = ""
+#   #role_session_name = ""
 #   #profile = ""
 #   #shared_credential_file = ""
 #
@@ -1870,7 +1954,7 @@
 #   ## Print Warp 10 error body
 #   # print_error_body = false
 #
-#   ## Max string error size 
+#   ## Max string error size
 #   # max_string_error_size = 511
 #
 #   ## Optional TLS Config
@@ -2896,6 +2980,15 @@
 #   # num_histogram_buckets = 100 # default: 10
 
+# # Query statistics from AMD Graphics cards using rocm-smi binary
+# [[inputs.amd_rocm_smi]]
+#   ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath
+#   # bin_path = "/opt/rocm/bin/rocm-smi"
+#
+#   ## Optional: timeout for GPU polling
+#   # timeout = "5s"
+
+
 # # Read Apache status information (mod_status)
 # [[inputs.apache]]
 #   ## An array of URLs to gather from, must be directed at the machine
@@ -3163,16 +3256,19 @@
 #
 #   ## Amazon Credentials
 #   ## Credentials are loaded in the following order
-#   ## 1) Assumed credentials via STS if role_arn is specified
-#   ## 2) explicit credentials from 'access_key' and 'secret_key'
-#   ## 3) shared profile from 'profile'
-#   ## 4) environment variables
-#   ## 5) shared credentials file
-#   ## 6) EC2 Instance Profile
+#   ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+#   ## 2) Assumed credentials via STS if role_arn is specified
+#   ## 3) explicit credentials from 'access_key' and 'secret_key'
+#   ## 4) shared profile from 'profile'
+#   ## 5) environment variables
+#   ## 6) shared credentials file
+#   ## 7) EC2 Instance Profile
 #   # access_key = ""
 #   # secret_key = ""
 #   # token = ""
 #   # role_arn = ""
+#   # web_identity_token_file = ""
+#   # role_session_name = ""
 #   # profile = ""
 #   # shared_credential_file = ""
 #
@@ -3212,8 +3308,10 @@
 #   ## Configure the TTL for the internal cache of metrics.
 #   # cache_ttl = "1h"
 #
-#   ## Metric Statistic Namespace (required)
-#   namespace = "AWS/ELB"
+#   ## Metric Statistic Namespaces (required)
+#   namespaces = ["AWS/ELB"]
+#   # A single metric statistic namespace that will be appended to namespaces on startup
+#   # namespace = "AWS/ELB"
 #
 #   ## Maximum requests per second. Note that the global default AWS rate limit is
 #   ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a
@@ -3632,6 +3730,72 @@
 #   # num_most_recent_indices = 0
 
+# # Derive metrics from aggregating Elasticsearch query results
+# [[inputs.elasticsearch_query]]
+#   ## The full HTTP endpoint URL for your Elasticsearch instance
+#   ## Multiple urls can be specified as part of the same cluster;
+#   ## only ONE of the urls will be queried each interval.
+#   urls = [ "http://node1.es.example.com:9200" ] # required.
+#
+#   ## Elasticsearch client timeout, defaults to "5s".
+#   # timeout = "5s"
+#
+#   ## Set to true to ask Elasticsearch for a list of all cluster nodes,
+#   ## so it is not necessary to list all nodes in the urls config option
+#   # enable_sniffer = false
+#
+#   ## Set the interval to check if the Elasticsearch nodes are available
+#   ## This option is only used if enable_sniffer is also set (0s to disable it)
+#   # health_check_interval = "10s"
+#
+#   ## HTTP basic authentication details (eg. when using x-pack)
+#   # username = "telegraf"
+#   # password = "mypassword"
+#
+#   ## Optional TLS Config
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## Use TLS but skip chain & host verification
+#   # insecure_skip_verify = false
+#
+#   [[inputs.elasticsearch_query.aggregation]]
+#     ## measurement name for the results of the aggregation query
+#     measurement_name = "measurement"
+#
+#     ## Elasticsearch indexes to query (accept wildcards).
+#     index = "index-*"
+#
+#     ## The date/time field in the Elasticsearch index (mandatory).
+#     date_field = "@timestamp"
+#
+#     ## Time window to query (eg. "1m" to query documents from last minute).
+#     ## Normally should be set to same as collection interval
+#     query_period = "1m"
+#
+#     ## Lucene query to filter results
+#     # filter_query = "*"
+#
+#     ## Fields to aggregate values (must be numeric fields)
+#     # metric_fields = ["metric"]
+#
+#     ## Aggregation function to use on the metric fields
+#     ## Must be set if 'metric_fields' is set
+#     ## Valid values are: avg, sum, min, max
+#     # metric_function = "avg"
+#
+#     ## Fields to be used as tags
+#     ## Must be text, non-analyzed fields. Metric aggregations are performed per tag
+#     # tags = ["field.keyword", "field2.keyword"]
+#
+#     ## Set to true to not ignore documents when the tag(s) above are missing
+#     # include_missing_tag = false
+#
+#     ## String value of the tag when the tag does not exist
+#     ## Used when include_missing_tag is true
+#     # missing_tag_value = "null"
+
+
 # # Returns ethtool statistics for given interfaces
 # [[inputs.ethtool]]
 #   ## List of interfaces to pull metrics for
@@ -3944,6 +4108,15 @@
 #   ## Use TLS but skip chain & host verification
 #   # insecure_skip_verify = false
 #
+#   ## Optional Cookie authentication
+#   # cookie_auth_url = "https://localhost/authMe"
+#   # cookie_auth_method = "POST"
+#   # cookie_auth_username = "username"
+#   # cookie_auth_password = "pa$$word"
+#   # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}'
+#   ## If cookie_auth_renewal is not set or is set to "0", the cookie is obtained once and never renewed
+#   # cookie_auth_renewal = "5m"
+#
 #   ## Amount of time allowed to complete the HTTP request
 #   # timeout = "5s"
 #
@@ -4286,7 +4459,9 @@
 #   # job_include = [ "*" ]
 #   # job_exclude = [ ]
 #
-#   ## Nodes to exclude from gathering
+#   ## Nodes to include or exclude from gathering
+#   ## When using both lists, node_exclude has priority.
+#   # node_include = [ "*" ]
 #   # node_exclude = [ ]
 #
 #   ## Worker pool for jenkins plugin only
@@ -4343,19 +4518,19 @@
 #   ## List of metrics collected on above servers
 #   ## Each metric consists of a name, a jmx path and either
 #   ## a pass or drop slice attribute.
-#   ## This collect all heap memory usage metrics. 
+#   ## This collects all heap memory usage metrics.
 #   [[inputs.jolokia.metrics]]
 #     name = "heap_memory_usage"
 #     mbean  = "java.lang:type=Memory"
 #     attribute = "HeapMemoryUsage"
 #
-#   ## This collect thread counts metrics. 
+#   ## This collects thread count metrics.
 #   [[inputs.jolokia.metrics]]
 #     name = "thread_count"
 #     mbean  = "java.lang:type=Threading"
 #     attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
 #
-#   ## This collect number of class loaded/unloaded counts metrics. 
+#   ## This collects class loaded/unloaded count metrics.
 #   [[inputs.jolokia.metrics]]
 #     name = "class_count"
 #     mbean  = "java.lang:type=ClassLoading"
@@ -4637,6 +4812,13 @@
 #   # timeout = "5s"
 
+# # Get md array statistics from /proc/mdstat
+# [[inputs.mdstat]]
+#   ## Sets file path
+#   ## If not specified, then default is /proc/mdstat
+#   # file_name = "/proc/mdstat"
+
+
 # # Read metrics from one or many memcached servers
 # [[inputs.memcached]]
 #   ## An array of addresses to gather stats about. Specify an ip or hostname
@@ -4708,7 +4890,7 @@
 # [[inputs.modbus]]
 #   ## Connection Configuration
 #   ##
-#   ## The plugin supports connections to PLCs via MODBUS/TCP or
+#   ## The plugin supports connections to PLCs via MODBUS/TCP, RTU over TCP, ASCII over TCP or
 #   ## via serial line communication in binary (RTU) or readable (ASCII) encoding
 #   ##
 #   ## Device name
@@ -4735,8 +4917,11 @@
 #   # data_bits = 8
 #   # parity = "N"
 #   # stop_bits = 1
-#   # transmission_mode = "RTU"
 #
+#   ## For Modbus over TCP you can choose between "TCP", "RTUoverTCP" and "ASCIIoverTCP";
+#   ## the default behaviour is "TCP" when the controller is connected via TCP.
+#   ## For serial connections you can choose between "RTU" and "ASCII"
+#   # transmission_mode = "RTU"
 #
 #   ## Measurements
 #   ##
@@ -5543,9 +5728,10 @@
 #   # pattern = "nginx"
 #   ## user as argument for pgrep (ie, pgrep -u <user>)
 #   # user = "nginx"
-#   ## Systemd unit name
+#   ## Systemd unit name, supports globs when include_systemd_children is set to true
 #   # systemd_unit = "nginx.service"
-#   ## CGroup name or path
+#   # include_systemd_children = false
+#   ## CGroup name or path, supports globs
 #   # cgroup = "systemd/system.slice/nginx.service"
 #
 #   ## Windows service name
@@ -5785,13 +5971,6 @@
 #   # Specify a list of one or more riak http servers
 #   servers = ["http://localhost:8098"]
 
-# # Query statistics from AMD Graphics cards using rocm-smi binary
-# [[inputs.amd_rocm_smi]]
-#   ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath
-#   # bin_path = "/opt/rocm/bin/rocm-smi"
-#
-#   ## Optional: timeout for GPU polling
-#   # timeout = "5s"
 
 # # Read API usage and limits for a Salesforce organisation
 # [[inputs.salesforce]]
@@ -6186,6 +6365,13 @@
 #   ## values are "socket", "target", "device", "mount", "automount", "swap",
 #   ## "timer", "path", "slice" and "scope":
 #   # unittype = "service"
+#   #
+#   ## Filter for a specific pattern, default is "" (i.e. all), other possible
+#   ## values are valid patterns for systemctl, e.g. "a*" for all units with
+#   ## names starting with "a"
+#   # pattern = ""
+#   ## pattern = "telegraf* influxdb*"
+#   ## pattern = "a*"
 
 # # Reads metrics from a Teamspeak 3 Server via ServerQuery
@@ -6293,6 +6479,219 @@
 #   ## General connection timeout
 #   # timeout = "5s"
 
+# # Input plugin to collect Windows Event Log messages
+# [[inputs.win_eventlog]]
+#   ## Telegraf should have Administrator permissions to subscribe to some Windows Event channels
+#   ## (System log, for example)
+#
+#   ## LCID (Locale ID) for event rendering
+#   ## 1033 to force English language
+#   ## 0 to use default Windows locale
+#   # locale = 0
+#
+#   ## Name of eventlog, used only if xpath_query is empty
+#   ## Example: "Application"
+#   # eventlog_name = ""
+#
+#   ## xpath_query can be defined in short form like "Event/System[EventID=999]"
+#   ## or you can form a XML Query. Refer to the Consuming Events article:
+#   ## https://docs.microsoft.com/en-us/windows/win32/wes/consuming-events
+#   ## XML query is the recommended form, because it is the most flexible
+#   ## You can create or debug XML Query by creating a Custom View in Windows Event Viewer
+#   ## and then copying resulting XML here
+#   xpath_query = '''
+#   <QueryList>
+#     <Query Id="0" Path="Security">
+#       <Select Path="Security">*</Select>
+#       <Suppress Path="Security">*[System[( (EventID &gt;= 5152 and EventID &lt;= 5158) or EventID=5379 or EventID=4672)]]</Suppress>
+#     </Query>
+#     <Query Id="1" Path="Application">
+#       <Select Path="Application">*[System[(Level &lt; 4)]]</Select>
+#     </Query>
+#     <Query Id="2" Path="Windows PowerShell">
+#       <Select Path="Windows PowerShell">*[System[(Level &lt; 4)]]</Select>
+#     </Query>
+#     <Query Id="3" Path="System">
+#       <Select Path="System">*</Select>
+#     </Query>
+#     <Query Id="4" Path="Setup">
+#       <Select Path="Setup">*</Select>
+#     </Query>
+#   </QueryList>
+#   '''
+#
+#   ## System field names:
+#   ##   "Source", "EventID", "Version", "Level", "Task", "Opcode", "Keywords", "TimeCreated",
+#   ##   "EventRecordID", "ActivityID", "RelatedActivityID", "ProcessID", "ThreadID", "ProcessName",
+#   ##   "Channel", "Computer", "UserID", "UserName", "Message", "LevelText", "TaskText", "OpcodeText"
+#
+#   ## In addition to System, Data fields can be unrolled from additional XML nodes in event.
+#   ## Human-readable representation of those nodes is formatted into event Message field,
+#   ## but XML is more machine-parsable
+#
+#   # Process UserData XML to fields, if this node exists in Event XML
+#   process_userdata = true
+#
+#   # Process EventData XML to fields, if this node exists in Event XML
+#   process_eventdata = true
+#
+#   ## Separator character to use for unrolled XML Data field names
+#   separator = "_"
+#
+#   ## Get only first line of Message field. For most events first line is usually more than enough
+#   only_first_line_of_message = true
+#
+#   ## Parse timestamp from TimeCreated.SystemTime event field.
+#   ## Will default to current time of telegraf processing on parsing error or if set to false
+#   timestamp_from_event = true
+#
+#   ## Fields to include as tags. Globbing supported ("Level*" for both "Level" and "LevelText")
+#   event_tags = ["Source", "EventID", "Level", "LevelText", "Task", "TaskText", "Opcode", "OpcodeText", "Keywords", "Channel", "Computer"]
+#
+#   ## Default list of fields to send. All fields are sent by default. Globbing supported
+#   event_fields = ["*"]
+#
+#   ## Fields to exclude. Also applied to data fields. Globbing supported
+#   exclude_fields = ["TimeCreated", "Binary", "Data_Address*"]
+#
+#   ## Skip tags or fields if their value is empty or equal to zero. Globbing supported
+#   exclude_empty = ["*ActivityID", "UserID"]
+
+
+# # Input plugin to collect Performance Counters on Windows operating systems
+# [[inputs.win_perf_counters]]
+#   ## By default this plugin returns basic CPU and Disk statistics.
+#   ## See the README file for more examples.
+#   ## Uncomment examples below or write your own as you see fit. If the system
+#   ## being polled for data does not have the Object at startup of the Telegraf
+#   ## agent, it will not be gathered.
+#   ## Settings:
+#   # PrintValid = false # Print All matching performance counters
+#   # Whether to request a timestamp along with the PerfCounter data or just use current time
+#   # UsePerfCounterTime=true
+#   # If UseWildcardsExpansion params is set to true, wildcards (partial wildcards in instance names and wildcards in counters names) in configured counter paths will be expanded
+#   # and in case of localized Windows, counter paths will be also localized. It also returns instance indexes in instance names.
+#   # If false, wildcards (not partial) in instance names will still be expanded, but instance indexes will not be returned in instance names.
+#   #UseWildcardsExpansion = false
+#   # Period after which counters will be reread from configuration and wildcards in counter paths expanded
+#   CountersRefreshInterval="1m"
+#
+#   [[inputs.win_perf_counters.object]]
+#     # Processor usage, alternative to native, reports on a per-core basis.
+#     ObjectName = "Processor"
+#     Instances = ["*"]
+#     Counters = [
+#       "% Idle Time",
+#       "% Interrupt Time",
+#       "% Privileged Time",
+#       "% User Time",
+#       "% Processor Time",
+#       "% DPC Time",
+#     ]
+#     Measurement = "win_cpu"
+#     # Set to true to include _Total instance when querying for all (*).
+#     # IncludeTotal=false
+#     # Print out when the performance counter is missing from object, counter or instance.
+#     # WarnOnMissing = false
+#
+#   [[inputs.win_perf_counters.object]]
+#     # Disk times and queues
+#     ObjectName = "LogicalDisk"
+#     Instances = ["*"]
+#     Counters = [
+#       "% Idle Time",
+#       "% Disk Time",
+#       "% Disk Read Time",
+#       "% Disk Write Time",
+#       "% User Time",
+#       "% Free Space",
+#       "Current Disk Queue Length",
+#       "Free Megabytes",
+#     ]
+#     Measurement = "win_disk"
+#
+#   [[inputs.win_perf_counters.object]]
+#     ObjectName = "PhysicalDisk"
+#     Instances = ["*"]
+#     Counters = [
+#       "Disk Read Bytes/sec",
+#       "Disk Write Bytes/sec",
+#       "Current Disk Queue Length",
+#       "Disk Reads/sec",
+#       "Disk Writes/sec",
+#       "% Disk Time",
+#       "% Disk Read Time",
+#       "% Disk Write Time",
+#     ]
+#     Measurement = "win_diskio"
+#
+#   [[inputs.win_perf_counters.object]]
+#     ObjectName = "Network Interface"
+#     Instances = ["*"]
+#     Counters = [
+#       "Bytes Received/sec",
+#       "Bytes Sent/sec",
+#       "Packets Received/sec",
+#       "Packets Sent/sec",
+#       "Packets Received Discarded",
+#       "Packets Outbound Discarded",
+#       "Packets Received Errors",
+#       "Packets Outbound Errors",
+#     ]
+#     Measurement = "win_net"
+#
+#
+#   [[inputs.win_perf_counters.object]]
+#     ObjectName = "System"
+#     Counters = [
+#       "Context Switches/sec",
+#       "System Calls/sec",
+#       "Processor Queue Length",
+#       "System Up Time",
+#     ]
+#     Instances = ["------"]
+#     Measurement = "win_system"
+#
+#   [[inputs.win_perf_counters.object]]
+#     # Example counterPath where the Instance portion must be removed to get data back,
+#     # such as from the Memory object.
+#     ObjectName = "Memory"
+#     Counters = [
+#       "Available Bytes",
+#       "Cache Faults/sec",
+#       "Demand Zero Faults/sec",
+#       "Page Faults/sec",
+#       "Pages/sec",
+#       "Transition Faults/sec",
+#       "Pool Nonpaged Bytes",
+#       "Pool Paged Bytes",
+#       "Standby Cache Reserve Bytes",
+#       "Standby Cache Normal Priority Bytes",
+#       "Standby Cache Core Bytes",
+#     ]
+#     Instances = ["------"] # Use 6 x - to remove the Instance bit from the counterPath.
+#     Measurement = "win_mem"
+#
+#   [[inputs.win_perf_counters.object]]
+#     # Example query where the Instance portion must be removed to get data back,
+#     # such as from the Paging File object.
+#     ObjectName = "Paging File"
+#     Counters = [
+#       "% Usage",
+#     ]
+#     Instances = ["_Total"]
+#     Measurement = "win_swap"
+
+
+# # Input plugin to report Windows services info.
+# [[inputs.win_services]]
+#   ## Names of the services to monitor. Leave empty to monitor all the available services on the host. Globs accepted.
+#   service_names = [
+#     "LanmanServer",
+#     "TermService",
+#     "Win*",
+#   ]
 
 # # A plugin to collect stats from Varnish HTTP Cache
 # [[inputs.varnish]]
@@ -7138,7 +7537,14 @@
 #   service_address = ":8080"
 #
 #   ## Path to listen to.
-#   # path = "/telegraf"
+#   ## This option is deprecated and only available for backward-compatibility. Please use paths instead.
+#   # path = ""
+#
+#   ## Paths to listen to.
+#   # paths = ["/telegraf"]
+#
+#   ## Save path as http_listener_v2_path tag if set to true
+#   # path_tag = false
 #
 #   ## HTTP methods to accept.
 #   # methods = ["POST", "PUT"]
@@ -7149,7 +7555,7 @@
 #   # write_timeout = "10s"
 #
 #   ## Maximum allowed http request body size in bytes.
-#   ## 0 means to use the default of 524,288,00 bytes (500 mebibytes)
+#   ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
 #   # max_body_size = "500MB"
 #
 #   ## Part of the request to consume. Available options are "body" and
@@ -7450,16 +7856,19 @@
 #
 #   ## Amazon Credentials
 #   ## Credentials are loaded in the following order
-#   ## 1) Assumed credentials via STS if role_arn is specified
-#   ## 2) explicit credentials from 'access_key' and 'secret_key'
-#   ## 3) shared profile from 'profile'
-#   ## 4) environment variables
-#   ## 5) shared credentials file
-#   ## 6) EC2 Instance Profile
+#   ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+#   ## 2) Assumed credentials via STS if role_arn is specified
+#   ## 3) explicit credentials from 'access_key' and 'secret_key'
+#   ## 4) shared profile from 'profile'
+#   ## 5) environment variables
+#   ## 6) shared credentials file
+#   ## 7) EC2 Instance Profile
 #   # access_key = ""
 #   # secret_key = ""
 #   # token = ""
 #   # role_arn = ""
+#   # web_identity_token_file = ""
+#   # role_session_name = ""
 #   # profile = ""
 #   # shared_credential_file = ""
 #
@@ -7723,16 +8132,28 @@
 # # Receive OpenTelemetry traces, metrics, and logs over gRPC
 # [[inputs.opentelemetry]]
-#   ## Override the OpenTelemetry gRPC service address:port
+#   ## Override the default (0.0.0.0:4317) destination OpenTelemetry gRPC service
+#   ## address:port
 #   # service_address = "0.0.0.0:4317"
 #
-#   ## Override the default request timeout
+#   ## Override the default (5s) new connection timeout
 #   # timeout = "5s"
 #
-#   ## Select a schema for metrics: prometheus-v1 or prometheus-v2
+#   ## Override the default (prometheus-v1) metrics schema.
+#   ## Supports: "prometheus-v1", "prometheus-v2"
 #   ## For more information about the alternatives, read the Prometheus input
 #   ## plugin notes.
 #   # metrics_schema = "prometheus-v1"
+#
+#   ## Optional TLS Config.
+#   ## For advanced options: https://github.com/influxdata/telegraf/blob/v1.18.3/docs/TLS.md
+#   ##
+#   ## Set one or more allowed client CA certificate file names to
+#   ## enable mutually authenticated TLS connections.
+#   # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#   ## Add service certificate and key.
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
 
 # # Read metrics from one or many pgbouncer servers
@@ -7909,6 +8330,19 @@
 #   #   eg. To scrape pods on a specific node
 #   #   kubernetes_field_selector = "spec.nodeName=$HOSTNAME"
 #
+#   ## Scrape Services available in Consul Catalog
+#   # [inputs.prometheus.consul]
+#   #   enabled = true
+#   #   agent = "http://localhost:8500"
+#   #   query_interval = "5m"
+#
+#   #   [[inputs.prometheus.consul.query]]
+#   #     name = "a service name"
+#   #     tag = "a service tag"
+#   #     url = 'http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}'
+#   #     [inputs.prometheus.consul.query.tags]
+#   #       host = "{{.Node}}"
+#
 #   ## Use bearer token for authorization. ('bearer_token' takes priority)
 #   # bearer_token = "/path/to/bearer/token"
 #   ## OR
@@ -7989,6 +8423,10 @@
 #   ## 1024. See README.md for details
 #   ##
 #   # service_address = "udp://:162"
+#   ##
+#   ## Path to mib files
+#   # path = ["/usr/share/snmp/mibs"]
+#   ##
 #   ## Timeout running snmptranslate command
 #   # timeout = "5s"
 #   ## Snmp version, defaults to 2c
@@ -8082,6 +8520,7 @@
 #   dsn = "username:password@mysqlserver:3307/dbname?param=value"
 #
 #   ## Timeout for any operation
+#   ## Note that the timeout for queries is per query, not per gather.
 #   # timeout = "5s"
 #
 #   ## Connection time limits
@@ -8282,9 +8721,9 @@
 #   #max_ttl = "1000h"
 
-# # Suricata stats plugin
+# # Suricata stats and alerts plugin
 # [[inputs.suricata]]
-#   ## Data sink for Suricata stats log
+#   ## Data sink for Suricata stats and alerts logs
 #   # This is expected to be a filename of a
 #   # unix socket to be created for listening.
 #   source = "/var/run/suricata-stats.sock"
@@ -8292,6 +8731,9 @@
 #   # Delimiter for flattening field keys, e.g. subitem "alert" of "detect"
 #   # becomes "detect_alert" when delimiter is "_".
 #   delimiter = "_"
+#
+#   ## Detect alert logs
+#   # alerts = false
 
 # # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587
@@ -8336,6 +8778,11 @@
 #   ## By default best effort parsing is off.
 #   # best_effort = false
 #
+#   ## The RFC standard to use for message parsing
+#   ## By default RFC5424 is used. RFC3164 only supports UDP transport (no streaming support)
+#   ## Must be one of "RFC5424" or "RFC3164".
+#   # syslog_standard = "RFC5424"
+#
 #   ## Character to prepend to SD-PARAMs (default = "_").
 #   ## A syslog message can contain multiple parameters and multiple identifiers within structured data section.
 #   ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
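
Tying the new `syslog_standard` option back to [#8454](https://github.com/influxdata/telegraf/pull/8454): RFC3164 has no stream framing, so it is only supported over UDP. A minimal usage sketch, with the listening port chosen here as a placeholder:

```toml
[[inputs.syslog]]
  ## UDP listener; RFC3164 messages cannot be received over TCP/TLS transports.
  ## 514 is the traditional syslog port and is a placeholder here.
  server = "udp://:514"

  ## Parse messages as legacy BSD syslog (RFC3164) instead of the RFC5424 default.
  syslog_standard = "RFC3164"

  ## Optional: relax parsing for slightly non-conforming messages.
  # best_effort = true
```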