Update telegraf.conf
parent b8717730b7
commit c8a412e995
@@ -294,6 +294,9 @@
# ## Instrumentation key of the Application Insights resource.
# instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx"
#
# ## Regions that require endpoint modification https://docs.microsoft.com/en-us/azure/azure-monitor/app/custom-endpoints
# # endpoint_url = "https://dc.services.visualstudio.com/v2/track"
#
# ## Timeout for closing (default: 5s).
# # timeout = "5s"
#
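For reference, a minimal sketch of how the new endpoint_url option is used in a region that needs a custom ingestion endpoint; the enclosing [[outputs.application_insights]] table and the key value are assumptions for illustration, not part of this hunk:

[[outputs.application_insights]]
  ## placeholder key; use the key of your own Application Insights resource
  instrumentation_key = "00000000-0000-0000-0000-000000000000"
  ## only needed for regions that require a modified endpoint (see the link above)
  endpoint_url = "https://dc.services.visualstudio.com/v2/track"
  timeout = "5s"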
@@ -462,6 +465,39 @@
# # no configuration


# # Send telegraf metrics to a Dynatrace environment
# [[outputs.dynatrace]]
# ## For usage with the Dynatrace OneAgent you can omit any configuration,
# ## the only requirement is that the OneAgent is running on the same host.
# ## Only setup environment url and token if you want to monitor a Host without the OneAgent present.
# ##
# ## Your Dynatrace environment URL.
# ## For Dynatrace OneAgent you can leave this empty or set it to "http://127.0.0.1:14499/metrics/ingest" (default)
# ## For Dynatrace SaaS environments the URL scheme is "https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest"
# ## For Dynatrace Managed environments the URL scheme is "https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest"
# url = ""
#
# ## Your Dynatrace API token.
# ## Create an API token within your Dynatrace environment, by navigating to Settings > Integration > Dynatrace API
# ## The API token needs data ingest scope permission. When using OneAgent, no API token is required.
# api_token = ""
#
# ## Optional prefix for metric names (e.g.: "telegraf.")
# prefix = "telegraf."
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
#
# ## Optional flag for ignoring tls certificate check
# # insecure_skip_verify = false
#
#
# ## Connection timeout, defaults to "5s" if not set.
# timeout = "5s"
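A minimal sketch of the two setups the comments above describe; the environment id and token below are placeholders, not values from this commit:

## OneAgent running on the same host: no url or api_token needed
[[outputs.dynatrace]]
  prefix = "telegraf."

## SaaS environment without a local OneAgent (placeholder environment id and token)
[[outputs.dynatrace]]
  url = "https://abc12345.live.dynatrace.com/api/v2/metrics/ingest"
  api_token = "dt0c01.XXXXXXXX"
  prefix = "telegraf."
  timeout = "5s"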


# # Configuration for Elasticsearch to send metrics to.
# [[outputs.elasticsearch]]
# ## The full HTTP endpoint URL for your Elasticsearch instance

@@ -516,6 +552,7 @@
# ## it will enable data resend and update metric points avoiding duplicated metrics with different id's
# force_document_id = false
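A short sketch of where the new force_document_id flag sits in an Elasticsearch output block; the endpoint and index settings are omitted here since this hunk does not show them:

[[outputs.elasticsearch]]
  ## endpoint and index options as in the rest of this sample (not shown in this hunk)
  ## with deterministic ids, re-sent points update the same document instead of duplicating it
  force_document_id = true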


# # Send metrics to command as input over stdin
# [[outputs.exec]]
# ## Command to ingest metrics via stdin.

@@ -2229,6 +2266,27 @@
# # tls_key = "/etc/telegraf/key.pem"
# ## If false, skip chain & host verification
# # insecure_skip_verify = true
#
# # Feature Options
# # Add namespace variable to limit the namespaces executed on
# # Leave blank to do all
# # disable_query_namespaces = true # default false
# # namespaces = ["namespace1", "namespace2"]
#
# # Enable set level telemetry
# # query_sets = true # default: false
# # Add namespace set combinations to limit sets executed on
# # Leave blank to do all sets
# # sets = ["namespace1/set1", "namespace1/set2", "namespace3"]
#
# # Histograms
# # enable_ttl_histogram = true # default: false
# # enable_object_size_linear_histogram = true # default: false
#
# # by default, aerospike produces a 100 bucket histogram
# # this is not great for most graphing tools, this will allow
# # the ability to squash this to a smaller number of buckets
# # num_histogram_buckets = 100 # default: 10
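A minimal sketch that enables the new set-level and histogram options for a single namespace; the servers line is an assumed placeholder, not part of this hunk:

[[inputs.aerospike]]
  servers = ["localhost:3000"]   # assumed address, not shown in this hunk
  namespaces = ["namespace1"]
  query_sets = true
  sets = ["namespace1/set1"]
  enable_ttl_histogram = true
  num_histogram_buckets = 10     # squash the 100 buckets aerospike produces for easier graphing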


# # Read Apache status information (mod_status)

@@ -2430,9 +2488,9 @@
# ## want to monitor if you have a large number of cgroups, to avoid
# ## any cardinality issues.
# # paths = [
# # "/cgroup/memory",
# # "/cgroup/memory/child1",
# # "/cgroup/memory/child2/*",
# # "/sys/fs/cgroup/memory",
# # "/sys/fs/cgroup/memory/child1",
# # "/sys/fs/cgroup/memory/child2/*",
# # ]
# ## cgroup stat fields, as file names, globs are supported.
# ## these file names are appended to each path from above.

@@ -2488,6 +2546,13 @@
# ## gaps or overlap in pulled data
# interval = "5m"
#
# ## Recommended if "delay" and "period" are both within 3 hours of request time. Invalid values will be ignored.
# ## Recently Active feature will only poll for CloudWatch ListMetrics values that occurred within the last 3 Hours.
# ## If enabled, it will reduce total API usage of the CloudWatch ListMetrics API and require less memory to retain.
# ## Do not enable if "period" or "delay" is longer than 3 hours, as it will not return data more than 3 hours old.
# ## See https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html
# #recently_active = "PT3H"
#
# ## Configure the TTL for the internal cache of metrics.
# # cache_ttl = "1h"
#
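A sketch of the new recently_active option alongside the surrounding interval and cache_ttl settings; the region and namespace lines are assumed context, not shown in this hunk:

[[inputs.cloudwatch]]
  region = "us-east-1"        # assumed, not shown in this hunk
  namespace = "AWS/ELB"       # assumed, not shown in this hunk
  interval = "5m"
  ## only appropriate while "delay" and "period" stay within the last 3 hours
  recently_active = "PT3H"
  cache_ttl = "1h"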

@@ -3182,7 +3247,7 @@
# ## If the response body size exceeds this limit a "body_read_error" will be raised
# # response_body_max_size = "32MiB"
#
# ## Optional substring or regex match in body of the response
# ## Optional substring or regex match in body of the response (case sensitive)
# # response_string_match = "\"service_status\": \"up\""
# # response_string_match = "ok"
# # response_string_match = "\".*_status\".?:.?\"up\""

@@ -3916,17 +3981,18 @@
# ## |---BA, DCBA - Little Endian
# ## |---BADC - Mid-Big Endian
# ## |---CDAB - Mid-Little Endian
# ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32, FLOAT32-IEEE (the IEEE 754 binary representation)
# ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32-IEEE (the IEEE 754 binary representation)
# ## FLOAT32, FIXED, UFIXED (fixed-point representation on input)
# ## scale - the final numeric variable representation
# ## address - variable address
#
# holding_registers = [
# { name = "power_factor", byte_order = "AB", data_type = "FLOAT32", scale=0.01, address = [8]},
# { name = "voltage", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [0]},
# { name = "energy", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [5,6]},
# { name = "current", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [1,2]},
# { name = "frequency", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [7]},
# { name = "power", byte_order = "ABCD", data_type = "FLOAT32", scale=0.1, address = [3,4]},
# { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]},
# { name = "voltage", byte_order = "AB", data_type = "FIXED", scale=0.1, address = [0]},
# { name = "energy", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [5,6]},
# { name = "current", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [1,2]},
# { name = "frequency", byte_order = "AB", data_type = "UFIXED", scale=0.1, address = [7]},
# { name = "power", byte_order = "ABCD", data_type = "UFIXED", scale=0.1, address = [3,4]},
# ]
# input_registers = [
# { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]},

@@ -4283,6 +4349,25 @@
# # insecure_skip_verify = false


# # A plugin to collect stats from the NSD authoritative DNS name server
# [[inputs.nsd]]
# ## Address of server to connect to, optionally ':port'. Defaults to the
# ## address in the nsd config file.
# server = "127.0.0.1:8953"
#
# ## If running as a restricted user you can prepend sudo for additional access:
# # use_sudo = false
#
# ## The default location of the nsd-control binary can be overridden with:
# # binary = "/usr/sbin/nsd-control"
#
# ## The default location of the nsd config file can be overridden with:
# # config_file = "/etc/nsd/nsd.conf"
#
# ## The default timeout of 1s can be overridden with:
# # timeout = "1s"
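A minimal sketch of the new NSD input for a restricted user, using the defaults quoted above:

[[inputs.nsd]]
  server = "127.0.0.1:8953"
  use_sudo = true
  binary = "/usr/sbin/nsd-control"
  config_file = "/etc/nsd/nsd.conf"
  timeout = "1s"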


# # Read NSQ topic and channel statistics.
# [[inputs.nsq]]
# ## An array of NSQD HTTP API endpoints

@@ -4323,6 +4408,61 @@
# # timeout = "5s"


# # Retrieve data from OPCUA devices
# [[inputs.opcua]]
# ## Device name
# # name = "localhost"
# #
# ## OPC UA Endpoint URL
# # endpoint = "opc.tcp://localhost:4840"
# #
# ## Maximum time allowed to establish a connection to the endpoint.
# # connect_timeout = "10s"
# #
# ## Maximum time allowed for a request over the established connection.
# # request_timeout = "5s"
# #
# ## Security policy, one of "None", "Basic128Rsa15", "Basic256",
# ## "Basic256Sha256", or "auto"
# # security_policy = "auto"
# #
# ## Security mode, one of "None", "Sign", "SignAndEncrypt", or "auto"
# # security_mode = "auto"
# #
# ## Path to cert.pem. Required when security mode or policy isn't "None".
# ## If cert path is not supplied, self-signed cert and key will be generated.
# # certificate = "/etc/telegraf/cert.pem"
# #
# ## Path to private key.pem. Required when security mode or policy isn't "None".
# ## If key path is not supplied, self-signed cert and key will be generated.
# # private_key = "/etc/telegraf/key.pem"
# #
# ## Authentication Method, one of "Certificate", "UserName", or "Anonymous". To
# ## authenticate using a specific ID, select 'Certificate' or 'UserName'
# # auth_method = "Anonymous"
# #
# ## Username. Required for auth_method = "UserName"
# # username = ""
# #
# ## Password. Required for auth_method = "UserName"
# # password = ""
# #
# ## Node ID configuration
# ## name - the variable name
# ## namespace - integer value 0 thru 3
# ## identifier_type - s=string, i=numeric, g=guid, b=opaque
# ## identifier - tag as shown in opcua browser
# ## data_type - boolean, byte, short, int, uint, uint16, int16,
# ## uint32, int32, float, double, string, datetime, number
# ## Example:
# ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", data_type="string", description="http://open62541.org"}
# nodes = [
# {name="", namespace="", identifier_type="", identifier="", data_type="", description=""},
# {name="", namespace="", identifier_type="", identifier="", data_type="", description=""},
# ]
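A filled-in sketch of the nodes list using the ProductUri example from the comments above, with the documented defaults for endpoint and security:

[[inputs.opcua]]
  name = "localhost"
  endpoint = "opc.tcp://localhost:4840"
  security_policy = "auto"
  security_mode = "auto"
  auth_method = "Anonymous"
  nodes = [
    {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", data_type="string", description="http://open62541.org"},
  ]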


# # OpenLDAP cn=Monitor plugin
# [[inputs.openldap]]
# host = "localhost"

@@ -4575,6 +4715,23 @@
# # pid_finder = "pgrep"


# # Provides metrics from Proxmox nodes (Proxmox Virtual Environment > 6.2).
# [[inputs.proxmox]]
# ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /.
# base_url = "https://localhost:8006/api2/json"
# api_token = "USER@REALM!TOKENID=UUID"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# insecure_skip_verify = false
#
# # HTTP response timeout (default: 5s)
# response_timeout = "5s"
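A minimal sketch of the new Proxmox input; the api_token value is the placeholder format from the sample, and insecure_skip_verify is only flipped for a self-signed test setup:

[[inputs.proxmox]]
  base_url = "https://localhost:8006/api2/json"
  api_token = "USER@REALM!TOKENID=UUID"   # placeholder; the token needs the PVEAuditor role on /
  insecure_skip_verify = true             # only for self-signed certificates
  response_timeout = "5s"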


# # Reads last_run_summary.yaml file and converts to measurements
# [[inputs.puppetagent]]
# ## Location of puppet last run summary file

@@ -4677,6 +4834,12 @@
# ## If no port is specified, 6379 is used
# servers = ["tcp://localhost:6379"]
#
# ## Optional. Specify redis commands to retrieve values
# # [[inputs.redis.commands]]
# # command = ["get", "sample-key"]
# # field = "sample-key-value"
# # type = "string"
#
# ## specify server password
# # password = "s#cr@t%"
#
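A sketch of the new inputs.redis.commands sub-table, which stores the result of "get sample-key" in a field named sample-key-value:

[[inputs.redis]]
  servers = ["tcp://localhost:6379"]

  [[inputs.redis.commands]]
    command = ["get", "sample-key"]
    field = "sample-key-value"
    type = "string"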

@@ -4745,16 +4908,24 @@
# # Read metrics from storage devices supporting S.M.A.R.T.
# [[inputs.smart]]
# ## Optionally specify the path to the smartctl executable
# # path = "/usr/bin/smartctl"
# # path_smartctl = "/usr/bin/smartctl"
#
# ## On most platforms smartctl requires root access.
# ## Setting 'use_sudo' to true will make use of sudo to run smartctl.
# ## Sudo must be configured to to allow the telegraf user to run smartctl
# ## Optionally specify the path to the nvme-cli executable
# # path_nvme = "/usr/bin/nvme"
#
# ## Optionally specify if vendor specific attributes should be propagated for NVMe disk case
# ## ["auto-on"] - automatically find and enable additional vendor specific disk info
# ## ["vendor1", "vendor2", ...] - e.g. "Intel" enable additional Intel specific disk info
# # enable_extensions = ["auto-on"]
#
# ## On most platforms the used cli utilities require root access.
# ## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli.
# ## Sudo must be configured to allow the telegraf user to run smartctl or nvme-cli
# ## without a password.
# # use_sudo = false
#
# ## Skip checking disks in this power mode. Defaults to
# ## "standby" to not wake up disks that have stoped rotating.
# ## "standby" to not wake up disks that have stopped rotating.
# ## See --nocheck in the man pages for smartctl.
# ## smartctl version 5.41 and 5.42 have faulty detection of
# ## power mode and might require changing this value to

@@ -4765,16 +4936,15 @@
# ## information from each drive into the 'smart_attribute' measurement.
# # attributes = false
#
# ## Optionally specify devices to exclude from reporting.
# ## Optionally specify devices to exclude from reporting if disk auto-discovery is performed.
# # excludes = [ "/dev/pass6" ]
#
# ## Optionally specify devices and device type, if unset
# ## a scan (smartctl --scan) for S.M.A.R.T. devices will
# ## done and all found will be included except for the
# ## excluded in excludes.
# # devices = [ "/dev/ada0 -d atacam" ]
# ## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done
# ## and all found will be included except for the excluded in excludes.
# # devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"]
#
# ## Timeout for the smartctl command to complete.
# ## Timeout for the cli command to complete.
# # timeout = "30s"
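A sketch that combines the new NVMe-related options with the existing smartctl ones, using the example values from the comments above:

[[inputs.smart]]
  path_smartctl = "/usr/bin/smartctl"
  path_nvme = "/usr/bin/nvme"
  enable_extensions = ["auto-on"]
  use_sudo = true
  attributes = true
  devices = [ "/dev/ada0 -d atacam", "/dev/nvme0" ]
  timeout = "30s"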


@@ -4952,6 +5122,15 @@
# # "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
# # ]
#
# ## This enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2
# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type.
# ## Possible values for database_type are
# ## "AzureSQLDB"
# ## "SQLServer"
# ## "AzureSQLManagedInstance"
# # database_type = "AzureSQLDB"
#
#
# ## Optional parameter, setting this to 2 will use a new version
# ## of the collection queries that break compatibility with the original
# ## dashboards.

@@ -4972,6 +5151,7 @@
# ## - SqlRequests
# ## - VolumeSpace
# ## - Cpu
#
# ## Version 1:
# ## - PerformanceCounters
# ## - WaitStatsCategorized

@@ -4984,6 +5164,11 @@
# ## - VolumeSpace
# ## - PerformanceMetrics
#
#
# ## Queries enabled by default for specific Database Type
# ## database_type = AzureSQLDB
# ## AzureDBWaitStats, AzureDBResourceStats, AzureDBResourceGovernance, sqlAzureDBDatabaseIO
#
# ## A list of queries to include. If not specified, all the above listed queries are used.
# # include_query = []
#
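A sketch of the new database_type switch; per the comments above the plugin section is repeated once per database type, and the [[inputs.sqlserver]] header and servers key are assumed context from elsewhere in this sample:

[[inputs.sqlserver]]
  servers = ["Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;"]
  database_type = "AzureSQLDB"
  ## leaving include_query empty keeps every query enabled for this database type
  include_query = []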

@@ -5476,7 +5661,7 @@
# # Read metrics from one or many ClickHouse servers
# [[inputs.clickhouse]]
# ## Username for authorization on ClickHouse server
# ## example: user = "default""
# ## example: username = "default""
# username = "default"
#
# ## Password for authorization on ClickHouse server

@@ -5993,6 +6178,34 @@
# # basic_password = "barfoo"


# # Accept metrics over InfluxDB 2.x HTTP API
# [[inputs.influxdb_v2_listener]]
# ## Address and port to host InfluxDB listener on
# service_address = ":9999"
#
# ## Maximum allowed HTTP request body size in bytes.
# ## 0 means to use the default of 32MiB.
# # max_body_size = "32MiB"
#
# ## Optional tag to determine the bucket.
# ## If the write has a bucket in the query string then it will be kept in this tag name.
# ## This tag can be used in downstream outputs.
# ## The default value of nothing means it will be off and the database will not be recorded.
# # bucket_tag = ""
#
# ## Set one or more allowed client CA certificate file names to
# ## enable mutually authenticated TLS connections
# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
# ## Add service certificate and key
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
#
# ## Optional token to accept for HTTP authentication.
# ## You probably want to make sure you have TLS configured above for this.
# # token = "some-long-shared-secret-token"
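A minimal sketch of the new InfluxDB 2.x listener with token authentication over TLS; the bucket_tag value is an assumed example, the other values echo the comments above:

[[inputs.influxdb_v2_listener]]
  service_address = ":9999"
  bucket_tag = "bucket"                 # assumed tag name; the empty default disables it
  tls_cert = "/etc/telegraf/cert.pem"
  tls_key = "/etc/telegraf/key.pem"
  token = "some-long-shared-secret-token"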


# # Read JTI OpenConfig Telemetry from listed sensors
# [[inputs.jti_openconfig_telemetry]]
# ## List of device addresses to collect telemetry from

@@ -6848,6 +7061,27 @@
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
#
# ## multiline parser/codec
# ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html
# #[inputs.tail.multiline]
# ## The pattern should be a regexp which matches what you believe to be an
# ## indicator that the field is part of an event consisting of multiple lines of log data.
# #pattern = "^\s"
#
# ## This field must be either "previous" or "next".
# ## If a line matches the pattern, "previous" indicates that it belongs to the previous line,
# ## whereas "next" indicates that the line belongs to the next one.
# #match_which_line = "previous"
#
# ## The invert_match field can be true or false (defaults to false).
# ## If true, a message not matching the pattern will constitute a match of the multiline
# ## filter and the what will be applied. (vice-versa is also true)
# #invert_match = false
#
# ## After the specified timeout, this plugin sends a multiline event even if no new pattern
# ## is found to start a new event. The default timeout is 5s.
# #timeout = 5s
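A sketch of the multiline sub-table that joins indented continuation lines (for example stack traces) onto the previous line; the files path is an assumed placeholder, and the pattern uses a TOML literal string so the backslash survives:

[[inputs.tail]]
  files = ["/var/log/app.log"]   # assumed path, not shown in this hunk
  data_format = "influx"

  [inputs.tail.multiline]
    pattern = '^\s'
    match_which_line = "previous"
    invert_match = false
    timeout = "5s"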


# # Generic TCP listener