feat: update etc/telegraf.conf and etc/telegraf_windows.conf (#10201)
Co-authored-by: Tiger Bot <>
parent 9787f4ac45
commit 75c48d48d7
@@ -317,7 +317,7 @@
# # Sends metrics to Azure Data Explorer
# [[outputs.azure_data_explorer]]
-# ## Azure Data Exlorer cluster endpoint
+# ## Azure Data Explorer cluster endpoint
# ## ex: endpoint_url = "https://clustername.australiasoutheast.kusto.windows.net"
# endpoint_url = ""
#

@@ -337,6 +337,9 @@
# ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable").
# # table_name = ""
#
+# ## Creates tables and relevant mapping if set to true (default).
+# ## Skips table and mapping creation if set to false; this is useful for running Telegraf with the lowest possible permissions, i.e. the table ingestor role.
+# # create_tables = true
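
For reference, a minimal enabled configuration might look like the sketch below. The endpoint and database names are placeholders, and the database option is assumed from the plugin's documentation rather than shown in this diff:

  [[outputs.azure_data_explorer]]
    ## placeholder cluster endpoint and database name
    endpoint_url = "https://mycluster.australiasoutheast.kusto.windows.net"
    database = "telegraf_db"
    metrics_grouping_type = "TablePerMetric"
    create_tables = true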


# # Send aggregate metrics to Azure Monitor
@@ -370,6 +373,24 @@
# # endpoint_url = "https://monitoring.core.usgovcloudapi.net"


+# # Configuration for Google Cloud BigQuery to send entries
+# [[outputs.bigquery]]
+# ## Credentials File
+# credentials_file = "/path/to/service/account/key.json"
+#
+# ## Google Cloud Platform Project
+# project = "my-gcp-project"
+#
+# ## The namespace for the metric descriptor
+# dataset = "telegraf"
+#
+# ## Timeout for BigQuery operations.
+# # timeout = "5s"
+#
+# ## Character used to replace hyphens in metric names
+# # replace_hyphen_to = "_"
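
A minimal enabled sketch using only the options shown above (the key path and project name are placeholders):

  [[outputs.bigquery]]
    credentials_file = "/path/to/service/account/key.json"
    project = "my-gcp-project"
    dataset = "telegraf"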


# # Publish Telegraf metrics to a Google Cloud PubSub topic
# [[outputs.cloud_pubsub]]
# ## Required. Name of Google Cloud Platform (GCP) Project that owns
@@ -657,6 +678,22 @@
# force_document_id = false


+# # Configuration for Event Hubs output plugin
+# [[outputs.event_hubs]]
+# ## The full connection string to the Event Hub (required)
+# ## The shared access key must have "Send" permissions on the target Event Hub.
+# connection_string = "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName"
+#
+# ## Client timeout (defaults to 30s)
+# # timeout = "30s"
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "json"
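
A minimal enabled sketch built from the options above (the connection string is the placeholder from the sample itself):

  [[outputs.event_hubs]]
    connection_string = "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName"
    timeout = "30s"
    data_format = "json"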


# # Send metrics to command as input over stdin
# [[outputs.exec]]
# ## Command to ingest metrics via stdin.
@@ -772,6 +809,40 @@
# ## "telegraf" will be used.
# ## example: short_message_field = "message"
# # short_message_field = ""
+#
+# ## According to the GELF payload specification, additional field names must be prefixed
+# ## with an underscore. Previous versions did not prefix the custom field 'name' with an underscore.
+# ## Set to true for backward compatibility.
+# # name_field_no_prefix = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
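
For context, a minimal enabled sketch; the servers list is assumed from the graylog output plugin's standard options rather than shown in this diff, and the address is a placeholder:

  [[outputs.graylog]]
    servers = ["tcp://graylog.example.com:12201"]
    short_message_field = "message"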


+# # Send telegraf metrics to GroundWork Monitor
+# [[outputs.groundwork]]
+# ## URL of your groundwork instance.
+# url = "https://groundwork.example.com"
+#
+# ## Agent uuid for GroundWork API Server.
+# agent_id = ""
+#
+# ## Username and password to access GroundWork API.
+# username = ""
+# password = ""
+#
+# ## Default display name for the host with services (metrics).
+# # default_host = "telegraf"
+#
+# ## Default service state.
+# # default_service_state = "SERVICE_OK"
+#
+# ## The name of the tag that contains the hostname.
+# # resource_tag = "host"
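
A minimal enabled sketch using only the options shown above (URL and credentials are placeholders; agent_id is left empty as in the sample):

  [[outputs.groundwork]]
    url = "https://groundwork.example.com"
    agent_id = ""
    username = "telegraf"
    password = "secret"
    default_host = "telegraf"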


# # Configurable HTTP health check resource based on metrics
@@ -857,6 +928,11 @@
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# # data_format = "influx"
+#
+# ## Use batch serialization format (default) instead of line based format.
+# ## Batch format is more efficient and should be used unless line based
+# ## format is really needed.
+# # use_batch_format = true
#
# ## HTTP Content-Encoding for write request body, can be set to "gzip" to
# ## compress body or "identity" to apply no encoding.
# # content_encoding = "identity"
@@ -1077,6 +1153,9 @@
# ## SASL protocol version. When connecting to Azure EventHub set to 0.
# # sasl_version = 1
+#
+# # Disable Kafka metadata full fetch
+# # metadata_full = false
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -1217,6 +1296,42 @@
# # tls_key = "/etc/telegraf/key.pem"


+# # Sends metrics to MongoDB
+# [[outputs.mongodb]]
+# # connection string examples for mongodb
+# dsn = "mongodb://localhost:27017"
+# # dsn = "mongodb://mongod1:27017,mongod2:27017,mongod3:27017/admin&replicaSet=myReplSet&w=1"
+#
+# # overrides serverSelectionTimeoutMS in dsn if set
+# # timeout = "30s"
+#
+# # default authentication, optional
+# # authentication = "NONE"
+#
+# # for SCRAM-SHA-256 authentication
+# # authentication = "SCRAM"
+# # username = "root"
+# # password = "***"
+#
+# # for x509 certificate authentication
+# # authentication = "X509"
+# # tls_ca = "ca.pem"
+# # tls_key = "client.pem"
+# # # tls_key_pwd = "changeme" # required for encrypted tls_key
+# # insecure_skip_verify = false
+#
+# # database to store measurements and time series collections
+# # database = "telegraf"
+#
+# # granularity can be seconds, minutes, or hours.
+# # configuring this value will be based on your input collection frequency.
+# # see https://docs.mongodb.com/manual/core/timeseries-collections/#create-a-time-series-collection
+# # granularity = "seconds"
+#
+# # optionally set a TTL to automatically expire documents from the measurement collections.
+# # ttl = "360h"
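
A minimal enabled sketch for a local standalone server, using only the options above (connection details are placeholders):

  [[outputs.mongodb]]
    dsn = "mongodb://localhost:27017"
    database = "telegraf"
    granularity = "seconds"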


# # Configuration for MQTT server to send metrics to
# [[outputs.mqtt]]
# servers = ["localhost:1883"] # required.
@@ -2413,7 +2528,7 @@
# [[processors.printer]]


-# # Transforms tag and field values with regex pattern
+# # Transforms tag and field values as well as measurement, tag and field names with regex pattern
# [[processors.regex]]
# ## Tag and field conversions defined in separate sub-tables
# # [[processors.regex.tags]]
@@ -2443,6 +2558,38 @@
# # pattern = ".*category=(\\w+).*"
# # replacement = "${1}"
# # result_key = "search_category"
+#
+# ## Rename metric fields
+# # [[processors.regex.field_rename]]
+# #   ## Regular expression to match on a field name
+# #   pattern = "^search_(\\w+)d$"
+# #   ## Matches of the pattern will be replaced with this string. Use ${1}
+# #   ## notation to use the text of the first submatch.
+# #   replacement = "${1}"
+# #   ## If the new field name already exists, you can either "overwrite" the
+# #   ## existing one with the value of the renamed field OR you can "keep"
+# #   ## both the existing and source field.
+# #   # result_key = "keep"
+#
+# ## Rename metric tags
+# # [[processors.regex.tag_rename]]
+# #   ## Regular expression to match on a tag name
+# #   pattern = "^search_(\\w+)d$"
+# #   ## Matches of the pattern will be replaced with this string. Use ${1}
+# #   ## notation to use the text of the first submatch.
+# #   replacement = "${1}"
+# #   ## If the new tag name already exists, you can either "overwrite" the
+# #   ## existing one with the value of the renamed tag OR you can "keep"
+# #   ## both the existing and source tag.
+# #   # result_key = "keep"
+#
+# ## Rename metrics
+# # [[processors.regex.metric_rename]]
+# #   ## Regular expression to match on a metric name
+# #   pattern = "^search_(\\w+)d$"
+# #   ## Matches of the pattern will be replaced with this string. Use ${1}
+# #   ## notation to use the text of the first submatch.
+# #   replacement = "${1}"
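
As a worked example of the rename rules above: with pattern "^search_(\\w+)d$" and replacement "${1}", a field named search_durationd matches (the capture group takes "duration", leaving the trailing "d"), so the metric

  search,host=a search_durationd=1.23

is rewritten to

  search,host=a duration=1.23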


# # Rename measurements, tags, and fields that pass through this filter.
@@ -2825,6 +2972,37 @@
# # compression = 100.0


+# # Aggregate metrics using a Starlark script
+# [[aggregators.starlark]]
+# ## The Starlark source can be set as a string in this configuration file, or
+# ## by referencing a file containing the script. Only one source or script
+# ## should be set at once.
+# ##
+# ## Source of the Starlark script.
+# source = '''
+# state = {}
+#
+# def add(metric):
+#   state["last"] = metric
+#
+# def push():
+#   return state.get("last")
+#
+# def reset():
+#   state.clear()
+# '''
+#
+# ## File containing a Starlark script.
+# # script = "/usr/local/bin/myscript.star"
+#
+# ## The constants of the Starlark script.
+# # [aggregators.starlark.constants]
+# #   max_size = 10
+# #   threshold = 0.75
+# #   default_name = "Julia"
+# #   debug_mode = true
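
A sketch of a more concrete aggregator: it keeps the metric with the largest "value" field seen during each period. It assumes incoming metrics carry a numeric "value" field, and uses the deepcopy() builtin that Telegraf's Starlark plugins provide for retaining metrics across calls:

  [[aggregators.starlark]]
    period = "30s"
    source = '''
state = {}

def add(metric):
    # remember the metric with the highest "value" field
    current = state.get("max")
    if current == None or metric.fields["value"] > current.fields["value"]:
        state["max"] = deepcopy(metric)

def push():
    return state.get("max")

def reset():
    state.clear()
'''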


# # Count the occurrence of values in fields.
# [[aggregators.valuecounter]]
# ## General Aggregator Arguments:
@@ -3765,6 +3943,15 @@
#
# ## List of interfaces to ignore when pulling metrics.
# # interface_exclude = ["eth1"]
+#
+# ## Some drivers declare statistics with extra whitespace, different spacing,
+# ## and mixed case. This list, when enabled, can be used to clean the keys.
+# ## Here are the current possible normalizations:
+# ##  * snakecase: converts fooBarBaz to foo_bar_baz
+# ##  * trim: removes leading and trailing whitespace
+# ##  * lower: changes all capitalized letters to lowercase
+# ##  * underscore: replaces spaces with underscores
+# # normalize_keys = ["snakecase", "trim", "lower", "underscore"]
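
For instance, with all four normalizations enabled, a driver statistic reported as "rxPackets " would be stored under the key "rx_packets": snakecase splits the camel case, trim drops the trailing space, and lower/underscore leave it unchanged here.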


# # Read metrics from one or more commands that can output to stdout
@@ -4832,6 +5019,11 @@
# # data_bits = 8
# # parity = "N"
# # stop_bits = 1
# # transmission_mode = "RTU"
+#
+# ## Trace the connection to the modbus device as debug messages
+# ## Note: You have to enable telegraf's debug mode to see those messages!
+# # debug_connection = false
#
# ## For Modbus over TCP you can choose between "TCP", "RTUoverTCP" and "ASCIIoverTCP"
# ## default behaviour is "TCP" if the controller is TCP
@@ -4885,6 +5077,15 @@
#   { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]},
#   { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]},
# ]
+#
+# ## Enable workarounds required by some devices to work correctly
+# # [inputs.modbus.workarounds]
+# ## Pause between read requests sent to the device. This might be necessary for (slow) serial devices.
+# # pause_between_requests = "0ms"
+# ## Close the connection after every gather cycle. Usually the plugin closes the connection after a certain
+# ## idle-timeout; however, if you query a device with limited simultaneous connectivity (e.g. serial devices)
+# ## from multiple instances, you might want to stay connected only during gather and disconnect afterwards.
+# # close_connection_after_gather = false
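
A sketch enabling both workarounds for a slow serial device (the values are illustrative):

  [inputs.modbus.workarounds]
    pause_between_requests = "50ms"
    close_connection_after_gather = true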


# # Read metrics from one or many MongoDB servers
@@ -4894,7 +5095,7 @@
# ## For example:
# ##   mongodb://user:auth_key@10.10.3.30:27017,
# ##   mongodb://10.10.3.33:18832,
-# servers = ["mongodb://127.0.0.1:27017"]
+# servers = ["mongodb://127.0.0.1:27017?connect=direct"]
#
# ## When true, collect cluster status
# ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which
@@ -5333,7 +5534,9 @@

# # Pulls statistics from nvidia GPUs attached to the host
# [[inputs.nvidia_smi]]
-# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath
+# ## Optional: path to nvidia-smi binary; defaults to "/usr/bin/nvidia-smi"
+# ## We will first try to locate the nvidia-smi binary at the explicitly specified (or default) value;
+# ## if it is not found, we will try to locate it on PATH (exec.LookPath), and if it is still not found, an error will be returned
# # bin_path = "/usr/bin/nvidia-smi"
#
# ## Optional: timeout for GPU polling
@@ -5472,6 +5675,59 @@
# timeout = 1000


+# # Collects performance metrics from OpenStack services
+# [[inputs.openstack]]
+# ## The recommended interval to poll is '30m'
+#
+# ## The identity endpoint to authenticate against and get the service catalog from.
+# authentication_endpoint = "https://my.openstack.cloud:5000"
+#
+# ## The domain to authenticate against when using a V3 identity endpoint.
+# # domain = "default"
+#
+# ## The project to authenticate as.
+# # project = "admin"
+#
+# ## User authentication credentials. Must have admin rights.
+# username = "admin"
+# password = "password"
+#
+# ## Available services are:
+# ## "agents", "aggregates", "flavors", "hypervisors", "networks", "nova_services",
+# ## "ports", "projects", "servers", "services", "stacks", "storage_pools", "subnets", "volumes"
+# # enabled_services = ["services", "projects", "hypervisors", "flavors", "networks", "volumes"]
+#
+# ## Collect Server Diagnostics
+# # server_diagnotics = false
+#
+# ## Output secrets (such as adminPass (for server) and UserID (for volume)).
+# # output_secrets = false
+#
+# ## Amount of time allowed to complete the HTTP(s) request.
+# # timeout = "5s"
+#
+# ## HTTP Proxy support
+# # http_proxy_url = ""
+#
+# ## Optional TLS Config
+# # tls_ca = /path/to/cafile
+# # tls_cert = /path/to/certfile
+# # tls_key = /path/to/keyfile
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Options for tags received from OpenStack
+# # tag_prefix = "openstack_tag_"
+# # tag_value = "true"
+#
+# ## Timestamp format for timestamp data received from OpenStack.
+# ## If false, format is unix nanoseconds.
+# # human_readable_timestamps = false
+#
+# ## Measure OpenStack call duration
+# # measure_openstack_requests = false
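
A minimal enabled sketch using only the options shown above, plus the standard per-plugin interval option set to the recommended polling rate (endpoint and credentials are placeholders):

  [[inputs.openstack]]
    interval = "30m"
    authentication_endpoint = "https://my.openstack.cloud:5000"
    domain = "default"
    project = "admin"
    username = "admin"
    password = "password"
    enabled_services = ["services", "projects", "hypervisors"]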


# # Read current weather and forecasts data from openweathermap.org
# [[inputs.openweathermap]]
# ## OpenWeatherMap API key.
@@ -5967,6 +6223,9 @@
# ## SNMP version; can be 1, 2, or 3.
# # version = 2
+#
+# ## Path to mib files
+# # path = ["/usr/share/snmp/mibs"]
#
# ## Agent host tag; the tag used to reference the source host
# # agent_host_tag = "agent_host"
#
@@ -6403,6 +6662,11 @@
# # and in case of localized Windows, counter paths will be also localized. It also returns instance indexes in instance names.
# # If false, wildcards (not partial) in instance names will still be expanded, but instance indexes will not be returned in instance names.
# #UseWildcardsExpansion = false
+# # When running on a localized version of Windows and with UseWildcardsExpansion = true, Windows will
+# # localize object and counter names. When LocalizeWildcardsExpansion = false, use the names in object.Counters instead
+# # of the localized names. Only Instances can have wildcards in this case. ObjectName and Counters must not have wildcards when this
+# # setting is false.
+# #LocalizeWildcardsExpansion = true
# # Period after which counters will be reread from configuration and wildcards in counter paths expanded
# CountersRefreshInterval="1m"
#
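
For context, the counters governed by these wildcard settings are declared in object sub-sections; a typical definition (following the plugin's standard examples) looks like:

  [[inputs.win_perf_counters.object]]
    ObjectName = "Processor"
    Counters = ["% Processor Time"]
    Instances = ["*"]
    Measurement = "win_cpu"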
@@ -6521,6 +6785,7 @@
#     "TermService",
#     "Win*",
#   ]
+# #excluded_service_names = [] # optional, list of service names to exclude


# # Collect Wireguard server interface and peer statistics
@@ -6602,30 +6867,6 @@
###############################################################################


-# # Listener capable of handling KNX bus messages provided through a KNX-IP Interface.
-# [[inputs.KNXListener]]
-# ## Type of KNX-IP interface.
-# ## Can be either "tunnel" or "router".
-# # service_type = "tunnel"
-#
-# ## Address of the KNX-IP interface.
-# service_address = "localhost:3671"
-#
-# ## Measurement definition(s)
-# # [[inputs.knx_listener.measurement]]
-# #   ## Name of the measurement
-# #   name = "temperature"
-# #   ## Datapoint-Type (DPT) of the KNX messages
-# #   dpt = "9.001"
-# #   ## List of Group-Addresses (GAs) assigned to the measurement
-# #   addresses = ["5/5/1"]
-#
-# # [[inputs.knx_listener.measurement]]
-# #   name = "illumination"
-# #   dpt = "9.004"
-# #   addresses = ["5/5/3"]


# # Pull Metric Statistics from Aliyun CMS
# [[inputs.aliyuncms]]
# ## Aliyun Credentials
@@ -7570,6 +7811,9 @@
# ## SASL protocol version. When connecting to Azure EventHub set to 0.
# # sasl_version = 1
+#
+# # Disable Kafka metadata full fetch
+# # metadata_full = false
#
# ## Name of the consumer group.
# # consumer_group = "telegraf_metrics_consumers"
#
@@ -7602,6 +7846,15 @@
# ## waiting until the next flush_interval.
# # max_undelivered_messages = 1000
+#
+# ## Maximum amount of time the consumer should take to process messages. If
+# ## the debug log prints messages from sarama about 'abandoning subscription
+# ## to [topic] because consuming was taking too long', increase this value to
+# ## longer than the time taken by the output plugin(s).
+# ##
+# ## Note that the effective timeout could be between 'max_processing_time' and
+# ## '2 * max_processing_time'.
+# # max_processing_time = "100ms"
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -7794,23 +8047,25 @@
# # Read metrics from MQTT topic(s)
# [[inputs.mqtt_consumer]]
# ## Broker URLs for the MQTT server or cluster. To connect to multiple
-# ## clusters or standalone servers, use a seperate plugin instance.
+# ## clusters or standalone servers, use a separate plugin instance.
# ## example: servers = ["tcp://localhost:1883"]
# ##          servers = ["ssl://localhost:1883"]
# ##          servers = ["ws://localhost:1883"]
# servers = ["tcp://127.0.0.1:1883"]
#
# ## Topics that will be subscribed to.
# topics = [
#   "telegraf/host01/cpu",
#   "telegraf/+/mem",
#   "sensors/#",
# ]
#
+# ## Enable extracting tag values from MQTT topics
+# ## _ denotes an ignored entry in the topic path
+# # topic_tags = "_/format/client/_"
+# # topic_measurement = "measurement/_/_/_"
+# # topic_fields = "_/_/_/temperature"
# ## The message topic will be stored in a tag specified by this value. If set
# ## to the empty string no topic tag will be created.
# # topic_tag = "topic"
#
# ## QoS policy for messages
# ##   0 = at most once
# ##   1 = at least once
@@ -7819,10 +8074,8 @@
# ## When using a QoS of 1 or 2, you should enable persistent_session to allow
# ## resuming unacknowledged messages.
# # qos = 0
#
-# ## Connection timeout for initial connection in seconds
-# # connection_timeout = "30s"
#
# ## Maximum messages to read from the broker that have not been written by an
# ## output. For best throughput set based on the number of metrics within
# ## each message and the size of the output's metric_batch_size.
@@ -7832,33 +8085,37 @@
# ## full batch is collected and the write is triggered immediately without
# ## waiting until the next flush_interval.
# # max_undelivered_messages = 1000
#
# ## Persistent session disables clearing of the client session on connection.
# ## In order for this option to work you must also set client_id to identify
# ## the client. To receive messages that arrived while the client is offline,
# ## also set the qos option to 1 or 2 and don't forget to also set the QoS when
# ## publishing.
# # persistent_session = false
#
# ## If unset, a random client ID will be generated.
# # client_id = ""
#
# ## Username and password to connect MQTT server.
# # username = "telegraf"
# # password = "metricsmetricsmetricsmetrics"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
+# ## Enable extracting tag values from MQTT topics
+# ## _ denotes an ignored entry in the topic path
+# ## [[inputs.mqtt_consumer.topic_parsing]]
+# ##   topic = ""
+# ##   measurement = ""
+# ##   tags = ""
+# ##   fields = ""
+# ## [inputs.mqtt_consumer.topic_parsing.types]
+# ##
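
As a sketch of how these sub-tables fit together: for messages published to e.g. "telegraf/host01/cpu", the following hypothetical values would take the third topic segment as the measurement name and store the second segment as a "host" tag:

  [[inputs.mqtt_consumer.topic_parsing]]
    topic = "telegraf/+/cpu"
    measurement = "_/_/measurement"
    tags = "_/host/_"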


# # Read metrics from NATS subject(s)
@@ -8415,42 +8672,34 @@
#
# ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2
# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type.
-# ## Possible values for database_type are - "AzureSQLDB" or "AzureSQLManagedInstance" or "SQLServer"
+# ## Possible values for database_type are - "SQLServer" or "AzureSQLDB" or "AzureSQLManagedInstance" or "AzureSQLPool"
#
-# database_type = "SQLServer"
-#
-# ## A list of queries to include. If not specified, all the below listed queries are used.
-# include_query = []
-#
-# ## A list of queries to explicitly ignore.
-# exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"]
-#
-# ## Queries enabled by default for database_type = "SQLServer" are -
-# ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks,
-# ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu, SQLServerAvailabilityReplicaStates, SQLServerDatabaseReplicaStates
-#
# ## Queries enabled by default for database_type = "AzureSQLDB" are -
# ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties,
# ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers
#
# # database_type = "AzureSQLDB"
#
# ## A list of queries to include. If not specified, all the above listed queries are used.
# # include_query = []
#
# ## A list of queries to explicitly ignore.
# # exclude_query = []
#
# ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are -
# ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats,
# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers
#
# # database_type = "AzureSQLManagedInstance"
+# ## Queries enabled by default for database_type = "AzureSQLPool" are -
+# ## AzureSQLPoolResourceStats, AzureSQLPoolResourceGovernance, AzureSQLPoolDatabaseIO, AzureSQLPoolWaitStats,
+# ## AzureSQLPoolMemoryClerks, AzureSQLPoolPerformanceCounters, AzureSQLPoolSchedulers
#
+# # include_query = []
#
+# # exclude_query = []
#
+# ## Queries enabled by default for database_type = "SQLServer" are -
+# ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks,
+# ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu
+#
+# database_type = "SQLServer"
+#
+# include_query = []
+#
+# ## SQLServerAvailabilityReplicaStates and SQLServerDatabaseReplicaStates are optional queries and hence excluded here as default
+# exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"]
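
A minimal sketch of the new database_type mechanism described above; the connection string is a placeholder, and servers is the plugin's standard option rather than part of this hunk:

  [[inputs.sqlserver]]
    servers = ["Server=localhost;Port=1433;User Id=telegraf;Password=secret;app name=telegraf;"]
    database_type = "AzureSQLPool"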
#
-# ## Following are old config settings, you may use them only if you are using the earlier flavor of queries, however it is recommended to use
+# ## Following are old config settings
+# ## You may use them only if you are using the earlier flavor of queries; however, it is recommended to use
# ## the new mechanism of identifying the database_type and thereby use its corresponding queries
#
# ## Optional parameter, setting this to 2 will use a new version