From 9e3fc73bc89828d79a26381637b56476734f7346 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 7 Jul 2020 14:45:48 -0700 Subject: [PATCH] Update sample configuration --- etc/telegraf.conf | 847 ++++++++++++++++++++++++++++++++++------------ 1 file changed, 622 insertions(+), 225 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 239f77c60..ce9732489 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -222,7 +222,7 @@ # # ## Additional exchange arguments. # # exchange_arguments = { } -# # exchange_arguments = {"hash_propery" = "timestamp"} +# # exchange_arguments = {"hash_property" = "timestamp"} # # ## Authentication credentials for the PLAIN auth_method. # # username = "" @@ -448,13 +448,13 @@ # # Configuration for DataDog API to send metrics to. # [[outputs.datadog]] # ## Datadog API key -# apikey = "my-secret-key" # required. -# -# # The base endpoint URL can optionally be specified but it defaults to: -# #url = "https://app.datadoghq.com/api/v1/series" +# apikey = "my-secret-key" # # ## Connection timeout. # # timeout = "5s" +# +# ## Write URL override; useful for debugging. +# # url = "https://app.datadoghq.com/api/v1/series" # # Send metrics to nowhere at all @@ -529,6 +529,21 @@ # # data_format = "influx" +# # Run executable as long-running output plugin +# [[outputs.execd]] +# ## Program to run as daemon +# command = ["my-telegraf-output", "--some-flag", "value"] +# +# ## Delay before the process is restarted after an unexpected termination +# restart_delay = "10s" +# +# ## Data format to export. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + # # Send telegraf metrics to file(s) # [[outputs.file]] # ## Files to write to, "stdout" is a specially handled file. @@ -536,7 +551,7 @@ # # ## Use batch serialization format instead of line based delimiting. The # ## batch format allows for the production of non line based output formats and -# ## may more effiently encode metric groups. +# ## may more efficiently encode metric groups. # # use_batch_format = false # # ## The file will be rotated after the time interval specified. When set @@ -572,9 +587,20 @@ # # ## Enable Graphite tags support # # graphite_tag_support = false +# # ## Character for separating metric name and field for Graphite tags # # graphite_separator = "." # +# ## Graphite templates patterns +# ## 1. Template for cpu +# ## 2. Template for disk* +# ## 3. Default template +# # templates = [ +# # "cpu tags.measurement.host.field", +# # "disk* measurement.field", +# # "host.measurement.tags.field" +# #] +# # ## timeout in seconds for the write connection to graphite # timeout = 2 # @@ -745,7 +771,7 @@ # template = "host.tags.measurement.field" # ## Timeout in seconds to connect # timeout = "2s" -# ## Display Communcation to Instrumental +# ## Display Communication to Instrumental # debug = false @@ -902,7 +928,7 @@ # streamname = "StreamName" # ## DEPRECATED: PartitionKey as used for sharding data. # partitionkey = "PartitionKey" -# ## DEPRECATED: If set the paritionKey will be a random UUID on every put. +# ## DEPRECATED: If set the partitionKey will be a random UUID on every put. # ## This allows for scaling across multiple shards in a stream. # ## This will cause issues with ordering. # use_random_partitionkey = false @@ -941,7 +967,7 @@ # # Configuration for Librato API to send metrics to. 
# [[outputs.librato]] -# ## Librator API Docs +# ## Librato API Docs # ## http://dev.librato.com/v1/metrics-authentication # ## Librato API user # api_user = "telegraf@influxdb.com" # required. @@ -1037,6 +1063,18 @@ # data_format = "influx" +# # Send metrics to New Relic metrics endpoint +# [[outputs.newrelic]] +# ## New Relic Insights API key +# insights_key = "insights api key" +# +# ## Prefix to add to add to metric name for easy identification. +# # metric_prefix = "" +# +# ## Timeout for writes to the New Relic API. +# # timeout = "15s" + + # # Send telegraf measurements to NSQD # [[outputs.nsq]] # ## Location of nsqd instance listening on TCP @@ -1197,6 +1235,11 @@ # ## Defaults to the OS configuration. # # keep_alive_period = "5m" # +# ## Content encoding for packet-based connections (i.e. UDP, unixgram). +# ## Can be set to "gzip" or to "identity" to apply no encoding. +# ## +# # content_encoding = "identity" +# # ## Data format to generate. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -1215,7 +1258,7 @@ # ## Custom resource type # # resource_type = "generic_node" # -# ## Additonal resource labels +# ## Additional resource labels # # [outputs.stackdriver.resource_labels] # # node_id = "$HOSTNAME" # # namespace = "myapp" @@ -1254,13 +1297,13 @@ # ## be one of "octet-counting", "non-transparent". # # framing = "octet-counting" # -# ## The trailer to be expected in case of non-trasparent framing (default = "LF"). +# ## The trailer to be expected in case of non-transparent framing (default = "LF"). # ## Must be one of "LF", or "NUL". # # trailer = "LF" # # ## SD-PARAMs settings # ## Syslog messages can contain key/value pairs within zero or more -# ## structured data sections. For each unrecognised metric tag/field a +# ## structured data sections. For each unrecognized metric tag/field a # ## SD-PARAMS is created. # ## # ## Example: @@ -1276,8 +1319,8 @@ # # sdparam_separator = "_" # # ## Default sdid used for tags/fields that don't contain a prefix defined in -# ## the explict sdids setting below If no default is specified, no SD-PARAMs -# ## will be used for unrecognised field. +# ## the explicit sdids setting below If no default is specified, no SD-PARAMs +# ## will be used for unrecognized field. # # default_sdid = "default@32473" # # ## List of explicit prefixes to extract from tag/field keys and use as the @@ -1357,7 +1400,7 @@ # #convert_paths = true # # ## Use Strict rules to sanitize metric and tag names from invalid characters -# ## When enabled forward slash (/) and comma (,) will be accpeted +# ## When enabled forward slash (/) and comma (,) will be accepted # #use_strict = false # # ## Use Regex to sanitize metric and tag names from invalid characters @@ -1370,6 +1413,10 @@ # ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true # #convert_bool = true # +# ## Truncate metric tags to a total of 254 characters for the tag name value. Wavefront will reject any +# ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility. +# #truncate_tags = false +# # ## Define a mapping, namespaced by metric prefix, from string values to numeric values # ## deprecated in 1.9; use the enum processor plugin # #[[outputs.wavefront.string_to_number.elasticsearch]] @@ -1427,20 +1474,28 @@ # # Dates measurements, tags, and fields that pass through this filter. 
# [[processors.date]] -# ## New tag to create -# tag_key = "month" +# ## New tag to create +# tag_key = "month" # -# ## Date format string, must be a representation of the Go "reference time" -# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006". -# date_format = "Jan" +# ## New field to create (cannot set both field_key and tag_key) +# # field_key = "month" # -# ## Offset duration added to the date string when writing the new tag. -# # date_offset = "0s" +# ## Date format string, must be a representation of the Go "reference time" +# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006". +# date_format = "Jan" # -# ## Timezone to use when creating the tag. This can be set to one of -# ## "UTC", "Local", or to a location name in the IANA Time Zone database. -# ## example: timezone = "America/Los_Angeles" -# # timezone = "UTC" +# ## If destination is a field, date format can also be one of +# ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field. +# # date_format = "unix" +# +# ## Offset duration added to the date string when writing the new tag. +# # date_offset = "0s" +# +# ## Timezone to use when creating the tag or field using a reference time +# ## string. This can be set to one of "UTC", "Local", or to a location name +# ## in the IANA Time Zone database. +# ## example: timezone = "America/Los_Angeles" +# # timezone = "UTC" # # Filter metrics with repeating field values @@ -1449,6 +1504,23 @@ # dedup_interval = "600s" +# # Defaults sets default value(s) for specified fields that are not set on incoming metrics. +# [[processors.defaults]] +# ## Ensures a set of fields always exists on your metric(s) with their +# ## respective default value. +# ## For any given field pair (key = default), if it's not set, a field +# ## is set on the metric with the specified default. +# ## +# ## A field is considered not set if it is nil on the incoming metric; +# ## or it is not nil but its value is an empty string or is a string +# ## of one or more spaces. +# ## = +# # [processors.defaults.fields] +# # field_1 = "bar" +# # time_idle = 0 +# # is_error = true + + # # Map enum values according to given table. 
# [[processors.enum]]
# [[processors.enum.mapping]]
@@ -1474,6 +1546,103 @@
# red = 3


+# # Run executable as long-running processor plugin
+# [[processors.execd]]
+# ## Program to run as daemon
+# ## eg: command = ["/path/to/your_program", "arg1", "arg2"]
+# command = ["cat"]
+#
+# ## Delay before the process is restarted after an unexpected termination
+# restart_delay = "10s"
+
+
+# # Performs file path manipulations on tags and fields
+# [[processors.filepath]]
+# ## Treat the tag value as a path and convert it to its last element, storing the result in a new tag
+# # [[processors.filepath.basename]]
+# # tag = "path"
+# # dest = "basepath"
+#
+# ## Treat the field value as a path and keep all but the last element of path, typically the path's directory
+# # [[processors.filepath.dirname]]
+# # field = "path"
+#
+# ## Treat the tag value as a path, converting it to its last element without its suffix
+# # [[processors.filepath.stem]]
+# # tag = "path"
+#
+# ## Treat the tag value as a path, converting it to the shortest path name equivalent
+# ## to path by purely lexical processing
+# # [[processors.filepath.clean]]
+# # tag = "path"
+#
+# ## Treat the tag value as a path, converting it to a relative path that is lexically
+# ## equivalent to the source path when joined to 'base_path'
+# # [[processors.filepath.rel]]
+# # tag = "path"
+# # base_path = "/var/log"
+#
+# ## Treat the tag value as a path, replacing each separator character in path with a '/' character.
+# ## Has an effect only on Windows
+# # [[processors.filepath.toslash]]
+# # tag = "path"
+
+
+# # Add a tag of the network interface name looked up over SNMP by interface number
+# [[processors.ifname]]
+# ## Name of tag holding the interface number
+# # tag = "ifIndex"
+#
+# ## Name of output tag where the interface name will be added
+# # dest = "ifName"
+#
+# ## Name of tag of the SNMP agent to request the interface name from
+# # agent = "agent"
+#
+# ## Timeout for each request.
+# # timeout = "5s"
+#
+# ## SNMP version; can be 1, 2, or 3.
+# # version = 2
+#
+# ## SNMP community string.
+# # community = "public"
+#
+# ## Number of retries to attempt.
+# # retries = 3
+#
+# ## The GETBULK max-repetitions parameter.
+# # max_repetitions = 10
+#
+# ## SNMPv3 authentication and encryption options.
+# ##
+# ## Security Name.
+# # sec_name = "myuser"
+# ## Authentication protocol; one of "MD5", "SHA", or "".
+# # auth_protocol = "MD5"
+# ## Authentication password.
+# # auth_password = "pass"
+# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+# # sec_level = "authNoPriv"
+# ## Context Name.
+# # context_name = ""
+# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "".
+# # priv_protocol = ""
+# ## Privacy password used for encrypted messages.
+# # priv_password = ""
+#
+# ## max_parallel_lookups is the maximum number of SNMP requests to
+# ## make at the same time.
+# # max_parallel_lookups = 100
+#
+# ## ordered controls whether or not the metrics need to stay in the
+# ## same order this plugin received them in. If false, this plugin
+# ## may change the order when data is cached. If you need metrics to
+# ## stay in order, set this to true. Keeping the metrics ordered may
+# ## be slightly slower.
+# # ordered = false
+
+
# # Apply metric modifications using override semantics.
# [[processors.override]]
# ## All modifications on inputs and aggregators can be overridden:
@@ -1513,6 +1682,19 @@
# value_key = "value"


+# # Given a tag of a TCP or UDP port number, add a tag of the service name looked up in the system services file
+# [[processors.port_name]]
+# ## Name of tag holding the port number
+# # tag = "port"
+#
+# ## Name of output tag where service name will be added
+# # dest = "service"
+#
+# ## Default protocol (tcp or udp)
+# # default_protocol = "tcp"
+
+
# # Print all metrics that pass through this filter.
# [[processors.printer]]
@@ -1553,6 +1735,54 @@
# [[processors.rename]]


+# # ReverseDNS does a reverse lookup on IP addresses to retrieve the DNS name
+# [[processors.reverse_dns]]
+# ## For optimal performance, you may want to limit which metrics are passed to this
+# ## processor. eg:
+# ## namepass = ["my_metric_*"]
+#
+# ## cache_ttl is how long the dns entries should stay cached for.
+# ## generally longer is better, but if you expect a large number of diverse lookups
+# ## you'll want to consider memory use.
+# cache_ttl = "24h"
+#
+# ## lookup_timeout is how long you should wait for a single dns request to respond.
+# ## this is also the maximum acceptable latency for a metric travelling through
+# ## the reverse_dns processor. After lookup_timeout is exceeded, a metric will
+# ## be passed on unaltered.
+# ## multiple simultaneous resolution requests for the same IP will only make a
+# ## single rDNS request, and they will all wait for the answer for this long.
+# lookup_timeout = "3s"
+#
+# ## max_parallel_lookups is the maximum number of dns requests to be in flight
+# ## at the same time. Requests hitting cached values do not count against this
+# ## total, and neither do multiple requests for the same IP.
+# ## It's probably best to keep this number fairly low.
+# max_parallel_lookups = 10
+#
+# ## ordered controls whether or not the metrics need to stay in the same order
+# ## this plugin received them in. If false, this plugin will change the order
+# ## with requests hitting cached results moving through immediately and not
+# ## waiting on slower lookups. This may cause issues for you if you are
+# ## depending on the order of metrics staying the same. If so, set this to true.
+# ## Keeping the metrics ordered may be slightly slower.
+# ordered = false
+#
+# [[processors.reverse_dns.lookup]]
+# ## get the ip from the field "source_ip", and put the result in the field "source_name"
+# field = "source_ip"
+# dest = "source_name"
+#
+# [[processors.reverse_dns.lookup]]
+# ## get the ip from the tag "destination_ip", and put the result in the tag
+# ## "destination_name".
+# tag = "destination_ip"
+# dest = "destination_name"
+#
+# ## If you would prefer destination_name to be a field instead, you can use a
+# ## processors.converter after this one, specifying the order attribute.
+
+
# # Add the S2 Cell ID as a tag based on latitude and longitude fields
# [[processors.s2geo]]
# ## The name of the lat and lon fields containing WGS-84 latitude and
@@ -1567,6 +1797,22 @@
# # cell_level = 9


+# # Process metrics using a Starlark script
+# [[processors.starlark]]
+# ## The Starlark source can be set as a string in this configuration file, or
+# ## by referencing a file containing the script. Only one source or script
+# ## should be set at once.
+# ##
+# ## Source of the Starlark script.
+# source = '''
+# def apply(metric):
+#   return metric
+# '''
+#
+# ## File containing a Starlark script.
+# # script = "/usr/local/bin/myscript.star" + + # # Perform string processing on tags, fields, and measurements # [[processors.strings]] # ## Convert a tag value to uppercase @@ -1681,7 +1927,7 @@ # # add_rank_fields = [] # # ## These settings provide a way to know what values the plugin is generating -# ## when aggregating metrics. The 'add_agregate_field' setting allows to +# ## when aggregating metrics. The 'add_aggregate_field' setting allows to # ## specify for which fields the final aggregation value is required. If the # ## list is non empty, then a field will be added to each every metric for # ## each field present in this setting. This field will contain @@ -2078,7 +2324,7 @@ # # insecure_skip_verify = false -# # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster. +# # Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster. # [[inputs.ceph]] # ## This is the recommended interval to poll. Too frequent and you will lose # ## data points due to timeouts during rebalancing and recovery @@ -2095,6 +2341,8 @@ # ## prefix of MON and OSD socket files, used to determine socket type # mon_prefix = "ceph-mon" # osd_prefix = "ceph-osd" +# mds_prefix = "ceph-mds" +# rgw_prefix = "ceph-client" # # ## suffix used to identify socket files # socket_suffix = "asok" @@ -2227,7 +2475,7 @@ # "nf_conntrack_count","nf_conntrack_max"] # # ## Directories to search within for the conntrack files above. -# ## Missing directrories will be ignored. +# ## Missing directories will be ignored. # dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"] @@ -2358,7 +2606,7 @@ # # domains = ["."] # # ## Query record type. -# ## Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. +# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. # # record_type = "A" # # ## Dns server port. @@ -2439,10 +2687,12 @@ # filters = [""] -# # Read metrics about docker containers from Fargate/ECS v2 meta endpoints. +# # Read metrics about docker containers from Fargate/ECS v2, v3 meta endpoints. # [[inputs.ecs]] -# ## ECS metadata url -# # endpoint_url = "http://169.254.170.2" +# ## ECS metadata url. +# ## Metadata v2 API is used if set explicitly. Otherwise, +# ## v3 metadata endpoint API is used if available. +# # endpoint_url = "" # # ## Containers to include and exclude. Globs accepted. # ## Note that an empty array for both will include all containers @@ -2569,25 +2819,30 @@ # # timeout = "5s" -# # Reload and gather from file[s] on telegraf's interval. +# # Parse a complete file each interval # [[inputs.file]] -# ## Files to parse each interval. -# ## These accept standard unix glob matching rules, but with the addition of -# ## ** as a "super asterisk". ie: -# ## /var/log/**.log -> recursively find all .log files in /var/log -# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log -# ## /var/log/apache.log -> only read the apache log file -# files = ["/var/log/apache/access.log"] +# ## Files to parse each interval. Accept standard unix glob matching rules, +# ## as well as ** to match recursive files and directories. +# files = ["/tmp/metrics.out"] +# +# ## Name a tag containing the name of the file the data was parsed from. Leave empty +# ## to disable. +# # file_tag = "" +# +# ## Character encoding to use when interpreting the file contents. Invalid +# ## characters are replaced using the unicode replacement character. When set +# ## to the empty string the data is not decoded to text. 
+# ## ex: character_encoding = "utf-8" +# ## character_encoding = "utf-16le" +# ## character_encoding = "utf-16be" +# ## character_encoding = "" +# # character_encoding = "" # # ## The dataformat to be read from files # ## Each data format has its own unique set of configuration options, read # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # data_format = "influx" -# -# ## Name a tag containing the name of the file the data was parsed from. Leave empty -# ## to disable. -# # file_tag = "" # # Count files in a directory @@ -2786,6 +3041,10 @@ # ## Optional HTTP headers # # headers = {"X-Special-Header" = "Special-Value"} # +# ## Optional file with Bearer token +# ## file content is added as an Authorization header +# # bearer_token = "/path/to/file" +# # ## Optional HTTP Basic Auth Credentials # # username = "username" # # password = "pa$$word" @@ -2838,11 +3097,28 @@ # ## Whether to follow redirects from the server (defaults to false) # # follow_redirects = false # +# ## Optional file with Bearer token +# ## file content is added as an Authorization header +# # bearer_token = "/path/to/file" +# +# ## Optional HTTP Basic Auth Credentials +# # username = "username" +# # password = "pa$$word" +# # ## Optional HTTP Request Body # # body = ''' # # {'fake':'data'} # # ''' # +# ## Optional name of the field that will contain the body of the response. +# ## By default it is set to an empty String indicating that the body's content won't be added +# # response_body_field = '' +# +# ## Maximum allowed HTTP response body size in bytes. +# ## 0 means to use the default of 32MiB. +# ## If the response body size exceeds this limit a "body_read_error" will be raised +# # response_body_max_size = "32MiB" +# # ## Optional substring or regex match in body of the response # # response_string_match = "\"service_status\": \"up\"" # # response_string_match = "ok" @@ -2859,6 +3135,11 @@ # # [inputs.http_response.headers] # # Host = "github.com" # +# ## Optional setting to map response http headers into tags +# ## If the http header is not present on the request, no corresponding tag will be added +# ## If multiple instances of the http header are present, only the first value will be used +# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} +# # ## Interface to use when dialing an address # # interface = "eth0" @@ -3298,6 +3579,12 @@ # ## Overrides resource_exclude if both set. # # resource_include = [ "deployments", "nodes", "statefulsets" ] # +# ## selectors to include and exclude as tags. Globs accepted. +# ## Note that an empty array for both will include all selectors as tags +# ## selector_exclude overrides selector_include if both set. +# # selector_include = [] +# # selector_exclude = ["*"] +# # ## Optional TLS Config # # tls_ca = "/path/to/cafile" # # tls_cert = "/path/to/certfile" @@ -3407,7 +3694,7 @@ # # campaign_id = "" -# # Retrives information on a specific host in a MarkLogic Cluster +# # Retrieves information on a specific host in a MarkLogic Cluster # [[inputs.marklogic]] # ## Base URL of the MarkLogic HTTP Server. 
# url = "http://localhost:8002" @@ -3506,76 +3793,83 @@ # # Retrieve data from MODBUS slave devices # [[inputs.modbus]] -# ## Connection Configuration -# ## -# ## The plugin supports connections to PLCs via MODBUS/TCP or -# ## via serial line communication in binary (RTU) or readable (ASCII) encoding -# ## -# ## Device name -# name = "Device" +# ## Connection Configuration +# ## +# ## The plugin supports connections to PLCs via MODBUS/TCP or +# ## via serial line communication in binary (RTU) or readable (ASCII) encoding +# ## +# ## Device name +# name = "Device" # -# ## Slave ID - addresses a MODBUS device on the bus -# ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved] -# slave_id = 1 +# ## Slave ID - addresses a MODBUS device on the bus +# ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved] +# slave_id = 1 # -# ## Timeout for each request -# timeout = "1s" +# ## Timeout for each request +# timeout = "1s" # -# # TCP - connect via Modbus/TCP -# controller = "tcp://localhost:502" +# ## Maximum number of retries and the time to wait between retries +# ## when a slave-device is busy. +# # busy_retries = 0 +# # busy_retries_wait = "100ms" # -# # Serial (RS485; RS232) -# #controller = "file:///dev/ttyUSB0" -# #baud_rate = 9600 -# #data_bits = 8 -# #parity = "N" -# #stop_bits = 1 -# #transmission_mode = "RTU" +# # TCP - connect via Modbus/TCP +# controller = "tcp://localhost:502" +# +# ## Serial (RS485; RS232) +# # controller = "file:///dev/ttyUSB0" +# # baud_rate = 9600 +# # data_bits = 8 +# # parity = "N" +# # stop_bits = 1 +# # transmission_mode = "RTU" # # -# ## Measurements -# ## +# ## Measurements +# ## # -# ## Digital Variables, Discrete Inputs and Coils -# ## name - the variable name -# ## address - variable address +# ## Digital Variables, Discrete Inputs and Coils +# ## measurement - the (optional) measurement name, defaults to "modbus" +# ## name - the variable name +# ## address - variable address # -# discrete_inputs = [ -# { name = "start", address = [0]}, -# { name = "stop", address = [1]}, -# { name = "reset", address = [2]}, -# { name = "emergency_stop", address = [3]}, -# ] -# coils = [ -# { name = "motor1_run", address = [0]}, -# { name = "motor1_jog", address = [1]}, -# { name = "motor1_stop", address = [2]}, -# ] +# discrete_inputs = [ +# { name = "start", address = [0]}, +# { name = "stop", address = [1]}, +# { name = "reset", address = [2]}, +# { name = "emergency_stop", address = [3]}, +# ] +# coils = [ +# { name = "motor1_run", address = [0]}, +# { name = "motor1_jog", address = [1]}, +# { name = "motor1_stop", address = [2]}, +# ] # -# ## Analog Variables, Input Registers and Holding Registers -# ## name - the variable name -# ## byte_order - the ordering of bytes -# ## |---AB, ABCD - Big Endian -# ## |---BA, DCBA - Little Endian -# ## |---BADC - Mid-Big Endian -# ## |---CDAB - Mid-Little Endian -# ## data_type - UINT16, INT16, INT32, UINT32, FLOAT32, FLOAT32-IEEE (the IEEE 754 binary representation) -# ## scale - the final numeric variable representation -# ## address - variable address +# ## Analog Variables, Input Registers and Holding Registers +# ## measurement - the (optional) measurement name, defaults to "modbus" +# ## name - the variable name +# ## byte_order - the ordering of bytes +# ## |---AB, ABCD - Big Endian +# ## |---BA, DCBA - Little Endian +# ## |---BADC - Mid-Big Endian +# ## |---CDAB - Mid-Little Endian +# ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32, FLOAT32-IEEE (the IEEE 754 binary representation) +# ## scale - the 
final numeric variable representation +# ## address - variable address # -# holding_registers = [ -# { name = "power_factor", byte_order = "AB", data_type = "FLOAT32", scale=0.01, address = [8]}, -# { name = "voltage", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [0]}, -# { name = "energy", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [5,6]}, -# { name = "current", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [1,2]}, -# { name = "frequency", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [7]}, -# { name = "power", byte_order = "ABCD", data_type = "FLOAT32", scale=0.1, address = [3,4]}, -# ] -# input_registers = [ -# { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, -# { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, -# { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, -# ] +# holding_registers = [ +# { name = "power_factor", byte_order = "AB", data_type = "FLOAT32", scale=0.01, address = [8]}, +# { name = "voltage", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [0]}, +# { name = "energy", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [5,6]}, +# { name = "current", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [1,2]}, +# { name = "frequency", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [7]}, +# { name = "power", byte_order = "ABCD", data_type = "FLOAT32", scale=0.1, address = [3,4]}, +# ] +# input_registers = [ +# { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, +# { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, +# { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, +# ] # # Read metrics from one or many MongoDB servers @@ -3587,6 +3881,11 @@ # ## mongodb://10.10.3.33:18832, # servers = ["mongodb://127.0.0.1:27017"] # +# ## When true, collect cluster status +# ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which +# ## may have an impact on performance. +# # gather_cluster_status = true +# # ## When true, collect per database stats # # gather_perdb_stats = false # @@ -3675,7 +3974,7 @@ # ## <1.6: metric_version = 1 (or unset) # metric_version = 2 # -# ## if the list is empty, then metrics are gathered from all databasee tables +# ## if the list is empty, then metrics are gathered from all database tables # # table_schema_databases = [] # # ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list @@ -3859,6 +4158,22 @@ # # insecure_skip_verify = false +# # Read Nginx virtual host traffic status module information (nginx-module-sts) +# [[inputs.nginx_sts]] +# ## An array of ngx_http_status_module or status URI to gather stats. 
+# urls = ["http://localhost/status"] +# +# ## HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + # # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module) # [[inputs.nginx_upstream_check]] # ## An URL where Nginx Upstream check module is enabled @@ -3982,7 +4297,7 @@ # # timeout = "5ms" -# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver +# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver # [[inputs.opensmtpd]] # ## If running as a restricted user you can prepend sudo for additional access: # #use_sudo = false @@ -3990,7 +4305,7 @@ # ## The default location of the smtpctl binary can be overridden with: # binary = "/usr/sbin/smtpctl" # -# ## The default timeout of 1000ms can be overriden with (in milliseconds): +# ## The default timeout of 1000ms can be overridden with (in milliseconds): # timeout = 1000 @@ -4182,9 +4497,12 @@ # ## When true add the full cmdline as a tag. # # cmdline_tag = false # -# ## Add PID as a tag instead of a field; useful to differentiate between -# ## processes whose tags are otherwise the same. Can create a large number -# ## of series, use judiciously. +# ## Add the PID as a tag instead of as a field. When collecting multiple +# ## processes with otherwise matching tags this setting should be enabled to +# ## ensure each process has a unique identity. +# ## +# ## Enabling this option may result in a large number of series, especially +# ## when processes have a short lifetime. # # pid_tag = false # # ## Method to use when finding process IDs. Can be one of 'pgrep', or @@ -4194,7 +4512,7 @@ # # pid_finder = "pgrep" -# # Reads last_run_summary.yaml file and converts to measurments +# # Reads last_run_summary.yaml file and converts to measurements # [[inputs.puppetagent]] # ## Location of puppet last run summary file # location = "/var/lib/puppet/state/last_run_summary.yaml" @@ -4260,6 +4578,29 @@ # urls = ["http://localhost:8080/_raindrops"] +# # Read CPU, Fans, Powersupply and Voltage metrics of hardware server through redfish APIs +# [[inputs.redfish]] +# ## Server url +# address = "https://127.0.0.1:5000" +# +# ## Username, Password for hardware server +# username = "root" +# password = "password123456" +# +# ## ComputerSystemId +# computer_system_id="2M220100SL" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + # # Read metrics from one or many redis servers # [[inputs.redis]] # ## specify servers via a url matching: @@ -4537,46 +4878,54 @@ # # Read metrics from Microsoft SQL Server # [[inputs.sqlserver]] -# ## Specify instances to monitor with a list of connection strings. -# ## All connection parameters are optional. -# ## By default, the host is localhost, listening on default port, TCP 1433. -# ## for Windows, the user is the currently running AD user (SSO). 
-# ## See https://github.com/denisenkom/go-mssqldb for detailed connection -# ## parameters, in particular, tls connections can be created like so: -# ## "encrypt=true;certificate=;hostNameInCertificate=" -# # servers = [ -# # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", -# # ] +# ## Specify instances to monitor with a list of connection strings. +# ## All connection parameters are optional. +# ## By default, the host is localhost, listening on default port, TCP 1433. +# ## for Windows, the user is the currently running AD user (SSO). +# ## See https://github.com/denisenkom/go-mssqldb for detailed connection +# ## parameters, in particular, tls connections can be created like so: +# ## "encrypt=true;certificate=;hostNameInCertificate=" +# # servers = [ +# # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", +# # ] # -# ## Optional parameter, setting this to 2 will use a new version -# ## of the collection queries that break compatibility with the original -# ## dashboards. -# query_version = 2 +# ## Optional parameter, setting this to 2 will use a new version +# ## of the collection queries that break compatibility with the original +# ## dashboards. +# ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB +# query_version = 2 # -# ## If you are using AzureDB, setting this to true will gather resource utilization metrics -# # azuredb = false +# ## If you are using AzureDB, setting this to true will gather resource utilization metrics +# # azuredb = false # -# ## Possible queries: -# ## - PerformanceCounters -# ## - WaitStatsCategorized -# ## - DatabaseIO -# ## - DatabaseProperties -# ## - CPUHistory -# ## - DatabaseSize -# ## - DatabaseStats -# ## - MemoryClerk -# ## - VolumeSpace -# ## - PerformanceMetrics -# ## - Schedulers -# ## - AzureDBResourceStats -# ## - AzureDBResourceGovernance -# ## - SqlRequests -# ## - ServerProperties -# ## A list of queries to include. If not specified, all the above listed queries are used. -# # include_query = [] +# ## Possible queries +# ## Version 2: +# ## - PerformanceCounters +# ## - WaitStatsCategorized +# ## - DatabaseIO +# ## - ServerProperties +# ## - MemoryClerk +# ## - Schedulers +# ## - SqlRequests +# ## - VolumeSpace +# ## - Cpu +# ## Version 1: +# ## - PerformanceCounters +# ## - WaitStatsCategorized +# ## - CPUHistory +# ## - DatabaseIO +# ## - DatabaseSize +# ## - DatabaseStats +# ## - DatabaseProperties +# ## - MemoryClerk +# ## - VolumeSpace +# ## - PerformanceMetrics # -# ## A list of queries to explicitly ignore. -# exclude_query = [ 'Schedulers' , 'SqlRequests'] +# ## A list of queries to include. If not specified, all the above listed queries are used. +# # include_query = [] +# +# ## A list of queries to explicitly ignore. +# exclude_query = [ 'Schedulers' , 'SqlRequests'] # # Gather timeseries from Google Cloud Platform v3 monitoring API @@ -4814,7 +5163,7 @@ # ## The default location of the unbound config file can be overridden with: # # config_file = "/etc/unbound/unbound.conf" # -# ## The default timeout of 1s can be overriden with: +# ## The default timeout of 1s can be overridden with: # # timeout = "1s" # # ## When set to true, thread metrics are tagged with the thread id. 
@@ -4834,7 +5183,7 @@ # ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"] # servers = ["tcp://127.0.0.1:1717"] # -# ## General connection timout +# ## General connection timeout # # timeout = "5s" @@ -4853,7 +5202,7 @@ # stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"] # # ## Optional name for the varnish instance (or working directory) to query -# ## Usually appened after -n in varnish cli +# ## Usually append after -n in varnish cli # # instance_name = instanceName # # ## Timeout for varnishstat command @@ -4963,7 +5312,7 @@ # # ## Additional exchange arguments. # # exchange_arguments = { } -# # exchange_arguments = {"hash_propery" = "timestamp"} +# # exchange_arguments = {"hash_property" = "timestamp"} # # ## AMQP queue name. # queue = "telegraf" @@ -5036,64 +5385,6 @@ # ] -# # Cisco GNMI telemetry input plugin based on GNMI telemetry data produced in IOS XR -# [[inputs.cisco_telemetry_gnmi]] -# ## Address and port of the GNMI GRPC server -# addresses = ["10.49.234.114:57777"] -# -# ## define credentials -# username = "cisco" -# password = "cisco" -# -# ## GNMI encoding requested (one of: "proto", "json", "json_ietf") -# # encoding = "proto" -# -# ## redial in case of failures after -# redial = "10s" -# -# ## enable client-side TLS and define CA to authenticate the device -# # enable_tls = true -# # tls_ca = "/etc/telegraf/ca.pem" -# # insecure_skip_verify = true -# -# ## define client-side TLS certificate & key to authenticate to the device -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# -# ## GNMI subscription prefix (optional, can usually be left empty) -# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths -# # origin = "" -# # prefix = "" -# # target = "" -# -# ## Define additional aliases to map telemetry encoding paths to simple measurement names -# #[inputs.cisco_telemetry_gnmi.aliases] -# # ifcounters = "openconfig:/interfaces/interface/state/counters" -# -# [[inputs.cisco_telemetry_gnmi.subscription]] -# ## Name of the measurement that will be emitted -# name = "ifcounters" -# -# ## Origin and path of the subscription -# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths -# ## -# ## origin usually refers to a (YANG) data model implemented by the device -# ## and path to a specific substructe inside it that should be subscribed to (similar to an XPath) -# ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr -# origin = "openconfig-interfaces" -# path = "/interfaces/interface/state/counters" -# -# # Subscription mode (one of: "target_defined", "sample", "on_change") and interval -# subscription_mode = "sample" -# sample_interval = "10s" -# -# ## Suppress redundant transmissions when measured values are unchanged -# # suppress_redundant = false -# -# ## If suppression is enabled, send updates at least every X seconds anyway -# # heartbeat_interval = "60s" - - # # Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms # [[inputs.cisco_telemetry_mdt]] # ## Telemetry transport can be "tcp" or "grpc". 
TLS is only supported when @@ -5456,6 +5747,64 @@ # data_format = "influx" +# # gNMI telemetry input plugin +# [[inputs.gnmi]] +# ## Address and port of the gNMI GRPC server +# addresses = ["10.49.234.114:57777"] +# +# ## define credentials +# username = "cisco" +# password = "cisco" +# +# ## gNMI encoding requested (one of: "proto", "json", "json_ietf") +# # encoding = "proto" +# +# ## redial in case of failures after +# redial = "10s" +# +# ## enable client-side TLS and define CA to authenticate the device +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # insecure_skip_verify = true +# +# ## define client-side TLS certificate & key to authenticate to the device +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## gNMI subscription prefix (optional, can usually be left empty) +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# # origin = "" +# # prefix = "" +# # target = "" +# +# ## Define additional aliases to map telemetry encoding paths to simple measurement names +# #[inputs.gnmi.aliases] +# # ifcounters = "openconfig:/interfaces/interface/state/counters" +# +# [[inputs.gnmi.subscription]] +# ## Name of the measurement that will be emitted +# name = "ifcounters" +# +# ## Origin and path of the subscription +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# ## +# ## origin usually refers to a (YANG) data model implemented by the device +# ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath) +# ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr +# origin = "openconfig-interfaces" +# path = "/interfaces/interface/state/counters" +# +# # Subscription mode (one of: "target_defined", "sample", "on_change") and interval +# subscription_mode = "sample" +# sample_interval = "10s" +# +# ## Suppress redundant transmissions when measured values are unchanged +# # suppress_redundant = false +# +# ## If suppression is enabled, send updates at least every X seconds anyway +# # heartbeat_interval = "60s" + + # # Accept metrics over InfluxDB 1.x HTTP API # [[inputs.http_listener]] # ## Address and port to host InfluxDB listener on @@ -5476,6 +5825,10 @@ # ## The default value of nothing means it will be off and the database will not be recorded. # # database_tag = "" # +# ## If set the retention policy specified in the write query will be added as +# ## the value of this tag name. +# # retention_policy_tag = "" +# # ## Set one or more allowed client CA certificate file names to # ## enable mutually authenticated TLS connections # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] @@ -5527,6 +5880,11 @@ # # basic_username = "foobar" # # basic_password = "barfoo" # +# ## Optional setting to map http headers into tags +# ## If the http header is not present on the request, no corresponding tag will be added +# ## If multiple instances of the http header are present, only the first value will be used +# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} +# # ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -5554,6 +5912,10 @@ # ## The default value of nothing means it will be off and the database will not be recorded. # # database_tag = "" # +# ## If set the retention policy specified in the write query will be added as +# ## the value of this tag name. 
+# # retention_policy_tag = "" +# # ## Set one or more allowed client CA certificate file names to # ## enable mutually authenticated TLS connections # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] @@ -5832,8 +6194,11 @@ # # Read metrics from MQTT topic(s) # [[inputs.mqtt_consumer]] -# ## MQTT broker URLs to be used. The format should be scheme://host:port, -# ## schema can be tcp, ssl, or ws. +# ## Broker URLs for the MQTT server or cluster. To connect to multiple +# ## clusters or standalone servers, use a seperate plugin instance. +# ## example: servers = ["tcp://localhost:1883"] +# ## servers = ["ssl://localhost:1883"] +# ## servers = ["ws://localhost:1883"] # servers = ["tcp://127.0.0.1:1883"] # # ## Topics that will be subscribed to. @@ -5984,7 +6349,7 @@ # ## postgres://[pqgotest[:password]]@localhost[/dbname]\ # ## ?sslmode=[disable|verify-ca|verify-full] # ## or a simple string: -# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production +# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production # ## # ## All connection parameters are optional. # ## @@ -5997,7 +6362,7 @@ # ## postgres://[pqgotest[:password]]@localhost[/dbname]\ # ## ?sslmode=[disable|verify-ca|verify-full] # ## or a simple string: -# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production +# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production # ## # ## All connection parameters are optional. # ## @@ -6032,7 +6397,7 @@ # ## postgres://[pqgotest[:password]]@localhost[/dbname]\ # ## ?sslmode=[disable|verify-ca|verify-full] # ## or a simple string: -# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production +# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production # # # ## All connection parameters are optional. # # ## Without the dbname parameter, the driver will default to a database @@ -6178,6 +6543,22 @@ # # service_address = "udp://:162" # ## Timeout running snmptranslate command # # timeout = "5s" +# ## Snmp version, defaults to 2c +# # version = "2c" +# ## SNMPv3 authentication and encryption options. +# ## +# ## Security Name. +# # sec_name = "myuser" +# ## Authentication protocol; one of "MD5", "SHA" or "". +# # auth_protocol = "MD5" +# ## Authentication password. +# # auth_password = "pass" +# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". +# # sec_level = "authNoPriv" +# ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C" or "". +# # priv_protocol = "" +# ## Privacy password used for encrypted messages. +# # priv_password = "" # # Generic socket listener capable of handling multiple socket types. @@ -6347,7 +6728,7 @@ # ## Must be one of "octet-counting", "non-transparent". # # framing = "octet-counting" # -# ## The trailer to be expected in case of non-trasparent framing (default = "LF"). +# ## The trailer to be expected in case of non-transparent framing (default = "LF"). # ## Must be one of "LF", or "NUL". # # trailer = "LF" # @@ -6363,9 +6744,9 @@ # # sdparam_separator = "_" -# # Stream a log file, like the tail -f command +# # Parse the new lines appended to a file # [[inputs.tail]] -# ## files to tail. +# ## File names or a pattern to tail. # ## These accept standard unix glob matching rules, but with the addition of # ## ** as a "super asterisk". 
ie: # ## "/var/log/**.log" -> recursively find all .log files in /var/log @@ -6375,14 +6756,30 @@ # ## See https://github.com/gobwas/glob for more examples # ## # files = ["/var/mymetrics.out"] +# # ## Read file from beginning. -# from_beginning = false +# # from_beginning = false +# # ## Whether file is a named pipe -# pipe = false +# # pipe = false # # ## Method used to watch for file updates. Can be either "inotify" or "poll". # # watch_method = "inotify" # +# ## Maximum lines of the file to process that have not yet be written by the +# ## output. For best throughput set based on the number of metrics on each +# ## line and the size of the output's metric_batch_size. +# # max_undelivered_lines = 1000 +# +# ## Character encoding to use when interpreting the file contents. Invalid +# ## characters are replaced using the unicode replacement character. When set +# ## to the empty string the data is not decoded to text. +# ## ex: character_encoding = "utf-8" +# ## character_encoding = "utf-16le" +# ## character_encoding = "utf-16be" +# ## character_encoding = "" +# # character_encoding = "" +# # ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -6539,11 +6936,11 @@ # ## separator character to use for measurement and field names (default: "_") # # separator = "_" # -# ## number of objects to retreive per query for realtime resources (vms and hosts) +# ## number of objects to retrieve per query for realtime resources (vms and hosts) # ## set to 64 for vCenter 5.5 and 6.0 (default: 256) # # max_query_objects = 256 # -# ## number of metrics to retreive per query for non-realtime resources (clusters and datastores) +# ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores) # ## set to 64 for vCenter 5.5 and 6.0 (default: 256) # # max_query_metrics = 256 # @@ -6568,10 +6965,10 @@ # ## Custom attributes from vCenter can be very useful for queries in order to slice the # ## metrics along different dimension and for forming ad-hoc relationships. They are disabled # ## by default, since they can add a considerable amount of tags to the resulting metrics. To -# ## enable, simply set custom_attribute_exlude to [] (empty set) and use custom_attribute_include +# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include # ## to select the attributes you want to include. # ## By default, since they can add a considerable amount of tags to the resulting metrics. To -# ## enable, simply set custom_attribute_exlude to [] (empty set) and use custom_attribute_include +# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include # ## to select the attributes you want to include. # # custom_attribute_include = [] # # custom_attribute_exclude = ["*"]
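# ## For example, to collect just one custom attribute, something like the
# ## following could be used (the attribute name "owner" is illustrative and
# ## not part of the sample configuration):
# # custom_attribute_include = ["owner"]
# # custom_attribute_exclude = []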