chore(processors): migrate sample configs into separate files (#11125)

Sebastian Spaink 2022-05-18 11:29:43 -05:00 committed by GitHub
parent 2488d75edd
commit d345348834
26 changed files with 625 additions and 0 deletions


@ -0,0 +1,47 @@
# Attach AWS EC2 metadata to metrics
[[processors.aws_ec2]]
## Instance identity document tags to attach to metrics.
## For more information see:
## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html
##
## Available tags:
## * accountId
## * architecture
## * availabilityZone
## * billingProducts
## * imageId
## * instanceId
## * instanceType
## * kernelId
## * pendingTime
## * privateIp
## * ramdiskId
## * region
## * version
imds_tags = []
## EC2 instance tags retrieved with DescribeTags action.
## If a tag is empty when retrieved, it is omitted when tagging metrics.
## Note that for this to work, the role attached to the EC2 instance or the
## AWS credentials available from the environment must have a policy attached
## that allows ec2:DescribeTags.
##
## For more information see:
## https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTags.html
ec2_tags = []
## Timeout for HTTP requests made against the AWS EC2 metadata endpoint.
timeout = "10s"
## ordered controls whether or not the metrics need to stay in the same order
## this plugin received them in. If false, this plugin will change the order
## with requests hitting cached results moving through immediately and not
## waiting on slower lookups. This may cause issues for you if you are
## depending on the order of metrics staying the same. If so, set this to true.
## Keeping the metrics ordered may be slightly slower.
ordered = false
## max_parallel_calls is the maximum number of AWS API calls to be in flight
## at the same time.
## It's probably best to keep this number fairly low.
max_parallel_calls = 10


@ -0,0 +1,10 @@
# Clone metrics and apply modifications to the copy, passing the original through unchanged.
[[processors.clone]]
## All of the following modifications are applied to the cloned metric:
# name_override = "new_name"
# name_prefix = "new_name_prefix"
# name_suffix = "new_name_suffix"
## Tags to be added (all values must be strings)
# [processors.clone.tags]
# additional_tag = "tag_value"
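As a rough sketch of the behavior (assuming name_suffix = "_copy" and the additional_tag above were uncommented), the original metric passes through untouched and a modified copy is emitted alongside it:
  cpu,host=web01 usage_idle=98.2
  cpu_copy,host=web01,additional_tag=tag_value usage_idle=98.2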


@ -0,0 +1,28 @@
# Convert values to another metric value type
[[processors.converter]]
## Tags to convert
##
## The table key determines the target type, and the array values
## select the tag keys to convert. The array may contain globs.
## <target-type> = [<tag-key>...]
[processors.converter.tags]
measurement = []
string = []
integer = []
unsigned = []
boolean = []
float = []
## Fields to convert
##
## The table key determines the target type, and the array values
## select the field keys to convert. The array may contain globs.
## <target-type> = [<field-key>...]
[processors.converter.fields]
measurement = []
tag = []
string = []
integer = []
unsigned = []
boolean = []
float = []
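A minimal illustration, assuming the fields table above contained integer = ["response_code"]: the string field is rewritten as an integer while other fields are left untouched.
  http,server=example.org response_code="200",content_length=3577
becomes
  http,server=example.org response_code=200i,content_length=3577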


@ -0,0 +1,24 @@
# Dates measurements, tags, and fields that pass through this filter.
[[processors.date]]
## New tag to create
tag_key = "month"
## New field to create (cannot set both field_key and tag_key)
# field_key = "month"
## Date format string, must be a representation of the Go "reference time"
## which is "Mon Jan 2 15:04:05 -0700 MST 2006".
date_format = "Jan"
## If destination is a field, date format can also be one of
## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field.
# date_format = "unix"
## Offset duration added to the date string when writing the new tag.
# date_offset = "0s"
## Timezone to use when creating the tag or field using a reference time
## string. This can be set to one of "UTC", "Local", or to a location name
## in the IANA Time Zone database.
## example: timezone = "America/Los_Angeles"
# timezone = "UTC"
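With the settings above (tag_key = "month", date_format = "Jan"), each metric is tagged with the abbreviated month of its own timestamp; for example, a point stamped 2022-05-18 (UTC by default) would gain month=May:
  cpu,host=web01 usage_idle=98.2 1652876983000000000
becomes
  cpu,host=web01,month=May usage_idle=98.2 1652876983000000000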


@ -0,0 +1,4 @@
# Filter metrics with repeating field values
[[processors.dedup]]
## Maximum time to suppress output
dedup_interval = "600s"
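Sketch of the effect (values assumed): within dedup_interval, points whose field values are unchanged from the previous point of the same series are suppressed, so of the three inputs below only the first and last are emitted.
  cpu,host=web01 usage_idle=98.2
  cpu,host=web01 usage_idle=98.2
  cpu,host=web01 usage_idle=97.4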


@ -0,0 +1,15 @@
# Set default fields on your metric(s) when they are nil or empty
[[processors.defaults]]
## Ensures a set of fields always exists on your metric(s) with their
## respective default value.
## For any given field pair (key = default), if it's not set, a field
## is set on the metric with the specified default.
##
## A field is considered not set if it is nil on the incoming metric;
## or it is not nil but its value is an empty string or is a string
## of one or more spaces.
## <target-field> = <value>
[processors.defaults.fields]
field_1 = "bar"
time_idle = 0
is_error = true
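Illustrative effect of the table above (metric name assumed): an incoming metric missing those fields, or carrying empty strings, gains the defaults, while fields that are already set are left alone.
  lb,host=lb-01 uptime=3600i
becomes
  lb,host=lb-01 uptime=3600i,field_1="bar",time_idle=0i,is_error=true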


@ -0,0 +1,23 @@
# Map enum values according to given table.
[[processors.enum]]
[[processors.enum.mapping]]
## Name of the field to map. Globs accepted.
field = "status"
## Name of the tag to map. Globs accepted.
# tag = "status"
## Destination tag or field to be used for the mapped value. By default the
## source tag or field is used, overwriting the original value.
dest = "status_code"
## Default value to be used for all values not contained in the mapping
## table. When unset and no match is found, the original field will remain
## unmodified and the destination tag or field will not be created.
# default = 0
## Table of mappings
[processors.enum.mapping.value_mappings]
green = 1
amber = 2
red = 3
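With the mapping above, the string value of the status field is looked up and the result written to the status_code destination field, leaving the source untouched (metric name and tags assumed for illustration):
  device,host=web01 status="green"
becomes
  device,host=web01 status="green",status_code=1i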


@ -0,0 +1,15 @@
# Run executable as long-running processor plugin
[[processors.execd]]
## One program to run as daemon.
## NOTE: process and each argument should each be their own string
## eg: command = ["/path/to/your_program", "arg1", "arg2"]
command = ["cat"]
## Environment variables
## Array of "key=value" pairs to pass as environment variables
## e.g. "KEY=value", "USERNAME=John Doe",
## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs"
# environment = []
## Delay before the process is restarted after an unexpected termination
# restart_delay = "10s"


@ -0,0 +1,30 @@
# Performs file path manipulations on tags and fields
[[processors.filepath]]
## Treat the tag value as a path and convert it to its last element, storing the result in a new tag
# [[processors.filepath.basename]]
# tag = "path"
# dest = "basepath"
## Treat the field value as a path and keep all but the last element of path, typically the path's directory
# [[processors.filepath.dirname]]
# field = "path"
## Treat the tag value as a path, converting it to the last element without its suffix
# [[processors.filepath.stem]]
# tag = "path"
## Treat the tag value as a path, converting it to the shortest path name equivalent
## to path by purely lexical processing
# [[processors.filepath.clean]]
# tag = "path"
## Treat the tag value as a path, converting it to a relative path that is lexically
## equivalent to the source path when joined to 'base_path'
# [[processors.filepath.rel]]
# tag = "path"
# base_path = "/var/log"
## Treat the tag value as a path, replacing each separator character in path with a '/' character. This
## only has an effect on Windows
# [[processors.filepath.toslash]]
# tag = "path"
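A sketch of the first (basename) option above, assuming it were uncommented: the path tag is left as-is and its last element is stored in the new basepath tag (metric and values assumed).
  disk,path=/var/log/nginx/access.log used_percent=72.1
becomes
  disk,path=/var/log/nginx/access.log,basepath=access.log used_percent=72.1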


@ -0,0 +1,58 @@
# Add a tag of the network interface name looked up over SNMP by interface number
[[processors.ifname]]
## Name of tag holding the interface number
# tag = "ifIndex"
## Name of output tag where the interface name will be added
# dest = "ifName"
## Name of tag of the SNMP agent to request the interface name from
# agent = "agent"
## Timeout for each request.
# timeout = "5s"
## SNMP version; can be 1, 2, or 3.
# version = 2
## SNMP community string.
# community = "public"
## Number of retries to attempt.
# retries = 3
## The GETBULK max-repetitions parameter.
# max_repetitions = 10
## SNMPv3 authentication and encryption options.
##
## Security Name.
# sec_name = "myuser"
## Authentication protocol; one of "MD5", "SHA", or "".
# auth_protocol = "MD5"
## Authentication password.
# auth_password = "pass"
## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
# sec_level = "authNoPriv"
## Context Name.
# context_name = ""
## Privacy protocol used for encrypted messages; one of "DES", "AES" or "".
# priv_protocol = ""
## Privacy password used for encrypted messages.
# priv_password = ""
## max_parallel_lookups is the maximum number of SNMP requests to
## make at the same time.
# max_parallel_lookups = 100
## ordered controls whether or not the metrics need to stay in the
## same order this plugin received them in. If false, this plugin
## may change the order when data is cached. If you need metrics to
## stay in order, set this to true. Keeping the metrics ordered may
## be slightly slower.
# ordered = false
## cache_ttl is the amount of time interface names are cached for a
## given agent. After this period elapses, names will be retrieved
## again if needed.
# cache_ttl = "8h"


@ -0,0 +1,21 @@
# Adds noise to numerical fields
[[processors.noise]]
## Specifies the type of the random distribution.
## Can be "laplacian", "gaussian" or "uniform".
# type = "laplacian"
## Center of the distribution.
## Only used for Laplacian and Gaussian distributions.
# mu = 0.0
## Scale parameter for the Laplacian or Gaussian distribution
# scale = 1.0
## Upper and lower bound of the Uniform distribution
# min = -1.0
# max = 1.0
## Apply the noise only to numeric fields matching the filter criteria below.
## Excludes takes precedence over includes.
# include_fields = []
# exclude_fields = []


@ -0,0 +1,10 @@
# Apply metric modifications using override semantics.
[[processors.override]]
## All modifications on inputs and aggregators can be overridden:
# name_override = "new_name"
# name_prefix = "new_name_prefix"
# name_suffix = "new_name_suffix"
## Tags to be added (all values must be strings)
# [processors.override.tags]
# additional_tag = "tag_value"


@ -0,0 +1,17 @@
# Parse a value in a specified field/tag(s) and add the result in a new metric
[[processors.parser]]
## The names of the fields whose values will be parsed.
parse_fields = ["message"]
## If true, incoming metrics are not emitted.
drop_original = false
## If set to override, emitted metrics will be merged by overriding the
## original metric using the newly parsed metrics.
merge = "override"
## The data format used to parse the field values
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"


@ -0,0 +1,6 @@
# Rotate a single valued metric into a multi field metric
[[processors.pivot]]
## Tag to use for naming the new field.
tag_key = "name"
## Field to use as the value of the new field.
value_key = "value"
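For example (series assumed for illustration), a single-value metric tagged with the field's name is rotated so the tag value becomes the field key:
  cpu,cpu=cpu0,name=time_idle value=42
becomes
  cpu,cpu=cpu0 time_idle=42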


@ -0,0 +1,18 @@
# Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file
[[processors.port_name]]
## Name of tag holding the port number
# tag = "port"
## Or name of the field holding the port number
# field = "port"
## Name of output tag or field (depending on the source) where service name will be added
# dest = "service"
## Default protocol (tcp or udp) used when no protocol tag or field is present
# default_protocol = "tcp"
## Tag containing the protocol (tcp or udp, case-insensitive)
# protocol_tag = "proto"
## Field containing the protocol (tcp or udp, case-insensitive)
# protocol_field = "proto"
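Rough illustration, assuming the default tag = "port" and default_protocol = "tcp" above: the port number is looked up in the system services file and the well-known service name is added as the dest tag.
  netstat,port=443 connections=12i
becomes
  netstat,port=443,service=https connections=12i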


@ -0,0 +1,2 @@
# Print all metrics that pass through this filter.
[[processors.printer]]


@ -0,0 +1,64 @@
# Transforms tag and field values as well as measurement, tag and field names with regex pattern
[[processors.regex]]
namepass = ["nginx_requests"]
# Tag and field conversions are defined in separate sub-tables
[[processors.regex.tags]]
## Tag to change, "*" will change every tag
key = "resp_code"
## Regular expression to match on a tag value
pattern = "^(\\d)\\d\\d$"
## Matches of the pattern will be replaced with this string. Use ${1}
## notation to use the text of the first submatch.
replacement = "${1}xx"
[[processors.regex.fields]]
## Field to change
key = "request"
## All the power of the Go regular expressions available here
## For example, named subgroups
pattern = "^/api(?P<method>/[\\w/]+)\\S*"
replacement = "${method}"
## If result_key is present, a new field will be created
## instead of changing existing field
result_key = "method"
# Multiple conversions may be applied for one field sequentially
# Let's extract one more value
[[processors.regex.fields]]
key = "request"
pattern = ".*category=(\\w+).*"
replacement = "${1}"
result_key = "search_category"
# Rename metric fields
[[processors.regex.field_rename]]
## Regular expression to match on a field name
pattern = "^search_(\\w+)d$"
## Matches of the pattern will be replaced with this string. Use ${1}
## notation to use the text of the first submatch.
replacement = "${1}"
## If the new field name already exists, you can either "overwrite" the
## existing one with the value of the renamed field OR you can "keep"
## both the existing and source field.
# result_key = "keep"
# Rename metric tags
# [[processors.regex.tag_rename]]
# ## Regular expression to match on a tag name
# pattern = "^search_(\\w+)d$"
# ## Matches of the pattern will be replaced with this string. Use ${1}
# ## notation to use the text of the first submatch.
# replacement = "${1}"
# ## If the new tag name already exists, you can either "overwrite" the
# ## existing one with the value of the renamed tag OR you can "keep"
# ## both the existing and source tag.
# # result_key = "keep"
# Rename metrics
# [[processors.regex.metric_rename]]
# ## Regular expression to match on a metric name
# pattern = "^search_(\\w+)d$"
# ## Matches of the pattern will be replaced with this string. Use ${1}
# ## notation to use the text of the first submatch.
# replacement = "${1}"
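Putting the uncommented rules above together on an assumed nginx_requests metric: the resp_code tag is collapsed to its class, and the two field rules add method and search_category fields (request itself is unchanged because both rules set result_key).
  nginx_requests,verb=GET,resp_code=200 request="/api/search/?category=books&q=tolkien",bytes=512i
becomes
  nginx_requests,verb=GET,resp_code=2xx request="/api/search/?category=books&q=tolkien",method="/search/",search_category="books",bytes=512i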


@ -0,0 +1,18 @@
# Rename measurements, tags, and fields that pass through this filter.
[[processors.rename]]
## Specify one sub-table per rename operation.
[[processors.rename.replace]]
measurement = "network_interface_throughput"
dest = "throughput"
[[processors.rename.replace]]
tag = "hostname"
dest = "host"
[[processors.rename.replace]]
field = "lower"
dest = "min"
[[processors.rename.replace]]
field = "upper"
dest = "max"
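Applied to an assumed interface metric, the four replace sub-tables above rename the measurement to throughput, the hostname tag to host, and the lower/upper fields to min/max:
  network_interface_throughput,hostname=backend.example.com lower=10i,upper=1000i
becomes
  throughput,host=backend.example.com min=10i,max=1000i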


@ -0,0 +1,46 @@
# ReverseDNS does a reverse lookup on IP addresses to retrieve the DNS name
[[processors.reverse_dns]]
## For optimal performance, you may want to limit which metrics are passed to this
## processor. eg:
## namepass = ["my_metric_*"]
## cache_ttl is how long the dns entries should stay cached for.
## generally longer is better, but if you expect a large number of diverse lookups
## you'll want to consider memory use.
cache_ttl = "24h"
## lookup_timeout is how long to wait for a single dns request to respond.
## This is also the maximum acceptable latency for a metric travelling through
## the reverse_dns processor. After lookup_timeout is exceeded, a metric will
## be passed on unaltered.
## multiple simultaneous resolution requests for the same IP will only make a
## single rDNS request, and they will all wait for the answer for this long.
lookup_timeout = "3s"
## max_parallel_lookups is the maximum number of dns requests to be in flight
## at the same time. Requests hitting cached values do not count against this
## total, and neither do multiple requests for the same IP.
## It's probably best to keep this number fairly low.
max_parallel_lookups = 10
## ordered controls whether or not the metrics need to stay in the same order
## this plugin received them in. If false, this plugin will change the order
## with requests hitting cached results moving through immediately and not
## waiting on slower lookups. This may cause issues for you if you are
## depending on the order of metrics staying the same. If so, set this to true.
## Keeping the metrics ordered may be slightly slower.
ordered = false
[[processors.reverse_dns.lookup]]
## get the ip from the field "source_ip", and put the result in the field "source_name"
field = "source_ip"
dest = "source_name"
[[processors.reverse_dns.lookup]]
## get the ip from the tag "destination_ip", and put the result in the tag
## "destination_name".
tag = "destination_ip"
dest = "destination_name"
## If you would prefer destination_name to be a field instead, you can use a
## processors.converter after this one, specifying the order attribute.
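For instance, with the second lookup above, a metric tagged with a resolvable address gains a destination_name tag holding the PTR answer (8.8.8.8 currently resolves to dns.google.; value shown purely for illustration):
  dns_query,destination_ip=8.8.8.8 response_time=0.05
becomes
  dns_query,destination_ip=8.8.8.8,destination_name=dns.google. response_time=0.05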


@ -0,0 +1,12 @@
# Add the S2 Cell ID as a tag based on latitude and longitude fields
[[processors.s2geo]]
## The name of the lat and lon fields containing WGS-84 latitude and
## longitude in decimal degrees.
# lat_field = "lat"
# lon_field = "lon"
## New tag to create
# tag_key = "s2_cell_id"
## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html)
# cell_level = 9


@ -0,0 +1,21 @@
# Process metrics using a Starlark script
[[processors.starlark]]
## The Starlark source can be set as a string in this configuration file, or
## by referencing a file containing the script. Only one source or script
## should be set at once.
## Source of the Starlark script.
source = '''
def apply(metric):
return metric
'''
## File containing a Starlark script.
# script = "/usr/local/bin/myscript.star"
## The constants of the Starlark script.
# [processors.starlark.constants]
# max_size = 10
# threshold = 0.75
# default_name = "Julia"
# debug_mode = true
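A slightly fuller source block, as a sketch of what a script can do (the temp_c/temp_f field names are assumed; the plugin only requires the def apply(metric) entry point shown above):
# source = '''
# def apply(metric):
#     # derive a Fahrenheit field from an existing Celsius field, if present
#     temp_c = metric.fields.get("temp_c")
#     if temp_c != None:
#         metric.fields["temp_f"] = temp_c * 9.0 / 5.0 + 32.0
#     return metric
# '''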


@ -0,0 +1,59 @@
# Perform string processing on tags, fields, and measurements
[[processors.strings]]
## Convert a field value to lowercase and store in a new field
# [[processors.strings.lowercase]]
# field = "uri_stem"
# dest = "uri_stem_normalised"
## Convert a tag value to uppercase
# [[processors.strings.uppercase]]
# tag = "method"
## Convert a field value to titlecase
# [[processors.strings.titlecase]]
# field = "status"
## Trim leading and trailing whitespace using the default cutset
# [[processors.strings.trim]]
# field = "message"
## Trim leading characters in cutset
# [[processors.strings.trim_left]]
# field = "message"
# cutset = "\t"
## Trim trailing characters in cutset
# [[processors.strings.trim_right]]
# field = "message"
# cutset = "\r\n"
## Trim the given prefix from the field
# [[processors.strings.trim_prefix]]
# field = "my_value"
# prefix = "my_"
## Trim the given suffix from the field
# [[processors.strings.trim_suffix]]
# field = "read_count"
# suffix = "_count"
## Replace all non-overlapping instances of old with new
# [[processors.strings.replace]]
# measurement = "*"
# old = ":"
# new = "_"
## Trims strings based on width
# [[processors.strings.left]]
# field = "message"
# width = 10
## Decode a base64 encoded utf-8 string
# [[processors.strings.base64decode]]
# field = "message"
## Sanitize a string to ensure it is a valid utf-8 string
## Each run of invalid UTF-8 byte sequences is replaced by the replacement string, which may be empty
# [[processors.strings.valid_utf8]]
# field = "message"
# replacement = ""
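As a sketch of the first (lowercase) option above, assuming it were uncommented: the lowercased result is written to the dest field, so the original field is preserved (metric and values assumed).
  iis_log,host=web01 uri_stem="/API/Users"
becomes
  iis_log,host=web01 uri_stem="/API/Users",uri_stem_normalised="/api/users"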


@ -0,0 +1,7 @@
# Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit.
[[processors.tag_limit]]
## Maximum number of tags to preserve
limit = 3
## List of tags to preferentially preserve
keep = ["environment", "region"]


@ -0,0 +1,9 @@
# Uses a Go template to create a new tag
[[processors.template]]
## Tag to set with the output of the template.
tag = "topic"
## Go template used to create the tag value. In order to ease TOML
## escaping requirements, you may wish to use single quotes around the
## template string.
template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}'
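For example, a metric carrying the referenced tags (values assumed) gets the rendered result as its topic tag:
  syslog,hostname=web01,level=err severity_code=3i
becomes
  syslog,hostname=web01,level=err,topic=web01.err severity_code=3i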


@ -0,0 +1,55 @@
# Keep only the top k metrics over a period of time, aggregated over the configured fields.
[[processors.topk]]
## How many seconds between aggregations
# period = 10
## How many top buckets to return per field
## Every field specified to aggregate over will return k number of results.
## For example, 1 field with k of 10 will return 10 buckets, while 2 fields
## with k of 3 will return 6 buckets.
# k = 10
## Over which tags should the aggregation be done. Globs can be specified, in
## which case any tag matching the glob will be aggregated over. If set to an
## empty list, no aggregation over tags is done.
# group_by = ['*']
## The field(s) to aggregate
## Each field defined is used to create an independent aggregation. Each
## aggregation will return k buckets. If a metric does not have a defined
## field, the metric will be dropped from the aggregation. Consider using
## the defaults processor plugin to ensure fields are set if required.
# fields = ["value"]
## What aggregation function to use. Options: sum, mean, min, max
# aggregation = "mean"
## Instead of the top k largest metrics, return the bottom k lowest metrics
# bottomk = false
## The plugin assigns each metric a GroupBy tag generated from its name and
## tags. If this setting is different from "" the plugin will add a
## tag (whose name will be the value of this setting) to each metric with
## the value of the calculated GroupBy tag. Useful for debugging.
# add_groupby_tag = ""
## These settings provide a way to know the position of each metric in
## the top k. The 'add_rank_fields' setting lets you specify for which
## fields the position is required. If the list is non-empty, then a field
## will be added to each and every metric for each string present in this
## setting. This field will contain the ranking of the group that
## the metric belonged to when aggregated over that field.
## The name of the field will be set to the name of the aggregation field,
## suffixed with the string '_topk_rank'
# add_rank_fields = []
## These settings provide a way to know what values the plugin is generating
## when aggregating metrics. The 'add_aggregate_fields' setting lets you
## specify for which fields the final aggregation value is required. If the
## list is non-empty, then a field will be added to each and every metric for
## each field present in this setting. This field will contain
## the computed aggregation for the group that the metric belonged to when
## aggregated over that field.
## The name of the field will be set to the name of the aggregation field,
## suffixed with the string '_topk_aggregate'
# add_aggregate_fields = []


@ -0,0 +1,6 @@
# Rotate multi field metric into several single field metrics
[[processors.unpivot]]
## Name of the tag that will hold the original field name.
tag_key = "name"
## Name of the field that will hold the original field value.
value_key = "value"
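Roughly, each field of a multi-field metric becomes its own point, with the original field name stored in the name tag and its value in the value field (series assumed for illustration):
  cpu,host=web01 idle=42,user=10
becomes
  cpu,host=web01,name=idle value=42
  cpu,host=web01,name=user value=10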