feat: migrate processor plugins to new sample config format (#10913)

Sebastian Spaink 2022-04-06 15:49:41 -05:00 committed by GitHub
parent 43017e14dd
commit be0008f9e2
81 changed files with 481 additions and 1099 deletions

View File

@ -8,7 +8,12 @@ to metrics associated with EC2 instances.
## Configuration
```toml
# Attach AWS EC2 metadata to metrics
[[processors.aws_ec2]]
## Instance identity document tags to attach to metrics.
## For more information see:
## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html
##
## Available tags:
## * accountId
## * architecture

View File

@ -35,54 +35,6 @@ type AwsEc2Processor struct {
instanceID string
}
const sampleConfig = `
## Instance identity document tags to attach to metrics.
## For more information see:
## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html
##
## Available tags:
## * accountId
## * architecture
## * availabilityZone
## * billingProducts
## * imageId
## * instanceId
## * instanceType
## * kernelId
## * pendingTime
## * privateIp
## * ramdiskId
## * region
## * version
imds_tags = []
## EC2 instance tags retrieved with the DescribeTags action.
## If a tag is empty upon retrieval, it is omitted when tagging metrics.
## Note that for this to work, the role attached to the EC2 instance or the
## AWS credentials available from the environment must have a policy attached
## that allows ec2:DescribeTags.
##
## For more information see:
## https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTags.html
ec2_tags = []
## Timeout for HTTP requests made against the AWS EC2 metadata endpoint.
timeout = "10s"
## ordered controls whether or not the metrics need to stay in the same order
## this plugin received them in. If false, this plugin will change the order
## with requests hitting cached results moving through immediately and not
## waiting on slower lookups. This may cause issues for you if you are
## depending on the order of metrics staying the same. If so, set this to true.
## Keeping the metrics ordered may be slightly slower.
ordered = false
## max_parallel_calls is the maximum number of AWS API calls to be in flight
## at the same time.
## It's probably best to keep this number fairly low.
max_parallel_calls = 10
`
const (
DefaultMaxOrderedQueueSize = 10_000
DefaultMaxParallelCalls = 10
@ -105,14 +57,6 @@ var allowedImdsTags = map[string]struct{}{
"version": {},
}
func (r *AwsEc2Processor) SampleConfig() string {
return sampleConfig
}
func (r *AwsEc2Processor) Description() string {
return "Attach AWS EC2 metadata to metrics"
}
func (r *AwsEc2Processor) Add(metric telegraf.Metric, _ telegraf.Accumulator) error {
r.parallel.Enqueue(metric)
return nil

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../../tools/generate_plugindata/main.go
//go:generate go run ../../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package ec2
func (r *AwsEc2Processor) SampleConfig() string {
return `{{ .SampleConfig }}`
}
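For orientation, here is a hedged sketch of what the generated file could look like after `go generate` runs. The exact generated output is not shown in this diff; the sketch assumes the generate_plugindata tool substitutes the README's TOML sample for the `{{ .SampleConfig }}` placeholder:

```go
// Hypothetical post-generation result for the ec2 processor
// (illustrative only, not part of this commit):
package ec2

func (r *AwsEc2Processor) SampleConfig() string {
	return `# Attach AWS EC2 metadata to metrics
[[processors.aws_ec2]]
  ## Instance identity document tags to attach to metrics.
  imds_tags = []
`
}
```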

View File

@ -3,7 +3,6 @@ package ec2
import (
"testing"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
@ -46,12 +45,3 @@ func TestBasicInitInvalidTagsReturnAnError(t *testing.T) {
err := p.Init()
require.Error(t, err)
}
func TestLoadingConfig(t *testing.T) {
confFile := []byte("[[processors.aws_ec2]]" + "\n" + sampleConfig)
c := config.NewConfig()
err := c.LoadConfigData(confFile)
require.NoError(t, err)
require.Len(t, c.Processors, 1)
}
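For reference, a round-trip check against the generated sample could look like this sketch (hypothetical, not part of the commit; it assumes the generated `SampleConfig()` emits valid TOML):

```go
func TestLoadingGeneratedConfig(t *testing.T) {
	// Feed the generated sample back through the config loader.
	p := &AwsEc2Processor{}
	confFile := []byte("[[processors.aws_ec2]]\n" + p.SampleConfig())
	c := config.NewConfig()
	require.NoError(t, c.LoadConfigData(confFile))
	require.Len(t, c.Processors, 1)
}
```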

View File

@ -5,17 +5,6 @@ import (
"github.com/influxdata/telegraf/plugins/processors"
)
var sampleConfig = `
## All modifications on inputs and aggregators can be overridden:
# name_override = "new_name"
# name_prefix = "new_name_prefix"
# name_suffix = "new_name_suffix"
## Tags to be added (all values must be strings)
# [processors.clone.tags]
# additional_tag = "tag_value"
`
type Clone struct {
NameOverride string
NamePrefix string
@ -23,14 +12,6 @@ type Clone struct {
Tags map[string]string
}
func (c *Clone) SampleConfig() string {
return sampleConfig
}
func (c *Clone) Description() string {
return "Clone metrics and apply modifications."
}
func (c *Clone) Apply(in ...telegraf.Metric) []telegraf.Metric {
cloned := []telegraf.Metric{}

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package clone
func (c *Clone) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -13,35 +13,6 @@ import (
"github.com/influxdata/telegraf/plugins/processors"
)
var sampleConfig = `
## Tags to convert
##
## The table key determines the target type, and the array of key-values
## select the keys to convert. The array may contain globs.
## <target-type> = [<tag-key>...]
[processors.converter.tags]
measurement = []
string = []
integer = []
unsigned = []
boolean = []
float = []
## Fields to convert
##
## The table key determines the target type, and the array of key-values
## select the keys to convert. The array may contain globs.
## <target-type> = [<field-key>...]
[processors.converter.fields]
measurement = []
tag = []
string = []
integer = []
unsigned = []
boolean = []
float = []
`
type Conversion struct {
Measurement []string `toml:"measurement"`
Tag []string `toml:"tag"`
@ -71,14 +42,6 @@ type ConversionFilter struct {
Float filter.Filter
}
func (p *Converter) SampleConfig() string {
return sampleConfig
}
func (p *Converter) Description() string {
return "Convert values to another metric value type"
}
func (p *Converter) Init() error {
return p.compile()
}
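As an illustration of the conversion-table semantics above, here is a hedged, self-contained sketch of coercing one field to float, as an entry like `float = ["load"]` would (illustrative only; the plugin's real code handles many more types):

```go
package main

import (
	"fmt"
	"strconv"
)

// toFloat coerces a field value into float64 (sketch).
func toFloat(v interface{}) (float64, bool) {
	switch x := v.(type) {
	case float64:
		return x, true
	case int64:
		return float64(x), true
	case string:
		f, err := strconv.ParseFloat(x, 64)
		return f, err == nil
	default:
		return 0, false
	}
}

func main() {
	f, ok := toFloat("3.14")
	fmt.Println(f, ok) // 3.14 true
}
```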

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package converter
func (p *Converter) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -13,6 +13,7 @@ A few example usecases include:
## Configuration
```toml
# Dates measurements, tags, and fields that pass through this filter.
[[processors.date]]
## New tag to create
tag_key = "month"

View File

@ -9,31 +9,6 @@ import (
"github.com/influxdata/telegraf/plugins/processors"
)
const sampleConfig = `
## New tag to create
tag_key = "month"
## New field to create (cannot set both field_key and tag_key)
# field_key = "month"
## Date format string, must be a representation of the Go "reference time"
## which is "Mon Jan 2 15:04:05 -0700 MST 2006".
date_format = "Jan"
## If destination is a field, date format can also be one of
## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field.
# date_format = "unix"
## Offset duration added to the date string when writing the new tag.
# date_offset = "0s"
## Timezone to use when creating the tag or field using a reference time
## string. This can be set to one of "UTC", "Local", or to a location name
## in the IANA Time Zone database.
## example: timezone = "America/Los_Angeles"
# timezone = "UTC"
`
const defaultTimezone = "UTC"
type Date struct {
@ -46,14 +21,6 @@ type Date struct {
location *time.Location
}
func (d *Date) SampleConfig() string {
return sampleConfig
}
func (d *Date) Description() string {
return "Dates measurements, tags, and fields that pass through this filter."
}
func (d *Date) Init() error {
// Check either TagKey or FieldKey specified
if len(d.FieldKey) > 0 && len(d.TagKey) > 0 {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package date
func (d *Date) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -5,6 +5,7 @@ Filter metrics whose field values are exact repetitions of the previous values.
## Configuration
```toml
# Filter metrics with repeating field values
[[processors.dedup]]
## Maximum time to suppress output
dedup_interval = "600s"

View File

@ -8,25 +8,12 @@ import (
"github.com/influxdata/telegraf/plugins/processors"
)
var sampleConfig = `
## Maximum time to suppress output
dedup_interval = "600s"
`
type Dedup struct {
DedupInterval config.Duration `toml:"dedup_interval"`
FlushTime time.Time
Cache map[uint64]telegraf.Metric
}
func (d *Dedup) SampleConfig() string {
return sampleConfig
}
func (d *Dedup) Description() string {
return "Filter metrics with repeating field values"
}
// Remove expired items from cache
func (d *Dedup) cleanup() {
// No need to clean up the cache too often. Let's save some CPU
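The suppression logic itself is not shown in this hunk; below is a simplified, self-contained sketch of the idea described in the README (illustrative only — the series key, crude field comparison via fmt, and helper types are mine, not the plugin's):

```go
package main

import (
	"fmt"
	"time"
)

type seen struct {
	fields map[string]interface{}
	at     time.Time
}

// shouldEmit suppresses a point whose fields exactly repeat the last
// emitted point for the same series within the interval.
func shouldEmit(cache map[string]seen, series string, fields map[string]interface{},
	t time.Time, interval time.Duration) bool {
	prev, ok := cache[series]
	if ok && t.Sub(prev.at) < interval && fmt.Sprint(fields) == fmt.Sprint(prev.fields) {
		return false // exact repetition: suppress
	}
	cache[series] = seen{fields, t} // remember the latest values per series
	return true
}

func main() {
	cache := map[string]seen{}
	now := time.Now()
	f := map[string]interface{}{"value": 1}
	fmt.Println(shouldEmit(cache, "cpu", f, now, 10*time.Minute))                   // true
	fmt.Println(shouldEmit(cache, "cpu", f, now.Add(time.Minute), 10*time.Minute)) // false
}
```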

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package dedup
func (d *Dedup) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -15,8 +15,15 @@ Telegraf minimum version: Telegraf 1.15.0
```toml
## Set default fields on your metric(s) when they are nil or empty
[[processors.defaults]]
## This table determines what fields will be inserted in your metric(s)
## Ensures a set of fields always exists on your metric(s) with their
## respective default value.
## For any given field pair (key = default), if it's not set, a field
## is set on the metric with the specified default.
##
## A field is considered not set if it is nil on the incoming metric;
## or it is not nil but its value is an empty string or is a string
## of one or more spaces.
## <target-field> = <value>
[processors.defaults.fields]
field_1 = "bar"
time_idle = 0

View File

@ -7,38 +7,12 @@ import (
"github.com/influxdata/telegraf/plugins/processors"
)
const sampleConfig = `
## Ensures a set of fields always exists on your metric(s) with their
## respective default value.
## For any given field pair (key = default), if it's not set, a field
## is set on the metric with the specified default.
##
## A field is considered not set if it is nil on the incoming metric;
## or it is not nil but its value is an empty string or is a string
## of one or more spaces.
## <target-field> = <value>
# [processors.defaults.fields]
# field_1 = "bar"
# time_idle = 0
# is_error = true
`
// Defaults is a processor for ensuring certain fields always exist
// on your Metrics with at least a default value.
type Defaults struct {
DefaultFieldsSets map[string]interface{} `toml:"fields"`
}
// SampleConfig represents a sample toml config for this plugin.
func (def *Defaults) SampleConfig() string {
return sampleConfig
}
// Description is a brief description of this processor plugin's behaviour.
func (def *Defaults) Description() string {
return "Defaults sets default value(s) for specified fields that are not set on incoming metrics."
}
// Apply contains the main implementation of this processor.
// For each metric in 'inputMetrics', it goes over each default pair.
// If the field in the pair does not exist on the metric, the associated default is added.
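A minimal, self-contained sketch of that behavior on plain maps (illustrative; `isUnset` mirrors the "not set" definition above and is not the plugin's actual helper):

```go
package main

import (
	"fmt"
	"strings"
)

// isUnset: nil, "" and whitespace-only strings count as "not set",
// per the README's definition.
func isUnset(v interface{}) bool {
	if v == nil {
		return true
	}
	s, ok := v.(string)
	return ok && strings.TrimSpace(s) == ""
}

// applyDefaults inserts the default wherever the field is unset.
func applyDefaults(fields, defaults map[string]interface{}) {
	for key, def := range defaults {
		if v, ok := fields[key]; !ok || isUnset(v) {
			fields[key] = def
		}
	}
}

func main() {
	fields := map[string]interface{}{"status": "  "}
	applyDefaults(fields, map[string]interface{}{"status": "unknown", "time_idle": 0})
	fmt.Println(fields) // map[status:unknown time_idle:0]
}
```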

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package defaults
func (def *Defaults) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -12,6 +12,7 @@ source tag or field is overwritten.
## Configuration
```toml
# Map enum values according to given table.
[[processors.enum]]
[[processors.enum.mapping]]
## Name of the field to map. Globs accepted.

View File

@ -9,30 +9,6 @@ import (
"github.com/influxdata/telegraf/plugins/processors"
)
var sampleConfig = `
[[processors.enum.mapping]]
## Name of the field to map. Globs accepted.
field = "status"
## Name of the tag to map. Globs accepted.
# tag = "status"
## Destination tag or field to be used for the mapped value. By default the
## source tag or field is used, overwriting the original value.
dest = "status_code"
## Default value to be used for all values not contained in the mapping
## table. When unset, the unmodified value for the field will be used if no
## match is found.
# default = 0
## Table of mappings
[processors.enum.mapping.value_mappings]
green = 1
amber = 2
red = 3
`
type EnumMapper struct {
Mappings []Mapping `toml:"mapping"`
@ -71,14 +47,6 @@ func (mapper *EnumMapper) Init() error {
return nil
}
func (mapper *EnumMapper) SampleConfig() string {
return sampleConfig
}
func (mapper *EnumMapper) Description() string {
return "Map enum values according to given table."
}
func (mapper *EnumMapper) Apply(in ...telegraf.Metric) []telegraf.Metric {
for i := 0; i < len(in); i++ {
in[i] = mapper.applyMappings(in[i])

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package enum
func (mapper *EnumMapper) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -23,6 +23,7 @@ Telegraf minimum version: Telegraf 1.15.0
## Configuration
```toml
# Run executable as long-running processor plugin
[[processors.execd]]
## One program to run as daemon.
## NOTE: process and each argument should each be their own string

View File

@ -17,15 +17,6 @@ import (
"github.com/influxdata/telegraf/plugins/serializers"
)
const sampleConfig = `
## Program to run as daemon
## eg: command = ["/path/to/your_program", "arg1", "arg2"]
command = ["cat"]
## Delay before the process is restarted after an unexpected termination
restart_delay = "10s"
`
type Execd struct {
Command []string `toml:"command"`
RestartDelay config.Duration `toml:"restart_delay"`
@ -51,14 +42,6 @@ func New() *Execd {
}
}
func (e *Execd) SampleConfig() string {
return sampleConfig
}
func (e *Execd) Description() string {
return "Run executable as long-running processor plugin"
}
func (e *Execd) Start(acc telegraf.Accumulator) error {
var err error
e.parser, err = parsers.NewParser(e.parserConfig)
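For context, the daemon this processor runs exchanges metrics over stdin/stdout. A hedged sketch of the simplest possible external program, equivalent in spirit to the `command = ["cat"]` sample above (it assumes the documented line-protocol pass-through contract, which is not restated in this diff):

```go
package main

import (
	"bufio"
	"fmt"
	"os"
)

// Minimal pass-through daemon: read a metric line on stdin,
// echo it back unchanged on stdout.
func main() {
	sc := bufio.NewScanner(os.Stdin)
	for sc.Scan() {
		fmt.Println(sc.Text())
	}
}
```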

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package execd
func (e *Execd) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -1,210 +1,212 @@
<!-- markdownlint-disable MD024 -->
# Filepath Processor Plugin
The `filepath` processor plugin maps certain Go functions from [path/filepath](https://golang.org/pkg/path/filepath/)
onto tag and field values. Values can be modified in place or stored in another key.
Implemented functions are:
* [Base](https://golang.org/pkg/path/filepath/#Base) (accessible through `[[processors.filepath.basename]]`)
* [Rel](https://golang.org/pkg/path/filepath/#Rel) (accessible through `[[processors.filepath.rel]]`)
* [Dir](https://golang.org/pkg/path/filepath/#Dir) (accessible through `[[processors.filepath.dir]]`)
* [Clean](https://golang.org/pkg/path/filepath/#Clean) (accessible through `[[processors.filepath.clean]]`)
* [ToSlash](https://golang.org/pkg/path/filepath/#ToSlash) (accessible through `[[processors.filepath.toslash]]`)
On top of that, the plugin provides an extra function to retrieve the final path component without its extension. This
function is accessible through the `[[processors.filepath.stem]]` configuration item.
Please note that, in this implementation, these functions are processed in the order they appear above (except for
`stem`, which is applied first).
Specify the `tag` and/or `field` that you want processed in each section and optionally a `dest` if you want the result
stored in a new tag or field.
If you plan to apply multiple transformations to the same `tag`/`field`, bear in mind the processing order stated above.
Telegraf minimum version: Telegraf 1.15.0
## Configuration
```toml
# Performs file path manipulations on tags and fields
[[processors.filepath]]
## Treat the tag value as a path and convert it to its last element, storing the result in a new tag
# [[processors.filepath.basename]]
# tag = "path"
# dest = "basepath"
## Treat the field value as a path and keep all but the last element of path, typically the path's directory
# [[processors.filepath.dirname]]
# field = "path"
## Treat the tag value as a path, converting it to its last element without its suffix
# [[processors.filepath.stem]]
# tag = "path"
## Treat the tag value as a path, converting it to the shortest path name equivalent
## to path by purely lexical processing
# [[processors.filepath.clean]]
# tag = "path"
## Treat the tag value as a path, converting it to a relative path that is lexically
## equivalent to the source path when joined to 'base_path'
# [[processors.filepath.rel]]
# tag = "path"
# base_path = "/var/log"
## Treat the tag value as a path, replacing each separator character in path with a '/' character. Only
## has an effect on Windows
# [[processors.filepath.toslash]]
# tag = "path"
```
## Considerations
### Clean
Even though `clean` is provided as a standalone function, it is also invoked when using the `rel` and `dirname` functions,
so there is no need to use it along with them.
That is:
```toml
[[processors.filepath]]
[[processors.filepath.dir]]
tag = "path"
[[processors.filepath.clean]]
tag = "path"
```
Is equivalent to:
```toml
[[processors.filepath]]
[[processors.filepath.dir]]
tag = "path"
```
### ToSlash
The effects of this function are only noticeable on Windows platforms, because of the underlying Go implementation.
## Examples
### Basename
```toml
[[processors.filepath]]
[[processors.filepath.basename]]
tag = "path"
```
```diff
- my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000
+ my_metric,path="ajob.log" duration_seconds=134 1587920425000000000
```
### Dirname
```toml
[[processors.filepath]]
[[processors.filepath.dirname]]
field = "path"
dest = "folder"
```
```diff
- my_metric path="/var/log/batch/ajob.log",duration_seconds=134 1587920425000000000
+ my_metric path="/var/log/batch/ajob.log",folder="/var/log/batch",duration_seconds=134 1587920425000000000
```
### Stem
```toml
[[processors.filepath]]
[[processors.filepath.stem]]
tag = "path"
```
```diff
- my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000
+ my_metric,path="ajob" duration_seconds=134 1587920425000000000
```
### Clean
```toml
[[processors.filepath]]
[[processors.filepath.clean]]
tag = "path"
```
```diff
- my_metric,path="/var/log/dummy/../batch//ajob.log" duration_seconds=134 1587920425000000000
+ my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000
```
### Rel
```toml
[[processors.filepath]]
[[processors.filepath.rel]]
tag = "path"
base_path = "/var/log"
```
```diff
- my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000
+ my_metric,path="batch/ajob.log" duration_seconds=134 1587920425000000000
```
### ToSlash
```toml
[[processors.filepath]]
[[processors.filepath.toslash]]
tag = "path"
```
```diff
- my_metric,path="\var\log\batch\ajob.log" duration_seconds=134 1587920425000000000
+ my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000
```
## Processing paths from tail plugin
This plugin can be used together with the
[tail input plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tail) to make modifications
to the `path` tag injected for every file.
Scenario:
* A log file `/var/log/myjobs/mysql_backup.log`, containing logs for a job execution. Whenever the job ends, a line is
written to the log file following this format: `2020-04-05 11:45:21 total time execution: 70 seconds`
* We want to generate a measurement that captures the duration of the script as a field and includes the `path` as a
tag
* We are interested in the filename without its extensions, since it might be enough information for plotting our
execution times in a dashboard
* Just in case, we don't want to override the original path (if for some reason we end up having duplicates we might
want this information)
For this purpose, we will use the `tail` input plugin, the `grok` parser plugin and the `filepath` processor.
```toml
[[inputs.tail]]
files = ["/var/log/myjobs/**.log"]
data_format = "grok"
grok_patterns = ['%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05"} total time execution: %{NUMBER:duration_seconds:int}']
name_override = "myjobs"
# Performs file path manipulations on tags and fields
[[processors.filepath]]
[[processors.filepath.stem]]
tag = "path"
dest = "stempath"
```
The resulting output for a job taking 70 seconds for the mentioned log file would look like:
```text
myjobs_duration_seconds,host="my-host",path="/var/log/myjobs/mysql_backup.log",stempath="mysql_backup" 70 1587920425000000000
```

View File

@ -31,45 +31,6 @@ type RelOpts struct {
BasePath string
}
const sampleConfig = `
## Treat the tag value as a path and convert it to its last element, storing the result in a new tag
# [[processors.filepath.basename]]
# tag = "path"
# dest = "basepath"
## Treat the field value as a path and keep all but the last element of path, typically the path's directory
# [[processors.filepath.dirname]]
# field = "path"
## Treat the tag value as a path, converting it to its last element without its suffix
# [[processors.filepath.stem]]
# tag = "path"
## Treat the tag value as a path, converting it to the shortest path name equivalent
## to path by purely lexical processing
# [[processors.filepath.clean]]
# tag = "path"
## Treat the tag value as a path, converting it to a relative path that is lexically
## equivalent to the source path when joined to 'base_path'
# [[processors.filepath.rel]]
# tag = "path"
# base_path = "/var/log"
## Treat the tag value as a path, replacing each separator character in path with a '/' character. Only
## has an effect on Windows
# [[processors.filepath.toslash]]
# tag = "path"
`
func (o *Options) SampleConfig() string {
return sampleConfig
}
func (o *Options) Description() string {
return "Performs file path manipulations on tags and fields"
}
// applyFunc applies the specified function to the metric
func (o *Options) applyFunc(bo BaseOpts, fn ProcessorFunc, metric telegraf.Metric) {
if bo.Tag != "" {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package filepath
func (o *Options) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -7,6 +7,7 @@ Telegraf minimum version: Telegraf 1.15.0
## Configuration
```toml
# Add a tag of the network interface name looked up over SNMP by interface number
[[processors.ifname]]
## Name of tag holding the interface number
# tag = "ifIndex"

View File

@ -15,65 +15,6 @@ import (
"github.com/influxdata/telegraf/plugins/processors"
)
var sampleConfig = `
## Name of tag holding the interface number
# tag = "ifIndex"
## Name of output tag where the interface name will be added
# dest = "ifName"
## Name of tag of the SNMP agent to request the interface name from
# agent = "agent"
## Timeout for each request.
# timeout = "5s"
## SNMP version; can be 1, 2, or 3.
# version = 2
## SNMP community string.
# community = "public"
## Number of retries to attempt.
# retries = 3
## The GETBULK max-repetitions parameter.
# max_repetitions = 10
## SNMPv3 authentication and encryption options.
##
## Security Name.
# sec_name = "myuser"
## Authentication protocol; one of "MD5", "SHA", or "".
# auth_protocol = "MD5"
## Authentication password.
# auth_password = "pass"
## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
# sec_level = "authNoPriv"
## Context Name.
# context_name = ""
## Privacy protocol used for encrypted messages; one of "DES", "AES" or "".
# priv_protocol = ""
## Privacy password used for encrypted messages.
# priv_password = ""
## max_parallel_lookups is the maximum number of SNMP requests to
## make at the same time.
# max_parallel_lookups = 100
## ordered controls whether or not the metrics need to stay in the
## same order this plugin received them in. If false, this plugin
## may change the order when data is cached. If you need metrics to
## stay in order set this to true. keeping the metrics ordered may
## be slightly slower
# ordered = false
## cache_ttl is the amount of time interface names are cached for a
## given agent. After this period elapses if names are needed they
## will be retrieved again.
# cache_ttl = "8h"
`
type nameMap map[uint64]string
type keyType = string
type valType = nameMap
@ -113,14 +54,6 @@ type IfName struct {
const minRetry = 5 * time.Minute
func (d *IfName) SampleConfig() string {
return sampleConfig
}
func (d *IfName) Description() string {
return "Add a tag of the network interface name looked up over SNMP by interface number"
}
func (d *IfName) Init() error {
d.getMapRemote = d.getMapRemoteNoMock
d.makeTable = d.makeTableNoMock

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package ifname
func (d *IfName) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -8,6 +8,7 @@ Depending on the function, various parameters need to be configured:
Depending on the choice of the distribution function, the respective parameters must be set. Default settings are `noise_type = "laplacian"` with `mu = 0.0` and `scale = 1.0`:
```toml
# Adds noise to numerical fields
[[processors.noise]]
## Specifies the type of the random distribution.
## Can be "laplacian", "gaussian" or "uniform".

View File

@ -19,28 +19,6 @@ const (
defaultNoiseType = "laplacian"
)
const sampleConfig = `
## Specifies the type of the random distribution.
## Can be "laplacian", "gaussian" or "uniform".
# type = "laplacian"
## Center of the distribution.
## Only used for Laplacian and Gaussian distributions.
# mu = 0.0
## Scale parameter for the Laplacian or Gaussian distribution
# scale = 1.0
## Upper and lower bound of the Uniform distribution
# min = -1.0
# max = 1.0
## Apply the noise only to numeric fields matching the filter criteria below.
## Excludes takes precedence over includes.
# include_fields = []
# exclude_fields = []
`
type Noise struct {
Scale float64 `toml:"scale"`
Min float64 `toml:"min"`
@ -54,14 +32,6 @@ type Noise struct {
fieldFilter filter.Filter
}
func (p *Noise) SampleConfig() string {
return sampleConfig
}
func (p *Noise) Description() string {
return "Adds noise to numerical fields"
}
// generates a random noise value depending on the defined probability density
// function and adds that to the original value. If any integer overflows
// happen during the calculation, the result is set to MaxInt or 0 (for uint)
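A self-contained sketch of the clamping described in that comment (illustrative only; the plugin's actual arithmetic differs in detail):

```go
package main

import (
	"fmt"
	"math"
)

// addNoiseUint64 adds integer noise to a uint64 field value, clamping
// instead of wrapping: underflow clamps to 0 as described above,
// overflow clamps to the maximum.
func addNoiseUint64(v uint64, noise float64) uint64 {
	if noise < 0 {
		dec := uint64(-noise)
		if dec > v {
			return 0
		}
		return v - dec
	}
	inc := uint64(noise)
	if v > math.MaxUint64-inc {
		return math.MaxUint64
	}
	return v + inc
}

func main() {
	fmt.Println(addNoiseUint64(5, -10)) // 0 (clamped)
}
```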

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package noise
func (p *Noise) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -5,17 +5,6 @@ import (
"github.com/influxdata/telegraf/plugins/processors"
)
var sampleConfig = `
## All modifications on inputs and aggregators can be overridden:
# name_override = "new_name"
# name_prefix = "new_name_prefix"
# name_suffix = "new_name_suffix"
## Tags to be added (all values must be strings)
# [processors.override.tags]
# additional_tag = "tag_value"
`
type Override struct {
NameOverride string
NamePrefix string
@ -23,14 +12,6 @@ type Override struct {
Tags map[string]string
}
func (p *Override) SampleConfig() string {
return sampleConfig
}
func (p *Override) Description() string {
return "Apply metric modifications using override semantics."
}
func (p *Override) Apply(in ...telegraf.Metric) []telegraf.Metric {
for _, metric := range in {
if len(p.NameOverride) > 0 {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package override
func (p *Override) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -6,6 +6,7 @@ creates new metrics based on the contents of the field.
## Configuration
```toml
# Parse a value in a specified field/tag(s) and add the result in a new metric
[[processors.parser]]
## The names of the fields whose values will be parsed.
parse_fields = ["message"]

View File

@ -16,32 +16,6 @@ type Parser struct {
parser telegraf.Parser
}
var SampleConfig = `
## The names of the fields whose values will be parsed.
parse_fields = []
## If true, incoming metrics are not emitted.
drop_original = false
## If set to override, emitted metrics will be merged by overriding the
## original metric using the newly parsed metrics.
merge = "override"
## The data format to be read from files
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
`
func (p *Parser) SampleConfig() string {
return SampleConfig
}
func (p *Parser) Description() string {
return "Parse a value in a specified field/tag(s) and add the result in a new metric"
}
func (p *Parser) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
if p.parser == nil {
var err error

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package parser
func (p *Parser) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -11,6 +11,7 @@ To perform the reverse operation use the [unpivot] processor.
## Configuration
```toml
# Rotate a single valued metric into a multi field metric
[[processors.pivot]]
## Tag to use for naming the new field.
tag_key = "name"

View File

@ -5,29 +5,11 @@ import (
"github.com/influxdata/telegraf/plugins/processors"
)
const (
description = "Rotate a single valued metric into a multi field metric"
sampleConfig = `
## Tag to use for naming the new field.
tag_key = "name"
## Field to use as the value of the new field.
value_key = "value"
`
)
type Pivot struct {
TagKey string `toml:"tag_key"`
ValueKey string `toml:"value_key"`
}
func (p *Pivot) SampleConfig() string {
return sampleConfig
}
func (p *Pivot) Description() string {
return description
}
func (p *Pivot) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
for _, m := range metrics {
key, ok := m.GetTag(p.TagKey)
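The remainder of `Apply` is elided in this hunk; below is a self-contained sketch of the pivot operation the README describes, on simplified tag/field maps (illustrative, not the plugin's exact code):

```go
package main

import "fmt"

// pivot: with tag_key = "name" and value_key = "value", a point like
//   cpu,name=time_idle value=42   becomes   cpu time_idle=42
func pivot(tags map[string]string, fields map[string]interface{}, tagKey, valueKey string) {
	name, ok := tags[tagKey]
	if !ok {
		return
	}
	if v, ok := fields[valueKey]; ok {
		delete(fields, valueKey)
		fields[name] = v
		delete(tags, tagKey)
	}
}

func main() {
	tags := map[string]string{"name": "time_idle"}
	fields := map[string]interface{}{"value": 42}
	pivot(tags, fields, "name", "value")
	fmt.Println(fields) // map[time_idle:42]
}
```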

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package pivot
func (p *Pivot) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -11,6 +11,7 @@ Telegraf minimum version: Telegraf 1.15.0
## Configuration
```toml
# Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file
[[processors.port_name]]
## Name of tag holding the port number
# tag = "port"

View File

@ -1,4 +1,4 @@
package portname
package port_name
import (
"bufio"
@ -11,26 +11,6 @@ import (
"github.com/influxdata/telegraf/plugins/processors"
)
var sampleConfig = `
[[processors.port_name]]
## Name of tag holding the port number
# tag = "port"
## Or name of the field holding the port number
# field = "port"
## Name of output tag or field (depending on the source) where service name will be added
# dest = "service"
## Default tcp or udp
# default_protocol = "tcp"
## Tag containing the protocol (tcp or udp, case-insensitive)
# protocol_tag = "proto"
## Field containing the protocol (tcp or udp, case-insensitive)
# protocol_field = "proto"
`
type sMap map[string]map[int]string // "https" == services["tcp"][443]
var services sMap
@ -46,14 +26,6 @@ type PortName struct {
Log telegraf.Logger `toml:"-"`
}
func (pn *PortName) SampleConfig() string {
return sampleConfig
}
func (pn *PortName) Description() string {
return "Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file"
}
func readServicesFile() {
file, err := os.Open(servicesPath())
if err != nil {
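As the `sMap` comment above indicates, lookups index first by protocol, then by port. A hedged, runnable illustration of that layout (the helper is mine, not the plugin's):

```go
package main

import "fmt"

// lookupService mirrors the sMap layout: protocol first, then port number.
func lookupService(services map[string]map[int]string, proto string, port int) (string, bool) {
	name, ok := services[proto][port] // e.g. services["tcp"][443] == "https"
	return name, ok
}

func main() {
	services := map[string]map[int]string{"tcp": {443: "https"}}
	if name, ok := lookupService(services, "tcp", 443); ok {
		fmt.Println(name) // https
	}
}
```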

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package port_name
func (pn *PortName) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -1,4 +1,4 @@
package portname
package port_name
import (
"strings"

View File

@ -1,7 +1,7 @@
//go:build windows
// +build windows
package portname
package port_name
import (
"os"

View File

@ -1,7 +1,7 @@
//go:build !windows
// +build !windows
package portname
package port_name
func servicesPath() string {
return "/etc/services"

View File

@ -13,17 +13,6 @@ type Printer struct {
serializer serializers.Serializer
}
var sampleConfig = `
`
func (p *Printer) SampleConfig() string {
return sampleConfig
}
func (p *Printer) Description() string {
return "Print all metrics that pass through this filter."
}
func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric {
for _, metric := range in {
octets, err := p.serializer.Serialize(metric)

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package printer
func (p *Printer) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -9,6 +9,7 @@ For metrics transforms, `key` denotes the element that should be transformed. Fu
## Configuration
```toml
# Transforms tag and field values as well as measurement, tag and field names with regex pattern
[[processors.regex]]
namepass = ["nginx_requests"]

View File

@ -27,69 +27,6 @@ type converter struct {
Append bool `toml:"append"`
}
const sampleConfig = `
## Tag and field conversions defined in a separate sub-tables
# [[processors.regex.tags]]
# ## Tag to change
# key = "resp_code"
# ## Regular expression to match on a tag value
# pattern = "^(\\d)\\d\\d$"
# ## Matches of the pattern will be replaced with this string. Use ${1}
# ## notation to use the text of the first submatch.
# replacement = "${1}xx"
# [[processors.regex.fields]]
# ## Field to change
# key = "request"
# ## The full power of Go regular expressions is available here.
# ## For example, named subgroups
# pattern = "^/api(?P<method>/[\\w/]+)\\S*"
# replacement = "${method}"
# ## If result_key is present, a new field will be created
# ## instead of changing existing field
# result_key = "method"
## Multiple conversions may be applied for one field sequentially
## Let's extract one more value
# [[processors.regex.fields]]
# key = "request"
# pattern = ".*category=(\\w+).*"
# replacement = "${1}"
# result_key = "search_category"
## Rename metric fields
# [[processors.regex.field_rename]]
# ## Regular expression to match on a field name
# pattern = "^search_(\\w+)d$"
# ## Matches of the pattern will be replaced with this string. Use ${1}
# ## notation to use the text of the first submatch.
# replacement = "${1}"
# ## If the new field name already exists, you can either "overwrite" the
# ## existing one with the value of the renamed field OR you can "keep"
# ## both the existing and source field.
# # result_key = "keep"
## Rename metric tags
# [[processors.regex.tag_rename]]
# ## Regular expression to match on a tag name
# pattern = "^search_(\\w+)d$"
# ## Matches of the pattern will be replaced with this string. Use ${1}
# ## notation to use the text of the first submatch.
# replacement = "${1}"
# ## If the new tag name already exists, you can either "overwrite" the
# ## existing one with the value of the renamed tag OR you can "keep"
# ## both the existing and source tag.
# # result_key = "keep"
## Rename metrics
# [[processors.regex.metric_rename]]
# ## Regular expression to match on a metric name
# pattern = "^search_(\\w+)d$"
# ## Matches of the pattern will be replaced with this string. Use ${1}
# ## notation to use the text of the first submatch.
# replacement = "${1}"
`
func (r *Regex) Init() error {
r.regexCache = make(map[string]*regexp.Regexp)
@ -157,14 +94,6 @@ func (r *Regex) Init() error {
return nil
}
func (r *Regex) SampleConfig() string {
return sampleConfig
}
func (r *Regex) Description() string {
return "Transforms tag and field values as well as measurement, tag and field names with regex pattern"
}
func (r *Regex) Apply(in ...telegraf.Metric) []telegraf.Metric {
for _, metric := range in {
for _, converter := range r.Tags {
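The `${1}` notation in these patterns is standard Go `regexp` replacement syntax; a quick runnable illustration of the `resp_code` example from the sample above:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// "^(\\d)\\d\\d$" with replacement "${1}xx" collapses status codes:
	re := regexp.MustCompile(`^(\d)\d\d$`)
	fmt.Println(re.ReplaceAllString("404", "${1}xx")) // 4xx
}
```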

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package regex
func (r *Regex) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -5,6 +5,7 @@ The `rename` processor renames measurements, fields, and tags.
## Configuration
```toml
# Rename measurements, tags, and fields that pass through this filter.
[[processors.rename]]
## Specify one sub-table per rename operation.
[[processors.rename.replace]]

View File

@ -5,9 +5,6 @@ import (
"github.com/influxdata/telegraf/plugins/processors"
)
const sampleConfig = `
`
type Replace struct {
Measurement string `toml:"measurement"`
Tag string `toml:"tag"`
@ -19,14 +16,6 @@ type Rename struct {
Replaces []Replace `toml:"replace"`
}
func (r *Rename) SampleConfig() string {
return sampleConfig
}
func (r *Rename) Description() string {
return "Rename measurements, tags, and fields that pass through this filter."
}
func (r *Rename) Apply(in ...telegraf.Metric) []telegraf.Metric {
for _, point := range in {
for _, replace := range r.Replaces {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package rename
func (r *Rename) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -8,6 +8,7 @@ Telegraf minimum version: Telegraf 1.15.0
## Configuration
```toml
# ReverseDNS does a reverse lookup on IP addresses to retrieve the DNS name
[[processors.reverse_dns]]
## For optimal performance, you may want to limit which metrics are passed to this
## processor. eg:

View File

@ -9,53 +9,6 @@ import (
"github.com/influxdata/telegraf/plugins/processors"
)
const sampleConfig = `
## For optimal performance, you may want to limit which metrics are passed to this
## processor. eg:
## namepass = ["my_metric_*"]
## cache_ttl is how long the dns entries should stay cached for.
## generally longer is better, but if you expect a large number of diverse lookups
## you'll want to consider memory use.
cache_ttl = "24h"
## lookup_timeout is how long you should wait for a single dns request to respond.
## this is also the maximum acceptable latency for a metric travelling through
## the reverse_dns processor. After lookup_timeout is exceeded, a metric will
## be passed on unaltered.
## multiple simultaneous resolution requests for the same IP will only make a
## single rDNS request, and they will all wait for the answer for this long.
lookup_timeout = "3s"
## max_parallel_lookups is the maximum number of dns requests to be in flight
## at the same time. Requests hitting cached values do not count against this
## total, and neither do multiple requests for the same IP.
## It's probably best to keep this number fairly low.
max_parallel_lookups = 10
## ordered controls whether or not the metrics need to stay in the same order
## this plugin received them in. If false, this plugin will change the order
## with requests hitting cached results moving through immediately and not
## waiting on slower lookups. This may cause issues for you if you are
## depending on the order of metrics staying the same. If so, set this to true.
## keeping the metrics ordered may be slightly slower.
ordered = false
[[processors.reverse_dns.lookup]]
## get the ip from the field "source_ip", and put the result in the field "source_name"
field = "source_ip"
dest = "source_name"
[[processors.reverse_dns.lookup]]
## get the ip from the tag "destination_ip", and put the result in the tag
## "destination_name".
tag = "destination_ip"
dest = "destination_name"
## If you would prefer destination_name to be a field instead, you can use a
## processors.converter after this one, specifying the order attribute.
`
type lookupEntry struct {
Tag string `toml:"tag"`
Field string `toml:"field"`
@ -75,14 +28,6 @@ type ReverseDNS struct {
Log telegraf.Logger `toml:"-"`
}
func (r *ReverseDNS) SampleConfig() string {
return sampleConfig
}
func (r *ReverseDNS) Description() string {
return "ReverseDNS does a reverse lookup on IP addresses to retrieve the DNS name"
}
func (r *ReverseDNS) Start(acc telegraf.Accumulator) error {
r.acc = acc
r.reverseDNSCache = NewReverseDNSCache(

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package reverse_dns
func (r *ReverseDNS) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -7,7 +7,6 @@ import (
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
@ -58,11 +57,3 @@ func TestSimpleReverseLookup(t *testing.T) {
require.True(t, ok)
require.EqualValues(t, "dns.google.", tag)
}
func TestLoadingConfig(t *testing.T) {
c := config.NewConfig()
err := c.LoadConfigData([]byte("[[processors.reverse_dns]]\n" + sampleConfig))
require.NoError(t, err)
require.Len(t, c.Processors, 1)
}

View File

@ -7,6 +7,7 @@ The `lat` and `lon` field values should contain WGS-84 coordinates in decimal degrees.
## Configuration
```toml
# Add the S2 Cell ID as a tag based on latitude and longitude fields
[[processors.s2geo]]
## The name of the lat and lon fields containing WGS-84 latitude and
## longitude in decimal degrees.

View File

@ -1,4 +1,4 @@
package geo
package s2geo
import (
"fmt"
@ -15,27 +15,6 @@ type Geo struct {
CellLevel int `toml:"cell_level"`
}
var SampleConfig = `
## The name of the lat and lon fields containing WGS-84 latitude and
## longitude in decimal degrees.
# lat_field = "lat"
# lon_field = "lon"
## New tag to create
# tag_key = "s2_cell_id"
## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html)
# cell_level = 9
`
func (g *Geo) SampleConfig() string {
return SampleConfig
}
func (g *Geo) Description() string {
return "Add the S2 Cell ID as a tag based on latitude and longitude fields"
}
func (g *Geo) Init() error {
if g.CellLevel < 0 || g.CellLevel > 30 {
return fmt.Errorf("invalid cell level %d", g.CellLevel)

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package s2geo
func (g *Geo) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -1,4 +1,4 @@
package geo
package s2geo
import (
"testing"

View File

@ -17,6 +17,7 @@ Telegraf minimum version: Telegraf 1.15.0
## Configuration
```toml
# Process metrics using a Starlark script
[[processors.starlark]]
## The Starlark source can be set as a string in this configuration file, or
## by referencing a file containing the script. Only one source or script

View File

@ -9,31 +9,6 @@ import (
"go.starlark.net/starlark"
)
const (
description = "Process metrics using a Starlark script"
sampleConfig = `
## The Starlark source can be set as a string in this configuration file, or
## by referencing a file containing the script. Only one source or script
## should be set at once.
##
## Source of the Starlark script.
source = '''
def apply(metric):
return metric
'''
## File containing a Starlark script.
# script = "/usr/local/bin/myscript.star"
## The constants of the Starlark script.
# [processors.starlark.constants]
# max_size = 10
# threshold = 0.75
# default_name = "Julia"
# debug_mode = true
`
)
type Starlark struct {
common.StarlarkCommon
@ -58,14 +33,6 @@ func (s *Starlark) Init() error {
return nil
}
func (s *Starlark) SampleConfig() string {
return sampleConfig
}
func (s *Starlark) Description() string {
return description
}
func (s *Starlark) Start(_ telegraf.Accumulator) error {
return nil
}

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package starlark
func (s *Starlark) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -28,6 +28,7 @@ If you'd like to apply multiple transformations to the same `tag_key` or `field_key`
## Configuration
```toml
# Perform string processing on tags, fields, and measurements
[[processors.strings]]
## Convert a field value to lowercase and store in a new field
# [[processors.strings.lowercase]]

View File

@ -48,74 +48,6 @@ type converter struct {
fn ConvertFunc
}
const sampleConfig = `
## Convert a tag value to uppercase
# [[processors.strings.uppercase]]
# tag = "method"
## Convert a field value to lowercase and store in a new field
# [[processors.strings.lowercase]]
# field = "uri_stem"
# dest = "uri_stem_normalised"
## Convert a field value to titlecase
# [[processors.strings.titlecase]]
# field = "status"
## Trim leading and trailing whitespace using the default cutset
# [[processors.strings.trim]]
# field = "message"
## Trim leading characters in cutset
# [[processors.strings.trim_left]]
# field = "message"
# cutset = "\t"
## Trim trailing characters in cutset
# [[processors.strings.trim_right]]
# field = "message"
# cutset = "\r\n"
## Trim the given prefix from the field
# [[processors.strings.trim_prefix]]
# field = "my_value"
# prefix = "my_"
## Trim the given suffix from the field
# [[processors.strings.trim_suffix]]
# field = "read_count"
# suffix = "_count"
## Replace all non-overlapping instances of old with new
# [[processors.strings.replace]]
# measurement = "*"
# old = ":"
# new = "_"
## Trims strings based on width
# [[processors.strings.left]]
# field = "message"
# width = 10
## Decode a base64 encoded utf-8 string
# [[processors.strings.base64decode]]
# field = "message"
## Sanitize a string to ensure it is a valid utf-8 string
## Each run of invalid UTF-8 byte sequences is replaced by the replacement string, which may be empty
# [[processors.strings.valid_utf8]]
# field = "message"
# replacement = ""
`
func (s *Strings) SampleConfig() string {
return sampleConfig
}
func (s *Strings) Description() string {
return "Perform string processing on tags, fields, and measurements"
}
func (c *converter) convertTag(metric telegraf.Metric) {
var tags map[string]string
if c.Tag == "*" {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package strings
func (s *Strings) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -11,6 +11,7 @@ levels of cardinality are computationally and/or financially expensive.
## Configuration
```toml
# Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit.
[[processors.tag_limit]]
## Maximum number of tags to preserve
limit = 3

View File

@ -1,4 +1,4 @@
package taglimit
package tag_limit
import (
"fmt"
@ -7,14 +7,6 @@ import (
"github.com/influxdata/telegraf/plugins/processors"
)
const sampleConfig = `
## Maximum number of tags to preserve
limit = 10
## List of tags to preferentially preserve
keep = ["foo", "bar", "baz"]
`
type TagLimit struct {
Limit int `toml:"limit"`
Keep []string `toml:"keep"`
@ -23,14 +15,6 @@ type TagLimit struct {
keepTags map[string]string
}
func (d *TagLimit) SampleConfig() string {
return sampleConfig
}
func (d *TagLimit) Description() string {
return "Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit."
}
func (d *TagLimit) initOnce() error {
if d.init {
return nil

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package tag_limit
func (d *TagLimit) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -1,4 +1,4 @@
package taglimit
package tag_limit
import (
"testing"

View File

@ -13,6 +13,7 @@ Read the full [Go Template Documentation][].
## Configuration
```toml
# Uses a Go template to create a new tag
[[processors.template]]
## Tag to set with the output of the template.
tag = "topic"

View File

@ -15,24 +15,6 @@ type TemplateProcessor struct {
tmpl *template.Template
}
const sampleConfig = `
## Tag to set with the output of the template.
tag = "topic"
## Go template used to create the tag value. In order to ease TOML
## escaping requirements, you may wish to use single quotes around the
## template string.
template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}'
`
func (r *TemplateProcessor) SampleConfig() string {
return sampleConfig
}
func (r *TemplateProcessor) Description() string {
return "Uses a Go template to create a new tag"
}
func (r *TemplateProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
// for each metric in "in" array
for _, metric := range in {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package template
func (r *TemplateProcessor) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -18,6 +18,7 @@ Notes:
## Configuration
```toml
# Print all metrics that pass through this filter.
[[processors.topk]]
## How many seconds between aggregations
# period = 10
@ -28,8 +29,9 @@ Notes:
## with k of 3 will return 6 buckets.
# k = 10
## Based on which tags should the buckets be computed. Globs can be specified.
## If set to an empty list tags are not considered when creating the buckets
## Over which tags should the aggregation be done. Globs can be specified, in
## which case any tag matching the glob will be aggregated over. If set to an
## empty list, no aggregation over tags is done
# group_by = ['*']
## The field(s) to aggregate
@ -42,27 +44,32 @@ Notes:
## What aggregation function to use. Options: sum, mean, min, max
# aggregation = "mean"
## Instead of the top k buckets, return the bottom k buckets
## Instead of the top k largest metrics, return the bottom k lowest metrics
# bottomk = false
## This setting provides a way to know which metrics were grouped together.
## Add a tag (whose name will be the value of this setting) to each metric.
## The value will be the tags used to pick its bucket.
## The plugin assigns each metric a GroupBy tag generated from its name and
## tags. If this setting is non-empty the plugin will add a
## tag (whose name will be the value of this setting) to each metric with
## the value of the calculated GroupBy tag. Useful for debugging
# add_groupby_tag = ""
## This setting provides a way to know the position of each metric's bucket in the top k.
## If the list is non-empty, a field will be added to each and every metric
## for each string present in this setting. This field will contain the ranking
## of the bucket that the metric belonged to when aggregated over that field.
## These settings provide a way to know the position of each metric in
## the top k. The 'add_rank_fields' setting lets you specify for which
## fields the position is required. If the list is non-empty, then a field
## will be added to each and every metric for each string present in this
## setting. This field will contain the ranking of the group that
## the metric belonged to when aggregated over that field.
## The name of the field will be set to the name of the aggregation field,
## suffixed with the string '_topk_rank'
# add_rank_fields = []
## These settings provide a way to know what values the plugin is generating
## when aggregating metrics. If the list is non-empty, then a field will be
## added to each and every metric for each field present in this setting.
## This field will contain the computed aggregation for the bucket that the
## metric belonged to when aggregated over that field.
## when aggregating metrics. The 'add_aggregate_fields' setting lets you
## specify for which fields the final aggregation value is required. If the
## list is non-empty, then a field will be added to each and every metric for
## each field present in this setting. This field will contain
## the computed aggregation for the group that the metric belonged to when
## aggregated over that field.
## The name of the field will be set to the name of the aggregation field,
## suffixed with the string '_topk_aggregate'
# add_aggregate_fields = []
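# To make the knobs above concrete, consider a hypothetical setup (names
# and values invented for the example):
#   period = 10, k = 2, group_by = ["host"], fields = ["cpu_usage"],
#   aggregation = "mean"
# Every 10 seconds the plugin buckets the cached metrics by host, computes
# the mean of cpu_usage per bucket, and only the metrics belonging to the
# two hosts with the highest means pass through; the rest are dropped.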

View File

@ -52,62 +52,6 @@ func New() *TopK {
return &topk
}
var sampleConfig = `
## How many seconds between aggregations
# period = 10
## How many top buckets to return per field
## Every field specified to aggregate over will return k number of results.
## For example, 1 field with k of 10 will return 10 buckets, while 2 fields
## with k of 3 will return 6 buckets.
# k = 10
## Over which tags should the aggregation be done. Globs can be specified, in
## which case any tag matching the glob will be aggregated over. If set to an
## empty list, no aggregation over tags is done
# group_by = ['*']
## The field(s) to aggregate
## Each field defined is used to create an independent aggregation. Each
## aggregation will return k buckets. If a metric does not have a defined
## field the metric will be dropped from the aggregation. Consider using
## the defaults processor plugin to ensure fields are set if required.
# fields = ["value"]
## What aggregation to use. Options: sum, mean, min, max
# aggregation = "mean"
## Instead of the top k largest metrics, return the bottom k lowest metrics
# bottomk = false
## The plugin assigns each metric a GroupBy tag generated from its name and
## tags. If this setting is non-empty the plugin will add a
## tag (whose name will be the value of this setting) to each metric with
## the value of the calculated GroupBy tag. Useful for debugging
# add_groupby_tag = ""
## These settings provide a way to know the position of each metric in
## the top k. The 'add_rank_fields' setting lets you specify for which
## fields the position is required. If the list is non-empty, then a field
## will be added to each and every metric for each string present in this
## setting. This field will contain the ranking of the group that
## the metric belonged to when aggregated over that field.
## The name of the field will be set to the name of the aggregation field,
## suffixed with the string '_topk_rank'
# add_rank_fields = []
## These settings provide a way to know what values the plugin is generating
## when aggregating metrics. The 'add_aggregate_fields' setting lets you
## specify for which fields the final aggregation value is required. If the
## list is non-empty, then a field will be added to each and every metric for
## each field present in this setting. This field will contain
## the computed aggregation for the group that the metric belonged to when
## aggregated over that field.
## The name of the field will be set to the name of the aggregation field,
## suffixed with the string '_topk_aggregate'
# add_aggregate_fields = []
`
type MetricAggregation struct {
groupbykey string
values map[string]float64
@ -127,19 +71,11 @@ func sortMetrics(metrics []MetricAggregation, field string, reverse bool) {
}
}
func (t *TopK) SampleConfig() string {
return sampleConfig
}
func (t *TopK) Reset() {
t.cache = make(map[string][]telegraf.Metric)
t.lastAggregation = time.Now()
}
func (t *TopK) Description() string {
return "Print all metrics that pass through this filter."
}
func (t *TopK) generateGroupByKey(m telegraf.Metric) (string, error) {
// Create the filter.Filter objects if they have not been created
if t.tagsGlobs == nil && len(t.GroupBy) > 0 {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package topk
func (t *TopK) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -7,6 +7,7 @@ To perform the reverse operation use the [pivot] processor.
## Configuration
```toml
# Rotate multi field metric into several single field metrics
[[processors.unpivot]]
## Tag to use for the name.
tag_key = "name"
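# The effect on an invented metric: with tag_key = "name" and
# value_key = "value" (the value_key default is visible further down in
# this diff), a single multi-field metric such as
#   cpu,host=a user=10,system=5
# unpivots into one metric per field:
#   cpu,host=a,name=user value=10
#   cpu,host=a,name=system value=5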

View File

@ -5,29 +5,11 @@ import (
"github.com/influxdata/telegraf/plugins/processors"
)
const (
description = "Rotate multi field metric into several single field metrics"
sampleConfig = `
## Tag to use for the name.
tag_key = "name"
## Field to use for the name of the value.
value_key = "value"
`
)
type Unpivot struct {
TagKey string `toml:"tag_key"`
ValueKey string `toml:"value_key"`
}
func (p *Unpivot) SampleConfig() string {
return sampleConfig
}
func (p *Unpivot) Description() string {
return description
}
func copyWithoutFields(metric telegraf.Metric) telegraf.Metric {
m := metric.Copy()

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package unpivot
func (p *Unpivot) SampleConfig() string {
return `{{ .SampleConfig }}`
}