feat: migrate aggregator plugins to new sample config format (#10912)

Sebastian Spaink 2022-04-06 15:40:17 -05:00 committed by GitHub
parent 57dc749b9a
commit 43017e14dd
25 changed files with 80 additions and 273 deletions
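
The pattern is the same in every file below: the hand-written `sampleConfig` string and the `Description()` method are removed from each aggregator's Go source, a small template file is added whose `SampleConfig()` returns the literal `{{ .SampleConfig }}` placeholder, and each README gains the old description text as a comment line above its `[[aggregators.*]]` block. The two `go:generate` directives in the new files invoke `tools/generate_plugindata`, which presumably fills the placeholder from the plugin's documentation (and, with `--clean`, presumably restores the template). As a rough before/after sketch, not code from this commit (the `package main` wrapper and stand-in type are only there so it runs on its own), the generated result for basicstats would look something like:

```go
// Rough sketch only. Before `go generate`, SampleConfig() returns the raw
// `{{ .SampleConfig }}` placeholder (see the new template files in this diff);
// after generation it is expected to return the sample TOML, e.g. the block
// that this commit removes from basicstats.go:
package main

import "fmt"

// BasicStats is a stand-in for the real plugin type in plugins/aggregators/basicstats.
type BasicStats struct{}

func (*BasicStats) SampleConfig() string {
	return `
## The period on which to flush & clear the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
## Configures which basic stats to push as fields
# stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"]
`
}

func main() {
	fmt.Print((&BasicStats{}).SampleConfig())
}
```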

View File

@@ -57,26 +57,6 @@ type basicstats struct {
TIME time.Time //intermediate value for rate
}
var sampleConfig = `
## The period on which to flush & clear the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
## Configures which basic stats to push as fields
# stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"]
`
func (*BasicStats) SampleConfig() string {
return sampleConfig
}
func (*BasicStats) Description() string {
return "Keep the aggregate basicstats of each metric passing through."
}
func (b *BasicStats) Add(in telegraf.Metric) {
id := in.HashID()
if _, ok := b.cache[id]; !ok {

View File

@@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package basicstats
func (*BasicStats) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@@ -138,6 +138,7 @@ Using `max_roll_over` with a value greater 0 may be important, if you need to de
## Configuration
```toml
# Calculates a derivative for every field.
[[aggregators.derivative]]
## Specific Derivative Aggregator Arguments:

View File

@@ -38,56 +38,6 @@ func NewDerivative() *Derivative {
return derivative
}
var sampleConfig = `
## The period in which to flush the aggregator.
period = "30s"
##
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
##
## This aggregator will estimate a derivative for each field, which is
## contained in both the first and last metric of the aggregation interval.
## Without further configuration the derivative will be calculated with
## respect to the time difference between these two measurements in seconds.
## The formula applied is for every field:
##
## value_last - value_first
## derivative = --------------------------
## time_difference_in_seconds
##
## The resulting derivative will be named *fieldname_rate*. The suffix
## "_rate" can be configured by the *suffix* parameter. When using a
## derivation variable you can include its name for more clarity.
# suffix = "_rate"
##
## As an abstraction the derivative can be calculated not only by the time
## difference but by the difference of a field, which is contained in the
## measurement. This field is assumed to be monotonously increasing. This
## feature is used by specifying a *variable*.
## Make sure the specified variable is not filtered and exists in the metrics
## passed to this aggregator!
# variable = ""
##
## When using a field as the derivation parameter the name of that field will
## be used for the resulting derivative, e.g. *fieldname_by_parameter*.
##
## Note, that the calculation is based on the actual timestamp of the
## measurements. When there is only one measurement during that period, the
## measurement will be rolled over to the next period. The maximum number of
## such roll-overs can be configured with a default of 10.
# max_roll_over = 10
##
`
func (d *Derivative) SampleConfig() string {
return sampleConfig
}
func (d *Derivative) Description() string {
return "Calculates a derivative for every field."
}
func (d *Derivative) Add(in telegraf.Metric) {
id := in.HashID()
current, ok := d.cache[id]
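
(Not part of the diff.) The comment block removed above is the only place the derivative formula is spelled out, so here is a tiny standalone illustration of that arithmetic; the values, timestamps and the `fieldname_by_ops` name are made up for the example.

```go
// Standalone illustration of the formula documented in the removed sample config:
//   derivative = (value_last - value_first) / time_difference_in_seconds
// and of the optional "variable" mode, where another monotonically increasing
// field replaces the time difference in the denominator.
package main

import (
	"fmt"
	"time"
)

func main() {
	first, last := 100.0, 130.0 // field value at start and end of the period
	t0 := time.Date(2022, 4, 6, 15, 40, 0, 0, time.UTC)
	t1 := t0.Add(30 * time.Second) // one 30s aggregation period

	rate := (last - first) / t1.Sub(t0).Seconds() // 1.0 per second, emitted as fieldname_rate
	fmt.Println("fieldname_rate =", rate)

	// "variable" mode: divide by the delta of another field instead of time,
	// e.g. bytes written per I/O operation.
	opsFirst, opsLast := 400.0, 460.0
	fmt.Println("fieldname_by_ops =", (last-first)/(opsLast-opsFirst)) // 0.5
}
```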

View File

@@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package derivative
func (d *Derivative) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@@ -14,6 +14,7 @@ When a series has not been updated within the time defined in
## Configuration
```toml
# Report the final metric of a series
[[aggregators.final]]
## The period on which to flush & clear the aggregator.
period = "30s"

View File

@@ -8,17 +8,6 @@ import (
"github.com/influxdata/telegraf/plugins/aggregators"
)
var sampleConfig = `
## The period on which to flush & clear the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
## The time that a series is not updated until considering it final.
series_timeout = "5m"
`
type Final struct {
SeriesTimeout config.Duration `toml:"series_timeout"`
@@ -33,14 +22,6 @@ func NewFinal() *Final {
}
}
func (m *Final) SampleConfig() string {
return sampleConfig
}
func (m *Final) Description() string {
return "Report the final metric of a series"
}
func (m *Final) Add(in telegraf.Metric) {
id := in.HashID()
m.metricCache[id] = in

View File

@@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package final
func (m *Final) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@@ -82,57 +82,6 @@ func NewHistogramAggregator() *HistogramAggregator {
return h
}
var sampleConfig = `
## The period in which to flush the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
## If true, the histogram will be reset on flush instead
## of accumulating the results.
reset = false
## Whether bucket values should be accumulated. If set to false, "gt" tag will be added.
## Defaults to true.
cumulative = true
## Expiration interval for each histogram. The histogram will be expired if
## there are no changes in any buckets for this time interval. 0 == no expiration.
# expiration_interval = "0m"
## If true, aggregated histogram are pushed to output only if it was updated since
## previous push. Defaults to false.
# push_only_on_update = false
## Example config that aggregates all fields of the metric.
# [[aggregators.histogram.config]]
# ## Right borders of buckets (with +Inf implicitly added).
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# ## The name of metric.
# measurement_name = "cpu"
## Example config that aggregates only specific fields of the metric.
# [[aggregators.histogram.config]]
# ## Right borders of buckets (with +Inf implicitly added).
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# ## The name of metric.
# measurement_name = "diskio"
# ## The concrete fields of metric
# fields = ["io_time", "read_time", "write_time"]
`
// SampleConfig returns sample of config
func (h *HistogramAggregator) SampleConfig() string {
return sampleConfig
}
// Description returns description of aggregator plugin
func (h *HistogramAggregator) Description() string {
return "Create aggregate histograms."
}
// Add adds new hit to the buckets
func (h *HistogramAggregator) Add(in telegraf.Metric) {
addTime := timeNow()
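
(Not part of the diff.) A minimal sketch of the bucket semantics described in the removed sample config, assuming each right border is inclusive and that `cumulative = true` means a value is counted in every bucket whose border it does not exceed; the sample values are invented, the bucket list is the cpu example above.

```go
// Standalone sketch: `buckets` lists right borders, +Inf is implicitly appended,
// and with cumulative = true the counts are non-decreasing across buckets
// (the non-cumulative mode instead adds a "gt" tag, per the config above).
package main

import (
	"fmt"
	"math"
)

func main() {
	buckets := []float64{0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0} // cpu example
	buckets = append(buckets, math.Inf(+1))                              // implicit +Inf border

	samples := []float64{3.2, 12.9, 40.0, 96.5, 150.0} // invented values

	counts := make([]int, len(buckets))
	for _, v := range samples {
		for i, border := range buckets {
			if v <= border {
				counts[i]++ // cumulative: counted in every bucket whose border it does not exceed
			}
		}
	}
	for i, border := range buckets {
		fmt.Printf("bucket <= %v: %d samples\n", border, counts[i])
	}
}
```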

View File

@@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package histogram
func (h *HistogramAggregator) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@@ -10,6 +10,7 @@ be handled more efficiently by the output.
## Configuration
```toml
# Merge metrics into multifield metrics by series key
[[aggregators.merge]]
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.

View File

@@ -1,4 +1,4 @@
package seriesgrouper
package merge
import (
"time"
@@ -8,15 +8,6 @@ import (
"github.com/influxdata/telegraf/plugins/aggregators"
)
const (
description = "Merge metrics into multifield metrics by series key"
sampleConfig = `
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = true
`
)
type Merge struct {
grouper *metric.SeriesGrouper
}
@@ -26,14 +17,6 @@ func (a *Merge) Init() error {
return nil
}
func (a *Merge) Description() string {
return description
}
func (a *Merge) SampleConfig() string {
return sampleConfig
}
func (a *Merge) Add(m telegraf.Metric) {
a.grouper.AddMetric(m)
}

View File

@@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package merge
func (a *Merge) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@@ -1,4 +1,4 @@
package seriesgrouper
package merge
import (
"testing"

View File

@@ -26,23 +26,6 @@ type minmax struct {
max float64
}
var sampleConfig = `
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
`
func (m *MinMax) SampleConfig() string {
return sampleConfig
}
func (m *MinMax) Description() string {
return "Keep the aggregate min/max of each metric passing through."
}
func (m *MinMax) Add(in telegraf.Metric) {
id := in.HashID()
if _, ok := m.cache[id]; !ok {

View File

@@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package minmax
func (m *MinMax) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@@ -6,6 +6,7 @@ per metric it sees and emits the quantiles every `period`.
## Configuration
```toml
# Keep the aggregate quantiles of each metric passing through.
[[aggregators.quantile]]
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.

View File

@@ -26,41 +26,6 @@ type aggregate struct {
type newAlgorithmFunc func(compression float64) (algorithm, error)
var sampleConfig = `
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
## Quantiles to output in the range [0,1]
# quantiles = [0.25, 0.5, 0.75]
## Type of aggregation algorithm
## Supported are:
## "t-digest" -- approximation using centroids, can cope with large number of samples
## "exact R7" -- exact computation also used by Excel or NumPy (Hyndman & Fan 1996 R7)
## "exact R8" -- exact computation (Hyndman & Fan 1996 R8)
## NOTE: Do not use "exact" algorithms with large number of samples
## to not impair performance or memory consumption!
# algorithm = "t-digest"
## Compression for approximation (t-digest). The value needs to be
## greater or equal to 1.0. Smaller values will result in more
## performance but less accuracy.
# compression = 100.0
`
func (q *Quantile) SampleConfig() string {
return sampleConfig
}
func (q *Quantile) Description() string {
return "Keep the aggregate quantiles of each metric passing through."
}
func (q *Quantile) Add(in telegraf.Metric) {
id := in.HashID()
if cached, ok := q.cache[id]; ok {
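
(Not part of the diff.) The removed config only names the algorithms, so here is a minimal standalone sketch of the "exact R7" estimator it refers to (Hyndman & Fan 1996 R7, the linear-interpolation method used by Excel and NumPy); the sample values are invented.

```go
// Standalone sketch of the "exact R7" quantile estimator: sort the samples,
// compute h = (n-1)*q, then linearly interpolate between the two neighbouring
// order statistics.
package main

import (
	"fmt"
	"math"
	"sort"
)

func quantileR7(values []float64, q float64) float64 {
	x := append([]float64(nil), values...) // work on a sorted copy
	sort.Float64s(x)
	h := float64(len(x)-1) * q
	lo := int(math.Floor(h))
	hi := int(math.Ceil(h))
	return x[lo] + (h-float64(lo))*(x[hi]-x[lo])
}

func main() {
	samples := []float64{12, 3, 7, 25, 18, 9, 31, 14}
	for _, q := range []float64{0.25, 0.5, 0.75} { // the default quantiles above
		fmt.Printf("q%.2f = %v\n", q, quantileR7(samples, q))
	}
}
```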

View File

@@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package quantile
func (q *Quantile) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@@ -19,6 +19,7 @@ functions.
## Configuration
```toml
# Aggregate metrics using a Starlark script
[[aggregators.starlark]]
## The Starlark source can be set as a string in this configuration file, or
## by referencing a file containing the script. Only one source or script

View File

@@ -7,39 +7,6 @@ import (
"go.starlark.net/starlark"
)
const (
description = "Aggregate metrics using a Starlark script"
sampleConfig = `
## The Starlark source can be set as a string in this configuration file, or
## by referencing a file containing the script. Only one source or script
## should be set at once.
##
## Source of the Starlark script.
source = '''
state = {}
def add(metric):
state["last"] = metric
def push():
return state.get("last")
def reset():
state.clear()
'''
## File containing a Starlark script.
# script = "/usr/local/bin/myscript.star"
## The constants of the Starlark script.
# [aggregators.starlark.constants]
# max_size = 10
# threshold = 0.75
# default_name = "Julia"
# debug_mode = true
`
)
type Starlark struct {
common.StarlarkCommon
}
@@ -72,14 +39,6 @@ func (s *Starlark) Init() error {
return nil
}
func (s *Starlark) SampleConfig() string {
return sampleConfig
}
func (s *Starlark) Description() string {
return description
}
func (s *Starlark) Add(metric telegraf.Metric) {
parameters, found := s.GetParameters("add")
if !found {

View File

@@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package starlark
func (s *Starlark) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@@ -18,6 +18,7 @@ limited set of values.
## Configuration
```toml
# Count the occurrence of values in fields.
[[aggregators.valuecounter]]
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.

View File

@@ -27,27 +27,6 @@ func NewValueCounter() telegraf.Aggregator {
return vc
}
var sampleConfig = `
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
## The fields for which the values will be counted
fields = []
`
// SampleConfig generates a sample config for the ValueCounter plugin
func (vc *ValueCounter) SampleConfig() string {
return sampleConfig
}
// Description returns the description of the ValueCounter plugin
func (vc *ValueCounter) Description() string {
return "Count the occurrence of values in fields."
}
// Add is run on every metric which passes the plugin
func (vc *ValueCounter) Add(in telegraf.Metric) {
id := in.HashID()

View File

@@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package valuecounter
func (vc *ValueCounter) SampleConfig() string {
return `{{ .SampleConfig }}`
}