2015-11-25 05:22:11 +08:00
package config
2015-04-02 00:34:32 +08:00
import (
2016-04-02 03:53:34 +08:00
"bytes"
2022-01-19 06:04:09 +08:00
"crypto/tls"
2022-06-22 22:30:43 +08:00
_ "embed"
2015-04-02 00:34:32 +08:00
"fmt"
2021-09-29 05:16:32 +08:00
"io"
2015-11-14 07:14:07 +08:00
"log"
2018-11-06 06:19:46 +08:00
"net/http"
"net/url"
2016-04-02 03:53:34 +08:00
"os"
2015-10-19 15:09:36 +08:00
"path/filepath"
2020-11-04 05:40:57 +08:00
"reflect"
2016-04-02 03:53:34 +08:00
"regexp"
2016-08-08 22:55:16 +08:00
"runtime"
2015-04-02 00:34:32 +08:00
"sort"
2016-09-08 22:22:10 +08:00
"strconv"
2015-04-02 00:34:32 +08:00
"strings"
2022-06-16 20:04:45 +08:00
"sync"
2015-04-02 00:34:32 +08:00
"time"
2021-12-02 03:38:43 +08:00
"github.com/coreos/go-semver/semver"
2016-01-28 05:21:36 +08:00
"github.com/influxdata/telegraf"
2016-01-21 02:57:35 +08:00
"github.com/influxdata/telegraf/internal"
2021-07-02 04:48:16 +08:00
"github.com/influxdata/telegraf/internal/choice"
2020-05-05 02:09:10 +08:00
"github.com/influxdata/telegraf/models"
2016-09-08 22:22:10 +08:00
"github.com/influxdata/telegraf/plugins/aggregators"
2016-01-21 02:57:35 +08:00
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/outputs"
2016-02-06 08:36:35 +08:00
"github.com/influxdata/telegraf/plugins/parsers"
2021-06-11 03:22:18 +08:00
"github.com/influxdata/telegraf/plugins/parsers/json_v2"
2016-09-08 22:22:10 +08:00
"github.com/influxdata/telegraf/plugins/processors"
2016-02-11 06:50:07 +08:00
"github.com/influxdata/telegraf/plugins/serializers"
2016-04-02 03:53:34 +08:00
"github.com/influxdata/toml"
2016-03-01 18:12:28 +08:00
"github.com/influxdata/toml/ast"
2015-04-02 00:34:32 +08:00
)
2016-04-01 07:50:24 +08:00
var (
	// sectionDefaults lists the top-level config sections emitted by the
	// sample-config printer, in output order.
	sectionDefaults = []string{"global_tags", "agent", "outputs",
		"processors", "aggregators", "inputs"}

	// Default input plugins enabled (uncommented) in the sample config.
	inputDefaults = []string{"cpu", "mem", "swap", "system", "kernel",
		"processes", "disk", "diskio"}

	// Default output plugins enabled (uncommented) in the sample config.
	outputDefaults = []string{"influxdb"}

	// envVarRe is a regex to find environment variables in the config file;
	// it matches both the ${VAR} and $VAR forms.
	envVarRe = regexp.MustCompile(`\$\{(\w+)\}|\$(\w+)`)

	// envVarEscaper escapes quotes and backslashes in substituted environment
	// variable values so they remain valid inside TOML strings.
	envVarEscaper = strings.NewReplacer(
		`"`, `\"`,
		`\`, `\\`,
	)

	// httpLoadConfigRetryInterval is the delay between retries when loading
	// the config from a remote URL fails.
	httpLoadConfigRetryInterval = 10 * time.Second

	// fetchURLRe is a regex to determine whether the requested file should
	// be fetched from a remote or read from the filesystem.
	fetchURLRe = regexp.MustCompile(`^\w+://`)
)
2015-08-12 00:34:00 +08:00
// Config holds the parsed Telegraf configuration: the agent settings, the
// global tags, and the instances of all configured plugins.
type Config struct {
	toml              *toml.Config    // TOML decoder configured with the missing-field hook
	errs              []error         // config load errors.
	UnusedFields      map[string]bool // fields seen in the TOML but not consumed by any plugin
	unusedFieldsMutex *sync.Mutex     // guards UnusedFields

	// Tags are the global tags applied to all metrics (from [global_tags]).
	Tags map[string]string
	// InputFilters / OutputFilters restrict which plugin definitions are
	// loaded from the config.
	InputFilters  []string
	OutputFilters []string

	Agent       *AgentConfig
	Inputs      []*models.RunningInput
	Outputs     []*models.RunningOutput
	Aggregators []*models.RunningAggregator
	Parsers     []*models.RunningParser
	// Processors have a slice wrapper type because they need to be sorted
	Processors    models.RunningProcessors
	AggProcessors models.RunningProcessors
	// Deprecations maps plugin names to deprecation version information.
	Deprecations map[string][]int64
	// version is the parsed Telegraf version (see NewConfig).
	version *semver.Version
}
2020-11-04 05:40:57 +08:00
// NewConfig creates a new struct to hold the Telegraf config.
// For historical reasons, it holds the actual instances of the running plugins
// once the configuration is parsed.
func NewConfig() *Config {
	c := &Config{
		UnusedFields:      map[string]bool{},
		unusedFieldsMutex: &sync.Mutex{},

		// Agent defaults:
		Agent: &AgentConfig{
			Interval:                   Duration(10 * time.Second),
			RoundInterval:              true,
			FlushInterval:              Duration(10 * time.Second),
			LogTarget:                  "file",
			LogfileRotationMaxArchives: 5,
		},

		Tags:          make(map[string]string),
		Inputs:        make([]*models.RunningInput, 0),
		Outputs:       make([]*models.RunningOutput, 0),
		Parsers:       make([]*models.RunningParser, 0),
		Processors:    make([]*models.RunningProcessor, 0),
		AggProcessors: make([]*models.RunningProcessor, 0),
		InputFilters:  make([]string, 0),
		OutputFilters: make([]string, 0),
		Deprecations:  make(map[string][]int64),
	}

	// Handle unknown version: fall back to a parsable placeholder so
	// semver.New below cannot fail.
	version := internal.Version()
	if version == "" || version == "unknown" {
		version = "0.0.0-unknown"
	}
	c.version = semver.New(version)

	// Use the default TOML behavior except for unknown fields, which are
	// collected via c.missingTomlField instead of causing an error.
	tomlCfg := &toml.Config{
		NormFieldName: toml.DefaultConfig.NormFieldName,
		FieldToKey:    toml.DefaultConfig.FieldToKey,
		MissingField:  c.missingTomlField,
	}
	c.toml = tomlCfg

	return c
}
2020-11-04 05:40:57 +08:00
// AgentConfig defines configuration that will be used by the Telegraf agent
type AgentConfig struct {
	// Interval at which to gather information
	Interval Duration

	// RoundInterval rounds collection interval to 'interval'.
	// ie, if Interval=10s then always collect on :00, :10, :20, etc.
	RoundInterval bool

	// Collected metrics are rounded to the precision specified. Precision is
	// specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s).
	// Valid time units are "ns", "us" (or "µs"), "ms", "s".
	//
	// By default or when set to "0s", precision will be set to the same
	// timestamp order as the collection interval, with the maximum being 1s:
	//   ie, when interval = "10s", precision will be "1s"
	//       when interval = "250ms", precision will be "1ms"
	//
	// Precision will NOT be used for service inputs. It is up to each individual
	// service input to set the timestamp at the appropriate precision.
	Precision Duration

	// CollectionJitter is used to jitter the collection by a random amount.
	// Each plugin will sleep for a random time within jitter before collecting.
	// This can be used to avoid many plugins querying things like sysfs at the
	// same time, which can have a measurable effect on the system.
	CollectionJitter Duration

	// CollectionOffset is used to shift the collection by the given amount.
	// This can be used to avoid many plugins querying constraint devices
	// at the same time by manually scheduling them in time.
	CollectionOffset Duration

	// FlushInterval is the Interval at which to flush data
	FlushInterval Duration

	// FlushJitter jitters the flush interval by a random amount.
	// This is primarily to avoid large write spikes for users running a large
	// number of telegraf instances.
	// ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
	FlushJitter Duration

	// MetricBatchSize is the maximum number of metrics that is written to an
	// output plugin in one call.
	MetricBatchSize int

	// MetricBufferLimit is the max number of metrics that each output plugin
	// will cache. The buffer is cleared when a successful write occurs. When
	// full, the oldest metrics will be overwritten. This number should be a
	// multiple of MetricBatchSize. Due to current implementation, this could
	// not be less than 2 times MetricBatchSize.
	MetricBufferLimit int

	// FlushBufferWhenFull tells Telegraf to flush the metric buffer whenever
	// it fills up, regardless of FlushInterval. Setting this option to true
	// does _not_ deactivate FlushInterval.
	FlushBufferWhenFull bool `toml:"flush_buffer_when_full" deprecated:"0.13.0;2.0.0;option is ignored"`

	// TODO(cam): Remove UTC and parameter, they are no longer
	// valid for the agent config. Leaving them here for now for backwards-
	// compatibility
	UTC bool `toml:"utc" deprecated:"1.0.0;option is ignored"`

	// Debug is the option for running in debug mode
	Debug bool `toml:"debug"`

	// Quiet is the option for running in quiet mode
	Quiet bool `toml:"quiet"`

	// Log target controls the destination for logs and can be one of "file",
	// "stderr" or, on Windows, "eventlog". When set to "file", the output file
	// is determined by the "logfile" setting.
	LogTarget string `toml:"logtarget"`

	// Name of the file to be logged to when using the "file" logtarget. If set to
	// the empty string then logs are written to stderr.
	Logfile string `toml:"logfile"`

	// The file will be rotated after the time interval specified. When set
	// to 0 no time based rotation is performed.
	LogfileRotationInterval Duration `toml:"logfile_rotation_interval"`

	// The logfile will be rotated when it becomes larger than the specified
	// size. When set to 0 no size based rotation is performed.
	LogfileRotationMaxSize Size `toml:"logfile_rotation_max_size"`

	// Maximum number of rotated archives to keep, any older logs are deleted.
	// If set to -1, no archives are removed.
	LogfileRotationMaxArchives int `toml:"logfile_rotation_max_archives"`

	// Pick a timezone to use when logging or type 'local' for local time.
	LogWithTimezone string `toml:"log_with_timezone"`

	// Hostname is used as the value for the "host" tag on all metrics unless
	// OmitHostname is set; when empty it defaults to os.Hostname().
	Hostname string
	// OmitHostname disables setting the "host" tag.
	OmitHostname bool

	// Method for translating SNMP objects. 'netsnmp' to call external programs,
	// 'gosmi' to use the built-in library.
	SnmpTranslator string `toml:"snmp_translator"`
}
2020-08-20 06:18:52 +08:00
// InputNames returns a list of strings of the configured inputs.
2016-01-08 04:39:43 +08:00
func ( c * Config ) InputNames ( ) [ ] string {
2015-11-25 05:22:11 +08:00
var name [ ] string
2016-01-08 04:39:43 +08:00
for _ , input := range c . Inputs {
2019-02-27 08:03:13 +08:00
name = append ( name , input . Config . Name )
2015-08-12 04:02:04 +08:00
}
2020-08-20 06:18:52 +08:00
return PluginNameCounts ( name )
2015-08-08 04:31:25 +08:00
}
2020-08-20 06:18:52 +08:00
// AggregatorNames returns a list of strings of the configured aggregators.
2018-06-01 02:56:49 +08:00
func ( c * Config ) AggregatorNames ( ) [ ] string {
var name [ ] string
for _ , aggregator := range c . Aggregators {
2019-02-27 10:22:12 +08:00
name = append ( name , aggregator . Config . Name )
2018-06-01 02:56:49 +08:00
}
2020-08-20 06:18:52 +08:00
return PluginNameCounts ( name )
2018-06-01 02:56:49 +08:00
}
2022-01-13 06:54:42 +08:00
// ParserNames returns a list of strings of the configured parsers.
// Note: parsers are identified by their data format, not a plugin name.
func (c *Config) ParserNames() []string {
	// Pre-size to avoid repeated slice growth while collecting names.
	names := make([]string, 0, len(c.Parsers))
	for _, parser := range c.Parsers {
		names = append(names, parser.Config.DataFormat)
	}
	return PluginNameCounts(names)
}
2020-08-20 06:18:52 +08:00
// ProcessorNames returns a list of strings of the configured processors.
2018-06-01 02:56:49 +08:00
func ( c * Config ) ProcessorNames ( ) [ ] string {
var name [ ] string
for _ , processor := range c . Processors {
2019-08-22 07:49:07 +08:00
name = append ( name , processor . Config . Name )
2018-06-01 02:56:49 +08:00
}
2020-08-20 06:18:52 +08:00
return PluginNameCounts ( name )
2018-06-01 02:56:49 +08:00
}
2020-08-20 06:18:52 +08:00
// OutputNames returns a list of strings of the configured outputs.
2015-11-25 05:22:11 +08:00
func ( c * Config ) OutputNames ( ) [ ] string {
var name [ ] string
for _ , output := range c . Outputs {
2019-08-22 07:49:07 +08:00
name = append ( name , output . Config . Name )
2015-11-24 09:00:54 +08:00
}
2020-08-20 06:18:52 +08:00
return PluginNameCounts ( name )
}
2020-09-16 04:55:23 +08:00
// PluginNameCounts returns a sorted list of plugin names; names occurring
// more than once are collapsed into a single "name (Nx)" entry.
func PluginNameCounts(plugins []string) []string {
	// Count occurrences of each name (pre-sized to the worst case of all
	// unique names to avoid map growth).
	counts := make(map[string]int, len(plugins))
	for _, plugin := range plugins {
		counts[plugin]++
	}

	// Pre-size the result; one entry per distinct name.
	namecount := make([]string, 0, len(counts))
	for name, count := range counts {
		if count == 1 {
			namecount = append(namecount, name)
		} else {
			namecount = append(namecount, fmt.Sprintf("%s (%dx)", name, count))
		}
	}

	// Map iteration order is random; sort for deterministic output.
	sort.Strings(namecount)
	return namecount
}
2015-08-04 22:58:32 +08:00
// ListTags returns a string of tags specified in the config,
// line-protocol style
2015-04-02 00:34:32 +08:00
func ( c * Config ) ListTags ( ) string {
var tags [ ] string
for k , v := range c . Tags {
tags = append ( tags , fmt . Sprintf ( "%s=%s" , k , v ) )
}
sort . Strings ( tags )
return strings . Join ( tags , " " )
}
2015-05-19 06:10:11 +08:00
2016-02-18 12:57:33 +08:00
// Static text fragments used by the sample-config printer. These strings are
// emitted verbatim, so their content (including leading/trailing newlines)
// is part of the generated config layout.
var header = `# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply surround
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})

`
var globalTagsConfig = `
# Global tags can be specified here in key="value" format.
[global_tags]
  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
  # rack = "1a"
  ## Environment variables can be used as tags, and throughout the config file
  # user = "$USER"

`

// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the agentConfig data.
//go:embed agent.conf
var agentConfig string

var outputHeader = `
###############################################################################
#                            OUTPUT PLUGINS                                   #
###############################################################################

`

var processorHeader = `
###############################################################################
#                            PROCESSOR PLUGINS                                #
###############################################################################

`
var aggregatorHeader = `
###############################################################################
#                            AGGREGATOR PLUGINS                               #
###############################################################################

`
var inputHeader = `
###############################################################################
#                            INPUT PLUGINS                                    #
###############################################################################

`
var serviceInputHeader = `
###############################################################################
#                            SERVICE INPUT PLUGINS                            #
###############################################################################

`
2015-09-22 09:38:57 +08:00
// PrintSampleConfig prints the sample config to stdout. Each filter slice
// restricts which sections/plugins are emitted; empty filters fall back to
// the package-level defaults, with non-default plugins printed commented out.
func PrintSampleConfig(
	sectionFilters []string,
	inputFilters []string,
	outputFilters []string,
	aggregatorFilters []string,
	processorFilters []string,
) {
	// print headers
	fmt.Print(header)

	if len(sectionFilters) == 0 {
		sectionFilters = sectionDefaults
	}
	printFilteredGlobalSections(sectionFilters)

	// print output plugins
	if sliceContains("outputs", sectionFilters) {
		if len(outputFilters) != 0 {
			// NOTE(review): the ">= 3 && [1] != \"none\"" condition looks like
			// a sentinel check to suppress the section banner — confirm intent.
			if len(outputFilters) >= 3 && outputFilters[1] != "none" {
				fmt.Print(outputHeader)
			}
			printFilteredOutputs(outputFilters, false)
		} else {
			fmt.Print(outputHeader)
			printFilteredOutputs(outputDefaults, false)
			// Print non-default outputs, commented
			var pnames []string
			for pname := range outputs.Outputs {
				if !sliceContains(pname, outputDefaults) {
					pnames = append(pnames, pname)
				}
			}
			sort.Strings(pnames)
			printFilteredOutputs(pnames, true)
		}
	}

	// print processor plugins
	if sliceContains("processors", sectionFilters) {
		if len(processorFilters) != 0 {
			if len(processorFilters) >= 3 && processorFilters[1] != "none" {
				fmt.Print(processorHeader)
			}
			printFilteredProcessors(processorFilters, false)
		} else {
			fmt.Print(processorHeader)
			// There are no default processors, so print all of them commented.
			pnames := []string{}
			for pname := range processors.Processors {
				pnames = append(pnames, pname)
			}
			sort.Strings(pnames)
			printFilteredProcessors(pnames, true)
		}
	}

	// print aggregator plugins
	if sliceContains("aggregators", sectionFilters) {
		if len(aggregatorFilters) != 0 {
			if len(aggregatorFilters) >= 3 && aggregatorFilters[1] != "none" {
				fmt.Print(aggregatorHeader)
			}
			printFilteredAggregators(aggregatorFilters, false)
		} else {
			fmt.Print(aggregatorHeader)
			// There are no default aggregators, so print all of them commented.
			pnames := []string{}
			for pname := range aggregators.Aggregators {
				pnames = append(pnames, pname)
			}
			sort.Strings(pnames)
			printFilteredAggregators(pnames, true)
		}
	}

	// print input plugins
	if sliceContains("inputs", sectionFilters) {
		if len(inputFilters) != 0 {
			if len(inputFilters) >= 3 && inputFilters[1] != "none" {
				fmt.Print(inputHeader)
			}
			printFilteredInputs(inputFilters, false)
		} else {
			fmt.Print(inputHeader)
			printFilteredInputs(inputDefaults, false)
			// Print non-default inputs, commented
			var pnames []string
			for pname := range inputs.Inputs {
				if !sliceContains(pname, inputDefaults) {
					pnames = append(pnames, pname)
				}
			}
			sort.Strings(pnames)
			printFilteredInputs(pnames, true)
		}
	}
}
2015-05-19 06:10:11 +08:00
2016-09-08 22:22:10 +08:00
func printFilteredProcessors ( processorFilters [ ] string , commented bool ) {
// Filter processors
var pnames [ ] string
for pname := range processors . Processors {
if sliceContains ( pname , processorFilters ) {
pnames = append ( pnames , pname )
}
}
sort . Strings ( pnames )
// Print Outputs
for _ , pname := range pnames {
creator := processors . Processors [ pname ]
output := creator ( )
2022-01-27 23:59:33 +08:00
printConfig ( pname , output , "processors" , commented , processors . Deprecations [ pname ] )
2016-09-08 22:22:10 +08:00
}
}
func printFilteredAggregators ( aggregatorFilters [ ] string , commented bool ) {
// Filter outputs
var anames [ ] string
for aname := range aggregators . Aggregators {
if sliceContains ( aname , aggregatorFilters ) {
anames = append ( anames , aname )
}
}
sort . Strings ( anames )
// Print Outputs
for _ , aname := range anames {
creator := aggregators . Aggregators [ aname ]
output := creator ( )
2022-01-27 23:59:33 +08:00
printConfig ( aname , output , "aggregators" , commented , aggregators . Deprecations [ aname ] )
2016-09-08 22:22:10 +08:00
}
}
2016-04-01 07:50:24 +08:00
// printFilteredInputs prints the sample config of every registered input
// whose name appears in inputFilters, in alphabetical order. Service inputs
// are deferred and printed last under their own section header. When
// commented is set, the configs are printed commented out.
func printFilteredInputs(inputFilters []string, commented bool) {
	// Filter inputs
	var pnames []string
	for pname := range inputs.Inputs {
		if sliceContains(pname, inputFilters) {
			pnames = append(pnames, pname)
		}
	}
	sort.Strings(pnames)

	// cache service inputs to print them at the end
	servInputs := make(map[string]telegraf.ServiceInput)
	// for alphabetical looping:
	servInputNames := []string{}

	// Print Inputs
	for _, pname := range pnames {
		// Skip inputs that are registered twice for backward compatibility
		switch pname {
		case "cisco_telemetry_gnmi", "io", "KNXListener":
			continue
		}
		creator := inputs.Inputs[pname]
		input := creator()

		// Defer service inputs so they appear under the service-input header.
		if p, ok := input.(telegraf.ServiceInput); ok {
			servInputs[pname] = p
			servInputNames = append(servInputNames, pname)
			continue
		}

		printConfig(pname, input, "inputs", commented, inputs.Deprecations[pname])
	}

	// Print Service Inputs
	if len(servInputs) == 0 {
		return
	}
	sort.Strings(servInputNames)

	fmt.Print(serviceInputHeader)
	for _, name := range servInputNames {
		printConfig(name, servInputs[name], "inputs", commented, inputs.Deprecations[name])
	}
}
func printFilteredOutputs ( outputFilters [ ] string , commented bool ) {
// Filter outputs
var onames [ ] string
for oname := range outputs . Outputs {
if sliceContains ( oname , outputFilters ) {
onames = append ( onames , oname )
}
}
sort . Strings ( onames )
// Print Outputs
for _ , oname := range onames {
creator := outputs . Outputs [ oname ]
output := creator ( )
2022-01-27 23:59:33 +08:00
printConfig ( oname , output , "outputs" , commented , outputs . Deprecations [ oname ] )
2015-09-25 02:06:11 +08:00
}
}
2019-04-26 11:34:40 +08:00
func printFilteredGlobalSections ( sectionFilters [ ] string ) {
if sliceContains ( "global_tags" , sectionFilters ) {
2022-02-23 09:47:04 +08:00
fmt . Print ( globalTagsConfig )
2019-04-26 11:34:40 +08:00
}
2019-06-06 05:07:02 +08:00
if sliceContains ( "agent" , sectionFilters ) {
2022-02-23 09:47:04 +08:00
fmt . Print ( agentConfig )
2019-06-06 05:07:02 +08:00
}
2019-04-26 11:34:40 +08:00
}
2022-01-27 23:59:33 +08:00
// printConfig prints the sample config of a single plugin to stdout.
// op is the config section ("inputs", "outputs", ...); when commented is set,
// every line is prefixed with "# " so the plugin appears disabled; di carries
// deprecation info which, when present, is printed as a banner first.
func printConfig(name string, p telegraf.PluginDescriber, op string, commented bool, di telegraf.DeprecationInfo) {
	comment := ""
	if commented {
		comment = "# "
	}

	// Emit a deprecation banner when the plugin is marked deprecated.
	if di.Since != "" {
		removalNote := ""
		if di.RemovalIn != "" {
			removalNote = " and will be removed in " + di.RemovalIn
		}
		fmt.Printf("\n%s ## DEPRECATED: The '%s' plugin is deprecated in version %s%s, %s.", comment, name, di.Since, removalNote, di.Notice)
	}

	config := p.SampleConfig()
	if config == "" {
		// Plugin provides no sample config; print a bare section header.
		fmt.Printf("\n#[[%s.%s]]", op, name)
		fmt.Printf("\n%s # no configuration\n\n", comment)
	} else {
		lines := strings.Split(config, "\n")
		fmt.Print("\n")
		for i, line := range lines {
			// The final split element is replaced by a blank separator line
			// rather than being prefixed with the comment marker.
			if i == len(lines)-1 {
				fmt.Print("\n")
				continue
			}
			// Prefix each line and strip trailing spaces introduced by the
			// comment marker on otherwise-empty lines.
			fmt.Print(strings.TrimRight(comment+line, " ") + "\n")
		}
	}
}
2015-08-25 04:52:46 +08:00
2015-09-22 09:38:57 +08:00
// sliceContains reports whether name occurs in list.
func sliceContains(name string, list []string) bool {
	for _, candidate := range list {
		if candidate == name {
			return true
		}
	}
	return false
}
2016-01-08 04:39:43 +08:00
// PrintInputConfig prints the config usage of a single input.
func PrintInputConfig ( name string ) error {
2022-02-23 09:47:04 +08:00
creator , ok := inputs . Inputs [ name ]
if ! ok {
return fmt . Errorf ( "input %s not found" , name )
2015-08-25 04:52:46 +08:00
}
2022-02-23 09:47:04 +08:00
printConfig ( name , creator ( ) , "inputs" , false , inputs . Deprecations [ name ] )
2015-08-25 04:52:46 +08:00
return nil
}
2015-10-17 12:47:13 +08:00
2015-10-23 04:24:51 +08:00
// PrintOutputConfig prints the config usage of a single output.
func PrintOutputConfig ( name string ) error {
2022-02-23 09:47:04 +08:00
creator , ok := outputs . Outputs [ name ]
if ! ok {
return fmt . Errorf ( "output %s not found" , name )
2015-10-23 04:24:51 +08:00
}
2022-02-23 09:47:04 +08:00
printConfig ( name , creator ( ) , "outputs" , false , outputs . Deprecations [ name ] )
2015-10-23 04:24:51 +08:00
return nil
}
2020-11-04 05:40:57 +08:00
// LoadDirectory loads all toml config files found in the specified path, recursively.
2015-10-19 15:09:36 +08:00
func ( c * Config ) LoadDirectory ( path string ) error {
2016-09-28 22:30:02 +08:00
walkfn := func ( thispath string , info os . FileInfo , _ error ) error {
2017-02-23 21:45:36 +08:00
if info == nil {
log . Printf ( "W! Telegraf is not permitted to read %s" , thispath )
return nil
}
2018-04-12 07:51:19 +08:00
2016-09-28 22:30:02 +08:00
if info . IsDir ( ) {
2018-04-12 07:51:19 +08:00
if strings . HasPrefix ( info . Name ( ) , ".." ) {
// skip Kubernetes mounts, prevening loading the same config twice
return filepath . SkipDir
}
2016-09-28 22:30:02 +08:00
return nil
2015-10-19 15:09:36 +08:00
}
2016-09-28 22:30:02 +08:00
name := info . Name ( )
2015-11-26 09:42:07 +08:00
if len ( name ) < 6 || name [ len ( name ) - 5 : ] != ".conf" {
2016-09-28 22:30:02 +08:00
return nil
2015-10-19 15:09:36 +08:00
}
2016-09-28 22:30:02 +08:00
err := c . LoadConfig ( thispath )
2015-10-19 15:09:36 +08:00
if err != nil {
return err
}
2016-09-28 22:30:02 +08:00
return nil
2015-10-19 15:09:36 +08:00
}
2016-09-28 22:30:02 +08:00
return filepath . Walk ( path , walkfn )
2015-10-19 15:09:36 +08:00
}
2016-04-29 05:19:03 +08:00
// Try to find a default config file at these locations (in order):
// 1. $TELEGRAF_CONFIG_PATH
// 2. $HOME/.telegraf/telegraf.conf
// 3. /etc/telegraf/telegraf.conf
//
func getDefaultConfigPath ( ) ( string , error ) {
envfile := os . Getenv ( "TELEGRAF_CONFIG_PATH" )
homefile := os . ExpandEnv ( "${HOME}/.telegraf/telegraf.conf" )
etcfile := "/etc/telegraf/telegraf.conf"
2016-08-08 22:55:16 +08:00
if runtime . GOOS == "windows" {
2019-08-28 04:47:01 +08:00
programFiles := os . Getenv ( "ProgramFiles" )
if programFiles == "" { // Should never happen
programFiles = ` C:\Program Files `
}
etcfile = programFiles + ` \Telegraf\telegraf.conf `
2016-08-08 22:55:16 +08:00
}
2016-04-29 05:19:03 +08:00
for _ , path := range [ ] string { envfile , homefile , etcfile } {
2021-06-03 11:22:15 +08:00
if isURL ( path ) {
log . Printf ( "I! Using config url: %s" , path )
return path , nil
}
2016-04-29 05:19:03 +08:00
if _ , err := os . Stat ( path ) ; err == nil {
2016-10-01 05:37:56 +08:00
log . Printf ( "I! Using config file: %s" , path )
2016-04-29 05:19:03 +08:00
return path , nil
}
}
// if we got here, we didn't find a file in a default location
return "" , fmt . Errorf ( "No config file specified, and could not find one" +
" in $TELEGRAF_CONFIG_PATH, %s, or %s" , homefile , etcfile )
}
2021-06-03 11:22:15 +08:00
// isURL reports whether str parses as a URL with both a scheme and a host.
func isURL(str string) bool {
	parsed, err := url.Parse(str)
	if err != nil {
		return false
	}
	return parsed.Scheme != "" && parsed.Host != ""
}
2015-11-25 05:22:11 +08:00
// LoadConfig loads the given config file and applies it to c
2015-11-24 07:28:11 +08:00
func ( c * Config ) LoadConfig ( path string ) error {
2016-04-29 05:19:03 +08:00
var err error
if path == "" {
if path , err = getDefaultConfigPath ( ) ; err != nil {
return err
}
}
2018-11-06 06:19:46 +08:00
data , err := loadConfig ( path )
if err != nil {
2020-06-05 22:43:43 +08:00
return fmt . Errorf ( "Error loading config file %s: %w" , path , err )
2018-11-06 06:19:46 +08:00
}
2020-06-05 22:43:43 +08:00
if err = c . LoadConfigData ( data ) ; err != nil {
return fmt . Errorf ( "Error loading config file %s: %w" , path , err )
}
return nil
}
// LoadConfigData loads TOML-formatted config data
func ( c * Config ) LoadConfigData ( data [ ] byte ) error {
2018-11-06 06:19:46 +08:00
tbl , err := parseConfig ( data )
2015-10-17 12:47:13 +08:00
if err != nil {
2020-06-05 22:43:43 +08:00
return fmt . Errorf ( "Error parsing data: %s" , err )
2015-10-17 12:47:13 +08:00
}
2016-04-30 06:12:15 +08:00
// Parse tags tables first:
for _ , tableName := range [ ] string { "tags" , "global_tags" } {
if val , ok := tbl . Fields [ tableName ] ; ok {
subTable , ok := val . ( * ast . Table )
if ! ok {
2020-06-05 22:43:43 +08:00
return fmt . Errorf ( "invalid configuration, bad table name %q" , tableName )
2016-04-30 06:12:15 +08:00
}
2020-11-04 05:40:57 +08:00
if err = c . toml . UnmarshalTable ( subTable , c . Tags ) ; err != nil {
return fmt . Errorf ( "error parsing table name %q: %s" , tableName , err )
2016-04-30 06:12:15 +08:00
}
}
}
// Parse agent table:
if val , ok := tbl . Fields [ "agent" ] ; ok {
subTable , ok := val . ( * ast . Table )
if ! ok {
2020-06-05 22:43:43 +08:00
return fmt . Errorf ( "invalid configuration, error parsing agent table" )
2016-04-30 06:12:15 +08:00
}
2020-11-04 05:40:57 +08:00
if err = c . toml . UnmarshalTable ( subTable , c . Agent ) ; err != nil {
return fmt . Errorf ( "error parsing [agent]: %w" , err )
2016-04-30 06:12:15 +08:00
}
}
2018-11-06 05:34:28 +08:00
if ! c . Agent . OmitHostname {
if c . Agent . Hostname == "" {
hostname , err := os . Hostname ( )
if err != nil {
return err
}
c . Agent . Hostname = hostname
}
c . Tags [ "host" ] = c . Agent . Hostname
}
2022-03-18 11:43:46 +08:00
// Set snmp agent translator default
2022-03-23 23:27:58 +08:00
if c . Agent . SnmpTranslator == "" {
c . Agent . SnmpTranslator = "netsnmp"
2022-03-18 11:43:46 +08:00
}
2020-11-04 05:40:57 +08:00
if len ( c . UnusedFields ) > 0 {
return fmt . Errorf ( "line %d: configuration specified the fields %q, but they weren't used" , tbl . Line , keys ( c . UnusedFields ) )
}
2016-04-30 06:12:15 +08:00
// Parse all the rest of the plugins:
2015-10-17 12:47:13 +08:00
for name , val := range tbl . Fields {
2015-11-14 07:14:07 +08:00
subTable , ok := val . ( * ast . Table )
2015-10-17 12:47:13 +08:00
if ! ok {
2020-06-05 22:43:43 +08:00
return fmt . Errorf ( "invalid configuration, error parsing field %q as table" , name )
2015-10-17 12:47:13 +08:00
}
switch name {
2016-04-30 06:12:15 +08:00
case "agent" , "global_tags" , "tags" :
2015-10-17 12:47:13 +08:00
case "outputs" :
2016-01-08 04:39:43 +08:00
for pluginName , pluginVal := range subTable . Fields {
switch pluginSubTable := pluginVal . ( type ) {
2016-09-08 22:22:10 +08:00
// legacy [outputs.influxdb] support
2015-11-14 07:14:07 +08:00
case * ast . Table :
2016-01-08 04:39:43 +08:00
if err = c . addOutput ( pluginName , pluginSubTable ) ; err != nil {
2020-11-04 05:40:57 +08:00
return fmt . Errorf ( "error parsing %s, %w" , pluginName , err )
2015-11-25 05:22:11 +08:00
}
2015-11-14 07:14:07 +08:00
case [ ] * ast . Table :
2016-01-08 04:39:43 +08:00
for _ , t := range pluginSubTable {
if err = c . addOutput ( pluginName , t ) ; err != nil {
2020-11-04 05:40:57 +08:00
return fmt . Errorf ( "error parsing %s array, %w" , pluginName , err )
2015-11-25 05:22:11 +08:00
}
2015-11-14 07:14:07 +08:00
}
default :
2020-11-04 05:40:57 +08:00
return fmt . Errorf ( "unsupported config format: %s" ,
2020-06-05 22:43:43 +08:00
pluginName )
2015-10-17 12:47:13 +08:00
}
2020-11-04 05:40:57 +08:00
if len ( c . UnusedFields ) > 0 {
return fmt . Errorf ( "plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used" , name , pluginName , subTable . Line , keys ( c . UnusedFields ) )
}
2015-10-17 12:47:13 +08:00
}
2016-01-09 03:49:50 +08:00
case "inputs" , "plugins" :
2015-11-20 10:08:02 +08:00
for pluginName , pluginVal := range subTable . Fields {
switch pluginSubTable := pluginVal . ( type ) {
2016-09-08 22:22:10 +08:00
// legacy [inputs.cpu] support
2015-11-20 10:08:02 +08:00
case * ast . Table :
2016-01-08 04:39:43 +08:00
if err = c . addInput ( pluginName , pluginSubTable ) ; err != nil {
2020-11-04 05:40:57 +08:00
return fmt . Errorf ( "error parsing %s, %w" , pluginName , err )
2015-11-25 05:22:11 +08:00
}
2015-11-20 10:08:02 +08:00
case [ ] * ast . Table :
2015-11-25 05:22:11 +08:00
for _ , t := range pluginSubTable {
2016-01-08 04:39:43 +08:00
if err = c . addInput ( pluginName , t ) ; err != nil {
2020-11-04 05:40:57 +08:00
return fmt . Errorf ( "error parsing %s, %w" , pluginName , err )
2015-11-25 05:22:11 +08:00
}
2015-11-20 10:08:02 +08:00
}
default :
2020-06-05 22:43:43 +08:00
return fmt . Errorf ( "Unsupported config format: %s" ,
pluginName )
2015-11-20 10:08:02 +08:00
}
2020-11-04 05:40:57 +08:00
if len ( c . UnusedFields ) > 0 {
return fmt . Errorf ( "plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used" , name , pluginName , subTable . Line , keys ( c . UnusedFields ) )
}
2015-11-20 10:08:02 +08:00
}
2016-09-08 22:22:10 +08:00
case "processors" :
for pluginName , pluginVal := range subTable . Fields {
switch pluginSubTable := pluginVal . ( type ) {
case [ ] * ast . Table :
for _ , t := range pluginSubTable {
if err = c . addProcessor ( pluginName , t ) ; err != nil {
2020-11-04 05:40:57 +08:00
return fmt . Errorf ( "error parsing %s, %w" , pluginName , err )
2016-09-08 22:22:10 +08:00
}
}
default :
2020-06-05 22:43:43 +08:00
return fmt . Errorf ( "Unsupported config format: %s" ,
pluginName )
2016-09-08 22:22:10 +08:00
}
2020-11-04 05:40:57 +08:00
if len ( c . UnusedFields ) > 0 {
return fmt . Errorf ( "plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used" , name , pluginName , subTable . Line , keys ( c . UnusedFields ) )
}
2016-09-08 22:22:10 +08:00
}
case "aggregators" :
for pluginName , pluginVal := range subTable . Fields {
switch pluginSubTable := pluginVal . ( type ) {
case [ ] * ast . Table :
for _ , t := range pluginSubTable {
if err = c . addAggregator ( pluginName , t ) ; err != nil {
2020-06-05 22:43:43 +08:00
return fmt . Errorf ( "Error parsing %s, %s" , pluginName , err )
2016-09-08 22:22:10 +08:00
}
}
default :
2020-06-05 22:43:43 +08:00
return fmt . Errorf ( "Unsupported config format: %s" ,
pluginName )
2016-09-08 22:22:10 +08:00
}
2020-11-04 05:40:57 +08:00
if len ( c . UnusedFields ) > 0 {
return fmt . Errorf ( "plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used" , name , pluginName , subTable . Line , keys ( c . UnusedFields ) )
}
2016-09-08 22:22:10 +08:00
}
2016-01-08 04:39:43 +08:00
// Assume it's an input input for legacy config file support if no other
2015-11-20 10:08:02 +08:00
// identifiers are present
2015-10-17 12:47:13 +08:00
default :
2016-01-08 04:39:43 +08:00
if err = c . addInput ( name , subTable ) ; err != nil {
2020-06-05 22:43:43 +08:00
return fmt . Errorf ( "Error parsing %s, %s" , name , err )
2015-11-25 05:22:11 +08:00
}
2015-10-17 12:47:13 +08:00
}
}
2016-09-27 23:17:58 +08:00
if len ( c . Processors ) > 1 {
sort . Sort ( c . Processors )
}
2018-11-06 05:34:28 +08:00
2015-10-17 12:47:13 +08:00
return nil
}
2015-11-25 05:22:11 +08:00
2016-06-23 01:54:29 +08:00
// trimBOM trims the Byte-Order-Marks from the beginning of the file.
2017-11-01 08:00:06 +08:00
// this is for Windows compatibility only.
2016-06-23 01:54:29 +08:00
// see https://github.com/influxdata/telegraf/issues/1378
2016-06-24 15:47:31 +08:00
func trimBOM ( f [ ] byte ) [ ] byte {
return bytes . TrimPrefix ( f , [ ] byte ( "\xef\xbb\xbf" ) )
2016-06-23 01:54:29 +08:00
}
2018-01-05 07:28:00 +08:00
// escapeEnv escapes a value for inserting into a TOML string.
func escapeEnv ( value string ) string {
return envVarEscaper . Replace ( value )
}
2018-11-06 06:19:46 +08:00
func loadConfig ( config string ) ( [ ] byte , error ) {
2021-05-27 00:13:50 +08:00
if fetchURLRe . MatchString ( config ) {
u , err := url . Parse ( config )
if err != nil {
return nil , err
}
2018-11-06 06:19:46 +08:00
2021-05-27 00:13:50 +08:00
switch u . Scheme {
case "https" , "http" :
return fetchConfig ( u )
default :
return nil , fmt . Errorf ( "scheme %q not supported" , u . Scheme )
}
2018-11-06 06:19:46 +08:00
}
2021-05-27 00:13:50 +08:00
// If it isn't a https scheme, try it as a file
2021-09-29 05:16:32 +08:00
return os . ReadFile ( config )
2018-11-06 06:19:46 +08:00
}
func fetchConfig ( u * url . URL ) ( [ ] byte , error ) {
req , err := http . NewRequest ( "GET" , u . String ( ) , nil )
if err != nil {
return nil , err
}
2019-10-01 07:55:47 +08:00
if v , exists := os . LookupEnv ( "INFLUX_TOKEN" ) ; exists {
req . Header . Add ( "Authorization" , "Token " + v )
}
2018-11-06 06:19:46 +08:00
req . Header . Add ( "Accept" , "application/toml" )
2020-07-02 03:52:05 +08:00
req . Header . Set ( "User-Agent" , internal . ProductToken ( ) )
2019-02-06 07:15:58 +08:00
2021-02-13 00:38:40 +08:00
retries := 3
for i := 0 ; i <= retries ; i ++ {
resp , err := http . DefaultClient . Do ( req )
if err != nil {
return nil , fmt . Errorf ( "Retry %d of %d failed connecting to HTTP config server %s" , i , retries , err )
}
if resp . StatusCode != http . StatusOK {
if i < retries {
log . Printf ( "Error getting HTTP config. Retry %d of %d in %s. Status=%d" , i , retries , httpLoadConfigRetryInterval , resp . StatusCode )
time . Sleep ( httpLoadConfigRetryInterval )
continue
}
return nil , fmt . Errorf ( "Retry %d of %d failed to retrieve remote config: %s" , i , retries , resp . Status )
}
defer resp . Body . Close ( )
2021-09-29 05:16:32 +08:00
return io . ReadAll ( resp . Body )
2019-02-06 07:15:58 +08:00
}
2021-02-13 00:38:40 +08:00
return nil , nil
2018-11-06 06:19:46 +08:00
}
// parseConfig loads a TOML configuration from a provided path and
// returns the AST produced from the TOML parser. When loading the file, it
// will find environment variables and replace them.
func parseConfig ( contents [ ] byte ) ( * ast . Table , error ) {
2016-06-23 01:54:29 +08:00
contents = trimBOM ( contents )
2016-04-02 03:53:34 +08:00
2019-03-30 07:02:10 +08:00
parameters := envVarRe . FindAllSubmatch ( contents , - 1 )
for _ , parameter := range parameters {
if len ( parameter ) != 3 {
continue
}
2020-11-04 05:40:57 +08:00
var envVar [ ] byte
2019-03-30 07:02:10 +08:00
if parameter [ 1 ] != nil {
2020-11-04 05:40:57 +08:00
envVar = parameter [ 1 ]
2019-03-30 07:02:10 +08:00
} else if parameter [ 2 ] != nil {
2020-11-04 05:40:57 +08:00
envVar = parameter [ 2 ]
2019-03-30 07:02:10 +08:00
} else {
continue
}
2020-11-04 05:40:57 +08:00
envVal , ok := os . LookupEnv ( strings . TrimPrefix ( string ( envVar ) , "$" ) )
2018-01-05 07:28:00 +08:00
if ok {
2020-11-04 05:40:57 +08:00
envVal = escapeEnv ( envVal )
contents = bytes . Replace ( contents , parameter [ 0 ] , [ ] byte ( envVal ) , 1 )
2016-04-02 03:53:34 +08:00
}
}
return toml . Parse ( contents )
}
2016-09-08 22:22:10 +08:00
func ( c * Config ) addAggregator ( name string , table * ast . Table ) error {
creator , ok := aggregators . Aggregators [ name ]
if ! ok {
2021-12-02 03:38:43 +08:00
// Handle removed, deprecated plugins
if di , deprecated := aggregators . Deprecations [ name ] ; deprecated {
printHistoricPluginDeprecationNotice ( "aggregators" , name , di )
return fmt . Errorf ( "plugin deprecated" )
}
2016-09-08 22:22:10 +08:00
return fmt . Errorf ( "Undefined but requested aggregator: %s" , name )
}
aggregator := creator ( )
2020-11-04 05:40:57 +08:00
conf , err := c . buildAggregator ( name , table )
2016-09-08 22:22:10 +08:00
if err != nil {
return err
}
2020-11-04 05:40:57 +08:00
if err := c . toml . UnmarshalTable ( table , aggregator ) ; err != nil {
2016-09-08 22:22:10 +08:00
return err
}
2021-12-02 03:38:43 +08:00
if err := c . printUserDeprecation ( "aggregators" , name , aggregator ) ; err != nil {
return err
}
2016-09-23 01:10:51 +08:00
c . Aggregators = append ( c . Aggregators , models . NewRunningAggregator ( aggregator , conf ) )
2016-09-08 22:22:10 +08:00
return nil
}
2022-01-13 06:54:42 +08:00
func ( c * Config ) probeParser ( table * ast . Table ) bool {
var dataformat string
c . getFieldString ( table , "data_format" , & dataformat )
_ , ok := parsers . Parsers [ dataformat ]
return ok
}
// addParser instantiates the parser selected by the table's
// "data_format" entry on behalf of the plugin parentname, registers it
// in c.Parsers, and returns the running wrapper.
func (c *Config) addParser(parentname string, table *ast.Table) (*models.RunningParser, error) {
	var dataformat string
	c.getFieldString(table, "data_format", &dataformat)

	creator, ok := parsers.Parsers[dataformat]
	if !ok {
		return nil, fmt.Errorf("undefined but requested parser: %s", dataformat)
	}
	parser := creator(parentname)

	conf, err := c.buildParser(parentname, table)
	if err != nil {
		return nil, err
	}

	if err := c.toml.UnmarshalTable(table, parser); err != nil {
		return nil, err
	}

	running := models.NewRunningParser(parser, conf)
	c.Parsers = append(c.Parsers, running)
	return running, nil
}
2016-09-08 22:22:10 +08:00
func ( c * Config ) addProcessor ( name string , table * ast . Table ) error {
creator , ok := processors . Processors [ name ]
if ! ok {
2021-12-02 03:38:43 +08:00
// Handle removed, deprecated plugins
if di , deprecated := processors . Deprecations [ name ] ; deprecated {
printHistoricPluginDeprecationNotice ( "processors" , name , di )
return fmt . Errorf ( "plugin deprecated" )
}
2016-09-08 22:22:10 +08:00
return fmt . Errorf ( "Undefined but requested processor: %s" , name )
}
2020-11-04 05:40:57 +08:00
processorConfig , err := c . buildProcessor ( name , table )
2016-09-08 22:22:10 +08:00
if err != nil {
return err
}
2021-03-23 01:21:36 +08:00
rf , err := c . newRunningProcessor ( creator , processorConfig , table )
2020-06-05 22:43:43 +08:00
if err != nil {
2016-09-08 22:22:10 +08:00
return err
}
2020-06-05 22:43:43 +08:00
c . Processors = append ( c . Processors , rf )
2016-09-08 22:22:10 +08:00
2020-06-05 22:43:43 +08:00
// save a copy for the aggregator
2021-03-23 01:21:36 +08:00
rf , err = c . newRunningProcessor ( creator , processorConfig , table )
2020-06-05 22:43:43 +08:00
if err != nil {
return err
}
c . AggProcessors = append ( c . AggProcessors , rf )
2016-09-08 22:22:10 +08:00
return nil
}
2020-06-05 22:43:43 +08:00
func ( c * Config ) newRunningProcessor (
creator processors . StreamingCreator ,
processorConfig * models . ProcessorConfig ,
table * ast . Table ,
) ( * models . RunningProcessor , error ) {
processor := creator ( )
if p , ok := processor . ( unwrappable ) ; ok {
2020-11-04 05:40:57 +08:00
if err := c . toml . UnmarshalTable ( table , p . Unwrap ( ) ) ; err != nil {
2020-06-05 22:43:43 +08:00
return nil , err
}
} else {
2020-11-04 05:40:57 +08:00
if err := c . toml . UnmarshalTable ( table , processor ) ; err != nil {
2020-06-05 22:43:43 +08:00
return nil , err
}
}
2021-12-02 03:38:43 +08:00
if err := c . printUserDeprecation ( "processors" , processorConfig . Name , processor ) ; err != nil {
return nil , err
}
2020-06-05 22:43:43 +08:00
rf := models . NewRunningProcessor ( processor , processorConfig )
return rf , nil
}
2015-11-25 05:22:11 +08:00
func ( c * Config ) addOutput ( name string , table * ast . Table ) error {
if len ( c . OutputFilters ) > 0 && ! sliceContains ( name , c . OutputFilters ) {
return nil
}
creator , ok := outputs . Outputs [ name ]
if ! ok {
2021-12-02 03:38:43 +08:00
// Handle removed, deprecated plugins
if di , deprecated := outputs . Deprecations [ name ] ; deprecated {
printHistoricPluginDeprecationNotice ( "outputs" , name , di )
return fmt . Errorf ( "plugin deprecated" )
}
2022-02-23 09:47:04 +08:00
return fmt . Errorf ( "undefined but requested output: %s" , name )
2015-11-25 05:22:11 +08:00
}
2015-12-01 22:15:28 +08:00
output := creator ( )
2015-11-25 05:22:11 +08:00
2016-02-11 06:50:07 +08:00
// If the output has a SetSerializer function, then this means it can write
// arbitrary types of output, so build the serializer and set it.
2022-02-23 09:47:04 +08:00
if t , ok := output . ( serializers . SerializerOutput ) ; ok {
2021-03-23 01:21:36 +08:00
serializer , err := c . buildSerializer ( table )
2016-02-11 06:50:07 +08:00
if err != nil {
return err
}
t . SetSerializer ( serializer )
}
2020-11-04 05:40:57 +08:00
outputConfig , err := c . buildOutput ( name , table )
2015-12-01 22:15:28 +08:00
if err != nil {
return err
}
2020-11-04 05:40:57 +08:00
if err := c . toml . UnmarshalTable ( table , output ) ; err != nil {
2015-11-25 05:22:11 +08:00
return err
}
2021-12-02 03:38:43 +08:00
if err := c . printUserDeprecation ( "outputs" , name , output ) ; err != nil {
return err
}
2022-01-19 06:04:09 +08:00
if c , ok := interface { } ( output ) . ( interface { TLSConfig ( ) ( * tls . Config , error ) } ) ; ok {
if _ , err := c . TLSConfig ( ) ; err != nil {
return err
}
}
2021-03-23 01:21:36 +08:00
ro := models . NewRunningOutput ( output , outputConfig , c . Agent . MetricBatchSize , c . Agent . MetricBufferLimit )
2015-11-25 05:22:11 +08:00
c . Outputs = append ( c . Outputs , ro )
return nil
}
2016-01-08 04:39:43 +08:00
func ( c * Config ) addInput ( name string , table * ast . Table ) error {
if len ( c . InputFilters ) > 0 && ! sliceContains ( name , c . InputFilters ) {
2015-11-25 05:22:11 +08:00
return nil
}
2021-12-02 03:38:43 +08:00
2022-01-13 06:54:42 +08:00
// For inputs with parsers we need to compute the set of
// options that is not covered by both, the parser and the input.
// We achieve this by keeping a local book of missing entries
// that counts the number of misses. In case we have a parser
// for the input both need to miss the entry. We count the
// missing entries at the end.
missThreshold := 0
missCount := make ( map [ string ] int )
c . setLocalMissingTomlFieldTracker ( missCount )
defer c . resetMissingTomlFieldTracker ( )
2016-01-08 04:39:43 +08:00
creator , ok := inputs . Inputs [ name ]
2015-11-25 05:22:11 +08:00
if ! ok {
2021-12-02 03:38:43 +08:00
// Handle removed, deprecated plugins
if di , deprecated := inputs . Deprecations [ name ] ; deprecated {
printHistoricPluginDeprecationNotice ( "inputs" , name , di )
return fmt . Errorf ( "plugin deprecated" )
}
2016-01-08 04:39:43 +08:00
return fmt . Errorf ( "Undefined but requested input: %s" , name )
2015-11-25 05:22:11 +08:00
}
2016-01-08 04:39:43 +08:00
input := creator ( )
2015-11-25 05:22:11 +08:00
2022-01-13 06:54:42 +08:00
// If the input has a SetParser or SetParserFunc function, it can accept
// arbitrary data-formats, so build the requested parser and set it.
if t , ok := input . ( telegraf . ParserInput ) ; ok {
missThreshold = 1
if parser , err := c . addParser ( name , table ) ; err == nil {
t . SetParser ( parser )
} else {
missThreshold = 0
// Fallback to the old way of instantiating the parsers.
config , err := c . getParserConfig ( name , table )
if err != nil {
return err
}
parser , err := c . buildParserOld ( name , config )
if err != nil {
return err
}
t . SetParser ( parser )
2016-02-06 08:36:35 +08:00
}
}
2022-01-13 06:54:42 +08:00
// Keep the old interface for backward compatibility
if t , ok := input . ( parsers . ParserInput ) ; ok {
// DEPRECATED: Please switch your plugin to telegraf.ParserInput.
missThreshold = 1
if parser , err := c . addParser ( name , table ) ; err == nil {
t . SetParser ( parser )
} else {
missThreshold = 0
// Fallback to the old way of instantiating the parsers.
config , err := c . getParserConfig ( name , table )
if err != nil {
return err
}
parser , err := c . buildParserOld ( name , config )
if err != nil {
return err
}
t . SetParser ( parser )
2018-09-19 00:23:45 +08:00
}
2022-01-13 06:54:42 +08:00
}
if t , ok := input . ( telegraf . ParserFuncInput ) ; ok {
missThreshold = 1
if c . probeParser ( table ) {
t . SetParserFunc ( func ( ) ( telegraf . Parser , error ) {
parser , err := c . addParser ( name , table )
if err != nil {
return nil , err
}
err = parser . Init ( )
return parser , err
} )
} else {
missThreshold = 0
// Fallback to the old way
config , err := c . getParserConfig ( name , table )
2021-12-16 23:20:04 +08:00
if err != nil {
2022-01-13 06:54:42 +08:00
return err
2021-12-16 23:20:04 +08:00
}
2022-01-13 06:54:42 +08:00
t . SetParserFunc ( func ( ) ( telegraf . Parser , error ) {
return c . buildParserOld ( name , config )
} )
}
}
if t , ok := input . ( parsers . ParserFuncInput ) ; ok {
// DEPRECATED: Please switch your plugin to telegraf.ParserFuncInput.
missThreshold = 1
if c . probeParser ( table ) {
t . SetParserFunc ( func ( ) ( parsers . Parser , error ) {
parser , err := c . addParser ( name , table )
if err != nil {
2021-12-16 23:20:04 +08:00
return nil , err
}
2022-01-13 06:54:42 +08:00
err = parser . Init ( )
return parser , err
} )
} else {
missThreshold = 0
// Fallback to the old way
config , err := c . getParserConfig ( name , table )
if err != nil {
return err
2021-12-16 23:20:04 +08:00
}
2022-01-13 06:54:42 +08:00
t . SetParserFunc ( func ( ) ( parsers . Parser , error ) {
return c . buildParserOld ( name , config )
} )
}
2018-09-19 00:23:45 +08:00
}
2020-11-04 05:40:57 +08:00
pluginConfig , err := c . buildInput ( name , table )
2015-11-25 05:22:11 +08:00
if err != nil {
return err
}
2015-12-01 22:15:28 +08:00
2020-11-04 05:40:57 +08:00
if err := c . toml . UnmarshalTable ( table , input ) ; err != nil {
2015-12-01 22:15:28 +08:00
return err
}
2021-12-02 03:38:43 +08:00
if err := c . printUserDeprecation ( "inputs" , name , input ) ; err != nil {
return err
}
2022-01-19 06:04:09 +08:00
if c , ok := interface { } ( input ) . ( interface { TLSConfig ( ) ( * tls . Config , error ) } ) ; ok {
if _ , err := c . TLSConfig ( ) ; err != nil {
return err
}
}
2016-11-07 16:34:46 +08:00
rp := models . NewRunningInput ( input , pluginConfig )
2018-11-06 05:34:28 +08:00
rp . SetDefaultTags ( c . Tags )
2016-01-08 04:39:43 +08:00
c . Inputs = append ( c . Inputs , rp )
2022-01-13 06:54:42 +08:00
// Check the number of misses against the threshold
for key , count := range missCount {
if count <= missThreshold {
continue
}
if err := c . missingTomlField ( nil , key ) ; err != nil {
return err
}
}
2015-11-25 05:22:11 +08:00
return nil
}
2016-10-06 20:29:46 +08:00
// buildAggregator parses Aggregator specific items from the ast.Table,
// builds the filter and returns a
// models.AggregatorConfig to be inserted into models.RunningAggregator
2020-11-04 05:40:57 +08:00
func ( c * Config ) buildAggregator ( name string , tbl * ast . Table ) ( * models . AggregatorConfig , error ) {
2016-10-07 23:43:44 +08:00
conf := & models . AggregatorConfig {
Name : name ,
Delay : time . Millisecond * 100 ,
Period : time . Second * 30 ,
2019-08-01 03:52:12 +08:00
Grace : time . Second * 0 ,
2016-10-07 23:43:44 +08:00
}
2016-09-23 01:10:51 +08:00
2020-11-04 05:40:57 +08:00
c . getFieldDuration ( tbl , "period" , & conf . Period )
c . getFieldDuration ( tbl , "delay" , & conf . Delay )
c . getFieldDuration ( tbl , "grace" , & conf . Grace )
c . getFieldBool ( tbl , "drop_original" , & conf . DropOriginal )
c . getFieldString ( tbl , "name_prefix" , & conf . MeasurementPrefix )
c . getFieldString ( tbl , "name_suffix" , & conf . MeasurementSuffix )
c . getFieldString ( tbl , "name_override" , & conf . NameOverride )
c . getFieldString ( tbl , "alias" , & conf . Alias )
2019-08-22 07:49:07 +08:00
2016-09-08 22:22:10 +08:00
conf . Tags = make ( map [ string ] string )
if node , ok := tbl . Fields [ "tags" ] ; ok {
if subtbl , ok := node . ( * ast . Table ) ; ok {
2020-11-04 05:40:57 +08:00
if err := c . toml . UnmarshalTable ( subtbl , conf . Tags ) ; err != nil {
2020-06-27 02:30:29 +08:00
return nil , fmt . Errorf ( "could not parse tags for input %s" , name )
2016-09-08 22:22:10 +08:00
}
}
}
2020-11-04 05:40:57 +08:00
if c . hasErrs ( ) {
return nil , c . firstErr ( )
}
2016-09-08 22:22:10 +08:00
var err error
2020-11-04 05:40:57 +08:00
conf . Filter , err = c . buildFilter ( tbl )
2016-09-08 22:22:10 +08:00
if err != nil {
return conf , err
}
return conf , nil
}
2022-01-13 06:54:42 +08:00
// buildParser parses Parser specific items from the ast.Table,
// builds the filter and returns a
// models.ParserConfig to be inserted into models.RunningParser
func ( c * Config ) buildParser ( name string , tbl * ast . Table ) ( * models . ParserConfig , error ) {
var dataformat string
c . getFieldString ( tbl , "data_format" , & dataformat )
conf := & models . ParserConfig {
Parent : name ,
DataFormat : dataformat ,
}
return conf , nil
}
2016-10-06 20:29:46 +08:00
// buildProcessor parses Processor specific items from the ast.Table,
// builds the filter and returns a
// models.ProcessorConfig to be inserted into models.RunningProcessor
2020-11-04 05:40:57 +08:00
func ( c * Config ) buildProcessor ( name string , tbl * ast . Table ) ( * models . ProcessorConfig , error ) {
2016-09-08 22:22:10 +08:00
conf := & models . ProcessorConfig { Name : name }
2020-11-04 05:40:57 +08:00
c . getFieldInt64 ( tbl , "order" , & conf . Order )
c . getFieldString ( tbl , "alias" , & conf . Alias )
2016-09-27 23:17:58 +08:00
2020-11-04 05:40:57 +08:00
if c . hasErrs ( ) {
return nil , c . firstErr ( )
2019-08-22 07:49:07 +08:00
}
2016-09-08 22:22:10 +08:00
var err error
2020-11-04 05:40:57 +08:00
conf . Filter , err = c . buildFilter ( tbl )
2016-09-08 22:22:10 +08:00
if err != nil {
return conf , err
}
return conf , nil
}
2016-02-23 04:35:06 +08:00
// buildFilter builds a Filter
// (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to
2016-07-28 19:31:11 +08:00
// be inserted into the models.OutputConfig/models.InputConfig
2016-04-13 07:06:27 +08:00
// to be used for glob filtering on tags and measurements
2020-11-04 05:40:57 +08:00
func ( c * Config ) buildFilter ( tbl * ast . Table ) ( models . Filter , error ) {
2016-07-28 19:31:11 +08:00
f := models . Filter { }
2015-11-25 05:22:11 +08:00
2020-11-04 05:40:57 +08:00
c . getFieldStringSlice ( tbl , "namepass" , & f . NamePass )
c . getFieldStringSlice ( tbl , "namedrop" , & f . NameDrop )
2015-11-25 05:22:11 +08:00
2020-11-04 05:40:57 +08:00
c . getFieldStringSlice ( tbl , "pass" , & f . FieldPass )
c . getFieldStringSlice ( tbl , "fieldpass" , & f . FieldPass )
2016-02-20 13:35:12 +08:00
2020-11-04 05:40:57 +08:00
c . getFieldStringSlice ( tbl , "drop" , & f . FieldDrop )
c . getFieldStringSlice ( tbl , "fielddrop" , & f . FieldDrop )
2016-02-20 13:35:12 +08:00
2020-11-04 05:40:57 +08:00
c . getFieldTagFilter ( tbl , "tagpass" , & f . TagPass )
c . getFieldTagFilter ( tbl , "tagdrop" , & f . TagDrop )
2015-11-25 05:22:11 +08:00
2020-11-04 05:40:57 +08:00
c . getFieldStringSlice ( tbl , "tagexclude" , & f . TagExclude )
c . getFieldStringSlice ( tbl , "taginclude" , & f . TagInclude )
2015-11-25 05:22:11 +08:00
2020-11-04 05:40:57 +08:00
if c . hasErrs ( ) {
return f , c . firstErr ( )
2016-04-13 07:06:27 +08:00
}
2016-09-05 23:16:37 +08:00
if err := f . Compile ( ) ; err != nil {
2016-04-13 07:06:27 +08:00
return f , err
}
return f , nil
2015-12-01 22:15:28 +08:00
}
2016-01-08 04:39:43 +08:00
// buildInput parses input specific items from the ast.Table,
2015-12-12 04:07:32 +08:00
// builds the filter and returns a
2016-07-28 19:31:11 +08:00
// models.InputConfig to be inserted into models.RunningInput
2020-11-04 05:40:57 +08:00
func ( c * Config ) buildInput ( name string , tbl * ast . Table ) ( * models . InputConfig , error ) {
2016-07-28 19:31:11 +08:00
cp := & models . InputConfig { Name : name }
2020-11-04 05:40:57 +08:00
c . getFieldDuration ( tbl , "interval" , & cp . Interval )
c . getFieldDuration ( tbl , "precision" , & cp . Precision )
c . getFieldDuration ( tbl , "collection_jitter" , & cp . CollectionJitter )
2022-02-16 01:39:12 +08:00
c . getFieldDuration ( tbl , "collection_offset" , & cp . CollectionOffset )
2020-11-04 05:40:57 +08:00
c . getFieldString ( tbl , "name_prefix" , & cp . MeasurementPrefix )
c . getFieldString ( tbl , "name_suffix" , & cp . MeasurementSuffix )
c . getFieldString ( tbl , "name_override" , & cp . NameOverride )
c . getFieldString ( tbl , "alias" , & cp . Alias )
2019-08-22 07:49:07 +08:00
2015-12-12 04:07:32 +08:00
cp . Tags = make ( map [ string ] string )
if node , ok := tbl . Fields [ "tags" ] ; ok {
if subtbl , ok := node . ( * ast . Table ) ; ok {
2020-11-04 05:40:57 +08:00
if err := c . toml . UnmarshalTable ( subtbl , cp . Tags ) ; err != nil {
return nil , fmt . Errorf ( "could not parse tags for input %s" , name )
2015-12-12 04:07:32 +08:00
}
}
}
2020-11-04 05:40:57 +08:00
if c . hasErrs ( ) {
return nil , c . firstErr ( )
}
2016-04-13 07:06:27 +08:00
var err error
2020-11-04 05:40:57 +08:00
cp . Filter , err = c . buildFilter ( tbl )
2016-04-13 07:06:27 +08:00
if err != nil {
return cp , err
}
2015-12-01 22:15:28 +08:00
return cp , nil
}
2022-01-13 06:54:42 +08:00
// buildParserOld grabs the necessary entries from the ast.Table for creating
2016-02-06 08:36:35 +08:00
// a parsers.Parser object, and creates it, which can then be added onto
// an Input object.
2022-01-13 06:54:42 +08:00
func ( c * Config ) buildParserOld ( name string , config * parsers . Config ) ( telegraf . Parser , error ) {
2021-03-04 04:26:09 +08:00
parser , err := parsers . NewParser ( config )
if err != nil {
return nil , err
}
logger := models . NewLogger ( "parsers" , config . DataFormat , name )
models . SetLoggerOnPlugin ( parser , logger )
2021-07-02 04:48:16 +08:00
if initializer , ok := parser . ( telegraf . Initializer ) ; ok {
if err := initializer . Init ( ) ; err != nil {
return nil , err
}
}
2021-03-04 04:26:09 +08:00
return parser , nil
2018-09-19 00:23:45 +08:00
}
2020-11-04 05:40:57 +08:00
// getParserConfig grabs the data-format related entries from the plugin's
// ast.Table and assembles a parsers.Config from them. name is the plugin
// name; it is used as the parser's metric name and to select legacy
// defaults. Field-level parse errors are accumulated on c; the first one
// (if any) is returned.
func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) {
	pc := &parsers.Config{
		// Strict JSON parsing is on by default; "json_strict" may turn it off.
		JSONStrict: true,
	}

	c.getFieldString(tbl, "data_format", &pc.DataFormat)

	// Legacy support, exec plugin originally parsed JSON by default.
	if name == "exec" && pc.DataFormat == "" {
		pc.DataFormat = "json"
	} else if pc.DataFormat == "" {
		// Every other plugin defaults to influx line protocol.
		pc.DataFormat = "influx"
	}

	// Graphite options.
	c.getFieldString(tbl, "separator", &pc.Separator)
	c.getFieldStringSlice(tbl, "templates", &pc.Templates)

	// JSON (v1) parser options.
	c.getFieldStringSlice(tbl, "tag_keys", &pc.TagKeys)
	c.getFieldStringSlice(tbl, "json_string_fields", &pc.JSONStringFields)
	c.getFieldString(tbl, "json_name_key", &pc.JSONNameKey)
	c.getFieldString(tbl, "json_query", &pc.JSONQuery)
	c.getFieldString(tbl, "json_time_key", &pc.JSONTimeKey)
	c.getFieldString(tbl, "json_time_format", &pc.JSONTimeFormat)
	c.getFieldString(tbl, "json_timezone", &pc.JSONTimezone)
	c.getFieldBool(tbl, "json_strict", &pc.JSONStrict)

	// Value parser option.
	c.getFieldString(tbl, "data_type", &pc.DataType)

	// Collectd options.
	c.getFieldString(tbl, "collectd_auth_file", &pc.CollectdAuthFile)
	c.getFieldString(tbl, "collectd_security_level", &pc.CollectdSecurityLevel)
	c.getFieldString(tbl, "collectd_parse_multivalue", &pc.CollectdSplit)
	c.getFieldStringSlice(tbl, "collectd_typesdb", &pc.CollectdTypesDB)

	// Dropwizard options.
	c.getFieldString(tbl, "dropwizard_metric_registry_path", &pc.DropwizardMetricRegistryPath)
	c.getFieldString(tbl, "dropwizard_time_path", &pc.DropwizardTimePath)
	c.getFieldString(tbl, "dropwizard_time_format", &pc.DropwizardTimeFormat)
	c.getFieldString(tbl, "dropwizard_tags_path", &pc.DropwizardTagsPath)
	c.getFieldStringMap(tbl, "dropwizard_tag_paths", &pc.DropwizardTagPathsMap)

	//for grok data_format
	c.getFieldStringSlice(tbl, "grok_named_patterns", &pc.GrokNamedPatterns)
	c.getFieldStringSlice(tbl, "grok_patterns", &pc.GrokPatterns)
	c.getFieldString(tbl, "grok_custom_patterns", &pc.GrokCustomPatterns)
	c.getFieldStringSlice(tbl, "grok_custom_pattern_files", &pc.GrokCustomPatternFiles)
	c.getFieldString(tbl, "grok_timezone", &pc.GrokTimezone)
	c.getFieldString(tbl, "grok_unique_timestamp", &pc.GrokUniqueTimestamp)

	c.getFieldStringSlice(tbl, "form_urlencoded_tag_keys", &pc.FormUrlencodedTagKeys)

	c.getFieldString(tbl, "value_field_name", &pc.ValueFieldName)

	// for influx parser
	c.getFieldString(tbl, "influx_parser_type", &pc.InfluxParserType)

	//for XPath parser family
	if choice.Contains(pc.DataFormat, []string{"xml", "xpath_json", "xpath_msgpack", "xpath_protobuf"}) {
		c.getFieldString(tbl, "xpath_protobuf_file", &pc.XPathProtobufFile)
		c.getFieldString(tbl, "xpath_protobuf_type", &pc.XPathProtobufType)
		c.getFieldStringSlice(tbl, "xpath_protobuf_import_paths", &pc.XPathProtobufImportPaths)
		c.getFieldBool(tbl, "xpath_print_document", &pc.XPathPrintDocument)

		// Determine the actual xpath configuration tables
		node, xpathOK := tbl.Fields["xpath"]
		if !xpathOK {
			// Add this for backward compatibility: the sub-table used to be
			// named after the data format (e.g. [[inputs.x.xml]]).
			node, xpathOK = tbl.Fields[pc.DataFormat]
		}
		if xpathOK {
			if subtbls, ok := node.([]*ast.Table); ok {
				pc.XPathConfig = make([]parsers.XPathConfig, len(subtbls))
				for i, subtbl := range subtbls {
					// Fill a copy of the slice element and write it back
					// once all fields are decoded.
					subcfg := pc.XPathConfig[i]
					c.getFieldString(subtbl, "metric_name", &subcfg.MetricQuery)
					c.getFieldString(subtbl, "metric_selection", &subcfg.Selection)
					c.getFieldString(subtbl, "timestamp", &subcfg.Timestamp)
					c.getFieldString(subtbl, "timestamp_format", &subcfg.TimestampFmt)
					c.getFieldStringMap(subtbl, "tags", &subcfg.Tags)
					c.getFieldStringMap(subtbl, "fields", &subcfg.Fields)
					c.getFieldStringMap(subtbl, "fields_int", &subcfg.FieldsInt)
					c.getFieldString(subtbl, "field_selection", &subcfg.FieldSelection)
					c.getFieldBool(subtbl, "field_name_expansion", &subcfg.FieldNameExpand)
					c.getFieldString(subtbl, "field_name", &subcfg.FieldNameQuery)
					c.getFieldString(subtbl, "field_value", &subcfg.FieldValueQuery)
					c.getFieldString(subtbl, "tag_selection", &subcfg.TagSelection)
					c.getFieldBool(subtbl, "tag_name_expansion", &subcfg.TagNameExpand)
					c.getFieldString(subtbl, "tag_name", &subcfg.TagNameQuery)
					c.getFieldString(subtbl, "tag_value", &subcfg.TagValueQuery)
					pc.XPathConfig[i] = subcfg
				}
			}
		}
	}

	//for JSONPath parser
	if node, ok := tbl.Fields["json_v2"]; ok {
		if metricConfigs, ok := node.([]*ast.Table); ok {
			pc.JSONV2Config = make([]parsers.JSONV2Config, len(metricConfigs))
			for i, metricConfig := range metricConfigs {
				// Same copy-then-write-back pattern as the xpath loop above.
				mc := pc.JSONV2Config[i]
				c.getFieldString(metricConfig, "measurement_name", &mc.MeasurementName)
				if mc.MeasurementName == "" {
					// Fall back to the plugin name when no explicit
					// measurement name is configured.
					mc.MeasurementName = name
				}
				c.getFieldString(metricConfig, "measurement_name_path", &mc.MeasurementNamePath)
				c.getFieldString(metricConfig, "timestamp_path", &mc.TimestampPath)
				c.getFieldString(metricConfig, "timestamp_format", &mc.TimestampFormat)
				c.getFieldString(metricConfig, "timestamp_timezone", &mc.TimestampTimezone)

				mc.Fields = getFieldSubtable(c, metricConfig)
				mc.Tags = getTagSubtable(c, metricConfig)

				// Nested [[...json_v2.object]] sub-tables.
				if objectconfigs, ok := metricConfig.Fields["object"]; ok {
					if objectconfigs, ok := objectconfigs.([]*ast.Table); ok {
						for _, objectConfig := range objectconfigs {
							var o json_v2.JSONObject
							c.getFieldString(objectConfig, "path", &o.Path)
							c.getFieldBool(objectConfig, "optional", &o.Optional)
							c.getFieldString(objectConfig, "timestamp_key", &o.TimestampKey)
							c.getFieldString(objectConfig, "timestamp_format", &o.TimestampFormat)
							c.getFieldString(objectConfig, "timestamp_timezone", &o.TimestampTimezone)
							c.getFieldBool(objectConfig, "disable_prepend_keys", &o.DisablePrependKeys)
							c.getFieldStringSlice(objectConfig, "included_keys", &o.IncludedKeys)
							c.getFieldStringSlice(objectConfig, "excluded_keys", &o.ExcludedKeys)
							c.getFieldStringSlice(objectConfig, "tags", &o.Tags)
							c.getFieldStringMap(objectConfig, "renames", &o.Renames)
							c.getFieldStringMap(objectConfig, "fields", &o.Fields)

							o.FieldPaths = getFieldSubtable(c, objectConfig)
							o.TagPaths = getTagSubtable(c, objectConfig)

							mc.JSONObjects = append(mc.JSONObjects, o)
						}
					}
				}
				pc.JSONV2Config[i] = mc
			}
		}
	}

	pc.MetricName = name
	if c.hasErrs() {
		return nil, c.firstErr()
	}
	return pc, nil
}
2021-10-05 02:19:06 +08:00
func getFieldSubtable ( c * Config , metricConfig * ast . Table ) [ ] json_v2 . DataSet {
var fields [ ] json_v2 . DataSet
if fieldConfigs , ok := metricConfig . Fields [ "field" ] ; ok {
if fieldConfigs , ok := fieldConfigs . ( [ ] * ast . Table ) ; ok {
for _ , fieldconfig := range fieldConfigs {
var f json_v2 . DataSet
c . getFieldString ( fieldconfig , "path" , & f . Path )
c . getFieldString ( fieldconfig , "rename" , & f . Rename )
c . getFieldString ( fieldconfig , "type" , & f . Type )
2022-03-12 03:51:37 +08:00
c . getFieldBool ( fieldconfig , "optional" , & f . Optional )
2021-10-05 02:19:06 +08:00
fields = append ( fields , f )
}
}
}
return fields
}
func getTagSubtable ( c * Config , metricConfig * ast . Table ) [ ] json_v2 . DataSet {
var tags [ ] json_v2 . DataSet
if fieldConfigs , ok := metricConfig . Fields [ "tag" ] ; ok {
if fieldConfigs , ok := fieldConfigs . ( [ ] * ast . Table ) ; ok {
for _ , fieldconfig := range fieldConfigs {
var t json_v2 . DataSet
c . getFieldString ( fieldconfig , "path" , & t . Path )
c . getFieldString ( fieldconfig , "rename" , & t . Rename )
t . Type = "string"
tags = append ( tags , t )
2022-03-12 03:51:37 +08:00
c . getFieldBool ( fieldconfig , "optional" , & t . Optional )
2021-10-05 02:19:06 +08:00
}
}
}
return tags
}
2020-11-04 05:40:57 +08:00
// buildSerializer grabs the necessary entries from the ast.Table for creating
// a serializers.Serializer object, and creates it, which can then be added onto
// an Output object.
2021-03-23 01:21:36 +08:00
func ( c * Config ) buildSerializer ( tbl * ast . Table ) ( serializers . Serializer , error ) {
sc := & serializers . Config { TimestampUnits : 1 * time . Second }
2020-11-04 05:40:57 +08:00
c . getFieldString ( tbl , "data_format" , & sc . DataFormat )
if sc . DataFormat == "" {
sc . DataFormat = "influx"
}
c . getFieldString ( tbl , "prefix" , & sc . Prefix )
c . getFieldString ( tbl , "template" , & sc . Template )
c . getFieldStringSlice ( tbl , "templates" , & sc . Templates )
c . getFieldString ( tbl , "carbon2_format" , & sc . Carbon2Format )
2021-04-08 22:31:31 +08:00
c . getFieldString ( tbl , "carbon2_sanitize_replace_char" , & sc . Carbon2SanitizeReplaceChar )
2020-11-04 05:40:57 +08:00
c . getFieldInt ( tbl , "influx_max_line_bytes" , & sc . InfluxMaxLineBytes )
c . getFieldBool ( tbl , "influx_sort_fields" , & sc . InfluxSortFields )
c . getFieldBool ( tbl , "influx_uint_support" , & sc . InfluxUintSupport )
c . getFieldBool ( tbl , "graphite_tag_support" , & sc . GraphiteTagSupport )
2021-05-19 00:29:30 +08:00
c . getFieldString ( tbl , "graphite_tag_sanitize_mode" , & sc . GraphiteTagSanitizeMode )
2020-11-04 05:40:57 +08:00
c . getFieldString ( tbl , "graphite_separator" , & sc . GraphiteSeparator )
c . getFieldDuration ( tbl , "json_timestamp_units" , & sc . TimestampUnits )
2021-09-21 23:12:44 +08:00
c . getFieldString ( tbl , "json_timestamp_format" , & sc . TimestampFormat )
2020-11-04 05:40:57 +08:00
c . getFieldBool ( tbl , "splunkmetric_hec_routing" , & sc . HecRouting )
c . getFieldBool ( tbl , "splunkmetric_multimetric" , & sc . SplunkmetricMultiMetric )
c . getFieldStringSlice ( tbl , "wavefront_source_override" , & sc . WavefrontSourceOverride )
c . getFieldBool ( tbl , "wavefront_use_strict" , & sc . WavefrontUseStrict )
2021-12-15 06:04:30 +08:00
c . getFieldBool ( tbl , "wavefront_disable_prefix_conversion" , & sc . WavefrontDisablePrefixConversion )
2020-11-04 05:40:57 +08:00
c . getFieldBool ( tbl , "prometheus_export_timestamp" , & sc . PrometheusExportTimestamp )
c . getFieldBool ( tbl , "prometheus_sort_metrics" , & sc . PrometheusSortMetrics )
c . getFieldBool ( tbl , "prometheus_string_as_label" , & sc . PrometheusStringAsLabel )
if c . hasErrs ( ) {
return nil , c . firstErr ( )
}
return serializers . NewSerializer ( sc )
}
// buildOutput parses output specific items from the ast.Table,
// builds the filter and returns an
// models.OutputConfig to be inserted into models.RunningInput
// Note: error exists in the return for future calls that might require error
func ( c * Config ) buildOutput ( name string , tbl * ast . Table ) ( * models . OutputConfig , error ) {
filter , err := c . buildFilter ( tbl )
if err != nil {
return nil , err
}
oc := & models . OutputConfig {
Name : name ,
Filter : filter ,
}
// TODO: support FieldPass/FieldDrop on outputs
c . getFieldDuration ( tbl , "flush_interval" , & oc . FlushInterval )
2021-02-10 03:12:49 +08:00
c . getFieldDuration ( tbl , "flush_jitter" , & oc . FlushJitter )
2020-11-04 05:40:57 +08:00
c . getFieldInt ( tbl , "metric_buffer_limit" , & oc . MetricBufferLimit )
c . getFieldInt ( tbl , "metric_batch_size" , & oc . MetricBatchSize )
c . getFieldString ( tbl , "alias" , & oc . Alias )
c . getFieldString ( tbl , "name_override" , & oc . NameOverride )
c . getFieldString ( tbl , "name_suffix" , & oc . NameSuffix )
c . getFieldString ( tbl , "name_prefix" , & oc . NamePrefix )
if c . hasErrs ( ) {
return nil , c . firstErr ( )
}
return oc , nil
}
2021-03-23 01:21:36 +08:00
// missingTomlField is installed as the toml decoder's MissingField callback.
// It is invoked for every key present in the TOML input but absent from the
// target plugin struct. Keys common to all plugins (agent/filter/parser/
// serializer options handled elsewhere in this package) are ignored; any
// other key is recorded in c.UnusedFields so it can be reported to the user.
// It always returns nil so decoding continues.
func (c *Config) missingTomlField(_ reflect.Type, key string) error {
	switch key {
	case "alias", "carbon2_format", "carbon2_sanitize_replace_char", "collectd_auth_file",
		"collectd_parse_multivalue", "collectd_security_level", "collectd_typesdb", "collection_jitter",
		"collection_offset",
		"data_format", "data_type", "delay", "drop", "drop_original", "dropwizard_metric_registry_path",
		"dropwizard_tag_paths", "dropwizard_tags_path", "dropwizard_time_format", "dropwizard_time_path",
		"fielddrop", "fieldpass", "flush_interval", "flush_jitter", "form_urlencoded_tag_keys",
		"grace", "graphite_separator", "graphite_tag_sanitize_mode", "graphite_tag_support",
		"grok_custom_pattern_files", "grok_custom_patterns", "grok_named_patterns", "grok_patterns",
		"grok_timezone", "grok_unique_timestamp", "influx_max_line_bytes", "influx_parser_type", "influx_sort_fields",
		"influx_uint_support", "interval", "json_timestamp_units", "json_v2",
		"lvm", "metric_batch_size", "metric_buffer_limit", "name_override", "name_prefix",
		"name_suffix", "namedrop", "namepass", "order", "pass", "period", "precision",
		"prefix", "prometheus_export_timestamp", "prometheus_ignore_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label",
		"separator", "splunkmetric_hec_routing", "splunkmetric_multimetric", "tag_keys",
		"tagdrop", "tagexclude", "taginclude", "tagpass", "tags", "template", "templates",
		"value_field_name", "wavefront_source_override", "wavefront_use_strict", "wavefront_disable_prefix_conversion":

		// ignore fields that are common to all plugins.
	default:
		// Unknown key: remember it for later reporting. The mutex guards
		// UnusedFields because plugins may be loaded concurrently.
		c.unusedFieldsMutex.Lock()
		c.UnusedFields[key] = true
		c.unusedFieldsMutex.Unlock()
	}
	return nil
}
2016-02-06 08:36:35 +08:00
2022-01-13 06:54:42 +08:00
// setLocalMissingTomlFieldTracker swaps the toml decoder's missing-field
// callback for one that counts how often each unknown key is seen, storing
// the counts in the caller-supplied map. Restore the default handler with
// resetMissingTomlFieldTracker when done.
func (c *Config) setLocalMissingTomlFieldTracker(counter map[string]int) {
	c.toml.MissingField = func(_ reflect.Type, key string) error {
		// Indexing a missing key yields 0, so a single increment handles
		// both first and repeated occurrences. (The previous version also
		// shadowed the method receiver "c" inside the closure.)
		counter[key]++
		return nil
	}
}
// resetMissingTomlFieldTracker restores the default missing-field handler
// (missingTomlField) after a local tracker was installed via
// setLocalMissingTomlFieldTracker.
func (c *Config) resetMissingTomlFieldTracker() {
	c.toml.MissingField = c.missingTomlField
}
2020-11-04 05:40:57 +08:00
func ( c * Config ) getFieldString ( tbl * ast . Table , fieldName string , target * string ) {
if node , ok := tbl . Fields [ fieldName ] ; ok {
2016-02-06 08:36:35 +08:00
if kv , ok := node . ( * ast . KeyValue ) ; ok {
if str , ok := kv . Value . ( * ast . String ) ; ok {
2020-11-04 05:40:57 +08:00
* target = str . Value
2016-02-06 08:36:35 +08:00
}
}
}
2020-11-04 05:40:57 +08:00
}
2016-02-06 08:36:35 +08:00
2020-11-04 05:40:57 +08:00
func ( c * Config ) getFieldDuration ( tbl * ast . Table , fieldName string , target interface { } ) {
if node , ok := tbl . Fields [ fieldName ] ; ok {
2016-02-06 08:36:35 +08:00
if kv , ok := node . ( * ast . KeyValue ) ; ok {
2020-11-04 05:40:57 +08:00
if str , ok := kv . Value . ( * ast . String ) ; ok {
d , err := time . ParseDuration ( str . Value )
if err != nil {
c . addError ( tbl , fmt . Errorf ( "error parsing duration: %w" , err ) )
return
2016-02-06 08:36:35 +08:00
}
2020-11-04 05:40:57 +08:00
targetVal := reflect . ValueOf ( target ) . Elem ( )
targetVal . Set ( reflect . ValueOf ( d ) )
2016-02-06 08:36:35 +08:00
}
}
}
2020-11-04 05:40:57 +08:00
}
2016-02-06 08:36:35 +08:00
2020-11-04 05:40:57 +08:00
func ( c * Config ) getFieldBool ( tbl * ast . Table , fieldName string , target * bool ) {
var err error
if node , ok := tbl . Fields [ fieldName ] ; ok {
2016-02-06 08:36:35 +08:00
if kv , ok := node . ( * ast . KeyValue ) ; ok {
2020-11-04 05:40:57 +08:00
switch t := kv . Value . ( type ) {
case * ast . Boolean :
* target , err = t . Boolean ( )
if err != nil {
c . addError ( tbl , fmt . Errorf ( "unknown boolean value type %q, expecting boolean" , kv . Value ) )
return
2016-02-06 08:36:35 +08:00
}
2020-11-04 05:40:57 +08:00
case * ast . String :
* target , err = strconv . ParseBool ( t . Value )
if err != nil {
c . addError ( tbl , fmt . Errorf ( "unknown boolean value type %q, expecting boolean" , kv . Value ) )
return
}
default :
c . addError ( tbl , fmt . Errorf ( "unknown boolean value type %q, expecting boolean" , kv . Value . Source ( ) ) )
return
2016-02-06 08:36:35 +08:00
}
}
}
2020-11-04 05:40:57 +08:00
}
2016-02-06 08:36:35 +08:00
2020-11-04 05:40:57 +08:00
func ( c * Config ) getFieldInt ( tbl * ast . Table , fieldName string , target * int ) {
if node , ok := tbl . Fields [ fieldName ] ; ok {
2018-08-23 10:26:48 +08:00
if kv , ok := node . ( * ast . KeyValue ) ; ok {
2020-11-04 05:40:57 +08:00
if iAst , ok := kv . Value . ( * ast . Integer ) ; ok {
i , err := iAst . Int ( )
if err != nil {
c . addError ( tbl , fmt . Errorf ( "unexpected int type %q, expecting int" , iAst . Value ) )
return
2018-08-23 10:26:48 +08:00
}
2020-11-04 05:40:57 +08:00
* target = int ( i )
2018-08-23 10:26:48 +08:00
}
}
}
2020-11-04 05:40:57 +08:00
}
2018-08-23 10:26:48 +08:00
2020-11-04 05:40:57 +08:00
func ( c * Config ) getFieldInt64 ( tbl * ast . Table , fieldName string , target * int64 ) {
if node , ok := tbl . Fields [ fieldName ] ; ok {
2020-01-22 02:10:02 +08:00
if kv , ok := node . ( * ast . KeyValue ) ; ok {
2020-11-04 05:40:57 +08:00
if iAst , ok := kv . Value . ( * ast . Integer ) ; ok {
i , err := iAst . Int ( )
2020-01-22 02:10:02 +08:00
if err != nil {
2020-11-04 05:40:57 +08:00
c . addError ( tbl , fmt . Errorf ( "unexpected int type %q, expecting int" , iAst . Value ) )
return
2020-01-22 02:10:02 +08:00
}
2020-11-04 05:40:57 +08:00
* target = i
2020-01-22 02:10:02 +08:00
}
}
}
2020-11-04 05:40:57 +08:00
}
2020-01-22 02:10:02 +08:00
2020-11-04 05:40:57 +08:00
func ( c * Config ) getFieldStringSlice ( tbl * ast . Table , fieldName string , target * [ ] string ) {
if node , ok := tbl . Fields [ fieldName ] ; ok {
2017-04-13 01:41:26 +08:00
if kv , ok := node . ( * ast . KeyValue ) ; ok {
2022-02-23 09:47:04 +08:00
ary , ok := kv . Value . ( * ast . Array )
if ! ok {
2021-02-27 02:58:13 +08:00
c . addError ( tbl , fmt . Errorf ( "found unexpected format while parsing %q, expecting string array/slice format" , fieldName ) )
return
2017-04-13 01:41:26 +08:00
}
2022-02-23 09:47:04 +08:00
for _ , elem := range ary . Value {
if str , ok := elem . ( * ast . String ) ; ok {
* target = append ( * target , str . Value )
}
}
2017-04-13 01:41:26 +08:00
}
}
2020-11-04 05:40:57 +08:00
}
2021-03-04 04:26:09 +08:00
2020-11-04 05:40:57 +08:00
func ( c * Config ) getFieldTagFilter ( tbl * ast . Table , fieldName string , target * [ ] models . TagFilter ) {
if node , ok := tbl . Fields [ fieldName ] ; ok {
2018-01-09 07:11:36 +08:00
if subtbl , ok := node . ( * ast . Table ) ; ok {
for name , val := range subtbl . Fields {
if kv , ok := val . ( * ast . KeyValue ) ; ok {
2022-02-23 09:47:04 +08:00
ary , ok := kv . Value . ( * ast . Array )
if ! ok {
2021-02-27 02:58:13 +08:00
c . addError ( tbl , fmt . Errorf ( "found unexpected format while parsing %q, expecting string array/slice format on each entry" , fieldName ) )
return
2019-06-18 04:34:54 +08:00
}
2022-02-23 09:47:04 +08:00
tagFilter := models . TagFilter { Name : name }
for _ , elem := range ary . Value {
if str , ok := elem . ( * ast . String ) ; ok {
tagFilter . Filter = append ( tagFilter . Filter , str . Value )
}
}
* target = append ( * target , tagFilter )
2019-06-18 04:34:54 +08:00
}
}
}
}
2016-02-06 08:36:35 +08:00
}
2020-11-04 05:40:57 +08:00
func ( c * Config ) getFieldStringMap ( tbl * ast . Table , fieldName string , target * map [ string ] string ) {
* target = map [ string ] string { }
if node , ok := tbl . Fields [ fieldName ] ; ok {
if subtbl , ok := node . ( * ast . Table ) ; ok {
for name , val := range subtbl . Fields {
if kv , ok := val . ( * ast . KeyValue ) ; ok {
if str , ok := kv . Value . ( * ast . String ) ; ok {
( * target ) [ name ] = str . Value
2019-04-06 05:46:12 +08:00
}
}
}
}
}
2016-02-11 06:50:07 +08:00
}
2020-11-04 05:40:57 +08:00
// keys returns the key set of m as a slice, in unspecified order. The result
// is never nil, even for an empty map.
func keys(m map[string]bool) []string {
	result := make([]string, 0, len(m))
	for k := range m {
		result = append(result, k)
	}
	return result
}
2020-03-14 06:04:23 +08:00
2020-11-04 05:40:57 +08:00
// hasErrs reports whether any errors were accumulated while decoding tables.
func (c *Config) hasErrs() bool {
	return len(c.errs) > 0
}
2020-03-14 06:04:23 +08:00
2020-11-04 05:40:57 +08:00
func ( c * Config ) firstErr ( ) error {
if len ( c . errs ) == 0 {
return nil
2020-03-14 06:04:23 +08:00
}
2020-11-04 05:40:57 +08:00
return c . errs [ 0 ]
}
2020-03-14 06:04:23 +08:00
2020-11-04 05:40:57 +08:00
// addError records err on the config, prefixed with the TOML line and column
// of tbl so the user can locate the offending configuration entry.
func (c *Config) addError(tbl *ast.Table, err error) {
	c.errs = append(c.errs, fmt.Errorf("line %d:%d: %w", tbl.Line, tbl.Position, err))
}
2020-06-05 22:43:43 +08:00
// unwrappable lets you retrieve the original telegraf.Processor from the
// StreamingProcessor. This is necessary because the toml Unmarshaller won't
// look inside composed types.
type unwrappable interface {
	// Unwrap returns the wrapped (non-streaming) processor.
	Unwrap() telegraf.Processor
}