package config

import (
	"bytes"
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"log"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"reflect"
	"regexp"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/coreos/go-semver/semver"
	"github.com/google/uuid"
	"github.com/influxdata/toml"
	"github.com/influxdata/toml/ast"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/models"
	"github.com/influxdata/telegraf/plugins/aggregators"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/outputs"
	"github.com/influxdata/telegraf/plugins/parsers"
	"github.com/influxdata/telegraf/plugins/processors"
	"github.com/influxdata/telegraf/plugins/serializers"
)

var (
	// envVarRe is a regex to find environment variables in the config file
	envVarRe = regexp.MustCompile(`\${(\w+)}|\$(\w+)`)

	envVarEscaper = strings.NewReplacer(
		`"`, `\"`,
		`\`, `\\`,
	)

	httpLoadConfigRetryInterval = 10 * time.Second

	// fetchURLRe is a regex to determine whether the requested file should
	// be fetched from a remote or read from the filesystem.
	fetchURLRe = regexp.MustCompile(`^\w+://`)
)

// Config specifies the URL/user/password for the database that telegraf
// will be logging to, as well as all the plugins that the user has
// specified
type Config struct {
	toml              *toml.Config
	errs              []error // config load errors.
	UnusedFields      map[string]bool
	unusedFieldsMutex *sync.Mutex

	Tags          map[string]string
	InputFilters  []string
	OutputFilters []string

	Agent       *AgentConfig
	Inputs      []*models.RunningInput
	Outputs     []*models.RunningOutput
	Aggregators []*models.RunningAggregator
	// Processors have a slice wrapper type because they need to be sorted
	Processors    models.RunningProcessors
	AggProcessors models.RunningProcessors
	// Parsers are created by their inputs during gather. Config doesn't keep track of them
	// like the other plugins because they need to be garbage collected (See issue #11809)

	Deprecations map[string][]int64
	version      *semver.Version
}

// NewConfig creates a new struct to hold the Telegraf config.
// For historical reasons, it holds the actual instances of the running plugins
// once the configuration is parsed.
func NewConfig() *Config {
	c := &Config{
		UnusedFields:      map[string]bool{},
		unusedFieldsMutex: &sync.Mutex{},

		// Agent defaults:
		Agent: &AgentConfig{
			Interval:                   Duration(10 * time.Second),
			RoundInterval:              true,
			FlushInterval:              Duration(10 * time.Second),
			LogTarget:                  "file",
			LogfileRotationMaxArchives: 5,
		},

		Tags:          make(map[string]string),
		Inputs:        make([]*models.RunningInput, 0),
		Outputs:       make([]*models.RunningOutput, 0),
		Processors:    make([]*models.RunningProcessor, 0),
		AggProcessors: make([]*models.RunningProcessor, 0),
		InputFilters:  make([]string, 0),
		OutputFilters: make([]string, 0),
		Deprecations:  make(map[string][]int64),
	}

	// Handle unknown version
	version := internal.Version
	if version == "" || version == "unknown" {
		version = "0.0.0-unknown"
	}
	c.version = semver.New(version)

	tomlCfg := &toml.Config{
		NormFieldName: toml.DefaultConfig.NormFieldName,
		FieldToKey:    toml.DefaultConfig.FieldToKey,
		MissingField:  c.missingTomlField,
	}
	c.toml = tomlCfg

	return c
}

// AgentConfig defines configuration that will be used by the Telegraf agent
type AgentConfig struct {
	// Interval at which to gather information
	Interval Duration

	// RoundInterval rounds collection interval to 'interval'.
	// ie, if Interval=10s then always collect on :00, :10, :20, etc.
	RoundInterval bool

	// Collected metrics are rounded to the precision specified. Precision is
	// specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s).
	// Valid time units are "ns", "us" (or "µs"), "ms", "s".
	//
	// By default, or when set to "0s", precision will be set to the same
	// timestamp order as the collection interval, with the maximum being 1s:
	//   ie, when interval = "10s", precision will be "1s"
	//       when interval = "250ms", precision will be "1ms"
	//
	// Precision will NOT be used for service inputs. It is up to each individual
	// service input to set the timestamp at the appropriate precision.
	Precision Duration

	// CollectionJitter is used to jitter the collection by a random amount.
	// Each plugin will sleep for a random time within jitter before collecting.
	// This can be used to avoid many plugins querying things like sysfs at the
	// same time, which can have a measurable effect on the system.
	CollectionJitter Duration

	// CollectionOffset is used to shift the collection by the given amount.
	// This can be used to avoid many plugins querying constrained devices
	// at the same time by manually scheduling them in time.
	CollectionOffset Duration

	// FlushInterval is the interval at which to flush data
	FlushInterval Duration

	// FlushJitter jitters the flush interval by a random amount.
	// This is primarily to avoid large write spikes for users running a large
	// number of telegraf instances.
	// ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
	FlushJitter Duration

	// MetricBatchSize is the maximum number of metrics that are written to an
	// output plugin in one call.
	MetricBatchSize int

	// MetricBufferLimit is the max number of metrics that each output plugin
	// will cache. The buffer is cleared when a successful write occurs. When
	// full, the oldest metrics will be overwritten. This number should be a
	// multiple of MetricBatchSize. Due to the current implementation, it
	// cannot be less than 2 times MetricBatchSize.
	MetricBufferLimit int

	// FlushBufferWhenFull tells Telegraf to flush the metric buffer whenever
	// it fills up, regardless of FlushInterval. Setting this option to true
	// does _not_ deactivate FlushInterval.
	FlushBufferWhenFull bool `toml:"flush_buffer_when_full" deprecated:"0.13.0;2.0.0;option is ignored"`

	// TODO(cam): Remove UTC and parameter, they are no longer
	// valid for the agent config. Leaving them here for now for backwards-
	// compatibility
	UTC bool `toml:"utc" deprecated:"1.0.0;option is ignored"`

	// Debug is the option for running in debug mode
	Debug bool `toml:"debug"`

	// Quiet is the option for running in quiet mode
	Quiet bool `toml:"quiet"`

	// LogTarget controls the destination for logs and can be one of "file",
	// "stderr" or, on Windows, "eventlog". When set to "file", the output file
	// is determined by the "logfile" setting.
	LogTarget string `toml:"logtarget"`

	// Logfile is the name of the file to be logged to when using the "file"
	// logtarget. If set to the empty string then logs are written to stderr.
	Logfile string `toml:"logfile"`

	// The file will be rotated after the time interval specified. When set
	// to 0 no time based rotation is performed.
	LogfileRotationInterval Duration `toml:"logfile_rotation_interval"`

	// The logfile will be rotated when it becomes larger than the specified
	// size. When set to 0 no size based rotation is performed.
	LogfileRotationMaxSize Size `toml:"logfile_rotation_max_size"`

	// Maximum number of rotated archives to keep, any older logs are deleted.
	// If set to -1, no archives are removed.
	LogfileRotationMaxArchives int `toml:"logfile_rotation_max_archives"`

	// Pick a timezone to use when logging or type 'local' for local time.
	LogWithTimezone string `toml:"log_with_timezone"`

	Hostname     string
	OmitHostname bool

	// Method for translating SNMP objects. 'netsnmp' to call external programs,
	// 'gosmi' to use the built-in library.
	SnmpTranslator string `toml:"snmp_translator"`
}

// InputNames returns a list of strings of the configured inputs.
func (c *Config) InputNames() []string {
	var name []string
	for _, input := range c.Inputs {
		name = append(name, input.Config.Name)
	}
	return PluginNameCounts(name)
}

// AggregatorNames returns a list of strings of the configured aggregators.
func (c *Config) AggregatorNames() []string {
	var name []string
	for _, aggregator := range c.Aggregators {
		name = append(name, aggregator.Config.Name)
	}
	return PluginNameCounts(name)
}

// ProcessorNames returns a list of strings of the configured processors.
func (c *Config) ProcessorNames() []string {
	var name []string
	for _, processor := range c.Processors {
		name = append(name, processor.Config.Name)
	}
	return PluginNameCounts(name)
}

// OutputNames returns a list of strings of the configured outputs.
func (c *Config) OutputNames() []string {
	var name []string
	for _, output := range c.Outputs {
		name = append(name, output.Config.Name)
	}
	return PluginNameCounts(name)
}

// PluginNameCounts returns a list of sorted plugin names and their count
func PluginNameCounts(plugins []string) []string {
	names := make(map[string]int)
	for _, plugin := range plugins {
		names[plugin]++
	}

	var namecount []string
	for name, count := range names {
		if count == 1 {
			namecount = append(namecount, name)
		} else {
			namecount = append(namecount, fmt.Sprintf("%s (%dx)", name, count))
		}
	}

	sort.Strings(namecount)
	return namecount
}

// ListTags returns a string of tags specified in the config,
// line-protocol style
func (c *Config) ListTags() string {
	var tags []string
	for k, v := range c.Tags {
		tags = append(tags, fmt.Sprintf("%s=%s", k, v))
	}

	sort.Strings(tags)

	return strings.Join(tags, " ")
}

func sliceContains(name string, list []string) bool {
	for _, b := range list {
		if b == name {
			return true
		}
	}
	return false
}

// WalkDirectory collects all toml files that need to be loaded
func WalkDirectory(path string) ([]string, error) {
	var files []string
	walkfn := func(thispath string, info os.FileInfo, _ error) error {
		if info == nil {
			log.Printf("W! Telegraf is not permitted to read %s", thispath)
			return nil
		}

		if info.IsDir() {
			if strings.HasPrefix(info.Name(), "..") {
				// skip Kubernetes mounts, preventing loading the same config twice
				return filepath.SkipDir
			}

			return nil
		}

		name := info.Name()
		if len(name) < 6 || name[len(name)-5:] != ".conf" {
			return nil
		}
		files = append(files, thispath)
		return nil
	}
	return files, filepath.Walk(path, walkfn)
}

// Try to find a default config file at these locations (in order):
//  1. $TELEGRAF_CONFIG_PATH
//  2. $HOME/.telegraf/telegraf.conf
//  3. /etc/telegraf/telegraf.conf
func getDefaultConfigPath() (string, error) {
	envfile := os.Getenv("TELEGRAF_CONFIG_PATH")
	homefile := os.ExpandEnv("${HOME}/.telegraf/telegraf.conf")
	etcfile := "/etc/telegraf/telegraf.conf"
	if runtime.GOOS == "windows" {
		programFiles := os.Getenv("ProgramFiles")
		if programFiles == "" { // Should never happen
			programFiles = `C:\Program Files`
		}
		etcfile = programFiles + `\Telegraf\telegraf.conf`
	}
	for _, path := range []string{envfile, homefile, etcfile} {
		if isURL(path) {
			log.Printf("I! Using config url: %s", path)
			return path, nil
		}
		if _, err := os.Stat(path); err == nil {
			log.Printf("I! Using config file: %s", path)
			return path, nil
		}
	}

	// if we got here, we didn't find a file in a default location
	return "", fmt.Errorf("no config file specified, and could not find one"+
		" in $TELEGRAF_CONFIG_PATH, %s, or %s", homefile, etcfile)
}

// isURL checks whether the string is a valid URL
func isURL(str string) bool {
	u, err := url.Parse(str)
	return err == nil && u.Scheme != "" && u.Host != ""
}

// LoadConfig loads the given config file and applies it to c
func (c *Config) LoadConfig(path string) error {
	var err error
	if path == "" {
		if path, err = getDefaultConfigPath(); err != nil {
			return err
		}
	}
	data, err := LoadConfigFile(path)
	if err != nil {
		return fmt.Errorf("error loading config file %s: %w", path, err)
	}

	if err = c.LoadConfigData(data); err != nil {
		return fmt.Errorf("error loading config file %s: %w", path, err)
	}
	return nil
}
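
// LoadAll loads the given configuration files in order and applies them to c.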
func (c *Config) LoadAll(configFiles ...string) error {
	for _, fConfig := range configFiles {
		if err := c.LoadConfig(fConfig); err != nil {
			return err
		}
	}
	return nil
}
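
// A configuration document consists of an optional [agent] table, optional
// [global_tags]/[tags] tables, and any number of [[inputs.*]], [[outputs.*]],
// [[processors.*]] and [[aggregators.*]] plugin tables. A minimal example
// (plugin names and options are illustrative) looks like:
//
//	[agent]
//	  interval = "10s"
//
//	[[inputs.cpu]]
//
//	[[outputs.file]]
//	  files = ["stdout"]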

// LoadConfigData loads TOML-formatted config data
func (c *Config) LoadConfigData(data []byte) error {
	// Create unique identifier for plugins to identify when using multiple configurations
	id := uuid.New()

	tbl, err := parseConfig(data)
	if err != nil {
		return fmt.Errorf("error parsing data: %s", err)
	}

	// Parse tags tables first:
	for _, tableName := range []string{"tags", "global_tags"} {
		if val, ok := tbl.Fields[tableName]; ok {
			subTable, ok := val.(*ast.Table)
			if !ok {
				return fmt.Errorf("invalid configuration, bad table name %q", tableName)
			}
			if err = c.toml.UnmarshalTable(subTable, c.Tags); err != nil {
				return fmt.Errorf("error parsing table name %q: %s", tableName, err)
			}
		}
	}

	// Parse agent table:
	if val, ok := tbl.Fields["agent"]; ok {
		subTable, ok := val.(*ast.Table)
		if !ok {
			return fmt.Errorf("invalid configuration, error parsing agent table")
		}
		if err = c.toml.UnmarshalTable(subTable, c.Agent); err != nil {
			return fmt.Errorf("error parsing [agent]: %w", err)
		}
	}

	if !c.Agent.OmitHostname {
		if c.Agent.Hostname == "" {
			hostname, err := os.Hostname()
			if err != nil {
				return err
			}
			c.Agent.Hostname = hostname
		}

		c.Tags["host"] = c.Agent.Hostname
	}

	// Set snmp agent translator default
	if c.Agent.SnmpTranslator == "" {
		c.Agent.SnmpTranslator = "netsnmp"
	}

	if len(c.UnusedFields) > 0 {
		return fmt.Errorf("line %d: configuration specified the fields %q, but they weren't used", tbl.Line, keys(c.UnusedFields))
	}

	// Parse all the rest of the plugins:
	for name, val := range tbl.Fields {
		subTable, ok := val.(*ast.Table)
		if !ok {
			return fmt.Errorf("invalid configuration, error parsing field %q as table", name)
		}

		switch name {
		case "agent", "global_tags", "tags":
		case "outputs":
			for pluginName, pluginVal := range subTable.Fields {
				switch pluginSubTable := pluginVal.(type) {
				// legacy [outputs.influxdb] support
				case *ast.Table:
					if err = c.addOutput(pluginName, pluginSubTable); err != nil {
						return fmt.Errorf("error parsing %s, %w", pluginName, err)
					}
				case []*ast.Table:
					for _, t := range pluginSubTable {
						if err = c.addOutput(pluginName, t); err != nil {
							return fmt.Errorf("error parsing %s array, %w", pluginName, err)
						}
					}
				default:
					return fmt.Errorf("unsupported config format: %s", pluginName)
				}
				if len(c.UnusedFields) > 0 {
					return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields))
				}
			}
		case "inputs", "plugins":
			for pluginName, pluginVal := range subTable.Fields {
				switch pluginSubTable := pluginVal.(type) {
				// legacy [inputs.cpu] support
				case *ast.Table:
					if err = c.addInput(pluginName, pluginSubTable); err != nil {
						return fmt.Errorf("error parsing %s, %w", pluginName, err)
					}
				case []*ast.Table:
					for _, t := range pluginSubTable {
						if err = c.addInput(pluginName, t); err != nil {
							return fmt.Errorf("error parsing %s, %w", pluginName, err)
						}
					}
				default:
					return fmt.Errorf("unsupported config format: %s", pluginName)
				}
				if len(c.UnusedFields) > 0 {
					return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields))
				}
			}
		case "processors":
			for pluginName, pluginVal := range subTable.Fields {
				switch pluginSubTable := pluginVal.(type) {
				case []*ast.Table:
					for _, t := range pluginSubTable {
						if err = c.addProcessor(id.String(), pluginName, t); err != nil {
							return fmt.Errorf("error parsing %s, %w", pluginName, err)
						}
					}
				default:
					return fmt.Errorf("unsupported config format: %s", pluginName)
				}
				if len(c.UnusedFields) > 0 {
					return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields))
				}
			}
		case "aggregators":
			for pluginName, pluginVal := range subTable.Fields {
				switch pluginSubTable := pluginVal.(type) {
				case []*ast.Table:
					for _, t := range pluginSubTable {
						if err = c.addAggregator(pluginName, t); err != nil {
							return fmt.Errorf("error parsing %s, %s", pluginName, err)
						}
					}
				default:
					return fmt.Errorf("unsupported config format: %s", pluginName)
				}
				if len(c.UnusedFields) > 0 {
					return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields))
				}
			}
		// Assume it's an input for legacy config file support if no other
		// identifiers are present
		default:
			if err = c.addInput(name, subTable); err != nil {
				return fmt.Errorf("error parsing %s, %s", name, err)
			}
		}
	}

	if len(c.Processors) > 1 {
		sort.Sort(c.Processors)
	}

	return nil
}

// trimBOM trims the Byte-Order-Marks from the beginning of the file.
// this is for Windows compatibility only.
// see https://github.com/influxdata/telegraf/issues/1378
func trimBOM(f []byte) []byte {
	return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf"))
}

// escapeEnv escapes a value for inserting into a TOML string.
func escapeEnv(value string) string {
	return envVarEscaper.Replace(value)
}
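
// LoadConfigFile returns the raw bytes of the given configuration source,
// fetching it via HTTP(S) when the argument looks like a URL and reading it
// from the local filesystem otherwise.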
func LoadConfigFile(config string) ([]byte, error) {
	if fetchURLRe.MatchString(config) {
		u, err := url.Parse(config)
		if err != nil {
			return nil, err
		}

		switch u.Scheme {
		case "https", "http":
			return fetchConfig(u)
		default:
			return nil, fmt.Errorf("scheme %q not supported", u.Scheme)
		}
	}

	// If it isn't a https scheme, try it as a file
	buffer, err := os.ReadFile(config)
	if err != nil {
		return nil, err
	}

	mimeType := http.DetectContentType(buffer)
	if !strings.Contains(mimeType, "text/plain") {
		return nil, fmt.Errorf("provided config is not a TOML file: %s", config)
	}

	return buffer, nil
}
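
// fetchConfig downloads a remote configuration via HTTP(S), retrying requests
// that do not return 200 OK and sending the content of the INFLUX_TOKEN
// environment variable as an Authorization token when it is set.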
func fetchConfig(u *url.URL) ([]byte, error) {
	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		return nil, err
	}

	if v, exists := os.LookupEnv("INFLUX_TOKEN"); exists {
		req.Header.Add("Authorization", "Token "+v)
	}
	req.Header.Add("Accept", "application/toml")
	req.Header.Set("User-Agent", internal.ProductToken())

	retries := 3
	for i := 0; i <= retries; i++ {
		body, err, retry := func() ([]byte, error, bool) {
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				return nil, fmt.Errorf("retry %d of %d failed connecting to HTTP config server %s", i, retries, err), false
			}
			defer resp.Body.Close()
			if resp.StatusCode != http.StatusOK {
				if i < retries {
					log.Printf("Error getting HTTP config. Retry %d of %d in %s. Status=%d", i, retries, httpLoadConfigRetryInterval, resp.StatusCode)
					return nil, nil, true
				}
				return nil, fmt.Errorf("retry %d of %d failed to retrieve remote config: %s", i, retries, resp.Status), false
			}
			body, err := io.ReadAll(resp.Body)
			return body, err, false
		}()

		if err != nil {
			return nil, err
		}
		if retry {
			time.Sleep(httpLoadConfigRetryInterval)
			continue
		}
		return body, err
	}

	return nil, nil
}
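
// Values of the form ${VAR} or $VAR in the configuration are replaced with
// the content of the corresponding environment variable before the TOML is
// parsed, for example (variable name is illustrative):
//
//	token = "${INFLUX_TOKEN}"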

// parseConfig loads a TOML configuration from a provided path and
// returns the AST produced from the TOML parser. When loading the file, it
// will find environment variables and replace them.
func parseConfig(contents []byte) (*ast.Table, error) {
	contents = trimBOM(contents)

	parameters := envVarRe.FindAllSubmatch(contents, -1)
	for _, parameter := range parameters {
		if len(parameter) != 3 {
			continue
		}

		var envVar []byte
		if parameter[1] != nil {
			envVar = parameter[1]
		} else if parameter[2] != nil {
			envVar = parameter[2]
		} else {
			continue
		}

		envVal, ok := os.LookupEnv(strings.TrimPrefix(string(envVar), "$"))
		if ok {
			envVal = escapeEnv(envVal)
			contents = bytes.Replace(contents, parameter[0], []byte(envVal), 1)
		}
	}

	return toml.Parse(contents)
}
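
// addAggregator instantiates the named aggregator plugin, unmarshals its
// configuration from table and appends the running aggregator to c.Aggregators.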
func (c *Config) addAggregator(name string, table *ast.Table) error {
	creator, ok := aggregators.Aggregators[name]
	if !ok {
		// Handle removed, deprecated plugins
		if di, deprecated := aggregators.Deprecations[name]; deprecated {
			printHistoricPluginDeprecationNotice("aggregators", name, di)
			return fmt.Errorf("plugin deprecated")
		}
		return fmt.Errorf("undefined but requested aggregator: %s", name)
	}
	aggregator := creator()

	conf, err := c.buildAggregator(name, table)
	if err != nil {
		return err
	}

	if err := c.toml.UnmarshalTable(table, aggregator); err != nil {
		return err
	}

	if err := c.printUserDeprecation("aggregators", name, aggregator); err != nil {
		return err
	}

	c.Aggregators = append(c.Aggregators, models.NewRunningAggregator(aggregator, conf))
	return nil
}
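
// probeParser reports whether a parser is registered for the data format
// requested by the given parent plugin table, without building a running parser.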
func (c *Config) probeParser(parentcategory string, parentname string, table *ast.Table) bool {
	var dataformat string
	c.getFieldString(table, "data_format", &dataformat)
	if dataformat == "" {
		dataformat = setDefaultParser(parentcategory, parentname)
	}

	creator, ok := parsers.Parsers[dataformat]
	if !ok {
		return false
	}

	// Try to parse the options to detect if any of them is misspelled
	// We don't actually use the parser, so no need to check the error.
	parser := creator("")
	_ = c.toml.UnmarshalTable(table, parser)

	return true
}
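
// addParser builds and initializes the parser requested via "data_format" for
// the given parent plugin and returns it wrapped as a running parser.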
func (c *Config) addParser(parentcategory, parentname string, table *ast.Table) (*models.RunningParser, error) {
	var dataformat string
	c.getFieldString(table, "data_format", &dataformat)
	if dataformat == "" {
		dataformat = setDefaultParser(parentcategory, parentname)
	}

	var influxParserType string
	c.getFieldString(table, "influx_parser_type", &influxParserType)
	if dataformat == "influx" && influxParserType == "upstream" {
		dataformat = "influx_upstream"
	}

	creator, ok := parsers.Parsers[dataformat]
	if !ok {
		return nil, fmt.Errorf("undefined but requested parser: %s", dataformat)
	}
	parser := creator(parentname)

	conf := c.buildParser(parentname, table)
	if err := c.toml.UnmarshalTable(table, parser); err != nil {
		return nil, err
	}

	running := models.NewRunningParser(parser, conf)
	err := running.Init()
	return running, err
}
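
// addProcessor instantiates the named processor twice, one instance running
// before the aggregators and one after, and appends them to c.Processors and
// c.AggProcessors respectively.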
func (c *Config) addProcessor(id string, name string, table *ast.Table) error {
	creator, ok := processors.Processors[name]
	if !ok {
		// Handle removed, deprecated plugins
		if di, deprecated := processors.Deprecations[name]; deprecated {
			printHistoricPluginDeprecationNotice("processors", name, di)
			return fmt.Errorf("plugin deprecated")
		}
		return fmt.Errorf("undefined but requested processor: %s", name)
	}

	// For processors with parsers we need to compute the set of
	// options that is not covered by both, the parser and the processor.
	// We achieve this by keeping a local book of missing entries
	// that counts the number of misses. In case we have a parser
	// for the processor both need to miss the entry. We count the
	// missing entries at the end.
	missCount := make(map[string]int)
	missCountThreshold := 0
	c.setLocalMissingTomlFieldTracker(missCount)
	defer c.resetMissingTomlFieldTracker()

	processorConfig, err := c.buildProcessor(id, name, table)
	if err != nil {
		return err
	}

	// Setup the processor running before the aggregators
	processorBefore, hasParser, err := c.setupProcessor(processorConfig.Name, creator, table)
	if err != nil {
		return err
	}
	rf := models.NewRunningProcessor(processorBefore, processorConfig)
	c.Processors = append(c.Processors, rf)

	// Setup another (new) processor instance running after the aggregator
	processorAfter, _, err := c.setupProcessor(processorConfig.Name, creator, table)
	if err != nil {
		return err
	}
	rf = models.NewRunningProcessor(processorAfter, processorConfig)
	c.AggProcessors = append(c.AggProcessors, rf)

	// Check the number of misses against the threshold
	if hasParser {
		missCountThreshold = 2
	}
	for key, count := range missCount {
		if count <= missCountThreshold {
			continue
		}
		if err := c.missingTomlField(nil, key); err != nil {
			return err
		}
	}

	return nil
}
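
// setupProcessor creates a single streaming processor instance, attaches a
// parser when the (possibly wrapped) plugin accepts one, and unmarshals the
// plugin options from table. The second return value reports whether a parser
// was attached.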
func (c *Config) setupProcessor(name string, creator processors.StreamingCreator, table *ast.Table) (telegraf.StreamingProcessor, bool, error) {
	var hasParser bool

	streamingProcessor := creator()

	var processor interface{}
	if p, ok := streamingProcessor.(unwrappable); ok {
		processor = p.Unwrap()
	} else {
		processor = streamingProcessor
	}

	// If the (underlying) processor has a SetParser or SetParserFunc function,
	// it can accept arbitrary data-formats, so build the requested parser and
	// set it.
	if t, ok := processor.(telegraf.ParserPlugin); ok {
		parser, err := c.addParser("processors", name, table)
		if err != nil {
			return nil, true, fmt.Errorf("adding parser failed: %w", err)
		}
		t.SetParser(parser)
		hasParser = true
	}

	if t, ok := processor.(telegraf.ParserFuncPlugin); ok {
		if !c.probeParser("processors", name, table) {
			return nil, false, errors.New("parser not found")
		}
		t.SetParserFunc(func() (telegraf.Parser, error) {
			return c.addParser("processors", name, table)
		})
		hasParser = true
	}

	if err := c.toml.UnmarshalTable(table, processor); err != nil {
		return nil, hasParser, fmt.Errorf("unmarshalling failed: %w", err)
	}

	err := c.printUserDeprecation("processors", name, processor)
	return streamingProcessor, hasParser, err
}
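
// addOutput instantiates the named output plugin, attaches a serializer when
// the plugin supports one, and appends the running output to c.Outputs.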
func (c *Config) addOutput(name string, table *ast.Table) error {
	if len(c.OutputFilters) > 0 && !sliceContains(name, c.OutputFilters) {
		return nil
	}

	creator, ok := outputs.Outputs[name]
	if !ok {
		// Handle removed, deprecated plugins
		if di, deprecated := outputs.Deprecations[name]; deprecated {
			printHistoricPluginDeprecationNotice("outputs", name, di)
			return fmt.Errorf("plugin deprecated")
		}
		return fmt.Errorf("undefined but requested output: %s", name)
	}
	output := creator()

	// If the output has a SetSerializer function, then this means it can write
	// arbitrary types of output, so build the serializer and set it.
	if t, ok := output.(serializers.SerializerOutput); ok {
		serializer, err := c.buildSerializer(table)
		if err != nil {
			return err
		}
		t.SetSerializer(serializer)
	}

	outputConfig, err := c.buildOutput(name, table)
	if err != nil {
		return err
	}

	if err := c.toml.UnmarshalTable(table, output); err != nil {
		return err
	}

	if err := c.printUserDeprecation("outputs", name, output); err != nil {
		return err
	}

	if c, ok := interface{}(output).(interface{ TLSConfig() (*tls.Config, error) }); ok {
		if _, err := c.TLSConfig(); err != nil {
			return err
		}
	}

	ro := models.NewRunningOutput(output, outputConfig, c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit)
	c.Outputs = append(c.Outputs, ro)
	return nil
}
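
// addInput instantiates the named input plugin, attaches a parser when the
// plugin accepts one, and appends the running input to c.Inputs.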
func (c *Config) addInput(name string, table *ast.Table) error {
	if len(c.InputFilters) > 0 && !sliceContains(name, c.InputFilters) {
		return nil
	}

	// For inputs with parsers we need to compute the set of
	// options that is not covered by both, the parser and the input.
	// We achieve this by keeping a local book of missing entries
	// that counts the number of misses. In case we have a parser
	// for the input both need to miss the entry. We count the
	// missing entries at the end.
	missCount := make(map[string]int)
	missCountThreshold := 0
	c.setLocalMissingTomlFieldTracker(missCount)
	defer c.resetMissingTomlFieldTracker()

	creator, ok := inputs.Inputs[name]
	if !ok {
		// Handle removed, deprecated plugins
		if di, deprecated := inputs.Deprecations[name]; deprecated {
			printHistoricPluginDeprecationNotice("inputs", name, di)
			return fmt.Errorf("plugin deprecated")
		}
		return fmt.Errorf("undefined but requested input: %s", name)
	}
	input := creator()

	// If the input has a SetParser or SetParserFunc function, it can accept
	// arbitrary data-formats, so build the requested parser and set it.
	if t, ok := input.(telegraf.ParserPlugin); ok {
		missCountThreshold = 1
		parser, err := c.addParser("inputs", name, table)
		if err != nil {
			return fmt.Errorf("adding parser failed: %w", err)
		}
		t.SetParser(parser)
	}

	// Keep the old interface for backward compatibility
	if t, ok := input.(parsers.ParserInput); ok {
		// DEPRECATED: Please switch your plugin to telegraf.ParserPlugin.
		missCountThreshold = 1
		parser, err := c.addParser("inputs", name, table)
		if err != nil {
			return fmt.Errorf("adding parser failed: %w", err)
		}
		t.SetParser(parser)
	}

	if t, ok := input.(telegraf.ParserFuncPlugin); ok {
		missCountThreshold = 1
		if !c.probeParser("inputs", name, table) {
			return errors.New("parser not found")
		}
		t.SetParserFunc(func() (telegraf.Parser, error) {
			return c.addParser("inputs", name, table)
		})
	}

	if t, ok := input.(parsers.ParserFuncInput); ok {
		// DEPRECATED: Please switch your plugin to telegraf.ParserFuncPlugin.
		missCountThreshold = 1
		if !c.probeParser("inputs", name, table) {
			return errors.New("parser not found")
		}
		t.SetParserFunc(func() (parsers.Parser, error) {
			return c.addParser("inputs", name, table)
		})
	}

	pluginConfig, err := c.buildInput(name, table)
	if err != nil {
		return err
	}

	if err := c.toml.UnmarshalTable(table, input); err != nil {
		return err
	}

	if err := c.printUserDeprecation("inputs", name, input); err != nil {
		return err
	}

	if c, ok := interface{}(input).(interface{ TLSConfig() (*tls.Config, error) }); ok {
		if _, err := c.TLSConfig(); err != nil {
			return err
		}
	}

	rp := models.NewRunningInput(input, pluginConfig)
	rp.SetDefaultTags(c.Tags)
	c.Inputs = append(c.Inputs, rp)

	// Check the number of misses against the threshold
	for key, count := range missCount {
		if count <= missCountThreshold {
			continue
		}
		if err := c.missingTomlField(nil, key); err != nil {
			return err
		}
	}

	return nil
}

// buildAggregator parses Aggregator specific items from the ast.Table,
// builds the filter and returns a
// models.AggregatorConfig to be inserted into models.RunningAggregator
func (c *Config) buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, error) {
	conf := &models.AggregatorConfig{
		Name:   name,
		Delay:  time.Millisecond * 100,
		Period: time.Second * 30,
		Grace:  time.Second * 0,
	}

	c.getFieldDuration(tbl, "period", &conf.Period)
	c.getFieldDuration(tbl, "delay", &conf.Delay)
	c.getFieldDuration(tbl, "grace", &conf.Grace)
	c.getFieldBool(tbl, "drop_original", &conf.DropOriginal)
	c.getFieldString(tbl, "name_prefix", &conf.MeasurementPrefix)
	c.getFieldString(tbl, "name_suffix", &conf.MeasurementSuffix)
	c.getFieldString(tbl, "name_override", &conf.NameOverride)
	c.getFieldString(tbl, "alias", &conf.Alias)

	conf.Tags = make(map[string]string)
	if node, ok := tbl.Fields["tags"]; ok {
		if subtbl, ok := node.(*ast.Table); ok {
			if err := c.toml.UnmarshalTable(subtbl, conf.Tags); err != nil {
				return nil, fmt.Errorf("could not parse tags for input %s", name)
			}
		}
	}

	if c.hasErrs() {
		return nil, c.firstErr()
	}

	var err error
	conf.Filter, err = c.buildFilter(tbl)
	if err != nil {
		return conf, err
	}
	return conf, nil
}

// buildParser parses Parser specific items from the ast.Table and returns a
// models.ParserConfig to be inserted into models.RunningParser
func (c *Config) buildParser(name string, tbl *ast.Table) *models.ParserConfig {
	var dataFormat string
	c.getFieldString(tbl, "data_format", &dataFormat)

	conf := &models.ParserConfig{
		Parent:     name,
		DataFormat: dataFormat,
	}

	return conf
}

// buildProcessor parses Processor specific items from the ast.Table,
// builds the filter and returns a
// models.ProcessorConfig to be inserted into models.RunningProcessor
func (c *Config) buildProcessor(id string, name string, tbl *ast.Table) (*models.ProcessorConfig, error) {
	conf := &models.ProcessorConfig{
		ID:   id,
		Name: name,
		Line: tbl.Line,
	}

	c.getFieldInt64(tbl, "order", &conf.Order)
	c.getFieldString(tbl, "alias", &conf.Alias)

	if c.hasErrs() {
		return nil, c.firstErr()
	}

	var err error
	conf.Filter, err = c.buildFilter(tbl)
	if err != nil {
		return conf, err
	}
	return conf, nil
}

// buildFilter builds a Filter
// (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to
// be inserted into the models.OutputConfig/models.InputConfig
// to be used for glob filtering on tags and measurements
func (c *Config) buildFilter(tbl *ast.Table) (models.Filter, error) {
	f := models.Filter{}

	c.getFieldStringSlice(tbl, "namepass", &f.NamePass)
	c.getFieldStringSlice(tbl, "namedrop", &f.NameDrop)

	c.getFieldStringSlice(tbl, "pass", &f.FieldPass)
	c.getFieldStringSlice(tbl, "fieldpass", &f.FieldPass)

	c.getFieldStringSlice(tbl, "drop", &f.FieldDrop)
	c.getFieldStringSlice(tbl, "fielddrop", &f.FieldDrop)

	c.getFieldTagFilter(tbl, "tagpass", &f.TagPassFilters)
	c.getFieldTagFilter(tbl, "tagdrop", &f.TagDropFilters)

	c.getFieldStringSlice(tbl, "tagexclude", &f.TagExclude)
	c.getFieldStringSlice(tbl, "taginclude", &f.TagInclude)

	if c.hasErrs() {
		return f, c.firstErr()
	}

	if err := f.Compile(); err != nil {
		return f, err
	}

	return f, nil
}

// buildInput parses input specific items from the ast.Table,
// builds the filter and returns a
// models.InputConfig to be inserted into models.RunningInput
func (c *Config) buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
	cp := &models.InputConfig{Name: name}

	c.getFieldDuration(tbl, "interval", &cp.Interval)
	c.getFieldDuration(tbl, "precision", &cp.Precision)
	c.getFieldDuration(tbl, "collection_jitter", &cp.CollectionJitter)
	c.getFieldDuration(tbl, "collection_offset", &cp.CollectionOffset)
	c.getFieldString(tbl, "name_prefix", &cp.MeasurementPrefix)
	c.getFieldString(tbl, "name_suffix", &cp.MeasurementSuffix)
	c.getFieldString(tbl, "name_override", &cp.NameOverride)
	c.getFieldString(tbl, "alias", &cp.Alias)

	cp.Tags = make(map[string]string)
	if node, ok := tbl.Fields["tags"]; ok {
		if subtbl, ok := node.(*ast.Table); ok {
			if err := c.toml.UnmarshalTable(subtbl, cp.Tags); err != nil {
				return nil, fmt.Errorf("could not parse tags for input %s", name)
			}
		}
	}

	if c.hasErrs() {
		return nil, c.firstErr()
	}

	var err error
	cp.Filter, err = c.buildFilter(tbl)
	if err != nil {
		return cp, err
	}

	return cp, nil
}

// buildSerializer grabs the necessary entries from the ast.Table for creating
// a serializers.Serializer object, and creates it, which can then be added onto
// an Output object.
func (c *Config) buildSerializer(tbl *ast.Table) (serializers.Serializer, error) {
	sc := &serializers.Config{TimestampUnits: 1 * time.Second}

	c.getFieldString(tbl, "data_format", &sc.DataFormat)
	if sc.DataFormat == "" {
		sc.DataFormat = "influx"
	}

	c.getFieldString(tbl, "prefix", &sc.Prefix)
	c.getFieldString(tbl, "template", &sc.Template)
	c.getFieldStringSlice(tbl, "templates", &sc.Templates)
	c.getFieldString(tbl, "carbon2_format", &sc.Carbon2Format)
	c.getFieldString(tbl, "carbon2_sanitize_replace_char", &sc.Carbon2SanitizeReplaceChar)
	c.getFieldBool(tbl, "csv_column_prefix", &sc.CSVPrefix)
	c.getFieldBool(tbl, "csv_header", &sc.CSVHeader)
	c.getFieldString(tbl, "csv_separator", &sc.CSVSeparator)
	c.getFieldString(tbl, "csv_timestamp_format", &sc.TimestampFormat)
	c.getFieldInt(tbl, "influx_max_line_bytes", &sc.InfluxMaxLineBytes)
	c.getFieldBool(tbl, "influx_sort_fields", &sc.InfluxSortFields)
	c.getFieldBool(tbl, "influx_uint_support", &sc.InfluxUintSupport)
	c.getFieldBool(tbl, "graphite_tag_support", &sc.GraphiteTagSupport)
	c.getFieldString(tbl, "graphite_tag_sanitize_mode", &sc.GraphiteTagSanitizeMode)
	c.getFieldString(tbl, "graphite_separator", &sc.GraphiteSeparator)
	c.getFieldDuration(tbl, "json_timestamp_units", &sc.TimestampUnits)
	c.getFieldString(tbl, "json_timestamp_format", &sc.TimestampFormat)
	c.getFieldString(tbl, "json_transformation", &sc.Transformation)
	c.getFieldBool(tbl, "splunkmetric_hec_routing", &sc.HecRouting)
	c.getFieldBool(tbl, "splunkmetric_multimetric", &sc.SplunkmetricMultiMetric)
	c.getFieldBool(tbl, "splunkmetric_omit_event_tag", &sc.SplunkmetricOmitEventTag)
	c.getFieldStringSlice(tbl, "wavefront_source_override", &sc.WavefrontSourceOverride)
	c.getFieldBool(tbl, "wavefront_use_strict", &sc.WavefrontUseStrict)
	c.getFieldBool(tbl, "wavefront_disable_prefix_conversion", &sc.WavefrontDisablePrefixConversion)
	c.getFieldBool(tbl, "prometheus_export_timestamp", &sc.PrometheusExportTimestamp)
	c.getFieldBool(tbl, "prometheus_sort_metrics", &sc.PrometheusSortMetrics)
	c.getFieldBool(tbl, "prometheus_string_as_label", &sc.PrometheusStringAsLabel)
	c.getFieldBool(tbl, "prometheus_compact_encoding", &sc.PrometheusCompactEncoding)

	if c.hasErrs() {
		return nil, c.firstErr()
	}

	return serializers.NewSerializer(sc)
}

// buildOutput parses output specific items from the ast.Table,
// builds the filter and returns a
// models.OutputConfig to be inserted into models.RunningOutput
// Note: error exists in the return for future calls that might require error
func (c *Config) buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) {
	filter, err := c.buildFilter(tbl)
	if err != nil {
		return nil, err
	}
	oc := &models.OutputConfig{
		Name:   name,
		Filter: filter,
	}

	// TODO: support FieldPass/FieldDrop on outputs
	c.getFieldDuration(tbl, "flush_interval", &oc.FlushInterval)
	c.getFieldDuration(tbl, "flush_jitter", &oc.FlushJitter)
	c.getFieldInt(tbl, "metric_buffer_limit", &oc.MetricBufferLimit)
	c.getFieldInt(tbl, "metric_batch_size", &oc.MetricBatchSize)
	c.getFieldString(tbl, "alias", &oc.Alias)
	c.getFieldString(tbl, "name_override", &oc.NameOverride)
	c.getFieldString(tbl, "name_suffix", &oc.NameSuffix)
	c.getFieldString(tbl, "name_prefix", &oc.NamePrefix)

	if c.hasErrs() {
		return nil, c.firstErr()
	}

	return oc, nil
}
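
// missingTomlField is called by the TOML decoder for every option it cannot
// map onto the target plugin struct. Options handled elsewhere (filters,
// parser and serializer settings, agent-level options) are ignored; everything
// else is recorded in c.UnusedFields.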
func (c *Config) missingTomlField(_ reflect.Type, key string) error {
	switch key {
	// General options to ignore
	case "alias",
		"collection_jitter", "collection_offset",
		"data_format", "delay", "drop", "drop_original",
		"fielddrop", "fieldpass", "flush_interval", "flush_jitter",
		"grace",
		"interval",
		"lvm", // What is this used for?
		"metric_batch_size", "metric_buffer_limit",
		"name_override", "name_prefix", "name_suffix", "namedrop", "namepass",
		"order",
		"pass", "period", "precision",
		"tagdrop", "tagexclude", "taginclude", "tagpass", "tags":

	// Parser options to ignore
	case "data_type", "influx_parser_type":

	// Serializer options to ignore
	case "prefix", "template", "templates",
		"carbon2_format", "carbon2_sanitize_replace_char",
		"csv_column_prefix", "csv_header", "csv_separator", "csv_timestamp_format",
		"graphite_tag_sanitize_mode", "graphite_tag_support", "graphite_separator",
		"influx_max_line_bytes", "influx_sort_fields", "influx_uint_support",
		"json_timestamp_format", "json_timestamp_units", "json_transformation",
		"prometheus_export_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label",
		"prometheus_compact_encoding",
		"splunkmetric_hec_routing", "splunkmetric_multimetric", "splunkmetric_omit_event_tag",
		"wavefront_disable_prefix_conversion", "wavefront_source_override", "wavefront_use_strict":
	default:
		c.unusedFieldsMutex.Lock()
		c.UnusedFields[key] = true
		c.unusedFieldsMutex.Unlock()
	}
	return nil
}
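
// setLocalMissingTomlFieldTracker temporarily replaces the missing-field
// handler with one that counts how often each unknown key is hit. Keys seen on
// plugin root elements (inputs, outputs, aggregators, processors, parsers) are
// counted per occurrence, while keys on subtables are marked with a high count
// so they can be told apart later.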
func (c *Config) setLocalMissingTomlFieldTracker(counter map[string]int) {
	f := func(t reflect.Type, key string) error {
		// Check if we are in a root element that might share options among
		// each other. Those root elements are plugins of all types.
		// All other elements are subtables of their respective plugin and
		// should just be hit once anyway. Therefore, we mark them with a
		// high number to handle them correctly later.
		pt := reflect.PtrTo(t)
		root := pt.Implements(reflect.TypeOf((*telegraf.Input)(nil)).Elem())
		root = root || pt.Implements(reflect.TypeOf((*telegraf.ServiceInput)(nil)).Elem())
		root = root || pt.Implements(reflect.TypeOf((*telegraf.Output)(nil)).Elem())
		root = root || pt.Implements(reflect.TypeOf((*telegraf.Aggregator)(nil)).Elem())
		root = root || pt.Implements(reflect.TypeOf((*telegraf.Processor)(nil)).Elem())
		root = root || pt.Implements(reflect.TypeOf((*telegraf.Parser)(nil)).Elem())

		c, ok := counter[key]
		if !root {
			counter[key] = 100
		} else if !ok {
			counter[key] = 1
		} else {
			counter[key] = c + 1
		}
		return nil
	}
	c.toml.MissingField = f
}
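
// resetMissingTomlFieldTracker restores the default missing-field handler.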
func (c *Config) resetMissingTomlFieldTracker() {
	c.toml.MissingField = c.missingTomlField
}
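
// getFieldString copies the string value of fieldName from tbl into target;
// the target is left unchanged if the field is absent or not a string.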
func (c *Config) getFieldString(tbl *ast.Table, fieldName string, target *string) {
	if node, ok := tbl.Fields[fieldName]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
				*target = str.Value
			}
		}
	}
}
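
// getFieldDuration parses fieldName as a Go duration string (e.g. "10s") and
// stores the result in the value pointed to by target via reflection; parse
// errors are collected on the Config.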
func (c *Config) getFieldDuration(tbl *ast.Table, fieldName string, target interface{}) {
	if node, ok := tbl.Fields[fieldName]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
				d, err := time.ParseDuration(str.Value)
				if err != nil {
					c.addError(tbl, fmt.Errorf("error parsing duration: %w", err))
					return
				}
				targetVal := reflect.ValueOf(target).Elem()
				targetVal.Set(reflect.ValueOf(d))
			}
		}
	}
}
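
// getFieldBool accepts either a TOML boolean or a string such as "true" or
// "false" for fieldName; any other value type is reported as an error.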
func (c *Config) getFieldBool(tbl *ast.Table, fieldName string, target *bool) {
	var err error
	if node, ok := tbl.Fields[fieldName]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			switch t := kv.Value.(type) {
			case *ast.Boolean:
				*target, err = t.Boolean()
				if err != nil {
					c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value))
					return
				}
			case *ast.String:
				*target, err = strconv.ParseBool(t.Value)
				if err != nil {
					c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value))
					return
				}
			default:
				c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value.Source()))
				return
			}
		}
	}
}
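
// getFieldInt stores an integer field into an int target.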
func (c *Config) getFieldInt(tbl *ast.Table, fieldName string, target *int) {
	if node, ok := tbl.Fields[fieldName]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if iAst, ok := kv.Value.(*ast.Integer); ok {
				i, err := iAst.Int()
				if err != nil {
					c.addError(tbl, fmt.Errorf("unexpected int type %q, expecting int", iAst.Value))
					return
				}
				*target = int(i)
			}
		}
	}
}
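
// getFieldInt64 stores an integer field into an int64 target.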
func (c *Config) getFieldInt64(tbl *ast.Table, fieldName string, target *int64) {
	if node, ok := tbl.Fields[fieldName]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if iAst, ok := kv.Value.(*ast.Integer); ok {
				i, err := iAst.Int()
				if err != nil {
					c.addError(tbl, fmt.Errorf("unexpected int type %q, expecting int", iAst.Value))
					return
				}
				*target = i
			}
		}
	}
}
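
// getFieldStringSlice appends all string elements of the array fieldName to
// target; a non-array value is reported as an error.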
func (c *Config) getFieldStringSlice(tbl *ast.Table, fieldName string, target *[]string) {
	if node, ok := tbl.Fields[fieldName]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			ary, ok := kv.Value.(*ast.Array)
			if !ok {
				c.addError(tbl, fmt.Errorf("found unexpected format while parsing %q, expecting string array/slice format", fieldName))
				return
			}

			for _, elem := range ary.Value {
				if str, ok := elem.(*ast.String); ok {
					*target = append(*target, str.Value)
				}
			}
		}
	}
}
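
// getFieldTagFilter reads a subtable of tag filters where each key names a tag
// and maps to the accepted values, e.g. (illustrative snippet):
//
//	[inputs.cpu.tagpass]
//	  cpu = ["cpu0", "cpu1"]
//
// Each entry becomes a models.TagFilter appended to target.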
func (c *Config) getFieldTagFilter(tbl *ast.Table, fieldName string, target *[]models.TagFilter) {
	if node, ok := tbl.Fields[fieldName]; ok {
		if subtbl, ok := node.(*ast.Table); ok {
			for name, val := range subtbl.Fields {
				if kv, ok := val.(*ast.KeyValue); ok {
					ary, ok := kv.Value.(*ast.Array)
					if !ok {
						c.addError(tbl, fmt.Errorf("found unexpected format while parsing %q, expecting string array/slice format on each entry", fieldName))
						return
					}

					tagFilter := models.TagFilter{Name: name}
					for _, elem := range ary.Value {
						if str, ok := elem.(*ast.String); ok {
							tagFilter.Values = append(tagFilter.Values, str.Value)
						}
					}
					*target = append(*target, tagFilter)
				}
			}
		}
	}
}
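
// keys returns the keys of m in no particular order.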
func keys(m map[string]bool) []string {
	result := []string{}
	for k := range m {
		result = append(result, k)
	}
	return result
}
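
// setDefaultParser returns the default data format for the given plugin
// category and name.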
func setDefaultParser(category string, name string) string {
	// Legacy support, exec plugin originally parsed JSON by default.
	if category == "inputs" && name == "exec" {
		return "json"
	}

	return "influx"
}
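
// hasErrs reports whether any configuration errors have been collected.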
func (c *Config) hasErrs() bool {
	return len(c.errs) > 0
}
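
// firstErr returns the first collected configuration error, or nil if none.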
func (c *Config) firstErr() error {
	if len(c.errs) == 0 {
		return nil
	}

	return c.errs[0]
}
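
// addError records err prefixed with the line and position of the offending
// TOML table.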
func (c *Config) addError(tbl *ast.Table, err error) {
	c.errs = append(c.errs, fmt.Errorf("line %d:%d: %w", tbl.Line, tbl.Position, err))
}

// unwrappable lets you retrieve the original telegraf.Processor from the
// StreamingProcessor. This is necessary because the toml Unmarshaller won't
// look inside composed types.
type unwrappable interface {
	Unwrap() telegraf.Processor
}
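
// For illustration only: a caller holding a telegraf.StreamingProcessor in a
// hypothetical variable sp could recover the wrapped processor with a type
// assertion:
//
//	if u, ok := sp.(unwrappable); ok {
//		original := u.Unwrap() // the composed telegraf.Processor
//		_ = original
//	}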