2015-11-25 05:22:11 +08:00
package config
2015-04-02 00:34:32 +08:00
import (
2016-04-02 03:53:34 +08:00
"bytes"
2015-04-02 00:34:32 +08:00
"fmt"
"io/ioutil"
2015-11-14 07:14:07 +08:00
"log"
2018-11-06 06:19:46 +08:00
"net/http"
"net/url"
2016-04-02 03:53:34 +08:00
"os"
2015-10-19 15:09:36 +08:00
"path/filepath"
2020-11-04 05:40:57 +08:00
"reflect"
2016-04-02 03:53:34 +08:00
"regexp"
2016-08-08 22:55:16 +08:00
"runtime"
2015-04-02 00:34:32 +08:00
"sort"
2016-09-08 22:22:10 +08:00
"strconv"
2015-04-02 00:34:32 +08:00
"strings"
"time"
2016-01-28 05:21:36 +08:00
"github.com/influxdata/telegraf"
2016-01-21 02:57:35 +08:00
"github.com/influxdata/telegraf/internal"
2020-05-05 02:09:10 +08:00
"github.com/influxdata/telegraf/models"
2016-09-08 22:22:10 +08:00
"github.com/influxdata/telegraf/plugins/aggregators"
2016-01-21 02:57:35 +08:00
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/outputs"
2016-02-06 08:36:35 +08:00
"github.com/influxdata/telegraf/plugins/parsers"
2016-09-08 22:22:10 +08:00
"github.com/influxdata/telegraf/plugins/processors"
2016-02-11 06:50:07 +08:00
"github.com/influxdata/telegraf/plugins/serializers"
2016-04-02 03:53:34 +08:00
"github.com/influxdata/toml"
2016-03-01 18:12:28 +08:00
"github.com/influxdata/toml/ast"
2015-04-02 00:34:32 +08:00
)
2016-04-01 07:50:24 +08:00
var (
	// sectionDefaults is the ordered list of top-level config sections
	// emitted when no explicit section filter is given.
	sectionDefaults = []string{"global_tags", "agent", "outputs",
		"processors", "aggregators", "inputs"}

	// Default input plugins printed uncommented in the sample config.
	inputDefaults = []string{"cpu", "mem", "swap", "system", "kernel",
		"processes", "disk", "diskio"}

	// Default output plugins printed uncommented in the sample config.
	outputDefaults = []string{"influxdb"}

	// envVarRe is a regex to find environment variables in the config file.
	// It matches both the ${VAR} and the bare $VAR forms.
	envVarRe = regexp.MustCompile(`\$\{(\w+)\}|\$(\w+)`)

	// envVarEscaper escapes double quotes and backslashes so substituted
	// environment-variable values stay valid inside TOML strings.
	envVarEscaper = strings.NewReplacer(
		`"`, `\"`,
		`\`, `\\`,
	)

	// httpLoadConfigRetryInterval is the wait between retries when loading
	// the config over HTTP.
	httpLoadConfigRetryInterval = 10 * time.Second
)

// Config specifies the URL/user/password for the database that telegraf
// will be logging to, as well as all the plugins that the user has
// specified
type Config struct {
	toml         *toml.Config    // TOML parser settings; wired up in NewConfig with a custom MissingField hook
	errs         []error         // config load errors.
	UnusedFields map[string]bool // TOML fields seen but not consumed during parsing; reported as an error after load

	Tags          map[string]string // global tags applied to all metrics
	InputFilters  []string          // input plugin names allowed to load (empty = all)
	OutputFilters []string          // output plugin names allowed to load (empty = all)

	Agent       *AgentConfig
	Inputs      []*models.RunningInput
	Outputs     []*models.RunningOutput
	Aggregators []*models.RunningAggregator
	// Processors have a slice wrapper type because they need to be sorted
	Processors    models.RunningProcessors
	AggProcessors models.RunningProcessors
}

// NewConfig creates a new struct to hold the Telegraf config.
// For historical reasons, It holds the actual instances of the running plugins
// once the configuration is parsed.
2015-11-24 07:28:11 +08:00
func NewConfig ( ) * Config {
c := & Config {
2020-11-04 05:40:57 +08:00
UnusedFields : map [ string ] bool { } ,
2015-11-26 09:42:07 +08:00
// Agent defaults:
Agent : & AgentConfig {
2021-04-10 01:15:04 +08:00
Interval : Duration ( 10 * time . Second ) ,
2019-05-04 01:55:11 +08:00
RoundInterval : true ,
2021-04-10 01:15:04 +08:00
FlushInterval : Duration ( 10 * time . Second ) ,
2019-10-23 04:32:03 +08:00
LogTarget : "file" ,
2019-05-04 01:55:11 +08:00
LogfileRotationMaxArchives : 5 ,
2015-11-26 09:42:07 +08:00
} ,
2015-11-25 05:22:11 +08:00
Tags : make ( map [ string ] string ) ,
2016-07-28 19:31:11 +08:00
Inputs : make ( [ ] * models . RunningInput , 0 ) ,
Outputs : make ( [ ] * models . RunningOutput , 0 ) ,
2016-09-08 22:22:10 +08:00
Processors : make ( [ ] * models . RunningProcessor , 0 ) ,
2020-06-05 22:43:43 +08:00
AggProcessors : make ( [ ] * models . RunningProcessor , 0 ) ,
2016-01-08 04:39:43 +08:00
InputFilters : make ( [ ] string , 0 ) ,
2015-11-25 05:22:11 +08:00
OutputFilters : make ( [ ] string , 0 ) ,
2015-11-24 07:28:11 +08:00
}
2020-11-04 05:40:57 +08:00
tomlCfg := & toml . Config {
NormFieldName : toml . DefaultConfig . NormFieldName ,
FieldToKey : toml . DefaultConfig . FieldToKey ,
MissingField : c . missingTomlField ,
}
c . toml = tomlCfg
2015-11-24 07:28:11 +08:00
return c
}
2020-11-04 05:40:57 +08:00
// AgentConfig defines configuration that will be used by the Telegraf agent
type AgentConfig struct {
	// Interval at which to gather information
	Interval Duration

	// RoundInterval rounds collection interval to 'interval'.
	// ie, if Interval=10s then always collect on :00, :10, :20, etc.
	RoundInterval bool

	// By default or when set to "0s", precision will be set to the same
	// timestamp order as the collection interval, with the maximum being 1s.
	//   ie, when interval = "10s", precision will be "1s"
	//       when interval = "250ms", precision will be "1ms"
	// Precision will NOT be used for service inputs. It is up to each individual
	// service input to set the timestamp at the appropriate precision.
	Precision Duration

	// CollectionJitter is used to jitter the collection by a random amount.
	// Each plugin will sleep for a random time within jitter before collecting.
	// This can be used to avoid many plugins querying things like sysfs at the
	// same time, which can have a measurable effect on the system.
	CollectionJitter Duration

	// FlushInterval is the Interval at which to flush data
	FlushInterval Duration

	// FlushJitter Jitters the flush interval by a random amount.
	// This is primarily to avoid large write spikes for users running a large
	// number of telegraf instances.
	// ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
	FlushJitter Duration

	// MetricBatchSize is the maximum number of metrics that is wrote to an
	// output plugin in one call.
	MetricBatchSize int

	// MetricBufferLimit is the max number of metrics that each output plugin
	// will cache. The buffer is cleared when a successful write occurs. When
	// full, the oldest metrics will be overwritten. This number should be a
	// multiple of MetricBatchSize. Due to current implementation, this could
	// not be less than 2 times MetricBatchSize.
	MetricBufferLimit int

	// FlushBufferWhenFull tells Telegraf to flush the metric buffer whenever
	// it fills up, regardless of FlushInterval. Setting this option to true
	// does _not_ deactivate FlushInterval.
	FlushBufferWhenFull bool // deprecated in 0.13; has no effect

	// TODO(cam): Remove UTC and parameter, they are no longer
	// valid for the agent config. Leaving them here for now for backwards-
	// compatibility
	UTC bool `toml:"utc"` // deprecated in 1.0.0; has no effect

	// Debug is the option for running in debug mode
	Debug bool `toml:"debug"`

	// Quiet is the option for running in quiet mode
	Quiet bool `toml:"quiet"`

	// Log target controls the destination for logs and can be one of "file",
	// "stderr" or, on Windows, "eventlog". When set to "file", the output file
	// is determined by the "logfile" setting.
	LogTarget string `toml:"logtarget"`

	// Name of the file to be logged to when using the "file" logtarget. If set to
	// the empty string then logs are written to stderr.
	Logfile string `toml:"logfile"`

	// The file will be rotated after the time interval specified. When set
	// to 0 no time based rotation is performed.
	LogfileRotationInterval Duration `toml:"logfile_rotation_interval"`

	// The logfile will be rotated when it becomes larger than the specified
	// size. When set to 0 no size based rotation is performed.
	LogfileRotationMaxSize Size `toml:"logfile_rotation_max_size"`

	// Maximum number of rotated archives to keep, any older logs are deleted.
	// If set to -1, no archives are removed.
	LogfileRotationMaxArchives int `toml:"logfile_rotation_max_archives"`

	// Pick a timezone to use when logging or type 'local' for local time.
	LogWithTimezone string `toml:"log_with_timezone"`

	// Hostname overrides os.Hostname() as the "host" tag value; when
	// OmitHostname is true the "host" tag is not set at all.
	Hostname     string
	OmitHostname bool
}

// InputNames returns a list of strings of the configured inputs.
2016-01-08 04:39:43 +08:00
func ( c * Config ) InputNames ( ) [ ] string {
2015-11-25 05:22:11 +08:00
var name [ ] string
2016-01-08 04:39:43 +08:00
for _ , input := range c . Inputs {
2019-02-27 08:03:13 +08:00
name = append ( name , input . Config . Name )
2015-08-12 04:02:04 +08:00
}
2020-08-20 06:18:52 +08:00
return PluginNameCounts ( name )
2015-08-08 04:31:25 +08:00
}
2020-08-20 06:18:52 +08:00
// AggregatorNames returns a list of strings of the configured aggregators.
2018-06-01 02:56:49 +08:00
func ( c * Config ) AggregatorNames ( ) [ ] string {
var name [ ] string
for _ , aggregator := range c . Aggregators {
2019-02-27 10:22:12 +08:00
name = append ( name , aggregator . Config . Name )
2018-06-01 02:56:49 +08:00
}
2020-08-20 06:18:52 +08:00
return PluginNameCounts ( name )
2018-06-01 02:56:49 +08:00
}
2020-08-20 06:18:52 +08:00
// ProcessorNames returns a list of strings of the configured processors.
2018-06-01 02:56:49 +08:00
func ( c * Config ) ProcessorNames ( ) [ ] string {
var name [ ] string
for _ , processor := range c . Processors {
2019-08-22 07:49:07 +08:00
name = append ( name , processor . Config . Name )
2018-06-01 02:56:49 +08:00
}
2020-08-20 06:18:52 +08:00
return PluginNameCounts ( name )
2018-06-01 02:56:49 +08:00
}
2020-08-20 06:18:52 +08:00
// OutputNames returns a list of strings of the configured outputs.
2015-11-25 05:22:11 +08:00
func ( c * Config ) OutputNames ( ) [ ] string {
var name [ ] string
for _ , output := range c . Outputs {
2019-08-22 07:49:07 +08:00
name = append ( name , output . Config . Name )
2015-11-24 09:00:54 +08:00
}
2020-08-20 06:18:52 +08:00
return PluginNameCounts ( name )
}
2020-09-16 04:55:23 +08:00
// PluginNameCounts returns a list of sorted plugin names and their count.
// Names occurring once appear verbatim; repeated names are collapsed into a
// single entry formatted as "name (Nx)".
func PluginNameCounts(plugins []string) []string {
	names := make(map[string]int)
	for _, plugin := range plugins {
		names[plugin]++
	}

	// Preallocate: the output has exactly one entry per distinct name.
	namecount := make([]string, 0, len(names))
	for name, count := range names {
		if count == 1 {
			namecount = append(namecount, name)
		} else {
			namecount = append(namecount, fmt.Sprintf("%s (%dx)", name, count))
		}
	}

	// Map iteration order is random; sort for deterministic output.
	sort.Strings(namecount)
	return namecount
}

// ListTags returns a string of tags specified in the config,
// line-protocol style
2015-04-02 00:34:32 +08:00
func ( c * Config ) ListTags ( ) string {
var tags [ ] string
for k , v := range c . Tags {
tags = append ( tags , fmt . Sprintf ( "%s=%s" , k , v ) )
}
sort . Strings ( tags )
return strings . Join ( tags , " " )
}
2015-05-19 06:10:11 +08:00
2016-02-18 12:57:33 +08:00
// header is printed once at the very top of the generated sample config.
var header = `# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply surround
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})

`

// globalTagsConfig is the sample [global_tags] section.
var globalTagsConfig = `
# Global tags can be specified here in key="value" format.
[global_tags]
  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
  # rack = "1a"
  ## Environment variables can be used as tags, and throughout the config file
  # user = "$USER"

`

// agentConfig is the sample [agent] section.
var agentConfig = `
# Configuration for telegraf agent
[agent]
  ## Default data collection interval for all inputs
  interval = "10s"
  ## Rounds collection interval to 'interval'
  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
  round_interval = true

  ## Telegraf will send metrics to outputs in batches of at most
  ## metric_batch_size metrics.
  ## This controls the size of writes that Telegraf sends to output plugins.
  metric_batch_size = 1000

  ## Maximum number of unwritten metrics per output.  Increasing this value
  ## allows for longer periods of output downtime without dropping metrics at the
  ## cost of higher maximum memory usage.
  metric_buffer_limit = 10000

  ## Collection jitter is used to jitter the collection by a random amount.
  ## Each plugin will sleep for a random time within jitter before collecting.
  ## This can be used to avoid many plugins querying things like sysfs at the
  ## same time, which can have a measurable effect on the system.
  collection_jitter = "0s"

  ## Default flushing interval for all outputs. Maximum flush_interval will be
  ## flush_interval + flush_jitter
  flush_interval = "10s"
  ## Jitter the flush interval by a random amount. This is primarily to avoid
  ## large write spikes for users running a large number of telegraf instances.
  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## By default or when set to "0s", precision will be set to the same
  ## timestamp order as the collection interval, with the maximum being 1s.
  ##   ie, when interval = "10s", precision will be "1s"
  ##       when interval = "250ms", precision will be "1ms"
  ## Precision will NOT be used for service inputs. It is up to each individual
  ## service input to set the timestamp at the appropriate precision.
  ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
  precision = ""

  ## Log at debug level.
  # debug = false
  ## Log only error level messages.
  # quiet = false

  ## Log target controls the destination for logs and can be one of "file",
  ## "stderr" or, on Windows, "eventlog".  When set to "file", the output file
  ## is determined by the "logfile" setting.
  # logtarget = "file"

  ## Name of the file to be logged to when using the "file" logtarget.  If set to
  ## the empty string then logs are written to stderr.
  # logfile = ""

  ## The logfile will be rotated after the time interval specified.  When set
  ## to 0 no time based rotation is performed.  Logs are rotated only when
  ## written to, if there is no log activity rotation may be delayed.
  # logfile_rotation_interval = "0d"

  ## The logfile will be rotated when it becomes larger than the specified
  ## size.  When set to 0 no size based rotation is performed.
  # logfile_rotation_max_size = "0MB"

  ## Maximum number of rotated archives to keep, any older logs are deleted.
  ## If set to -1, no archives are removed.
  # logfile_rotation_max_archives = 5

  ## Pick a timezone to use when logging or type 'local' for local time.
  ## Example: America/Chicago
  # log_with_timezone = ""

  ## Override default hostname, if empty use os.Hostname()
  hostname = ""
  ## If set to true, do no set the "host" tag in the telegraf agent.
  omit_hostname = false
`

// outputHeader is the banner printed before the output plugin sections.
var outputHeader = `
###############################################################################
#                            OUTPUT PLUGINS                                   #
###############################################################################

`

// processorHeader is the banner printed before the processor plugin sections.
var processorHeader = `
###############################################################################
#                            PROCESSOR PLUGINS                                #
###############################################################################

`

// aggregatorHeader is the banner printed before the aggregator plugin sections.
var aggregatorHeader = `
###############################################################################
#                            AGGREGATOR PLUGINS                               #
###############################################################################

`

// inputHeader is the banner printed before the input plugin sections.
var inputHeader = `
###############################################################################
#                            INPUT PLUGINS                                    #
###############################################################################

`

// serviceInputHeader is the banner printed before the service input sections.
var serviceInputHeader = `
###############################################################################
#                            SERVICE INPUT PLUGINS                            #
###############################################################################

`

// PrintSampleConfig prints the sample config to stdout. Each filter slice
// restricts what is printed; an empty sectionFilters means all default
// sections, and empty plugin filters print the defaults uncommented plus
// all remaining registered plugins commented out.
func PrintSampleConfig(
	sectionFilters []string,
	inputFilters []string,
	outputFilters []string,
	aggregatorFilters []string,
	processorFilters []string,
) {
	// print headers
	fmt.Printf(header)

	if len(sectionFilters) == 0 {
		sectionFilters = sectionDefaults
	}
	printFilteredGlobalSections(sectionFilters)

	// print output plugins
	if sliceContains("outputs", sectionFilters) {
		if len(outputFilters) != 0 {
			// NOTE(review): the header is only printed for filter lists of
			// length >= 3 whose second entry is not "none" — presumably a
			// sentinel convention from the CLI flag parsing; confirm.
			if len(outputFilters) >= 3 && outputFilters[1] != "none" {
				fmt.Printf(outputHeader)
			}
			printFilteredOutputs(outputFilters, false)
		} else {
			fmt.Printf(outputHeader)
			printFilteredOutputs(outputDefaults, false)
			// Print non-default outputs, commented
			var pnames []string
			for pname := range outputs.Outputs {
				if !sliceContains(pname, outputDefaults) {
					pnames = append(pnames, pname)
				}
			}
			sort.Strings(pnames)
			printFilteredOutputs(pnames, true)
		}
	}

	// print processor plugins
	if sliceContains("processors", sectionFilters) {
		if len(processorFilters) != 0 {
			// Same header heuristic as for outputs above.
			if len(processorFilters) >= 3 && processorFilters[1] != "none" {
				fmt.Printf(processorHeader)
			}
			printFilteredProcessors(processorFilters, false)
		} else {
			fmt.Printf(processorHeader)
			pnames := []string{}
			for pname := range processors.Processors {
				pnames = append(pnames, pname)
			}
			sort.Strings(pnames)
			printFilteredProcessors(pnames, true)
		}
	}

	// print aggregator plugins
	if sliceContains("aggregators", sectionFilters) {
		if len(aggregatorFilters) != 0 {
			// Same header heuristic as for outputs above.
			if len(aggregatorFilters) >= 3 && aggregatorFilters[1] != "none" {
				fmt.Printf(aggregatorHeader)
			}
			printFilteredAggregators(aggregatorFilters, false)
		} else {
			fmt.Printf(aggregatorHeader)
			pnames := []string{}
			for pname := range aggregators.Aggregators {
				pnames = append(pnames, pname)
			}
			sort.Strings(pnames)
			printFilteredAggregators(pnames, true)
		}
	}

	// print input plugins
	if sliceContains("inputs", sectionFilters) {
		if len(inputFilters) != 0 {
			// Same header heuristic as for outputs above.
			if len(inputFilters) >= 3 && inputFilters[1] != "none" {
				fmt.Printf(inputHeader)
			}
			printFilteredInputs(inputFilters, false)
		} else {
			fmt.Printf(inputHeader)
			printFilteredInputs(inputDefaults, false)
			// Print non-default inputs, commented
			var pnames []string
			for pname := range inputs.Inputs {
				if !sliceContains(pname, inputDefaults) {
					pnames = append(pnames, pname)
				}
			}
			sort.Strings(pnames)
			printFilteredInputs(pnames, true)
		}
	}
}

// printFilteredProcessors prints the sample config for every registered
// processor whose name appears in processorFilters, in alphabetical order.
// When commented is true, each section is printed commented out.
func printFilteredProcessors(processorFilters []string, commented bool) {
	// Collect the processor names that pass the filter.
	var selected []string
	for name := range processors.Processors {
		if sliceContains(name, processorFilters) {
			selected = append(selected, name)
		}
	}
	sort.Strings(selected)

	// Print each matching processor's sample config.
	for _, name := range selected {
		printConfig(name, processors.Processors[name](), "processors", commented)
	}
}

func printFilteredAggregators ( aggregatorFilters [ ] string , commented bool ) {
// Filter outputs
var anames [ ] string
for aname := range aggregators . Aggregators {
if sliceContains ( aname , aggregatorFilters ) {
anames = append ( anames , aname )
}
}
sort . Strings ( anames )
// Print Outputs
for _ , aname := range anames {
creator := aggregators . Aggregators [ aname ]
output := creator ( )
printConfig ( aname , output , "aggregators" , commented )
}
}
2016-04-01 07:50:24 +08:00
// printFilteredInputs prints the sample config for every registered input
// whose name appears in inputFilters, alphabetically. Regular inputs are
// printed first; service inputs are collected and printed together at the
// end under their own header.
func printFilteredInputs(inputFilters []string, commented bool) {
	// Filter inputs
	var pnames []string
	for pname := range inputs.Inputs {
		if sliceContains(pname, inputFilters) {
			pnames = append(pnames, pname)
		}
	}
	sort.Strings(pnames)

	// cache service inputs to print them at the end
	servInputs := make(map[string]telegraf.ServiceInput)
	// for alphabetical looping:
	servInputNames := []string{}

	// Print Inputs
	for _, pname := range pnames {
		// Hard-coded skip; presumably excluded from the sample config because
		// the plugin was renamed/deprecated — TODO confirm.
		if pname == "cisco_telemetry_gnmi" {
			continue
		}
		creator := inputs.Inputs[pname]
		input := creator()

		// Service inputs are deferred and printed in their own section below.
		switch p := input.(type) {
		case telegraf.ServiceInput:
			servInputs[pname] = p
			servInputNames = append(servInputNames, pname)
			continue
		}

		printConfig(pname, input, "inputs", commented)
	}

	// Print Service Inputs
	if len(servInputs) == 0 {
		return
	}
	sort.Strings(servInputNames)

	fmt.Printf(serviceInputHeader)
	for _, name := range servInputNames {
		printConfig(name, servInputs[name], "inputs", commented)
	}
}

func printFilteredOutputs ( outputFilters [ ] string , commented bool ) {
// Filter outputs
var onames [ ] string
for oname := range outputs . Outputs {
if sliceContains ( oname , outputFilters ) {
onames = append ( onames , oname )
}
}
sort . Strings ( onames )
// Print Outputs
for _ , oname := range onames {
creator := outputs . Outputs [ oname ]
output := creator ( )
printConfig ( oname , output , "outputs" , commented )
2015-09-25 02:06:11 +08:00
}
}
2019-04-26 11:34:40 +08:00
// printFilteredGlobalSections prints the global_tags and agent sample
// sections when they are included in sectionFilters.
func printFilteredGlobalSections(sectionFilters []string) {
	if sliceContains("global_tags", sectionFilters) {
		fmt.Printf(globalTagsConfig)
	}

	if sliceContains("agent", sectionFilters) {
		fmt.Printf(agentConfig)
	}
}

// printConfig prints the sample config for a single plugin instance as a
// [[op.name]] TOML section preceded by its description. When commented is
// true every emitted line is prefixed with "# " so the plugin stays inactive
// in the generated file.
func printConfig(name string, p telegraf.PluginDescriber, op string, commented bool) {
	comment := ""
	if commented {
		comment = "# "
	}
	fmt.Printf("\n%s# %s\n%s[[%s.%s]]", comment, p.Description(), comment,
		op, name)

	config := p.SampleConfig()
	if config == "" {
		fmt.Printf("\n%s # no configuration\n\n", comment)
	} else {
		lines := strings.Split(config, "\n")
		for i, line := range lines {
			// First and last lines are emitted as bare newlines (without the
			// comment prefix); sample configs delimit themselves that way.
			if i == 0 || i == len(lines)-1 {
				fmt.Print("\n")
				continue
			}
			// Trim trailing spaces so commented blank lines don't end in "# ".
			fmt.Print(strings.TrimRight(comment+line, " ") + "\n")
		}
	}
}

// sliceContains reports whether name is present in list.
func sliceContains(name string, list []string) bool {
	for i := range list {
		if list[i] == name {
			return true
		}
	}
	return false
}

// PrintInputConfig prints the config usage of a single input.
func PrintInputConfig ( name string ) error {
if creator , ok := inputs . Inputs [ name ] ; ok {
2016-04-01 07:50:24 +08:00
printConfig ( name , creator ( ) , "inputs" , false )
2015-08-25 04:52:46 +08:00
} else {
2020-11-04 05:40:57 +08:00
return fmt . Errorf ( "Input %s not found" , name )
2015-08-25 04:52:46 +08:00
}
return nil
}
2015-10-17 12:47:13 +08:00
2015-10-23 04:24:51 +08:00
// PrintOutputConfig prints the config usage of a single output.
func PrintOutputConfig ( name string ) error {
if creator , ok := outputs . Outputs [ name ] ; ok {
2016-04-01 07:50:24 +08:00
printConfig ( name , creator ( ) , "outputs" , false )
2015-10-23 04:24:51 +08:00
} else {
2020-11-04 05:40:57 +08:00
return fmt . Errorf ( "Output %s not found" , name )
2015-10-23 04:24:51 +08:00
}
return nil
}
2020-11-04 05:40:57 +08:00
// LoadDirectory loads all toml config files found in the specified path, recursively.
2015-10-19 15:09:36 +08:00
func ( c * Config ) LoadDirectory ( path string ) error {
2016-09-28 22:30:02 +08:00
walkfn := func ( thispath string , info os . FileInfo , _ error ) error {
2017-02-23 21:45:36 +08:00
if info == nil {
log . Printf ( "W! Telegraf is not permitted to read %s" , thispath )
return nil
}
2018-04-12 07:51:19 +08:00
2016-09-28 22:30:02 +08:00
if info . IsDir ( ) {
2018-04-12 07:51:19 +08:00
if strings . HasPrefix ( info . Name ( ) , ".." ) {
// skip Kubernetes mounts, prevening loading the same config twice
return filepath . SkipDir
}
2016-09-28 22:30:02 +08:00
return nil
2015-10-19 15:09:36 +08:00
}
2016-09-28 22:30:02 +08:00
name := info . Name ( )
2015-11-26 09:42:07 +08:00
if len ( name ) < 6 || name [ len ( name ) - 5 : ] != ".conf" {
2016-09-28 22:30:02 +08:00
return nil
2015-10-19 15:09:36 +08:00
}
2016-09-28 22:30:02 +08:00
err := c . LoadConfig ( thispath )
2015-10-19 15:09:36 +08:00
if err != nil {
return err
}
2016-09-28 22:30:02 +08:00
return nil
2015-10-19 15:09:36 +08:00
}
2016-09-28 22:30:02 +08:00
return filepath . Walk ( path , walkfn )
2015-10-19 15:09:36 +08:00
}
2016-04-29 05:19:03 +08:00
// getDefaultConfigPath tries to find a default config file at these
// locations (in order):
//  1. $TELEGRAF_CONFIG_PATH
//  2. $HOME/.telegraf/telegraf.conf
//  3. /etc/telegraf/telegraf.conf
//     (on Windows: %ProgramFiles%\Telegraf\telegraf.conf)
//
// It returns an error when none of the candidate files exist.
func getDefaultConfigPath() (string, error) {
	envfile := os.Getenv("TELEGRAF_CONFIG_PATH")
	homefile := os.ExpandEnv("${HOME}/.telegraf/telegraf.conf")
	etcfile := "/etc/telegraf/telegraf.conf"
	if runtime.GOOS == "windows" {
		programFiles := os.Getenv("ProgramFiles")
		if programFiles == "" { // Should never happen
			programFiles = `C:\Program Files`
		}
		etcfile = programFiles + `\Telegraf\telegraf.conf`
	}
	for _, path := range []string{envfile, homefile, etcfile} {
		if _, err := os.Stat(path); err == nil {
			log.Printf("I! Using config file: %s", path)
			return path, nil
		}
	}

	// if we got here, we didn't find a file in a default location.
	// Error string is lowercase per Go convention (staticcheck ST1005).
	return "", fmt.Errorf("no config file specified, and could not find one"+
		" in $TELEGRAF_CONFIG_PATH, %s, or %s", homefile, etcfile)
}

// LoadConfig loads the given config file and applies it to c
2015-11-24 07:28:11 +08:00
func ( c * Config ) LoadConfig ( path string ) error {
2016-04-29 05:19:03 +08:00
var err error
if path == "" {
if path , err = getDefaultConfigPath ( ) ; err != nil {
return err
}
}
2018-11-06 06:19:46 +08:00
data , err := loadConfig ( path )
if err != nil {
2020-06-05 22:43:43 +08:00
return fmt . Errorf ( "Error loading config file %s: %w" , path , err )
2018-11-06 06:19:46 +08:00
}
2020-06-05 22:43:43 +08:00
if err = c . LoadConfigData ( data ) ; err != nil {
return fmt . Errorf ( "Error loading config file %s: %w" , path , err )
}
return nil
}
// LoadConfigData loads TOML-formatted config data.
//
// Parsing happens in a fixed order: the [tags]/[global_tags] tables first,
// then the [agent] table, then the "host" tag is injected (unless
// omit_hostname is set), and finally every remaining top-level table is
// dispatched to the matching plugin category (outputs, inputs/plugins,
// processors, aggregators). Any table name that matches none of those is
// treated as a legacy input-plugin table. After the agent section and after
// each plugin table, any config fields that were seen but never consumed
// are reported as an error.
func (c *Config) LoadConfigData(data []byte) error {
	tbl, err := parseConfig(data)
	if err != nil {
		return fmt.Errorf("Error parsing data: %s", err)
	}

	// Parse tags tables first: both the legacy "tags" name and the current
	// "global_tags" name merge into c.Tags.
	for _, tableName := range []string{"tags", "global_tags"} {
		if val, ok := tbl.Fields[tableName]; ok {
			subTable, ok := val.(*ast.Table)
			if !ok {
				return fmt.Errorf("invalid configuration, bad table name %q", tableName)
			}
			if err = c.toml.UnmarshalTable(subTable, c.Tags); err != nil {
				return fmt.Errorf("error parsing table name %q: %s", tableName, err)
			}
		}
	}

	// Parse agent table:
	if val, ok := tbl.Fields["agent"]; ok {
		subTable, ok := val.(*ast.Table)
		if !ok {
			return fmt.Errorf("invalid configuration, error parsing agent table")
		}
		if err = c.toml.UnmarshalTable(subTable, c.Agent); err != nil {
			return fmt.Errorf("error parsing [agent]: %w", err)
		}
	}

	// Inject the "host" tag from the configured (or OS-reported) hostname
	// unless the agent config explicitly opts out.
	if !c.Agent.OmitHostname {
		if c.Agent.Hostname == "" {
			hostname, err := os.Hostname()
			if err != nil {
				return err
			}
			c.Agent.Hostname = hostname
		}
		c.Tags["host"] = c.Agent.Hostname
	}

	// Fields tracked by the TOML decoder but never consumed by the agent
	// section are configuration mistakes; fail loudly.
	if len(c.UnusedFields) > 0 {
		return fmt.Errorf("line %d: configuration specified the fields %q, but they weren't used", tbl.Line, keys(c.UnusedFields))
	}

	// Parse all the rest of the plugins:
	for name, val := range tbl.Fields {
		subTable, ok := val.(*ast.Table)
		if !ok {
			return fmt.Errorf("invalid configuration, error parsing field %q as table", name)
		}
		switch name {
		case "agent", "global_tags", "tags":
			// Already handled above.
		case "outputs":
			for pluginName, pluginVal := range subTable.Fields {
				switch pluginSubTable := pluginVal.(type) {
				// legacy [outputs.influxdb] support
				case *ast.Table:
					if err = c.addOutput(pluginName, pluginSubTable); err != nil {
						return fmt.Errorf("error parsing %s, %w", pluginName, err)
					}
				case []*ast.Table:
					for _, t := range pluginSubTable {
						if err = c.addOutput(pluginName, t); err != nil {
							return fmt.Errorf("error parsing %s array, %w", pluginName, err)
						}
					}
				default:
					return fmt.Errorf("unsupported config format: %s",
						pluginName)
				}
				if len(c.UnusedFields) > 0 {
					return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields))
				}
			}
		case "inputs", "plugins":
			for pluginName, pluginVal := range subTable.Fields {
				switch pluginSubTable := pluginVal.(type) {
				// legacy [inputs.cpu] support
				case *ast.Table:
					if err = c.addInput(pluginName, pluginSubTable); err != nil {
						return fmt.Errorf("error parsing %s, %w", pluginName, err)
					}
				case []*ast.Table:
					for _, t := range pluginSubTable {
						if err = c.addInput(pluginName, t); err != nil {
							return fmt.Errorf("error parsing %s, %w", pluginName, err)
						}
					}
				default:
					return fmt.Errorf("Unsupported config format: %s",
						pluginName)
				}
				if len(c.UnusedFields) > 0 {
					return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields))
				}
			}
		case "processors":
			// Processors only accept the array-of-tables form.
			for pluginName, pluginVal := range subTable.Fields {
				switch pluginSubTable := pluginVal.(type) {
				case []*ast.Table:
					for _, t := range pluginSubTable {
						if err = c.addProcessor(pluginName, t); err != nil {
							return fmt.Errorf("error parsing %s, %w", pluginName, err)
						}
					}
				default:
					return fmt.Errorf("Unsupported config format: %s",
						pluginName)
				}
				if len(c.UnusedFields) > 0 {
					return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields))
				}
			}
		case "aggregators":
			// Aggregators only accept the array-of-tables form.
			for pluginName, pluginVal := range subTable.Fields {
				switch pluginSubTable := pluginVal.(type) {
				case []*ast.Table:
					for _, t := range pluginSubTable {
						if err = c.addAggregator(pluginName, t); err != nil {
							return fmt.Errorf("Error parsing %s, %s", pluginName, err)
						}
					}
				default:
					return fmt.Errorf("Unsupported config format: %s",
						pluginName)
				}
				if len(c.UnusedFields) > 0 {
					return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields))
				}
			}
		// Assume it's an input for legacy config file support if no other
		// identifiers are present
		default:
			if err = c.addInput(name, subTable); err != nil {
				return fmt.Errorf("Error parsing %s, %s", name, err)
			}
		}
	}

	// Order processors deterministically (by their "order" setting) once
	// everything is loaded.
	if len(c.Processors) > 1 {
		sort.Sort(c.Processors)
	}

	return nil
}
2015-11-25 05:22:11 +08:00
2016-06-23 01:54:29 +08:00
// trimBOM strips a leading UTF-8 byte-order mark, if present.
// This is for Windows compatibility only; editors there often prepend one.
// see https://github.com/influxdata/telegraf/issues/1378
func trimBOM(f []byte) []byte {
	bom := []byte{0xef, 0xbb, 0xbf}
	return bytes.TrimPrefix(f, bom)
}
2018-01-05 07:28:00 +08:00
// escapeEnv escapes a value for inserting into a TOML string.
func escapeEnv(value string) string {
	escaped := envVarEscaper.Replace(value)
	return escaped
}
2018-11-06 06:19:46 +08:00
func loadConfig ( config string ) ( [ ] byte , error ) {
u , err := url . Parse ( config )
2016-04-02 03:53:34 +08:00
if err != nil {
return nil , err
}
2018-11-06 06:19:46 +08:00
switch u . Scheme {
2019-01-05 02:40:44 +08:00
case "https" , "http" :
2018-11-06 06:19:46 +08:00
return fetchConfig ( u )
default :
// If it isn't a https scheme, try it as a file.
}
return ioutil . ReadFile ( config )
}
func fetchConfig ( u * url . URL ) ( [ ] byte , error ) {
req , err := http . NewRequest ( "GET" , u . String ( ) , nil )
if err != nil {
return nil , err
}
2019-10-01 07:55:47 +08:00
if v , exists := os . LookupEnv ( "INFLUX_TOKEN" ) ; exists {
req . Header . Add ( "Authorization" , "Token " + v )
}
2018-11-06 06:19:46 +08:00
req . Header . Add ( "Accept" , "application/toml" )
2020-07-02 03:52:05 +08:00
req . Header . Set ( "User-Agent" , internal . ProductToken ( ) )
2019-02-06 07:15:58 +08:00
2021-02-13 00:38:40 +08:00
retries := 3
for i := 0 ; i <= retries ; i ++ {
resp , err := http . DefaultClient . Do ( req )
if err != nil {
return nil , fmt . Errorf ( "Retry %d of %d failed connecting to HTTP config server %s" , i , retries , err )
}
if resp . StatusCode != http . StatusOK {
if i < retries {
log . Printf ( "Error getting HTTP config. Retry %d of %d in %s. Status=%d" , i , retries , httpLoadConfigRetryInterval , resp . StatusCode )
time . Sleep ( httpLoadConfigRetryInterval )
continue
}
return nil , fmt . Errorf ( "Retry %d of %d failed to retrieve remote config: %s" , i , retries , resp . Status )
}
defer resp . Body . Close ( )
return ioutil . ReadAll ( resp . Body )
2019-02-06 07:15:58 +08:00
}
2021-02-13 00:38:40 +08:00
return nil , nil
2018-11-06 06:19:46 +08:00
}
// parseConfig parses TOML configuration contents and
// returns the AST produced from the TOML parser. Before parsing, it
// substitutes environment-variable references and strips a UTF-8 BOM.
func parseConfig(contents []byte) (*ast.Table, error) {
	contents = trimBOM(contents)

	// Substitute ${VAR} and $VAR references with values from the process
	// environment. References whose variable is unset are left in place.
	parameters := envVarRe.FindAllSubmatch(contents, -1)
	for _, parameter := range parameters {
		if len(parameter) != 3 {
			continue
		}

		var envVar []byte
		if parameter[1] != nil {
			// ${VAR} form (first capture group).
			envVar = parameter[1]
		} else if parameter[2] != nil {
			// $VAR form (second capture group).
			envVar = parameter[2]
		} else {
			continue
		}

		envVal, ok := os.LookupEnv(strings.TrimPrefix(string(envVar), "$"))
		if ok {
			// Escape the value so it remains valid inside a TOML string.
			// Only the first remaining occurrence is replaced; duplicates
			// are handled because FindAllSubmatch yields one match each.
			envVal = escapeEnv(envVal)
			contents = bytes.Replace(contents, parameter[0], []byte(envVal), 1)
		}
	}

	return toml.Parse(contents)
}
2016-09-08 22:22:10 +08:00
// addAggregator instantiates the named aggregator plugin, unmarshals its
// TOML table into it and appends the resulting RunningAggregator to
// c.Aggregators. Unknown plugin names are an error.
func (c *Config) addAggregator(name string, table *ast.Table) error {
	creator, ok := aggregators.Aggregators[name]
	if !ok {
		return fmt.Errorf("Undefined but requested aggregator: %s", name)
	}
	aggregator := creator()

	// Extract the common aggregator settings (period, delay, filter, ...)
	// before unmarshaling the plugin-specific fields.
	conf, err := c.buildAggregator(name, table)
	if err != nil {
		return err
	}

	if err := c.toml.UnmarshalTable(table, aggregator); err != nil {
		return err
	}

	c.Aggregators = append(c.Aggregators, models.NewRunningAggregator(aggregator, conf))
	return nil
}
// addProcessor instantiates the named processor plugin twice — once for the
// normal processing chain (c.Processors) and once as an independent copy
// for the aggregator chain (c.AggProcessors) — each configured from the
// same TOML table.
func (c *Config) addProcessor(name string, table *ast.Table) error {
	creator, ok := processors.Processors[name]
	if !ok {
		return fmt.Errorf("Undefined but requested processor: %s", name)
	}

	processorConfig, err := c.buildProcessor(name, table)
	if err != nil {
		return err
	}

	rf, err := c.newRunningProcessor(creator, processorConfig, table)
	if err != nil {
		return err
	}
	c.Processors = append(c.Processors, rf)

	// save a copy for the aggregator
	rf, err = c.newRunningProcessor(creator, processorConfig, table)
	if err != nil {
		return err
	}
	c.AggProcessors = append(c.AggProcessors, rf)

	return nil
}
2020-06-05 22:43:43 +08:00
// newRunningProcessor creates a fresh processor instance via creator,
// unmarshals the TOML table into it, and wraps it in a RunningProcessor.
// Processors that implement unwrappable are unmarshaled into the wrapped
// inner processor instead of the wrapper itself.
func (c *Config) newRunningProcessor(
	creator processors.StreamingCreator,
	processorConfig *models.ProcessorConfig,
	table *ast.Table,
) (*models.RunningProcessor, error) {
	processor := creator()

	if p, ok := processor.(unwrappable); ok {
		if err := c.toml.UnmarshalTable(table, p.Unwrap()); err != nil {
			return nil, err
		}
	} else {
		if err := c.toml.UnmarshalTable(table, processor); err != nil {
			return nil, err
		}
	}

	rf := models.NewRunningProcessor(processor, processorConfig)
	return rf, nil
}
2015-11-25 05:22:11 +08:00
// addOutput instantiates the named output plugin, attaches a serializer if
// the plugin supports one, unmarshals its config table and appends the
// RunningOutput to c.Outputs. Names excluded by c.OutputFilters are
// silently skipped (no error).
func (c *Config) addOutput(name string, table *ast.Table) error {
	if len(c.OutputFilters) > 0 && !sliceContains(name, c.OutputFilters) {
		return nil
	}
	creator, ok := outputs.Outputs[name]
	if !ok {
		return fmt.Errorf("Undefined but requested output: %s", name)
	}
	output := creator()

	// If the output has a SetSerializer function, then this means it can write
	// arbitrary types of output, so build the serializer and set it.
	switch t := output.(type) {
	case serializers.SerializerOutput:
		serializer, err := c.buildSerializer(table)
		if err != nil {
			return err
		}
		t.SetSerializer(serializer)
	}

	// Common output settings (flush interval, buffer limits, filter, ...).
	outputConfig, err := c.buildOutput(name, table)
	if err != nil {
		return err
	}

	if err := c.toml.UnmarshalTable(table, output); err != nil {
		return err
	}

	ro := models.NewRunningOutput(output, outputConfig, c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit)
	c.Outputs = append(c.Outputs, ro)
	return nil
}
2016-01-08 04:39:43 +08:00
// addInput instantiates the named input plugin, wires up a parser or parser
// factory when the plugin accepts one, unmarshals its config table and
// appends the RunningInput to c.Inputs. Names excluded by c.InputFilters
// are silently skipped (no error).
func (c *Config) addInput(name string, table *ast.Table) error {
	if len(c.InputFilters) > 0 && !sliceContains(name, c.InputFilters) {
		return nil
	}

	// Legacy support renaming io input to diskio
	if name == "io" {
		name = "diskio"
	}

	creator, ok := inputs.Inputs[name]
	if !ok {
		return fmt.Errorf("Undefined but requested input: %s", name)
	}
	input := creator()

	// If the input has a SetParser function, then this means it can accept
	// arbitrary types of input, so build the parser and set it.
	if t, ok := input.(parsers.ParserInput); ok {
		parser, err := c.buildParser(name, table)
		if err != nil {
			return err
		}
		t.SetParser(parser)
	}

	// Inputs that create parsers on demand get a factory closure that builds
	// a new parser from the same captured config each time it is called.
	if t, ok := input.(parsers.ParserFuncInput); ok {
		config, err := c.getParserConfig(name, table)
		if err != nil {
			return err
		}
		t.SetParserFunc(func() (parsers.Parser, error) {
			return parsers.NewParser(config)
		})
	}

	pluginConfig, err := c.buildInput(name, table)
	if err != nil {
		return err
	}

	if err := c.toml.UnmarshalTable(table, input); err != nil {
		return err
	}

	rp := models.NewRunningInput(input, pluginConfig)
	rp.SetDefaultTags(c.Tags)
	c.Inputs = append(c.Inputs, rp)
	return nil
}
2016-10-06 20:29:46 +08:00
// buildAggregator parses Aggregator specific items from the ast.Table,
// builds the filter and returns a
// models.AggregatorConfig to be inserted into models.RunningAggregator.
// Defaults: 30s period, 100ms delay, zero grace.
func (c *Config) buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, error) {
	conf := &models.AggregatorConfig{
		Name:   name,
		Delay:  time.Millisecond * 100,
		Period: time.Second * 30,
		Grace:  time.Second * 0,
	}

	// Field accessors record any type errors on c; checked below via hasErrs.
	c.getFieldDuration(tbl, "period", &conf.Period)
	c.getFieldDuration(tbl, "delay", &conf.Delay)
	c.getFieldDuration(tbl, "grace", &conf.Grace)
	c.getFieldBool(tbl, "drop_original", &conf.DropOriginal)
	c.getFieldString(tbl, "name_prefix", &conf.MeasurementPrefix)
	c.getFieldString(tbl, "name_suffix", &conf.MeasurementSuffix)
	c.getFieldString(tbl, "name_override", &conf.NameOverride)
	c.getFieldString(tbl, "alias", &conf.Alias)

	conf.Tags = make(map[string]string)
	if node, ok := tbl.Fields["tags"]; ok {
		if subtbl, ok := node.(*ast.Table); ok {
			if err := c.toml.UnmarshalTable(subtbl, conf.Tags); err != nil {
				// NOTE(review): message says "input" but this is an
				// aggregator table — likely copied from buildInput.
				return nil, fmt.Errorf("could not parse tags for input %s", name)
			}
		}
	}

	if c.hasErrs() {
		return nil, c.firstErr()
	}

	var err error
	conf.Filter, err = c.buildFilter(tbl)
	if err != nil {
		return conf, err
	}
	return conf, nil
}
2016-10-06 20:29:46 +08:00
// buildProcessor parses Processor specific items from the ast.Table,
// builds the filter and returns a
// models.ProcessorConfig to be inserted into models.RunningProcessor.
func (c *Config) buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error) {
	conf := &models.ProcessorConfig{Name: name}

	// "order" controls processor ordering (see the sort in LoadConfigData).
	c.getFieldInt64(tbl, "order", &conf.Order)
	c.getFieldString(tbl, "alias", &conf.Alias)

	// Field-accessor errors accumulated on c abort the build.
	if c.hasErrs() {
		return nil, c.firstErr()
	}

	var err error
	conf.Filter, err = c.buildFilter(tbl)
	if err != nil {
		return conf, err
	}
	return conf, nil
}
2016-02-23 04:35:06 +08:00
// buildFilter builds a Filter
// (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to
// be inserted into the models.OutputConfig/models.InputConfig
// to be used for glob filtering on tags and measurements.
func (c *Config) buildFilter(tbl *ast.Table) (models.Filter, error) {
	f := models.Filter{}

	c.getFieldStringSlice(tbl, "namepass", &f.NamePass)
	c.getFieldStringSlice(tbl, "namedrop", &f.NameDrop)

	// "pass"/"drop" fill the same destinations as fieldpass/fielddrop.
	c.getFieldStringSlice(tbl, "pass", &f.FieldPass)
	c.getFieldStringSlice(tbl, "fieldpass", &f.FieldPass)

	c.getFieldStringSlice(tbl, "drop", &f.FieldDrop)
	c.getFieldStringSlice(tbl, "fielddrop", &f.FieldDrop)

	c.getFieldTagFilter(tbl, "tagpass", &f.TagPass)
	c.getFieldTagFilter(tbl, "tagdrop", &f.TagDrop)

	c.getFieldStringSlice(tbl, "tagexclude", &f.TagExclude)
	c.getFieldStringSlice(tbl, "taginclude", &f.TagInclude)

	// Field-accessor errors accumulated on c abort the build.
	if c.hasErrs() {
		return f, c.firstErr()
	}

	// Compile the collected glob patterns.
	if err := f.Compile(); err != nil {
		return f, err
	}

	return f, nil
}
2016-01-08 04:39:43 +08:00
// buildInput parses input specific items from the ast.Table,
// builds the filter and returns a
// models.InputConfig to be inserted into models.RunningInput.
func (c *Config) buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
	cp := &models.InputConfig{Name: name}

	// Field accessors record any type errors on c; checked below via hasErrs.
	c.getFieldDuration(tbl, "interval", &cp.Interval)
	c.getFieldDuration(tbl, "precision", &cp.Precision)
	c.getFieldDuration(tbl, "collection_jitter", &cp.CollectionJitter)
	c.getFieldString(tbl, "name_prefix", &cp.MeasurementPrefix)
	c.getFieldString(tbl, "name_suffix", &cp.MeasurementSuffix)
	c.getFieldString(tbl, "name_override", &cp.NameOverride)
	c.getFieldString(tbl, "alias", &cp.Alias)

	// Per-plugin static tags from the nested [inputs.X.tags] table.
	cp.Tags = make(map[string]string)
	if node, ok := tbl.Fields["tags"]; ok {
		if subtbl, ok := node.(*ast.Table); ok {
			if err := c.toml.UnmarshalTable(subtbl, cp.Tags); err != nil {
				return nil, fmt.Errorf("could not parse tags for input %s", name)
			}
		}
	}

	if c.hasErrs() {
		return nil, c.firstErr()
	}

	var err error
	cp.Filter, err = c.buildFilter(tbl)
	if err != nil {
		return cp, err
	}
	return cp, nil
}
2016-02-06 08:36:35 +08:00
// buildParser grabs the necessary entries from the ast.Table for creating
// a parsers.Parser object, and creates it, which can then be added onto
// an Input object.
func (c *Config) buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
	config, err := c.getParserConfig(name, tbl)
	if err != nil {
		return nil, err
	}
	parser, err := parsers.NewParser(config)
	if err != nil {
		return nil, err
	}
	// Attach a logger identifying the data format and the owning plugin.
	logger := models.NewLogger("parsers", config.DataFormat, name)
	models.SetLoggerOnPlugin(parser, logger)
	return parser, nil
}
2020-11-04 05:40:57 +08:00
// getParserConfig extracts every parser-related option from the plugin's
// TOML table into a parsers.Config. The data format defaults to "influx"
// ("json" for the exec plugin, for legacy reasons) and JSON parsing is
// strict by default. Field-accessor errors accumulated on c are returned
// at the end.
func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) {
	pc := &parsers.Config{
		JSONStrict: true,
	}

	c.getFieldString(tbl, "data_format", &pc.DataFormat)

	// Legacy support, exec plugin originally parsed JSON by default.
	if name == "exec" && pc.DataFormat == "" {
		pc.DataFormat = "json"
	} else if pc.DataFormat == "" {
		pc.DataFormat = "influx"
	}

	c.getFieldString(tbl, "separator", &pc.Separator)
	c.getFieldStringSlice(tbl, "templates", &pc.Templates)
	c.getFieldStringSlice(tbl, "tag_keys", &pc.TagKeys)

	// for json data_format
	c.getFieldStringSlice(tbl, "json_string_fields", &pc.JSONStringFields)
	c.getFieldString(tbl, "json_name_key", &pc.JSONNameKey)
	c.getFieldString(tbl, "json_query", &pc.JSONQuery)
	c.getFieldString(tbl, "json_time_key", &pc.JSONTimeKey)
	c.getFieldString(tbl, "json_time_format", &pc.JSONTimeFormat)
	c.getFieldString(tbl, "json_timezone", &pc.JSONTimezone)
	c.getFieldBool(tbl, "json_strict", &pc.JSONStrict)
	c.getFieldString(tbl, "data_type", &pc.DataType)

	// for collectd data_format
	c.getFieldString(tbl, "collectd_auth_file", &pc.CollectdAuthFile)
	c.getFieldString(tbl, "collectd_security_level", &pc.CollectdSecurityLevel)
	c.getFieldString(tbl, "collectd_parse_multivalue", &pc.CollectdSplit)
	c.getFieldStringSlice(tbl, "collectd_typesdb", &pc.CollectdTypesDB)

	// for dropwizard data_format
	c.getFieldString(tbl, "dropwizard_metric_registry_path", &pc.DropwizardMetricRegistryPath)
	c.getFieldString(tbl, "dropwizard_time_path", &pc.DropwizardTimePath)
	c.getFieldString(tbl, "dropwizard_time_format", &pc.DropwizardTimeFormat)
	c.getFieldString(tbl, "dropwizard_tags_path", &pc.DropwizardTagsPath)
	c.getFieldStringMap(tbl, "dropwizard_tag_paths", &pc.DropwizardTagPathsMap)

	//for grok data_format
	c.getFieldStringSlice(tbl, "grok_named_patterns", &pc.GrokNamedPatterns)
	c.getFieldStringSlice(tbl, "grok_patterns", &pc.GrokPatterns)
	c.getFieldString(tbl, "grok_custom_patterns", &pc.GrokCustomPatterns)
	c.getFieldStringSlice(tbl, "grok_custom_pattern_files", &pc.GrokCustomPatternFiles)
	c.getFieldString(tbl, "grok_timezone", &pc.GrokTimezone)
	c.getFieldString(tbl, "grok_unique_timestamp", &pc.GrokUniqueTimestamp)

	//for csv parser
	c.getFieldStringSlice(tbl, "csv_column_names", &pc.CSVColumnNames)
	c.getFieldStringSlice(tbl, "csv_column_types", &pc.CSVColumnTypes)
	c.getFieldStringSlice(tbl, "csv_tag_columns", &pc.CSVTagColumns)
	c.getFieldString(tbl, "csv_timezone", &pc.CSVTimezone)
	c.getFieldString(tbl, "csv_delimiter", &pc.CSVDelimiter)
	c.getFieldString(tbl, "csv_comment", &pc.CSVComment)
	c.getFieldString(tbl, "csv_measurement_column", &pc.CSVMeasurementColumn)
	c.getFieldString(tbl, "csv_timestamp_column", &pc.CSVTimestampColumn)
	c.getFieldString(tbl, "csv_timestamp_format", &pc.CSVTimestampFormat)
	c.getFieldInt(tbl, "csv_header_row_count", &pc.CSVHeaderRowCount)
	c.getFieldInt(tbl, "csv_skip_rows", &pc.CSVSkipRows)
	c.getFieldInt(tbl, "csv_skip_columns", &pc.CSVSkipColumns)
	c.getFieldBool(tbl, "csv_trim_space", &pc.CSVTrimSpace)
	c.getFieldStringSlice(tbl, "csv_skip_values", &pc.CSVSkipValues)

	c.getFieldStringSlice(tbl, "form_urlencoded_tag_keys", &pc.FormUrlencodedTagKeys)

	c.getFieldString(tbl, "value_field_name", &pc.ValueFieldName)

	//for XML parser
	if node, ok := tbl.Fields["xml"]; ok {
		if subtbls, ok := node.([]*ast.Table); ok {
			pc.XMLConfig = make([]parsers.XMLConfig, len(subtbls))
			for i, subtbl := range subtbls {
				// Work on a copy of the slice element, then write it back.
				subcfg := pc.XMLConfig[i]
				c.getFieldString(subtbl, "metric_name", &subcfg.MetricQuery)
				c.getFieldString(subtbl, "metric_selection", &subcfg.Selection)
				c.getFieldString(subtbl, "timestamp", &subcfg.Timestamp)
				c.getFieldString(subtbl, "timestamp_format", &subcfg.TimestampFmt)
				c.getFieldStringMap(subtbl, "tags", &subcfg.Tags)
				c.getFieldStringMap(subtbl, "fields", &subcfg.Fields)
				c.getFieldStringMap(subtbl, "fields_int", &subcfg.FieldsInt)
				c.getFieldString(subtbl, "field_selection", &subcfg.FieldSelection)
				c.getFieldBool(subtbl, "field_name_expansion", &subcfg.FieldNameExpand)
				c.getFieldString(subtbl, "field_name", &subcfg.FieldNameQuery)
				c.getFieldString(subtbl, "field_value", &subcfg.FieldValueQuery)
				pc.XMLConfig[i] = subcfg
			}
		}
	}

	pc.MetricName = name

	if c.hasErrs() {
		return nil, c.firstErr()
	}

	return pc, nil
}
// buildSerializer grabs the necessary entries from the ast.Table for creating
// a serializers.Serializer object, and creates it, which can then be added onto
// an Output object. Timestamp units default to one second; the data format
// defaults to "influx".
func (c *Config) buildSerializer(tbl *ast.Table) (serializers.Serializer, error) {
	sc := &serializers.Config{TimestampUnits: 1 * time.Second}

	c.getFieldString(tbl, "data_format", &sc.DataFormat)
	if sc.DataFormat == "" {
		sc.DataFormat = "influx"
	}

	c.getFieldString(tbl, "prefix", &sc.Prefix)
	c.getFieldString(tbl, "template", &sc.Template)
	c.getFieldStringSlice(tbl, "templates", &sc.Templates)
	c.getFieldString(tbl, "carbon2_format", &sc.Carbon2Format)
	c.getFieldString(tbl, "carbon2_sanitize_replace_char", &sc.Carbon2SanitizeReplaceChar)
	c.getFieldInt(tbl, "influx_max_line_bytes", &sc.InfluxMaxLineBytes)
	c.getFieldBool(tbl, "influx_sort_fields", &sc.InfluxSortFields)
	c.getFieldBool(tbl, "influx_uint_support", &sc.InfluxUintSupport)
	c.getFieldBool(tbl, "graphite_tag_support", &sc.GraphiteTagSupport)
	c.getFieldString(tbl, "graphite_separator", &sc.GraphiteSeparator)
	c.getFieldDuration(tbl, "json_timestamp_units", &sc.TimestampUnits)
	c.getFieldBool(tbl, "splunkmetric_hec_routing", &sc.HecRouting)
	c.getFieldBool(tbl, "splunkmetric_multimetric", &sc.SplunkmetricMultiMetric)
	c.getFieldStringSlice(tbl, "wavefront_source_override", &sc.WavefrontSourceOverride)
	c.getFieldBool(tbl, "wavefront_use_strict", &sc.WavefrontUseStrict)
	c.getFieldBool(tbl, "prometheus_export_timestamp", &sc.PrometheusExportTimestamp)
	c.getFieldBool(tbl, "prometheus_sort_metrics", &sc.PrometheusSortMetrics)
	c.getFieldBool(tbl, "prometheus_string_as_label", &sc.PrometheusStringAsLabel)

	// Field-accessor errors accumulated on c abort the build.
	if c.hasErrs() {
		return nil, c.firstErr()
	}

	return serializers.NewSerializer(sc)
}
// buildOutput parses output specific items from the ast.Table,
// builds the filter and returns an
// models.OutputConfig to be inserted into models.RunningOutput.
// Note: error exists in the return for future calls that might require error
func (c *Config) buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) {
	filter, err := c.buildFilter(tbl)
	if err != nil {
		return nil, err
	}
	oc := &models.OutputConfig{
		Name:   name,
		Filter: filter,
	}

	// TODO: support FieldPass/FieldDrop on outputs

	c.getFieldDuration(tbl, "flush_interval", &oc.FlushInterval)
	c.getFieldDuration(tbl, "flush_jitter", &oc.FlushJitter)
	c.getFieldInt(tbl, "metric_buffer_limit", &oc.MetricBufferLimit)
	c.getFieldInt(tbl, "metric_batch_size", &oc.MetricBatchSize)
	c.getFieldString(tbl, "alias", &oc.Alias)
	c.getFieldString(tbl, "name_override", &oc.NameOverride)
	c.getFieldString(tbl, "name_suffix", &oc.NameSuffix)
	c.getFieldString(tbl, "name_prefix", &oc.NamePrefix)

	// Field-accessor errors accumulated on c abort the build.
	if c.hasErrs() {
		return nil, c.firstErr()
	}

	return oc, nil
}
2021-03-23 01:21:36 +08:00
// missingTomlField is the callback invoked by the toml unmarshaller for
// every config key that does not map to a struct field of the plugin being
// decoded. Keys that are common to all plugins — filter options, agent
// scheduling options, and data-format (parser/serializer) options that are
// handled separately by the parser/serializer builders — are deliberately
// ignored. Any other unknown key is recorded in c.UnusedFields so the
// caller can warn the user about it. It always returns nil so that an
// unknown field never aborts config loading.
func (c *Config) missingTomlField(_ reflect.Type, key string) error {
	switch key {
	case "alias", "carbon2_format", "carbon2_sanitize_replace_char", "collectd_auth_file",
		"collectd_parse_multivalue", "collectd_security_level", "collectd_typesdb", "collection_jitter",
		"csv_column_names", "csv_column_types", "csv_comment", "csv_delimiter", "csv_header_row_count",
		"csv_measurement_column", "csv_skip_columns", "csv_skip_rows", "csv_tag_columns",
		"csv_timestamp_column", "csv_timestamp_format", "csv_timezone", "csv_trim_space", "csv_skip_values",
		"data_format", "data_type", "delay", "drop", "drop_original", "dropwizard_metric_registry_path",
		"dropwizard_tag_paths", "dropwizard_tags_path", "dropwizard_time_format", "dropwizard_time_path",
		"fielddrop", "fieldpass", "flush_interval", "flush_jitter", "form_urlencoded_tag_keys",
		"grace", "graphite_separator", "graphite_tag_support", "grok_custom_pattern_files",
		"grok_custom_patterns", "grok_named_patterns", "grok_patterns", "grok_timezone",
		"grok_unique_timestamp", "influx_max_line_bytes", "influx_sort_fields", "influx_uint_support",
		"interval", "json_name_key", "json_query", "json_strict", "json_string_fields",
		"json_time_format", "json_time_key", "json_timestamp_units", "json_timezone",
		"metric_batch_size", "metric_buffer_limit", "name_override", "name_prefix",
		"name_suffix", "namedrop", "namepass", "order", "pass", "period", "precision",
		"prefix", "prometheus_export_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label",
		"separator", "splunkmetric_hec_routing", "splunkmetric_multimetric", "tag_keys",
		"tagdrop", "tagexclude", "taginclude", "tagpass", "tags", "template", "templates",
		"value_field_name", "wavefront_source_override", "wavefront_use_strict", "xml":

		// ignore fields that are common to all plugins.
	default:
		c.UnusedFields[key] = true
	}
	return nil
}
2016-02-06 08:36:35 +08:00
2020-11-04 05:40:57 +08:00
func ( c * Config ) getFieldString ( tbl * ast . Table , fieldName string , target * string ) {
if node , ok := tbl . Fields [ fieldName ] ; ok {
2016-02-06 08:36:35 +08:00
if kv , ok := node . ( * ast . KeyValue ) ; ok {
if str , ok := kv . Value . ( * ast . String ) ; ok {
2020-11-04 05:40:57 +08:00
* target = str . Value
2016-02-06 08:36:35 +08:00
}
}
}
2020-11-04 05:40:57 +08:00
}
2016-02-06 08:36:35 +08:00
2020-11-04 05:40:57 +08:00
func ( c * Config ) getFieldDuration ( tbl * ast . Table , fieldName string , target interface { } ) {
if node , ok := tbl . Fields [ fieldName ] ; ok {
2016-02-06 08:36:35 +08:00
if kv , ok := node . ( * ast . KeyValue ) ; ok {
2020-11-04 05:40:57 +08:00
if str , ok := kv . Value . ( * ast . String ) ; ok {
d , err := time . ParseDuration ( str . Value )
if err != nil {
c . addError ( tbl , fmt . Errorf ( "error parsing duration: %w" , err ) )
return
2016-02-06 08:36:35 +08:00
}
2020-11-04 05:40:57 +08:00
targetVal := reflect . ValueOf ( target ) . Elem ( )
targetVal . Set ( reflect . ValueOf ( d ) )
2016-02-06 08:36:35 +08:00
}
}
}
2020-11-04 05:40:57 +08:00
}
2016-02-06 08:36:35 +08:00
2020-11-04 05:40:57 +08:00
func ( c * Config ) getFieldBool ( tbl * ast . Table , fieldName string , target * bool ) {
var err error
if node , ok := tbl . Fields [ fieldName ] ; ok {
2016-02-06 08:36:35 +08:00
if kv , ok := node . ( * ast . KeyValue ) ; ok {
2020-11-04 05:40:57 +08:00
switch t := kv . Value . ( type ) {
case * ast . Boolean :
* target , err = t . Boolean ( )
if err != nil {
c . addError ( tbl , fmt . Errorf ( "unknown boolean value type %q, expecting boolean" , kv . Value ) )
return
2016-02-06 08:36:35 +08:00
}
2020-11-04 05:40:57 +08:00
case * ast . String :
* target , err = strconv . ParseBool ( t . Value )
if err != nil {
c . addError ( tbl , fmt . Errorf ( "unknown boolean value type %q, expecting boolean" , kv . Value ) )
return
}
default :
c . addError ( tbl , fmt . Errorf ( "unknown boolean value type %q, expecting boolean" , kv . Value . Source ( ) ) )
return
2016-02-06 08:36:35 +08:00
}
}
}
2020-11-04 05:40:57 +08:00
}
2016-02-06 08:36:35 +08:00
2020-11-04 05:40:57 +08:00
func ( c * Config ) getFieldInt ( tbl * ast . Table , fieldName string , target * int ) {
if node , ok := tbl . Fields [ fieldName ] ; ok {
2018-08-23 10:26:48 +08:00
if kv , ok := node . ( * ast . KeyValue ) ; ok {
2020-11-04 05:40:57 +08:00
if iAst , ok := kv . Value . ( * ast . Integer ) ; ok {
i , err := iAst . Int ( )
if err != nil {
c . addError ( tbl , fmt . Errorf ( "unexpected int type %q, expecting int" , iAst . Value ) )
return
2018-08-23 10:26:48 +08:00
}
2020-11-04 05:40:57 +08:00
* target = int ( i )
2018-08-23 10:26:48 +08:00
}
}
}
2020-11-04 05:40:57 +08:00
}
2018-08-23 10:26:48 +08:00
2020-11-04 05:40:57 +08:00
func ( c * Config ) getFieldInt64 ( tbl * ast . Table , fieldName string , target * int64 ) {
if node , ok := tbl . Fields [ fieldName ] ; ok {
2020-01-22 02:10:02 +08:00
if kv , ok := node . ( * ast . KeyValue ) ; ok {
2020-11-04 05:40:57 +08:00
if iAst , ok := kv . Value . ( * ast . Integer ) ; ok {
i , err := iAst . Int ( )
2020-01-22 02:10:02 +08:00
if err != nil {
2020-11-04 05:40:57 +08:00
c . addError ( tbl , fmt . Errorf ( "unexpected int type %q, expecting int" , iAst . Value ) )
return
2020-01-22 02:10:02 +08:00
}
2020-11-04 05:40:57 +08:00
* target = i
2020-01-22 02:10:02 +08:00
}
}
}
2020-11-04 05:40:57 +08:00
}
2020-01-22 02:10:02 +08:00
2020-11-04 05:40:57 +08:00
func ( c * Config ) getFieldStringSlice ( tbl * ast . Table , fieldName string , target * [ ] string ) {
if node , ok := tbl . Fields [ fieldName ] ; ok {
2017-04-13 01:41:26 +08:00
if kv , ok := node . ( * ast . KeyValue ) ; ok {
if ary , ok := kv . Value . ( * ast . Array ) ; ok {
for _ , elem := range ary . Value {
if str , ok := elem . ( * ast . String ) ; ok {
2020-11-04 05:40:57 +08:00
* target = append ( * target , str . Value )
2017-04-13 01:41:26 +08:00
}
}
2021-02-27 02:58:13 +08:00
} else {
c . addError ( tbl , fmt . Errorf ( "found unexpected format while parsing %q, expecting string array/slice format" , fieldName ) )
return
2017-04-13 01:41:26 +08:00
}
}
}
2020-11-04 05:40:57 +08:00
}
2021-03-04 04:26:09 +08:00
2020-11-04 05:40:57 +08:00
func ( c * Config ) getFieldTagFilter ( tbl * ast . Table , fieldName string , target * [ ] models . TagFilter ) {
if node , ok := tbl . Fields [ fieldName ] ; ok {
2018-01-09 07:11:36 +08:00
if subtbl , ok := node . ( * ast . Table ) ; ok {
for name , val := range subtbl . Fields {
if kv , ok := val . ( * ast . KeyValue ) ; ok {
2020-11-04 05:40:57 +08:00
tagfilter := models . TagFilter { Name : name }
if ary , ok := kv . Value . ( * ast . Array ) ; ok {
for _ , elem := range ary . Value {
if str , ok := elem . ( * ast . String ) ; ok {
tagfilter . Filter = append ( tagfilter . Filter , str . Value )
}
}
2021-02-27 02:58:13 +08:00
} else {
c . addError ( tbl , fmt . Errorf ( "found unexpected format while parsing %q, expecting string array/slice format on each entry" , fieldName ) )
return
2019-06-18 04:34:54 +08:00
}
2020-11-04 05:40:57 +08:00
* target = append ( * target , tagfilter )
2019-06-18 04:34:54 +08:00
}
}
}
}
2016-02-06 08:36:35 +08:00
}
2020-11-04 05:40:57 +08:00
func ( c * Config ) getFieldStringMap ( tbl * ast . Table , fieldName string , target * map [ string ] string ) {
* target = map [ string ] string { }
if node , ok := tbl . Fields [ fieldName ] ; ok {
if subtbl , ok := node . ( * ast . Table ) ; ok {
for name , val := range subtbl . Fields {
if kv , ok := val . ( * ast . KeyValue ) ; ok {
if str , ok := kv . Value . ( * ast . String ) ; ok {
( * target ) [ name ] = str . Value
2019-04-06 05:46:12 +08:00
}
}
}
}
}
2016-02-11 06:50:07 +08:00
}
2020-11-04 05:40:57 +08:00
// keys returns the keys of m as a slice, in unspecified order. The result
// is never nil, even for an empty or nil map.
func keys(m map[string]bool) []string {
	result := make([]string, 0, len(m))
	for key := range m {
		result = append(result, key)
	}
	return result
}
2020-03-14 06:04:23 +08:00
2020-11-04 05:40:57 +08:00
// hasErrs reports whether any parse errors have been accumulated on c.
func (c *Config) hasErrs() bool {
	return len(c.errs) != 0
}
2020-03-14 06:04:23 +08:00
2020-11-04 05:40:57 +08:00
func ( c * Config ) firstErr ( ) error {
if len ( c . errs ) == 0 {
return nil
2020-03-14 06:04:23 +08:00
}
2020-11-04 05:40:57 +08:00
return c . errs [ 0 ]
}
2020-03-14 06:04:23 +08:00
2020-11-04 05:40:57 +08:00
func ( c * Config ) addError ( tbl * ast . Table , err error ) {
c . errs = append ( c . errs , fmt . Errorf ( "line %d:%d: %w" , tbl . Line , tbl . Position , err ) )
2015-11-25 05:22:11 +08:00
}
2020-06-05 22:43:43 +08:00
// unwrappable lets you retrieve the original telegraf.Processor from the
// StreamingProcessor. This is necessary because the toml Unmarshaller won't
// look inside composed types.
type unwrappable interface {
	// Unwrap returns the wrapped telegraf.Processor.
	Unwrap() telegraf.Processor
}