package config

import (
	"bytes"
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"log"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"reflect"
	"regexp"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/compose-spec/compose-go/template"
	"github.com/compose-spec/compose-go/utils"
	"github.com/coreos/go-semver/semver"
	"github.com/influxdata/toml"
	"github.com/influxdata/toml/ast"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/models"
	"github.com/influxdata/telegraf/persister"
	"github.com/influxdata/telegraf/plugins/aggregators"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/outputs"
	"github.com/influxdata/telegraf/plugins/parsers"
	"github.com/influxdata/telegraf/plugins/parsers/csv"
	"github.com/influxdata/telegraf/plugins/processors"
	"github.com/influxdata/telegraf/plugins/secretstores"
	"github.com/influxdata/telegraf/plugins/serializers"
)

// envVarPattern is a regex to determine environment variables in the
// config file for substitution. Those should start with a dollar sign.
// Expression modified from
// https://github.com/compose-spec/compose-go/blob/v1.14.0/template/template.go
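// It accepts, e.g., $VAR and ${VAR} references, the braced operator forms
// such as ${VAR:-default} or ${VAR:?error}, and a literal dollar sign
// escaped as \$.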
const envVarPattern = `\\(?P<escaped>\$)|\$(?i:(?P<named>[_a-z][_a-z0-9]*)|\${(?:(?P<braced>[_a-z][_a-z0-9]*(?::?[-+?](.*))?)}|(?P<invalid>)))`

var (
	httpLoadConfigRetryInterval = 10 * time.Second

	// fetchURLRe is a regex to determine whether the requested file should
	// be fetched from a remote or read from the filesystem.
	fetchURLRe = regexp.MustCompile(`^\w+://`)

	// envVarRe is the compiled regex of envVarPattern
	envVarRe = regexp.MustCompile(envVarPattern)

	// Password specified via command-line
	Password Secret
)

// Config specifies the URL/user/password for the database that telegraf
// will be logging to, as well as all the plugins that the user has
// specified
type Config struct {
	toml              *toml.Config
	errs              []error // config load errors.
	UnusedFields      map[string]bool
	unusedFieldsMutex *sync.Mutex

	Tags               map[string]string
	InputFilters       []string
	OutputFilters      []string
	SecretStoreFilters []string

	SecretStores map[string]telegraf.SecretStore

	Agent       *AgentConfig
	Inputs      []*models.RunningInput
	Outputs     []*models.RunningOutput
	Aggregators []*models.RunningAggregator
	// Processors have a slice wrapper type because they need to be sorted
	Processors        models.RunningProcessors
	AggProcessors     models.RunningProcessors
	fileProcessors    OrderedPlugins
	fileAggProcessors OrderedPlugins

	// Parsers are created by their inputs during gather. Config doesn't keep track of them
	// like the other plugins because they need to be garbage collected (See issue #11809)

	Deprecations map[string][]int64
	version      *semver.Version

	Persister *persister.Persister

	NumberSecrets uint64
}

// OrderedPlugin and OrderedPlugins are used to keep the order in which
// the plugins appear in a file.
type OrderedPlugin struct {
	Line   int
	plugin any
}

type OrderedPlugins []*OrderedPlugin

func (op OrderedPlugins) Len() int           { return len(op) }
func (op OrderedPlugins) Swap(i, j int)      { op[i], op[j] = op[j], op[i] }
func (op OrderedPlugins) Less(i, j int) bool { return op[i].Line < op[j].Line }

// NewConfig creates a new struct to hold the Telegraf config.
// For historical reasons, it holds the actual instances of the running plugins
// once the configuration is parsed.
func NewConfig() *Config {
	c := &Config{
		UnusedFields:      map[string]bool{},
		unusedFieldsMutex: &sync.Mutex{},

		// Agent defaults:
		Agent: &AgentConfig{
			Interval:                   Duration(10 * time.Second),
			RoundInterval:              true,
			FlushInterval:              Duration(10 * time.Second),
			LogTarget:                  "file",
			LogfileRotationMaxArchives: 5,
		},

		Tags:               make(map[string]string),
		Inputs:             make([]*models.RunningInput, 0),
		Outputs:            make([]*models.RunningOutput, 0),
		Processors:         make([]*models.RunningProcessor, 0),
		AggProcessors:      make([]*models.RunningProcessor, 0),
		SecretStores:       make(map[string]telegraf.SecretStore),
		fileProcessors:     make([]*OrderedPlugin, 0),
		fileAggProcessors:  make([]*OrderedPlugin, 0),
		InputFilters:       make([]string, 0),
		OutputFilters:      make([]string, 0),
		SecretStoreFilters: make([]string, 0),
		Deprecations:       make(map[string][]int64),
	}

	// Handle unknown version
	version := internal.Version
	if version == "" || version == "unknown" {
		version = "0.0.0-unknown"
	}
	c.version = semver.New(version)

	tomlCfg := &toml.Config{
		NormFieldName: toml.DefaultConfig.NormFieldName,
		FieldToKey:    toml.DefaultConfig.FieldToKey,
		MissingField:  c.missingTomlField,
	}
	c.toml = tomlCfg

	return c
}

// AgentConfig defines configuration that will be used by the Telegraf agent
type AgentConfig struct {
	// Interval at which to gather information
	Interval Duration

	// RoundInterval rounds collection interval to 'interval'.
	// ie, if Interval=10s then always collect on :00, :10, :20, etc.
	RoundInterval bool

	// Collected metrics are rounded to the precision specified. Precision is
	// specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s).
	// Valid time units are "ns", "us" (or "µs"), "ms", "s".
	//
	// By default, or when set to "0s", precision will be set to the same
	// timestamp order as the collection interval, with the maximum being 1s:
	// ie, when interval = "10s", precision will be "1s"
	//     when interval = "250ms", precision will be "1ms"
	//
	// Precision will NOT be used for service inputs. It is up to each individual
	// service input to set the timestamp at the appropriate precision.
	Precision Duration

	// CollectionJitter is used to jitter the collection by a random amount.
	// Each plugin will sleep for a random time within jitter before collecting.
	// This can be used to avoid many plugins querying things like sysfs at the
	// same time, which can have a measurable effect on the system.
	CollectionJitter Duration

	// CollectionOffset is used to shift the collection by the given amount.
	// This can be used to avoid many plugins querying constrained devices
	// at the same time by manually scheduling them in time.
	CollectionOffset Duration

	// FlushInterval is the Interval at which to flush data
	FlushInterval Duration

	// FlushJitter jitters the flush interval by a random amount.
	// This is primarily to avoid large write spikes for users running a large
	// number of telegraf instances.
	// ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
	FlushJitter Duration

	// MetricBatchSize is the maximum number of metrics that is written to an
	// output plugin in one call.
	MetricBatchSize int

	// MetricBufferLimit is the max number of metrics that each output plugin
	// will cache. The buffer is cleared when a successful write occurs. When
	// full, the oldest metrics will be overwritten. This number should be a
	// multiple of MetricBatchSize. Due to the current implementation, it
	// cannot be less than 2 times MetricBatchSize.
	MetricBufferLimit int

	// FlushBufferWhenFull tells Telegraf to flush the metric buffer whenever
	// it fills up, regardless of FlushInterval. Setting this option to true
	// does _not_ deactivate FlushInterval.
	FlushBufferWhenFull bool `toml:"flush_buffer_when_full" deprecated:"0.13.0;1.30.0;option is ignored"`

	// TODO(cam): Remove the UTC parameter, it is no longer
	// valid for the agent config. Leaving it here for now for backwards-
	// compatibility
	UTC bool `toml:"utc" deprecated:"1.0.0;option is ignored"`

	// Debug is the option for running in debug mode
	Debug bool `toml:"debug"`

	// Quiet is the option for running in quiet mode
	Quiet bool `toml:"quiet"`

	// Log target controls the destination for logs and can be one of "file",
	// "stderr" or, on Windows, "eventlog". When set to "file", the output file
	// is determined by the "logfile" setting.
	LogTarget string `toml:"logtarget"`

	// Name of the file to be logged to when using the "file" logtarget. If set to
	// the empty string then logs are written to stderr.
	Logfile string `toml:"logfile"`

	// The file will be rotated after the time interval specified. When set
	// to 0 no time based rotation is performed.
	LogfileRotationInterval Duration `toml:"logfile_rotation_interval"`

	// The logfile will be rotated when it becomes larger than the specified
	// size. When set to 0 no size based rotation is performed.
	LogfileRotationMaxSize Size `toml:"logfile_rotation_max_size"`

	// Maximum number of rotated archives to keep, any older logs are deleted.
	// If set to -1, no archives are removed.
	LogfileRotationMaxArchives int `toml:"logfile_rotation_max_archives"`

	// Pick a timezone to use when logging or type 'local' for local time.
	LogWithTimezone string `toml:"log_with_timezone"`

	Hostname     string
	OmitHostname bool

	// Method for translating SNMP objects. 'netsnmp' to call external programs,
	// 'gosmi' to use the built-in library.
	SnmpTranslator string `toml:"snmp_translator"`

	// Name of the file to load the state of plugins from and store the state to.
	// If uncommented and not empty, this file will be used to save the state of
	// stateful plugins on termination of Telegraf. If the file exists on start,
	// the state in the file will be restored for the plugins.
	Statefile string `toml:"statefile"`

	// Flag to always keep tags explicitly defined in the plugin itself and
	// ensure those tags always pass filtering.
	AlwaysIncludeLocalTags bool `toml:"always_include_local_tags"`
}

// InputNames returns a list of strings of the configured inputs.
func (c *Config) InputNames() []string {
	name := make([]string, 0, len(c.Inputs))
	for _, input := range c.Inputs {
		name = append(name, input.Config.Name)
	}
	return PluginNameCounts(name)
}

// AggregatorNames returns a list of strings of the configured aggregators.
func (c *Config) AggregatorNames() []string {
	name := make([]string, 0, len(c.Aggregators))
	for _, aggregator := range c.Aggregators {
		name = append(name, aggregator.Config.Name)
	}
	return PluginNameCounts(name)
}

// ProcessorNames returns a list of strings of the configured processors.
func (c *Config) ProcessorNames() []string {
	name := make([]string, 0, len(c.Processors))
	for _, processor := range c.Processors {
		name = append(name, processor.Config.Name)
	}
	return PluginNameCounts(name)
}

// OutputNames returns a list of strings of the configured outputs.
func (c *Config) OutputNames() []string {
	name := make([]string, 0, len(c.Outputs))
	for _, output := range c.Outputs {
		name = append(name, output.Config.Name)
	}
	return PluginNameCounts(name)
}

// SecretstoreNames returns a list of strings of the configured secret-stores.
func (c *Config) SecretstoreNames() []string {
	names := make([]string, 0, len(c.SecretStores))
	for name := range c.SecretStores {
		names = append(names, name)
	}
	return PluginNameCounts(names)
}

// PluginNameCounts returns a list of sorted plugin names and their count
func PluginNameCounts(plugins []string) []string {
	names := make(map[string]int)
	for _, plugin := range plugins {
		names[plugin]++
	}

	var namecount []string
	for name, count := range names {
		if count == 1 {
			namecount = append(namecount, name)
		} else {
			namecount = append(namecount, fmt.Sprintf("%s (%dx)", name, count))
		}
	}

	sort.Strings(namecount)
	return namecount
}

// ListTags returns a string of tags specified in the config,
// line-protocol style
func (c *Config) ListTags() string {
	tags := make([]string, 0, len(c.Tags))
	for k, v := range c.Tags {
		tags = append(tags, fmt.Sprintf("%s=%s", k, v))
	}

	sort.Strings(tags)

	return strings.Join(tags, " ")
}
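
// sliceContains reports whether name is contained in list.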
func sliceContains(name string, list []string) bool {
	for _, b := range list {
		if b == name {
			return true
		}
	}
	return false
}

// WalkDirectory collects all toml files that need to be loaded
func WalkDirectory(path string) ([]string, error) {
	var files []string
	walkfn := func(thispath string, info os.FileInfo, _ error) error {
		if info == nil {
			log.Printf("W! Telegraf is not permitted to read %s", thispath)
			return nil
		}

		if info.IsDir() {
			if strings.HasPrefix(info.Name(), "..") {
				// skip Kubernetes mounts, preventing loading the same config twice
				return filepath.SkipDir
			}

			return nil
		}
		name := info.Name()
		if len(name) < 6 || name[len(name)-5:] != ".conf" {
			return nil
		}
		files = append(files, thispath)
		return nil
	}
	return files, filepath.Walk(path, walkfn)
}

// Try to find a default config file at these locations (in order):
// 1. $TELEGRAF_CONFIG_PATH
// 2. $HOME/.telegraf/telegraf.conf
// 3. /etc/telegraf/telegraf.conf and /etc/telegraf/telegraf.d/*.conf
func GetDefaultConfigPath() ([]string, error) {
	envfile := os.Getenv("TELEGRAF_CONFIG_PATH")
	homefile := os.ExpandEnv("${HOME}/.telegraf/telegraf.conf")
	etcfile := "/etc/telegraf/telegraf.conf"
	etcfolder := "/etc/telegraf/telegraf.d"

	if runtime.GOOS == "windows" {
		programFiles := os.Getenv("ProgramFiles")
		if programFiles == "" { // Should never happen
			programFiles = `C:\Program Files`
		}
		etcfile = programFiles + `\Telegraf\telegraf.conf`
		etcfolder = programFiles + `\Telegraf\telegraf.d\`
	}

	for _, path := range []string{envfile, homefile} {
		if isURL(path) {
			return []string{path}, nil
		}
		if _, err := os.Stat(path); err == nil {
			return []string{path}, nil
		}
	}

	// At this point we need to check if the files under /etc/telegraf are
	// populated and return them all.
	confFiles := []string{}
	if _, err := os.Stat(etcfile); err == nil {
		confFiles = append(confFiles, etcfile)
	}
	if _, err := os.Stat(etcfolder); err == nil {
		files, err := WalkDirectory(etcfolder)
		if err != nil {
			log.Printf("W! unable to walk %q: %s", etcfolder, err)
		}
		confFiles = append(confFiles, files...)
	}
	if len(confFiles) > 0 {
		return confFiles, nil
	}

	// if we got here, we didn't find a file in a default location
	return nil, fmt.Errorf("no config file specified, and could not find one"+
		" in $TELEGRAF_CONFIG_PATH, %s, %s, or %s/*.conf", homefile, etcfile, etcfolder)
}

// isURL checks whether the given string is a valid URL
func isURL(str string) bool {
	u, err := url.Parse(str)
	return err == nil && u.Scheme != "" && u.Host != ""
}

// LoadConfig loads the given config file (or the default ones if the path
// is empty) and applies it to c
func (c *Config) LoadConfig(path string) error {
	var err error
	paths := []string{}

	if path == "" {
		if paths, err = GetDefaultConfigPath(); err != nil {
			return err
		}
	} else {
		paths = append(paths, path)
	}

	for _, path := range paths {
		if !c.Agent.Quiet {
			log.Printf("I! Loading config: %s", path)
		}

		data, _, err := LoadConfigFile(path)
		if err != nil {
			return fmt.Errorf("error loading config file %s: %w", path, err)
		}

		if err = c.LoadConfigData(data); err != nil {
			return fmt.Errorf("error loading config file %s: %w", path, err)
		}
	}

	return nil
}
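
// LoadAll loads the given config files in the order provided, then sorts
// the processors and links all referenced secrets to their secret-stores.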
func (c *Config) LoadAll(configFiles ...string) error {
	for _, fConfig := range configFiles {
		if err := c.LoadConfig(fConfig); err != nil {
			return err
		}
	}

	// Sort the processors according to their `order` setting while
	// using a stable sort to keep the file loading / file position order.
	sort.Stable(c.Processors)
	sort.Stable(c.AggProcessors)

	// Set snmp agent translator default
	if c.Agent.SnmpTranslator == "" {
		c.Agent.SnmpTranslator = "netsnmp"
	}

	// Check if there is enough lockable memory for the secret
	c.NumberSecrets = uint64(secretCount.Load())

	// Let's link all secrets to their secret-stores
	return c.LinkSecrets()
}

// LoadConfigData loads TOML-formatted config data
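//
// A minimal accepted document looks like this (illustrative example only):
//
//	[global_tags]
//	  dc = "us-east-1"
//
//	[agent]
//	  interval = "10s"
//
//	[[inputs.cpu]]
//
//	[[outputs.file]]
//	  files = ["stdout"]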
func (c *Config) LoadConfigData(data []byte) error {
	tbl, err := parseConfig(data)
	if err != nil {
		return fmt.Errorf("error parsing data: %w", err)
	}

	// Parse tags tables first:
	for _, tableName := range []string{"tags", "global_tags"} {
		if val, ok := tbl.Fields[tableName]; ok {
			subTable, ok := val.(*ast.Table)
			if !ok {
				return fmt.Errorf("invalid configuration, bad table name %q", tableName)
			}
			if err = c.toml.UnmarshalTable(subTable, c.Tags); err != nil {
				return fmt.Errorf("error parsing table name %q: %w", tableName, err)
			}
		}
	}

	// Parse agent table:
	if val, ok := tbl.Fields["agent"]; ok {
		subTable, ok := val.(*ast.Table)
		if !ok {
			return fmt.Errorf("invalid configuration, error parsing agent table")
		}
		if err = c.toml.UnmarshalTable(subTable, c.Agent); err != nil {
			return fmt.Errorf("error parsing [agent]: %w", err)
		}
	}

	if !c.Agent.OmitHostname {
		if c.Agent.Hostname == "" {
			hostname, err := os.Hostname()
			if err != nil {
				return err
			}

			c.Agent.Hostname = hostname
		}

		c.Tags["host"] = c.Agent.Hostname
	}

	// Warn when explicitly setting the old snmp translator
	if c.Agent.SnmpTranslator == "netsnmp" {
		models.PrintOptionValueDeprecationNotice(telegraf.Warn, "agent", "snmp_translator", "netsnmp", telegraf.DeprecationInfo{
			Since:     "1.25.0",
			RemovalIn: "2.0.0",
			Notice:    "Use 'gosmi' instead",
		})
	}

	// Setup the persister if requested
	if c.Agent.Statefile != "" {
		c.Persister = &persister.Persister{
			Filename: c.Agent.Statefile,
		}
	}

	if len(c.UnusedFields) > 0 {
		return fmt.Errorf("line %d: configuration specified the fields %q, but they weren't used", tbl.Line, keys(c.UnusedFields))
	}

	// Initialize the file-sorting slices
	c.fileProcessors = make(OrderedPlugins, 0)
	c.fileAggProcessors = make(OrderedPlugins, 0)

	// Parse all the rest of the plugins:
	for name, val := range tbl.Fields {
		subTable, ok := val.(*ast.Table)
		if !ok {
			return fmt.Errorf("invalid configuration, error parsing field %q as table", name)
		}

		switch name {
		case "agent", "global_tags", "tags":
		case "outputs":
			for pluginName, pluginVal := range subTable.Fields {
				switch pluginSubTable := pluginVal.(type) {
				// legacy [outputs.influxdb] support
				case *ast.Table:
					if err = c.addOutput(pluginName, pluginSubTable); err != nil {
						return fmt.Errorf("error parsing %s, %w", pluginName, err)
					}
				case []*ast.Table:
					for _, t := range pluginSubTable {
						if err = c.addOutput(pluginName, t); err != nil {
							return fmt.Errorf("error parsing %s array, %w", pluginName, err)
						}
					}
				default:
					return fmt.Errorf("unsupported config format: %s", pluginName)
				}
				if len(c.UnusedFields) > 0 {
					return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used",
						name, pluginName, subTable.Line, keys(c.UnusedFields))
				}
			}
		case "inputs", "plugins":
			for pluginName, pluginVal := range subTable.Fields {
				switch pluginSubTable := pluginVal.(type) {
				// legacy [inputs.cpu] support
				case *ast.Table:
					if err = c.addInput(pluginName, pluginSubTable); err != nil {
						return fmt.Errorf("error parsing %s, %w", pluginName, err)
					}
				case []*ast.Table:
					for _, t := range pluginSubTable {
						if err = c.addInput(pluginName, t); err != nil {
							return fmt.Errorf("error parsing %s, %w", pluginName, err)
						}
					}
				default:
					return fmt.Errorf("unsupported config format: %s", pluginName)
				}
				if len(c.UnusedFields) > 0 {
					return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used",
						name, pluginName, subTable.Line, keys(c.UnusedFields))
				}
			}
		case "processors":
			for pluginName, pluginVal := range subTable.Fields {
				switch pluginSubTable := pluginVal.(type) {
				case []*ast.Table:
					for _, t := range pluginSubTable {
						if err = c.addProcessor(pluginName, t); err != nil {
							return fmt.Errorf("error parsing %s, %w", pluginName, err)
						}
					}
				default:
					return fmt.Errorf("unsupported config format: %s", pluginName)
				}
				if len(c.UnusedFields) > 0 {
					return fmt.Errorf(
						"plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used",
						name,
						pluginName,
						subTable.Line,
						keys(c.UnusedFields),
					)
				}
			}
		case "aggregators":
			for pluginName, pluginVal := range subTable.Fields {
				switch pluginSubTable := pluginVal.(type) {
				case []*ast.Table:
					for _, t := range pluginSubTable {
						if err = c.addAggregator(pluginName, t); err != nil {
							return fmt.Errorf("error parsing %s, %w", pluginName, err)
						}
					}
				default:
					return fmt.Errorf("unsupported config format: %s", pluginName)
				}
				if len(c.UnusedFields) > 0 {
					return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used",
						name, pluginName, subTable.Line, keys(c.UnusedFields))
				}
			}
		case "secretstores":
			for pluginName, pluginVal := range subTable.Fields {
				switch pluginSubTable := pluginVal.(type) {
				case []*ast.Table:
					for _, t := range pluginSubTable {
						if err = c.addSecretStore(pluginName, t); err != nil {
							return fmt.Errorf("error parsing %s, %w", pluginName, err)
						}
					}
				default:
					return fmt.Errorf("unsupported config format: %s", pluginName)
				}
				if len(c.UnusedFields) > 0 {
					msg := "plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used"
					return fmt.Errorf(msg, name, pluginName, subTable.Line, keys(c.UnusedFields))
				}
			}

		// Assume it's an input for legacy config file support if no other
		// identifiers are present
		default:
			if err = c.addInput(name, subTable); err != nil {
				return fmt.Errorf("error parsing %s, %w", name, err)
			}
		}
	}

	// Sort the processors according to the order they appeared in this file.
	// In a later stage, we sort them using the `order` option.
	sort.Sort(c.fileProcessors)
	for _, op := range c.fileProcessors {
		c.Processors = append(c.Processors, op.plugin.(*models.RunningProcessor))
	}

	sort.Sort(c.fileAggProcessors)
	for _, op := range c.fileAggProcessors {
		c.AggProcessors = append(c.AggProcessors, op.plugin.(*models.RunningProcessor))
	}

	return nil
}

// trimBOM trims the Byte-Order-Marks from the beginning of the file.
// This is for Windows compatibility only.
// see https://github.com/influxdata/telegraf/issues/1378
func trimBOM(f []byte) []byte {
	return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf"))
}

// LoadConfigFile loads the content of a configuration file and returns it
// together with a flag denoting if the file is from a remote location such
// as a web server.
func LoadConfigFile(config string) ([]byte, bool, error) {
	if fetchURLRe.MatchString(config) {
		u, err := url.Parse(config)
		if err != nil {
			return nil, true, err
		}

		switch u.Scheme {
		case "https", "http":
			data, err := fetchConfig(u)
			return data, true, err
		default:
			return nil, true, fmt.Errorf("scheme %q not supported", u.Scheme)
		}
	}

	// If it isn't an http(s) scheme, try it as a file
	buffer, err := os.ReadFile(config)
	if err != nil {
		return nil, false, err
	}

	mimeType := http.DetectContentType(buffer)
	if !strings.Contains(mimeType, "text/plain") {
		return nil, false, fmt.Errorf("provided config is not a TOML file: %s", config)
	}

	return buffer, false, nil
}
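
// fetchConfig downloads a configuration file via HTTP(S), sending the
// INFLUX_TOKEN environment variable as authorization if present and
// retrying failed requests a fixed number of times.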
func fetchConfig(u *url.URL) ([]byte, error) {
	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		return nil, err
	}

	if v, exists := os.LookupEnv("INFLUX_TOKEN"); exists {
		req.Header.Add("Authorization", "Token "+v)
	}
	req.Header.Add("Accept", "application/toml")
	req.Header.Set("User-Agent", internal.ProductToken())

	retries := 3
	for i := 0; i <= retries; i++ {
		body, err, retry := func() ([]byte, error, bool) {
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				return nil, fmt.Errorf("retry %d of %d failed connecting to HTTP config server: %w", i, retries, err), false
			}
			defer resp.Body.Close()
			if resp.StatusCode != http.StatusOK {
				if i < retries {
					log.Printf("Error getting HTTP config. Retry %d of %d in %s. Status=%d", i, retries, httpLoadConfigRetryInterval, resp.StatusCode)
					return nil, nil, true
				}
				return nil, fmt.Errorf("retry %d of %d failed to retrieve remote config: %s", i, retries, resp.Status), false
			}
			body, err := io.ReadAll(resp.Body)
			return body, err, false
		}()

		if err != nil {
			return nil, err
		}

		if retry {
			time.Sleep(httpLoadConfigRetryInterval)
			continue
		}

		return body, err
	}

	return nil, nil
}

// parseConfig loads a TOML configuration from a provided path and
// returns the AST produced from the TOML parser. When loading the file, it
// will find environment variables and replace them.
func parseConfig(contents []byte) (*ast.Table, error) {
	contents = trimBOM(contents)
	var err error
	contents, err = removeComments(contents)
	if err != nil {
		return nil, err
	}
	outputBytes, err := substituteEnvironment(contents)
	if err != nil {
		return nil, err
	}
	return toml.Parse(outputBytes)
}
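
// removeComments strips '#' comments from the TOML document while keeping
// '#' characters that appear inside quoted strings.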
func removeComments(contents []byte) ([]byte, error) {
	tomlReader := bytes.NewReader(contents)

	// Initialize variables for tracking state
	var inQuote, inComment bool
	var quoteChar, prevChar byte

	// Initialize buffer for modified TOML data
	var output bytes.Buffer

	buf := make([]byte, 1)
	// Iterate over each character in the file
	for {
		_, err := tomlReader.Read(buf)
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return nil, err
		}
		char := buf[0]

		if inComment {
			// If we're currently in a comment, check if this character ends the comment
			if char == '\n' {
				// End of line, comment is finished
				inComment = false
				_, _ = output.WriteRune('\n')
			}
		} else if inQuote {
			// If we're currently in a quote, check if this character ends the quote
			if char == quoteChar && prevChar != '\\' {
				// End of quote, we're no longer in a quote
				inQuote = false
			}
			output.WriteByte(char)
		} else {
			// Not in a comment or a quote
			if char == '"' || char == '\'' {
				// Start of quote
				inQuote = true
				quoteChar = char
				output.WriteByte(char)
			} else if char == '#' {
				// Start of comment
				inComment = true
			} else {
				// Not a comment or a quote, just output the character
				output.WriteByte(char)
			}
		}
		// Track the previous character on every iteration so escaped quotes
		// inside strings are not mistaken for closing quotes.
		prevChar = char
	}
	return output.Bytes(), nil
}
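
// substituteEnvironment replaces environment-variable references in the
// configuration with values from the process environment using the
// compose-go template engine and envVarRe.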
func substituteEnvironment(contents []byte) ([]byte, error) {
	envMap := utils.GetAsEqualsMap(os.Environ())
	retVal, err := template.SubstituteWith(string(contents), func(k string) (string, bool) {
		if v, ok := envMap[k]; ok {
			return v, ok
		}
		return "", false
	}, envVarRe)
	var invalidTmplError *template.InvalidTemplateError
	if err != nil && !errors.As(err, &invalidTmplError) {
		return nil, err
	}
	return []byte(retVal), nil
}
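
// addAggregator creates an aggregator instance from its config table and
// registers it as a running aggregator.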
func (c *Config) addAggregator(name string, table *ast.Table) error {
	creator, ok := aggregators.Aggregators[name]
	if !ok {
		// Handle removed, deprecated plugins
		if di, deprecated := aggregators.Deprecations[name]; deprecated {
			printHistoricPluginDeprecationNotice("aggregators", name, di)
			return fmt.Errorf("plugin deprecated")
		}
		return fmt.Errorf("undefined but requested aggregator: %s", name)
	}
	aggregator := creator()

	conf, err := c.buildAggregator(name, table)
	if err != nil {
		return err
	}

	if err := c.toml.UnmarshalTable(table, aggregator); err != nil {
		return err
	}

	if err := c.printUserDeprecation("aggregators", name, aggregator); err != nil {
		return err
	}

	c.Aggregators = append(c.Aggregators, models.NewRunningAggregator(aggregator, conf))
	return nil
}
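
// addSecretStore creates a secret-store instance from its config table and
// registers it under its unique ID.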
func (c *Config) addSecretStore(name string, table *ast.Table) error {
	if len(c.SecretStoreFilters) > 0 && !sliceContains(name, c.SecretStoreFilters) {
		return nil
	}

	var storeid string
	c.getFieldString(table, "id", &storeid)
	if storeid == "" {
		return fmt.Errorf("%q secret-store without ID", name)
	}
	if !secretStorePattern.MatchString(storeid) {
		return fmt.Errorf("invalid secret-store ID %q, must only contain letters, numbers or underscore", storeid)
	}

	creator, ok := secretstores.SecretStores[name]
	if !ok {
		// Handle removed, deprecated plugins
		if di, deprecated := secretstores.Deprecations[name]; deprecated {
			printHistoricPluginDeprecationNotice("secretstores", name, di)
			return errors.New("plugin deprecated")
		}
		return fmt.Errorf("undefined but requested secretstores: %s", name)
	}
	store := creator(storeid)

	if err := c.toml.UnmarshalTable(table, store); err != nil {
		return err
	}

	if err := c.printUserDeprecation("secretstores", name, store); err != nil {
		return err
	}

	if err := store.Init(); err != nil {
		return fmt.Errorf("error initializing secret-store %q: %w", storeid, err)
	}

	if _, found := c.SecretStores[storeid]; found {
		return fmt.Errorf("duplicate ID %q for secretstore %q", storeid, name)
	}
	c.SecretStores[storeid] = store
	return nil
}
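
// LinkSecrets connects all still-unlinked secrets to the resolvers of
// their referenced secret-stores.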
func (c *Config) LinkSecrets() error {
	for _, s := range unlinkedSecrets {
		resolvers := make(map[string]telegraf.ResolveFunc)
		for _, ref := range s.GetUnlinked() {
			// Split the reference and lookup the resolver
			storeid, key := splitLink(ref)
			store, found := c.SecretStores[storeid]
			if !found {
				return fmt.Errorf("unknown secret-store for %q", ref)
			}
			resolver, err := store.GetResolver(key)
			if err != nil {
				return fmt.Errorf("retrieving resolver for %q failed: %w", ref, err)
			}
			resolvers[ref] = resolver
		}
		// Inject the resolver list into the secret
		if err := s.Link(resolvers); err != nil {
			return fmt.Errorf("retrieving resolver failed: %w", err)
		}
	}
	return nil
}
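
// probeParser reports whether a parser exists for the requested data
// format; it also unmarshals the table once to surface misspelled options.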
func (c *Config) probeParser(parentcategory string, parentname string, table *ast.Table) bool {
	var dataformat string
	c.getFieldString(table, "data_format", &dataformat)
	if dataformat == "" {
		dataformat = setDefaultParser(parentcategory, parentname)
	}

	creator, ok := parsers.Parsers[dataformat]
	if !ok {
		return false
	}

	// Try to parse the options to detect if any of them is misspelled
	// We don't actually use the parser, so no need to check the error.
	parser := creator("")
	_ = c.toml.UnmarshalTable(table, parser)

	return true
}
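
// addParser builds the parser requested via 'data_format' (falling back to
// the parent plugin's default), configures it from the table and returns
// it as an initialized running parser.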
func (c *Config) addParser(parentcategory, parentname string, table *ast.Table) (*models.RunningParser, error) {
	var dataformat string
	c.getFieldString(table, "data_format", &dataformat)
	if dataformat == "" {
		dataformat = setDefaultParser(parentcategory, parentname)
	}

	var influxParserType string
	c.getFieldString(table, "influx_parser_type", &influxParserType)
	if dataformat == "influx" && influxParserType == "upstream" {
		dataformat = "influx_upstream"
	}

	creator, ok := parsers.Parsers[dataformat]
	if !ok {
		return nil, fmt.Errorf("undefined but requested parser: %s", dataformat)
	}
	parser := creator(parentname)

	// Handle reset-mode of CSV parsers to stay backward compatible (see issue #12022)
	if dataformat == "csv" && parentcategory == "inputs" {
		if parentname == "exec" {
			csvParser := parser.(*csv.Parser)
			csvParser.ResetMode = "always"
		}
	}

	if err := c.toml.UnmarshalTable(table, parser); err != nil {
		return nil, err
	}

	conf := &models.ParserConfig{
		Parent:     parentname,
		DataFormat: dataformat,
	}
	running := models.NewRunningParser(parser, conf)
	err := running.Init()
	return running, err
}
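
// addSerializer builds the serializer requested via 'data_format'
// (defaulting to "influx"), configures it from the table and returns it as
// an initialized running serializer.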
func (c *Config) addSerializer(parentname string, table *ast.Table) (*models.RunningSerializer, error) {
	var dataformat string
	c.getFieldString(table, "data_format", &dataformat)
	if dataformat == "" {
		dataformat = "influx"
	}

	creator, ok := serializers.Serializers[dataformat]
	if !ok {
		return nil, fmt.Errorf("undefined but requested serializer: %s", dataformat)
	}
	serializer := creator()

	if err := c.toml.UnmarshalTable(table, serializer); err != nil {
		return nil, err
	}

	conf := &models.SerializerConfig{
		Parent:     parentname,
		DataFormat: dataformat,
	}
	running := models.NewRunningSerializer(serializer, conf)
	err := running.Init()
	return running, err
}
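
// addProcessor creates two instances of the processor, one running before
// and one running after the aggregators, and queues both in file order.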
func (c *Config) addProcessor(name string, table *ast.Table) error {
	creator, ok := processors.Processors[name]
	if !ok {
		// Handle removed, deprecated plugins
		if di, deprecated := processors.Deprecations[name]; deprecated {
			printHistoricPluginDeprecationNotice("processors", name, di)
			return fmt.Errorf("plugin deprecated")
		}
		return fmt.Errorf("undefined but requested processor: %s", name)
	}

	// For processors with parsers we need to compute the set of
	// options that is covered by neither the parser nor the processor.
	// We achieve this by keeping a local book of missing entries
	// that counts the number of misses. In case we have a parser
	// for the processor, both need to miss the entry. We count the
	// missing entries at the end.
	missCount := make(map[string]int)
	missCountThreshold := 0
	c.setLocalMissingTomlFieldTracker(missCount)
	defer c.resetMissingTomlFieldTracker()

	// Setup the processor running before the aggregators
	processorBeforeConfig, err := c.buildProcessor("processors", name, table)
	if err != nil {
		return err
	}
	processorBefore, count, err := c.setupProcessor(processorBeforeConfig.Name, creator, table)
	if err != nil {
		return err
	}
	rf := models.NewRunningProcessor(processorBefore, processorBeforeConfig)
	c.fileProcessors = append(c.fileProcessors, &OrderedPlugin{table.Line, rf})

	// Setup another (new) processor instance running after the aggregator
	processorAfterConfig, err := c.buildProcessor("aggprocessors", name, table)
	if err != nil {
		return err
	}
	processorAfter, _, err := c.setupProcessor(processorAfterConfig.Name, creator, table)
	if err != nil {
		return err
	}
	rf = models.NewRunningProcessor(processorAfter, processorAfterConfig)
	c.fileAggProcessors = append(c.fileAggProcessors, &OrderedPlugin{table.Line, rf})

	// Check the number of misses against the threshold. We need to double
	// the count as the processor setup is executed twice.
	missCountThreshold = 2 * count
	for key, count := range missCount {
		if count <= missCountThreshold {
			continue
		}
		if err := c.missingTomlField(nil, key); err != nil {
			return err
		}
	}

	return nil
}
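
// setupProcessor instantiates a streaming processor, attaches any parser
// or serializer it supports and unmarshals its configuration. It returns
// the processor and the number of data-format options probed.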
func (c *Config) setupProcessor(name string, creator processors.StreamingCreator, table *ast.Table) (telegraf.StreamingProcessor, int, error) {
	var optionTestCount int

	streamingProcessor := creator()

	var processor interface{}
	if p, ok := streamingProcessor.(processors.HasUnwrap); ok {
		processor = p.Unwrap()
	} else {
		processor = streamingProcessor
	}

	// If the (underlying) processor has a SetParser or SetParserFunc function,
	// it can accept arbitrary data-formats, so build the requested parser and
	// set it.
	if t, ok := processor.(telegraf.ParserPlugin); ok {
		parser, err := c.addParser("processors", name, table)
		if err != nil {
			return nil, 0, fmt.Errorf("adding parser failed: %w", err)
		}
		t.SetParser(parser)
		optionTestCount++
	}

	if t, ok := processor.(telegraf.ParserFuncPlugin); ok {
		if !c.probeParser("processors", name, table) {
			return nil, 0, errors.New("parser not found")
		}
		t.SetParserFunc(func() (telegraf.Parser, error) {
			return c.addParser("processors", name, table)
		})
		optionTestCount++
	}

	// If the (underlying) processor has a SetSerializer function it can accept
	// arbitrary data-formats, so build the requested serializer and set it.
	if t, ok := processor.(telegraf.SerializerPlugin); ok {
		serializer, err := c.addSerializer(name, table)
		if err != nil {
			return nil, 0, fmt.Errorf("adding serializer failed: %w", err)
		}
		t.SetSerializer(serializer)
		optionTestCount++
	}

	if err := c.toml.UnmarshalTable(table, processor); err != nil {
		return nil, 0, fmt.Errorf("unmarshalling failed: %w", err)
	}

	err := c.printUserDeprecation("processors", name, processor)
	return streamingProcessor, optionTestCount, err
}
|
|
|
|
|
|
2015-11-25 05:22:11 +08:00
|
|
|
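// Illustrative sketch for the unwrap step in setupProcessor above: classic
// (non-streaming) processors are registered behind a streaming adapter, and
// an adapter implementing HasUnwrap of roughly this (assumed, hypothetical)
// shape exposes the inner plugin so UnmarshalTable can fill the user-facing
// config fields of the wrapped value:
//
//   type streamingWrapper struct { // hypothetical adapter type
//       wrapped telegraf.Processor
//   }
//
//   func (w *streamingWrapper) Unwrap() telegraf.Processor { return w.wrapped }
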
func (c *Config) addOutput(name string, table *ast.Table) error {
	if len(c.OutputFilters) > 0 && !sliceContains(name, c.OutputFilters) {
		return nil
	}

	// For outputs with serializers we need to compute the set of options
	// covered by neither the serializer nor the output. We achieve this by
	// keeping a local book of missing entries that counts the number of
	// misses. In case the output has a serializer, both need to miss the
	// entry. We count the missing entries at the end.
	missThreshold := 0
	missCount := make(map[string]int)
	c.setLocalMissingTomlFieldTracker(missCount)
	defer c.resetMissingTomlFieldTracker()

	creator, ok := outputs.Outputs[name]
	if !ok {
		// Handle removed, deprecated plugins
		if di, deprecated := outputs.Deprecations[name]; deprecated {
			printHistoricPluginDeprecationNotice("outputs", name, di)
			return errors.New("plugin deprecated")
		}
		return fmt.Errorf("undefined but requested output: %s", name)
	}
	output := creator()

	// If the output has a SetSerializer function, then this means it can write
	// arbitrary types of output, so build the serializer and set it.
	if t, ok := output.(telegraf.SerializerPlugin); ok {
		missThreshold = 1
		serializer, err := c.addSerializer(name, table)
		if err != nil {
			return err
		}
		t.SetSerializer(serializer)
	} else if t, ok := output.(serializers.SerializerOutput); ok {
		// Keep the old interface for backward compatibility
		// DEPRECATED: Please switch your plugin to telegraf.Serializers
		missThreshold = 1
		serializer, err := c.addSerializer(name, table)
		if err != nil {
			return err
		}
		t.SetSerializer(serializer)
	}

	outputConfig, err := c.buildOutput(name, table)
	if err != nil {
		return err
	}

	if err := c.toml.UnmarshalTable(table, output); err != nil {
		return err
	}

	if err := c.printUserDeprecation("outputs", name, output); err != nil {
		return err
	}

	if t, ok := interface{}(output).(interface{ TLSConfig() (*tls.Config, error) }); ok {
		if _, err := t.TLSConfig(); err != nil {
			return err
		}
	}

	// Check the number of misses against the threshold
	for key, count := range missCount {
		if count <= missThreshold {
			continue
		}
		if err := c.missingTomlField(nil, key); err != nil {
			return err
		}
	}

	ro := models.NewRunningOutput(output, outputConfig, c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit)
	c.Outputs = append(c.Outputs, ro)

	return nil
}

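// Illustrative configuration for the miss counting in addOutput above
// (plugin and option names are only examples):
//
//   [[outputs.file]]
//     files = ["stdout"]     # known to the output only
//     data_format = "influx" # known to the serializer only
//     fiels = ["stdout"]     # typo: known to neither
//
// With a serializer present, missThreshold is 1: "files" misses once (in
// the serializer setup) and is tolerated, while the typo misses in both the
// output and the serializer, exceeds the threshold, and is reported as an
// unused field.
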
func (c *Config) addInput(name string, table *ast.Table) error {
	if len(c.InputFilters) > 0 && !sliceContains(name, c.InputFilters) {
		return nil
	}

	// For inputs with parsers we need to compute the set of options covered
	// by neither the parser nor the input. We achieve this by keeping a
	// local book of missing entries that counts the number of misses. In
	// case we have a parser for the input, both need to miss the entry. We
	// count the missing entries at the end.
	missCount := make(map[string]int)
	missCountThreshold := 0
	c.setLocalMissingTomlFieldTracker(missCount)
	defer c.resetMissingTomlFieldTracker()

	creator, ok := inputs.Inputs[name]
	if !ok {
		// Handle removed, deprecated plugins
		if di, deprecated := inputs.Deprecations[name]; deprecated {
			printHistoricPluginDeprecationNotice("inputs", name, di)
			return errors.New("plugin deprecated")
		}

		return fmt.Errorf("undefined but requested input: %s", name)
	}
	input := creator()

	// If the input has a SetParser or SetParserFunc function, it can accept
	// arbitrary data-formats, so build the requested parser and set it.
	if t, ok := input.(telegraf.ParserPlugin); ok {
		missCountThreshold = 1
		parser, err := c.addParser("inputs", name, table)
		if err != nil {
			return fmt.Errorf("adding parser failed: %w", err)
		}
		t.SetParser(parser)
	}

	if t, ok := input.(telegraf.ParserFuncPlugin); ok {
		missCountThreshold = 1
		if !c.probeParser("inputs", name, table) {
			return errors.New("parser not found")
		}
		t.SetParserFunc(func() (telegraf.Parser, error) {
			return c.addParser("inputs", name, table)
		})
	}

	pluginConfig, err := c.buildInput(name, table)
	if err != nil {
		return err
	}

	if err := c.toml.UnmarshalTable(table, input); err != nil {
		return err
	}

	if err := c.printUserDeprecation("inputs", name, input); err != nil {
		return err
	}

	if t, ok := interface{}(input).(interface{ TLSConfig() (*tls.Config, error) }); ok {
		if _, err := t.TLSConfig(); err != nil {
			return err
		}
	}

	// Check the number of misses against the threshold
	for key, count := range missCount {
		if count <= missCountThreshold {
			continue
		}
		if err := c.missingTomlField(nil, key); err != nil {
			return err
		}
	}

	rp := models.NewRunningInput(input, pluginConfig)
	rp.SetDefaultTags(c.Tags)
	c.Inputs = append(c.Inputs, rp)

	return nil
}

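// Illustrative configuration for the miss counting in addInput above
// (plugin and option names are only examples):
//
//   [[inputs.exec]]
//     commands = ["/usr/bin/collect"] # known to the input only
//     data_format = "json"            # known to the parser only
//
// With a parser present, missCountThreshold is 1: each of these keys misses
// exactly once (in the component that does not know it) and is tolerated;
// only keys unknown to both sides are reported as unused.
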
// buildAggregator parses aggregator-specific items from the ast.Table,
// builds the filter and returns a models.AggregatorConfig to be inserted
// into models.RunningAggregator.
func (c *Config) buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, error) {
	conf := &models.AggregatorConfig{
		Name:   name,
		Delay:  time.Millisecond * 100,
		Period: time.Second * 30,
		Grace:  time.Second * 0,
	}

	c.getFieldDuration(tbl, "period", &conf.Period)
	c.getFieldDuration(tbl, "delay", &conf.Delay)
	c.getFieldDuration(tbl, "grace", &conf.Grace)
	c.getFieldBool(tbl, "drop_original", &conf.DropOriginal)
	c.getFieldString(tbl, "name_prefix", &conf.MeasurementPrefix)
	c.getFieldString(tbl, "name_suffix", &conf.MeasurementSuffix)
	c.getFieldString(tbl, "name_override", &conf.NameOverride)
	c.getFieldString(tbl, "alias", &conf.Alias)

	conf.Tags = make(map[string]string)
	if node, ok := tbl.Fields["tags"]; ok {
		if subtbl, ok := node.(*ast.Table); ok {
			if err := c.toml.UnmarshalTable(subtbl, conf.Tags); err != nil {
				return nil, fmt.Errorf("could not parse tags for aggregator %s: %w", name, err)
			}
		}
	}

	if c.hasErrs() {
		return nil, c.firstErr()
	}

	var err error
	conf.Filter, err = c.buildFilter(tbl)
	if err != nil {
		return conf, err
	}

	// Generate an ID for the plugin
	conf.ID, err = generatePluginID("aggregators."+name, tbl)
	return conf, err
}

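// Illustrative aggregator section for buildAggregator above (the plugin
// name is only an example); unset options keep the defaults from the
// struct literal (period 30s, delay 100ms, grace 0s):
//
//   [[aggregators.minmax]]
//     period = "60s"
//     delay = "500ms"
//     grace = "5s"
//     drop_original = true
//     [aggregators.minmax.tags]
//       aggregated = "true"
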
// buildProcessor parses processor-specific items from the ast.Table, builds
// the filter and returns a models.ProcessorConfig to be inserted into
// models.RunningProcessor.
func (c *Config) buildProcessor(category, name string, tbl *ast.Table) (*models.ProcessorConfig, error) {
	conf := &models.ProcessorConfig{Name: name}

	c.getFieldInt64(tbl, "order", &conf.Order)
	c.getFieldString(tbl, "alias", &conf.Alias)

	if c.hasErrs() {
		return nil, c.firstErr()
	}

	var err error
	conf.Filter, err = c.buildFilter(tbl)
	if err != nil {
		return conf, err
	}

	// Generate an ID for the plugin
	conf.ID, err = generatePluginID(category+"."+name, tbl)
	return conf, err
}

// buildFilter builds a Filter
// (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to be inserted
// into the models.OutputConfig/models.InputConfig to be used for glob
// filtering on tags and measurements.
func (c *Config) buildFilter(tbl *ast.Table) (models.Filter, error) {
	f := models.Filter{}

	c.getFieldStringSlice(tbl, "namepass", &f.NamePass)
	c.getFieldStringSlice(tbl, "namedrop", &f.NameDrop)

	c.getFieldStringSlice(tbl, "pass", &f.FieldPass)
	c.getFieldStringSlice(tbl, "fieldpass", &f.FieldPass)

	c.getFieldStringSlice(tbl, "drop", &f.FieldDrop)
	c.getFieldStringSlice(tbl, "fielddrop", &f.FieldDrop)

	c.getFieldTagFilter(tbl, "tagpass", &f.TagPassFilters)
	c.getFieldTagFilter(tbl, "tagdrop", &f.TagDropFilters)

	c.getFieldStringSlice(tbl, "tagexclude", &f.TagExclude)
	c.getFieldStringSlice(tbl, "taginclude", &f.TagInclude)

	c.getFieldString(tbl, "metricpass", &f.MetricPass)

	if c.hasErrs() {
		return f, c.firstErr()
	}

	if err := f.Compile(); err != nil {
		return f, err
	}

	return f, nil
}

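// Illustrative filter section for buildFilter above (plugin and values are
// only examples); the selectors are compiled into glob matchers by
// f.Compile():
//
//   [[inputs.disk]]
//     namepass = ["disk"]
//     fieldpass = ["used_percent", "inodes_*"]
//     tagexclude = ["fstype"]
//     [inputs.disk.tagpass]
//       path = ["/", "/home"]
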
// buildInput parses input-specific items from the ast.Table, builds the
// filter and returns a models.InputConfig to be inserted into
// models.RunningInput.
func (c *Config) buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
	cp := &models.InputConfig{
		Name:                   name,
		AlwaysIncludeLocalTags: c.Agent.AlwaysIncludeLocalTags,
	}
	c.getFieldDuration(tbl, "interval", &cp.Interval)
	c.getFieldDuration(tbl, "precision", &cp.Precision)
	c.getFieldDuration(tbl, "collection_jitter", &cp.CollectionJitter)
	c.getFieldDuration(tbl, "collection_offset", &cp.CollectionOffset)
	c.getFieldString(tbl, "name_prefix", &cp.MeasurementPrefix)
	c.getFieldString(tbl, "name_suffix", &cp.MeasurementSuffix)
	c.getFieldString(tbl, "name_override", &cp.NameOverride)
	c.getFieldString(tbl, "alias", &cp.Alias)

	cp.Tags = make(map[string]string)
	if node, ok := tbl.Fields["tags"]; ok {
		if subtbl, ok := node.(*ast.Table); ok {
			if err := c.toml.UnmarshalTable(subtbl, cp.Tags); err != nil {
				return nil, fmt.Errorf("could not parse tags for input %s: %w", name, err)
			}
		}
	}

	if c.hasErrs() {
		return nil, c.firstErr()
	}

	var err error
	cp.Filter, err = c.buildFilter(tbl)
	if err != nil {
		return cp, err
	}

	// Generate an ID for the plugin
	cp.ID, err = generatePluginID("inputs."+name, tbl)
	return cp, err
}

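// Illustrative per-plugin overrides for buildInput above (plugin name and
// values are only examples):
//
//   [[inputs.cpu]]
//     interval = "10s"
//     collection_jitter = "1s"
//     name_override = "cpu_stats"
//     alias = "primary"
//     [inputs.cpu.tags]
//       datacenter = "eu-west"
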
// buildOutput parses output-specific items from the ast.Table, builds the
// filter and returns a models.OutputConfig to be inserted into
// models.RunningOutput.
// Note: error exists in the return for future calls that might require error
func (c *Config) buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) {
	filter, err := c.buildFilter(tbl)
	if err != nil {
		return nil, err
	}
	oc := &models.OutputConfig{
		Name:   name,
		Filter: filter,
	}

	// TODO: support FieldPass/FieldDrop on outputs

	c.getFieldDuration(tbl, "flush_interval", &oc.FlushInterval)
	c.getFieldDuration(tbl, "flush_jitter", &oc.FlushJitter)

	c.getFieldInt(tbl, "metric_buffer_limit", &oc.MetricBufferLimit)
	c.getFieldInt(tbl, "metric_batch_size", &oc.MetricBatchSize)
	c.getFieldString(tbl, "alias", &oc.Alias)
	c.getFieldString(tbl, "name_override", &oc.NameOverride)
	c.getFieldString(tbl, "name_suffix", &oc.NameSuffix)
	c.getFieldString(tbl, "name_prefix", &oc.NamePrefix)

	if c.hasErrs() {
		return nil, c.firstErr()
	}

	// Generate an ID for the plugin
	oc.ID, err = generatePluginID("outputs."+name, tbl)
	return oc, err
}

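// Illustrative output section for buildOutput above (plugin name and
// values are only examples); as visible in addOutput, NewRunningOutput
// receives the agent-level batch/buffer settings as fallbacks when these
// keys are not set per plugin:
//
//   [[outputs.influxdb]]
//     flush_interval = "15s"
//     flush_jitter = "3s"
//     metric_batch_size = 1000
//     metric_buffer_limit = 10000
//     alias = "primary-db"
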
func (c *Config) missingTomlField(_ reflect.Type, key string) error {
	switch key {
	// General options to ignore
	case "alias", "always_include_local_tags",
		"collection_jitter", "collection_offset",
		"data_format", "delay", "drop", "drop_original",
		"fielddrop", "fieldpass", "flush_interval", "flush_jitter",
		"grace",
		"interval",
		"lvm", // What is this used for?
		"metric_batch_size", "metric_buffer_limit", "metricpass",
		"name_override", "name_prefix", "name_suffix", "namedrop", "namepass",
		"order",
		"pass", "period", "precision",
		"tagdrop", "tagexclude", "taginclude", "tagpass", "tags":

	// Secret-store options to ignore
	case "id":

	// Parser and serializer options to ignore
	case "data_type", "influx_parser_type":

	default:
		c.unusedFieldsMutex.Lock()
		c.UnusedFields[key] = true
		c.unusedFieldsMutex.Unlock()
	}
	return nil
}

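// The keys ignored in missingTomlField above are plugin-agnostic options
// consumed outside of UnmarshalTable, e.g. via the getField* helpers or
// during parser/serializer construction, so the TOML decoder legitimately
// reports them as unknown. A sketch of the resulting behaviour (the second
// key is a hypothetical typo):
//
//   _ = c.missingTomlField(nil, "interval")  // ignored, read by getFieldDuration
//   _ = c.missingTomlField(nil, "intervall") // recorded in c.UnusedFields
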
func (c *Config) setLocalMissingTomlFieldTracker(counter map[string]int) {
	f := func(t reflect.Type, key string) error {
		// Check if we are in a root element that might share options among
		// each other. Those root elements are plugins of all types.
		// All other elements are subtables of their respective plugin and
		// should just be hit once anyway. Therefore, we mark them with a
		// high number to handle them correctly later.
		pt := reflect.PtrTo(t)
		root := pt.Implements(reflect.TypeOf((*telegraf.Input)(nil)).Elem())
		root = root || pt.Implements(reflect.TypeOf((*telegraf.ServiceInput)(nil)).Elem())
		root = root || pt.Implements(reflect.TypeOf((*telegraf.Output)(nil)).Elem())
		root = root || pt.Implements(reflect.TypeOf((*telegraf.Aggregator)(nil)).Elem())
		root = root || pt.Implements(reflect.TypeOf((*telegraf.Processor)(nil)).Elem())
		root = root || pt.Implements(reflect.TypeOf((*telegraf.StreamingProcessor)(nil)).Elem())
		root = root || pt.Implements(reflect.TypeOf((*telegraf.Parser)(nil)).Elem())
		root = root || pt.Implements(reflect.TypeOf((*telegraf.Serializer)(nil)).Elem())

		count, ok := counter[key]
		if !root {
			counter[key] = 100
		} else if !ok {
			counter[key] = 1
		} else {
			counter[key] = count + 1
		}
		return nil
	}
	c.toml.MissingField = f
}

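// Worked example for the tracker above (hypothetical key): a key missed in
// a plugin root table counts 1 per miss, so "timeout" unknown to both an
// input and its parser ends at 2 and exceeds a threshold of 1. A key missed
// inside a subtable is instead pinned to 100, so it always exceeds the
// per-plugin thresholds and is passed on to missingTomlField for the final
// ignore-list check.
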
func (c *Config) resetMissingTomlFieldTracker() {
	c.toml.MissingField = c.missingTomlField
}

func (c *Config) getFieldString(tbl *ast.Table, fieldName string, target *string) {
	if node, ok := tbl.Fields[fieldName]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
				*target = str.Value
			}
		}
	}
}

func (c *Config) getFieldDuration(tbl *ast.Table, fieldName string, target interface{}) {
	if node, ok := tbl.Fields[fieldName]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
				d, err := time.ParseDuration(str.Value)
				if err != nil {
					c.addError(tbl, fmt.Errorf("error parsing duration: %w", err))
					return
				}
				targetVal := reflect.ValueOf(target).Elem()
				targetVal.Set(reflect.ValueOf(d))
			}
		}
	}
}

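// Illustrative use of getFieldDuration above (hypothetical table contents):
// a TOML value such as interval = "10s" is parsed with time.ParseDuration
// and stored through the reflected pointer, e.g.
//
//   var interval time.Duration
//   c.getFieldDuration(tbl, "interval", &interval) // interval == 10 * time.Second
//
// Note the target must point to a value assignable from time.Duration,
// otherwise the reflect Set call would panic.
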
func (c *Config) getFieldBool(tbl *ast.Table, fieldName string, target *bool) {
	var err error
	if node, ok := tbl.Fields[fieldName]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			switch t := kv.Value.(type) {
			case *ast.Boolean:
				*target, err = t.Boolean()
				if err != nil {
					c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value))
					return
				}
			case *ast.String:
				*target, err = strconv.ParseBool(t.Value)
				if err != nil {
					c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value))
					return
				}
			default:
				c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value.Source()))
				return
			}
		}
	}
}

func (c *Config) getFieldInt(tbl *ast.Table, fieldName string, target *int) {
	if node, ok := tbl.Fields[fieldName]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if iAst, ok := kv.Value.(*ast.Integer); ok {
				i, err := iAst.Int()
				if err != nil {
					c.addError(tbl, fmt.Errorf("unexpected int type %q, expecting int", iAst.Value))
					return
				}
				*target = int(i)
			}
		}
	}
}

func (c *Config) getFieldInt64(tbl *ast.Table, fieldName string, target *int64) {
	if node, ok := tbl.Fields[fieldName]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if iAst, ok := kv.Value.(*ast.Integer); ok {
				i, err := iAst.Int()
				if err != nil {
					c.addError(tbl, fmt.Errorf("unexpected int type %q, expecting int", iAst.Value))
					return
				}
				*target = i
			} else {
				c.addError(tbl, fmt.Errorf("found unexpected format while parsing %q, expecting int", fieldName))
			}
		}
	}
}

func (c *Config) getFieldStringSlice(tbl *ast.Table, fieldName string, target *[]string) {
	if node, ok := tbl.Fields[fieldName]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			ary, ok := kv.Value.(*ast.Array)
			if !ok {
				c.addError(tbl, fmt.Errorf("found unexpected format while parsing %q, expecting string array/slice format", fieldName))
				return
			}
			for _, elem := range ary.Value {
				if str, ok := elem.(*ast.String); ok {
					*target = append(*target, str.Value)
				}
			}
		}
	}
}

func (c *Config) getFieldTagFilter(tbl *ast.Table, fieldName string, target *[]models.TagFilter) {
	if node, ok := tbl.Fields[fieldName]; ok {
		if subtbl, ok := node.(*ast.Table); ok {
			for name, val := range subtbl.Fields {
				if kv, ok := val.(*ast.KeyValue); ok {
					ary, ok := kv.Value.(*ast.Array)
					if !ok {
						c.addError(tbl, fmt.Errorf("found unexpected format while parsing %q, expecting string array/slice format on each entry", fieldName))
						return
					}

					tagFilter := models.TagFilter{Name: name}
					for _, elem := range ary.Value {
						if str, ok := elem.(*ast.String); ok {
							tagFilter.Values = append(tagFilter.Values, str.Value)
						}
					}
					*target = append(*target, tagFilter)
				}
			}
		}
	}
}

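// Illustrative mapping for getFieldTagFilter above (hypothetical values):
// the TOML subtable
//
//   [inputs.cpu.tagpass]
//     host = ["web-*", "db-*"]
//
// yields one models.TagFilter{Name: "host", Values: ["web-*", "db-*"]}
// appended to the target slice; non-string array elements are silently
// skipped.
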
func keys(m map[string]bool) []string {
	result := []string{}
	for k := range m {
		result = append(result, k)
	}
	return result
}

func setDefaultParser(category string, name string) string {
	// Legacy support, exec plugin originally parsed JSON by default.
	if category == "inputs" && name == "exec" {
		return "json"
	}

	return "influx"
}

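// Example results for setDefaultParser above:
//
//   setDefaultParser("inputs", "exec") // "json" (legacy default)
//   setDefaultParser("inputs", "file") // "influx"
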
func (c *Config) hasErrs() bool {
	return len(c.errs) > 0
}

func (c *Config) firstErr() error {
	if len(c.errs) == 0 {
		return nil
	}
	return c.errs[0]
}

func (c *Config) addError(tbl *ast.Table, err error) {
	c.errs = append(c.errs, fmt.Errorf("line %d:%d: %w", tbl.Line, tbl.Position, err))
}