feat(logging): Implement structured logging (#15751)

Co-authored-by: Thomas Casteleyn <thomas.casteleyn@me.com>
Sven Rebhan 2024-09-06 22:52:22 +02:00 committed by GitHub
parent 895f96f21b
commit c3e53193d2
38 changed files with 1764 additions and 206 deletions

View File

@ -1,6 +1,20 @@
<!-- markdownlint-disable MD024 -->
# Changelog
## Unreleased
### Important Changes
- This release contains a logging overhaul as well as some new features for
logging (see PRs [#15556](https://github.com/influxdata/telegraf/pull/15556),
[#15629](https://github.com/influxdata/telegraf/pull/15629),
[#15677](https://github.com/influxdata/telegraf/pull/15677),
[#15695](https://github.com/influxdata/telegraf/pull/15695) and
[#15751](https://github.com/influxdata/telegraf/pull/15751)).
  As a consequence, the redundant `logtarget` setting is deprecated; `stderr` is
  used if no `logfile` is provided, otherwise messages are logged to the given
  file. To use the Windows `eventlog`, set `logformat = "eventlog"`!
## v1.31.3 [2024-08-12]
### Bugfixes

View File

@ -53,13 +53,12 @@
## Log only error level messages.
# quiet = false
-## Log target controls the destination for logs and can be one of "file",
-## "stderr" or, on Windows, "eventlog". When set to "file", the output file
-## is determined by the "logfile" setting.
-# logtarget = "file"
+## Log format controls the way messages are logged and can be one of "text",
+## "structured" or, on Windows, "eventlog".
+# logformat = "text"
-## Name of the file to be logged to when using the "file" logtarget. If set to
-## the empty string then logs are written to stderr.
+## Name of the file to be logged to or stderr if unset or empty. This
+## setting is ignored for the "eventlog" format.
# logfile = ""
## The logfile will be rotated after the time interval specified. When set
@ -80,9 +79,9 @@
# log_with_timezone = ""
## Override default hostname, if empty use os.Hostname()
-hostname = ""
+# hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
-omit_hostname = false
+# omit_hostname = false
## Method of translating SNMP objects. Can be "netsnmp" (deprecated) which
## translates by calling external programs snmptranslate and snmptable,
@ -95,7 +94,7 @@
## the state in the file will be restored for the plugins.
# statefile = ""
## Flag to skip running processors after aggregators
## By default, processors are run a second time after aggregators. Changing
## this setting to true will skip the second run of processors.
# skip_processors_after_aggregators = false

View File

@ -362,6 +362,7 @@ func (t *Telegraf) runAgent(ctx context.Context, reloadConfig bool) error {
Debug: c.Agent.Debug || t.debug,
Quiet: c.Agent.Quiet || t.quiet,
LogTarget: c.Agent.LogTarget,
+LogFormat: c.Agent.LogFormat,
Logfile: c.Agent.Logfile,
RotationInterval: time.Duration(c.Agent.LogfileRotationInterval),
RotationMaxSize: int64(c.Agent.LogfileRotationMaxSize),

View File

@ -123,7 +123,7 @@ func NewConfig() *Config {
Interval: Duration(10 * time.Second),
RoundInterval: true,
FlushInterval: Duration(10 * time.Second),
-LogTarget: "file",
+LogFormat: "text",
LogfileRotationMaxArchives: 5,
},
@ -226,12 +226,15 @@ type AgentConfig struct {
Quiet bool `toml:"quiet"`
// Log target controls the destination for logs and can be one of "file",
// "stderr" or, on Windows, "eventlog". When set to "file", the output file
-// is determined by the "logfile" setting.
-LogTarget string `toml:"logtarget"`
-// Name of the file to be logged to when using the "file" logtarget. If set to
-// the empty string then logs are written to stderr.
+// is determined by the "logfile" setting
+LogTarget string `toml:"logtarget" deprecated:"1.32.0;1.40.0;use 'logformat' and 'logfile' instead"`
+// Log format controls the way messages are logged and can be one of "text",
+// "structured" or, on Windows, "eventlog".
+LogFormat string `toml:"logformat"`
+// Name of the file to be logged to or stderr if empty. Ignored for "eventlog" format.
Logfile string `toml:"logfile"`
// The file will be rotated after the time interval specified. When set

View File

@ -151,8 +151,31 @@ func ApplyMigrations(data []byte) ([]byte, uint64, error) {
return nil, 0, fmt.Errorf("assigning text failed: %w", err)
}
-// Do the actual plugin migration(s)
var applied uint64
// Do the actual global section migration(s)
for idx, s := range sections {
if strings.Contains(s.name, ".") {
continue
}
log.Printf("D! applying global migrations to section %q in line %d...", s.name, s.begin)
for _, migrate := range migrations.GlobalMigrations {
result, msg, err := migrate(s.name, s.content)
if err != nil {
if errors.Is(err, migrations.ErrNotApplicable) {
continue
}
return nil, 0, fmt.Errorf("migrating options of %q (line %d) failed: %w", s.name, s.begin, err)
}
if msg != "" {
log.Printf("I! Global section %q in line %d: %s", s.name, s.begin, msg)
}
s.raw = bytes.NewBuffer(result)
applied++
}
sections[idx] = s
}
// Do the actual plugin migration(s)
for idx, s := range sections {
migrate, found := migrations.PluginMigrations[s.name]
if !found {
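The new loop above consults `migrations.GlobalMigrations` for every section whose name contains no dot, i.e. global tables such as `[agent]`, before the existing per-plugin pass. A minimal sketch of what such a migration looks like and how it is registered, modeled on the `global_agent` migration added later in this commit; the package name and the `example_option` key are invented for illustration:

```go
package example

import (
	"github.com/influxdata/toml"
	"github.com/influxdata/toml/ast"

	"github.com/influxdata/telegraf/migrations"
)

// migrateExample drops a made-up deprecated option from the [agent] table.
func migrateExample(name string, tbl *ast.Table) ([]byte, string, error) {
	// Only handle the global agent section; everything else is not applicable.
	if name != "agent" {
		return nil, "", migrations.ErrNotApplicable
	}

	var agent map[string]interface{}
	if err := toml.UnmarshalTable(tbl, &agent); err != nil {
		return nil, "", err
	}

	// Nothing to do if the (invented) deprecated option is absent.
	if _, found := agent["example_option"]; !found {
		return nil, "", migrations.ErrNotApplicable
	}
	delete(agent, "example_option")

	output, err := toml.Marshal(map[string]map[string]interface{}{"agent": agent})
	return output, "removed deprecated 'example_option' setting", err
}

func init() {
	// Registration is all that is needed for ApplyMigrations to pick it up.
	migrations.AddGlobalMigration(migrateExample)
}
```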

View File

@ -296,14 +296,14 @@ The agent table configures Telegraf and the defaults used across all plugins.
- **quiet**:
  Log only error level messages.
-- **logtarget**:
-  Log target controls the destination for logs and can be one of "file",
-  "stderr" or, on Windows, "eventlog". When set to "file", the output file is
-  determined by the "logfile" setting.
+- **logformat**:
+  Log format controls the way messages are logged and can be one of "text",
+  "structured" or, on Windows, "eventlog". The output file (if any) is
+  determined by the `logfile` setting.
- **logfile**:
-  Name of the file to be logged to when using the "file" logtarget. If set to
-  the empty string then logs are written to stderr.
+  Name of the file to be logged to or stderr if unset or empty. This
+  setting is ignored for the "eventlog" format.
- **logfile_rotation_interval**:
  The logfile will be rotated after the time interval specified. When set to

View File

@ -75,6 +75,9 @@ type Logger interface { //nolint:interfacebloat // All functions are required
// Level returns the configured log-level of the logger
Level() LogLevel
+// AddAttribute allows to add a key-value attribute to the logging output
+AddAttribute(key string, value interface{})
// Errorf logs an error message, patterned after log.Printf.
Errorf(format string, args ...interface{})
// Error logs an error message, patterned after log.Print.
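The new `AddAttribute` method lets a plugin attach key-value pairs that the structured sink emits with every message. A minimal sketch of how a plugin might use it, assuming the usual framework-injected `Log telegraf.Logger` field; the plugin and the `device_id` key are illustrative, not part of this commit:

```go
package example

import "github.com/influxdata/telegraf"

// exampleInput is a hypothetical input plugin using the new attribute API.
type exampleInput struct {
	DeviceID string          `toml:"device_id"`
	Log      telegraf.Logger `toml:"-"`
}

func (p *exampleInput) Init() error {
	// With logformat = "structured" this shows up as an extra JSON field on
	// every message from the plugin; the "text" and "eventlog" sinks in this
	// commit ignore plugin attributes.
	p.Log.AddAttribute("device_id", p.DeviceID)
	return nil
}
```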

View File

@ -27,7 +27,7 @@ func (l *eventLogger) Close() error {
return l.eventlog.Close()
}
-func (l *eventLogger) Print(level telegraf.LogLevel, _ time.Time, prefix string, args ...interface{}) {
+func (l *eventLogger) Print(level telegraf.LogLevel, _ time.Time, prefix string, _ map[string]interface{}, args ...interface{}) {
// Skip debug and beyond as they cannot be logged
if level >= telegraf.Debug {
return
@ -47,6 +47,8 @@ func (l *eventLogger) Print(level telegraf.LogLevel, _ time.Time, prefix string,
if err != nil {
l.errlog.Printf("E! Writing log message failed: %v", err)
}
+// TODO attributes...
}
func createEventLogger(cfg *Config) (sink, error) {

View File

@ -54,7 +54,7 @@ func TestEventLogIntegration(t *testing.T) {
t.Skip("Skipping integration test in short mode")
}
config := &Config{
-LogTarget: "eventlog",
+LogFormat: "eventlog",
Logfile: "",
}
require.NoError(t, SetupLogging(config))
@ -76,7 +76,7 @@ func TestRestrictedEventLogIntegration(t *testing.T) {
}
config := &Config{
-LogTarget: "eventlog",
+LogFormat: "eventlog",
Quiet: true,
}
require.NoError(t, SetupLogging(config))

View File

@ -6,6 +6,7 @@ import (
"io"
"log"
"os"
+"strings"
"sync"
"time"
@ -13,10 +14,11 @@ import (
)
type entry struct {
timestamp time.Time
level telegraf.LogLevel
prefix string
+attributes map[string]interface{}
args []interface{}
}
type handler struct {
@ -60,7 +62,7 @@ func (h *handler) switchSink(impl sink, level telegraf.LogLevel, tz *time.Locati
current := h.earlylogs.Front()
for current != nil {
e := current.Value.(*entry)
-h.impl.Print(e.level, e.timestamp.In(h.timezone), e.prefix, e.args...)
+h.impl.Print(e.level, e.timestamp.In(h.timezone), e.prefix, e.attributes, e.args...)
next := current.Next()
h.earlylogs.Remove(current)
current = next
@ -69,12 +71,13 @@ func (h *handler) switchSink(impl sink, level telegraf.LogLevel, tz *time.Locati
h.Unlock()
}
-func (h *handler) add(level telegraf.LogLevel, ts time.Time, prefix string, args ...interface{}) *entry {
+func (h *handler) add(level telegraf.LogLevel, ts time.Time, prefix string, attr map[string]interface{}, args ...interface{}) *entry {
e := &entry{
timestamp: ts,
level: level,
prefix: prefix,
+attributes: attr,
args: args,
}
h.Lock()
@ -109,7 +112,16 @@ type redirectLogger struct {
writer io.Writer
}
-func (l *redirectLogger) Print(level telegraf.LogLevel, ts time.Time, prefix string, args ...interface{}) {
+func (l *redirectLogger) Print(level telegraf.LogLevel, ts time.Time, prefix string, attr map[string]interface{}, args ...interface{}) {
-msg := append([]interface{}{ts.In(time.UTC).Format(time.RFC3339), " ", level.Indicator(), " ", prefix}, args...)
+var attrMsg string
+if len(attr) > 0 {
+var parts []string
+for k, v := range attr {
+parts = append(parts, fmt.Sprintf("%s=%v", k, v))
+}
+attrMsg = " (" + strings.Join(parts, ",") + ")"
+}
+msg := append([]interface{}{ts.In(time.UTC).Format(time.RFC3339), " ", level.Indicator(), " ", prefix + attrMsg}, args...)
fmt.Fprintln(l.writer, msg...)
}

View File

@ -1,6 +1,7 @@
package logger
import (
+"errors"
"fmt"
"io"
"log"
@ -21,13 +22,7 @@ var (
// sink interface that has to be implemented by a logging sink
type sink interface {
-Print(telegraf.LogLevel, time.Time, string, ...interface{})
+Print(telegraf.LogLevel, time.Time, string, map[string]interface{}, ...interface{})
}
-// Attr represents an attribute appended to structured logging
-type Attr struct {
-Key string
-Value interface{}
-}
// logger is the actual implementation of the telegraf logger interface
@ -36,46 +31,25 @@ type logger struct {
category string
name string
alias string
-suffix string
prefix string
onError []func()
+attributes map[string]interface{}
}
// New creates a new logging instance to be used in models
func New(category, name, alias string) *logger {
l := &logger{
category: category,
name: name,
alias: alias,
+attributes: map[string]interface{}{"category": category, "plugin": name},
}
-l.formatPrefix()
-return l
-}
-// SubLogger creates a new logger with the given name added as suffix
-func (l *logger) SubLogger(name string) telegraf.Logger {
-suffix := l.suffix
-if suffix != "" && name != "" {
-suffix += "."
-}
-suffix += name
-nl := &logger{
-level: l.level,
-category: l.category,
-name: l.name,
-alias: l.alias,
-suffix: suffix,
-}
-nl.formatPrefix()
-return nl
-}
-func (l *logger) formatPrefix() {
+if alias != "" {
+l.attributes["alias"] = alias
+}
+// Format the prefix
l.prefix = l.category
if l.prefix != "" && l.name != "" {
@ -88,13 +62,11 @@ func (l *logger) formatPrefix() {
}
l.prefix += l.alias
-if l.suffix != "" {
-l.prefix += "(" + l.suffix + ")"
-}
if l.prefix != "" {
l.prefix = "[" + l.prefix + "] "
}
+return l
}
// Level returns the current log-level of the logger
@ -105,27 +77,14 @@ func (l *logger) Level() telegraf.LogLevel {
return instance.level
}
-// SetLevel overrides the current log-level of the logger
-func (l *logger) SetLevel(level telegraf.LogLevel) {
-l.level = &level
-}
-// SetLevel changes the log-level to the given one
-func (l *logger) SetLogLevel(name string) error {
-if name == "" {
-return nil
-}
-level := telegraf.LogLevelFromString(name)
-if level == telegraf.None {
-return fmt.Errorf("invalid log-level %q", name)
-}
-l.SetLevel(level)
-return nil
-}
-// Register a callback triggered when errors are about to be written to the log
-func (l *logger) RegisterErrorCallback(f func()) {
-l.onError = append(l.onError, f)
+// AddAttribute allows to add a key-value attribute to the logging output
+func (l *logger) AddAttribute(key string, value interface{}) {
+// Do not allow to overwrite general keys
+switch key {
+case "category", "plugin", "alias":
+default:
+l.attributes[key] = value
+}
}
// Error logging including callbacks
@ -179,7 +138,7 @@ func (l *logger) Trace(args ...interface{}) {
func (l *logger) Print(level telegraf.LogLevel, ts time.Time, args ...interface{}) {
// Check if we are in early logging state and store the message in this case
if instance.impl == nil {
-instance.add(level, ts, l.prefix, args...)
+instance.add(level, ts, l.prefix, l.attributes, args...)
}
// Skip all messages with insufficient log-levels
@ -187,24 +146,45 @@ func (l *logger) Print(level telegraf.LogLevel, ts time.Time, args ...interface{
return
}
if instance.impl != nil {
-instance.impl.Print(level, ts.In(instance.timezone), l.prefix, args...)
+instance.impl.Print(level, ts.In(instance.timezone), l.prefix, l.attributes, args...)
} else {
msg := append([]interface{}{ts.In(instance.timezone).Format(time.RFC3339), " ", level.Indicator(), " ", l.prefix}, args...)
instance.earlysink.Print(msg...)
}
}
// SetLevel overrides the current log-level of the logger
func (l *logger) SetLevel(level telegraf.LogLevel) {
l.level = &level
}
// SetLevel changes the log-level to the given one
func (l *logger) SetLogLevel(name string) error {
if name == "" {
return nil
}
level := telegraf.LogLevelFromString(name)
if level == telegraf.None {
return fmt.Errorf("invalid log-level %q", name)
}
l.SetLevel(level)
return nil
}
// Register a callback triggered when errors are about to be written to the log
func (l *logger) RegisterErrorCallback(f func()) {
l.onError = append(l.onError, f)
}
type Config struct {
// will set the log level to DEBUG
Debug bool
// will set the log level to ERROR
Quiet bool
-//stderr, stdout, file or eventlog (Windows only)
+// format and target of log messages
LogTarget string
-// will direct the logging output to a file. Empty string is
-// interpreted as stderr. If there is an error opening the file the
-// logger will fall back to stderr
+LogFormat string
Logfile string
// will rotate when current file at the specified time interval
RotationInterval time.Duration
// will rotate when current file size exceeds this parameter.
@ -222,6 +202,31 @@ type Config struct {
// SetupLogging configures the logging output.
func SetupLogging(cfg *Config) error {
// Issue deprecation warning for option
switch cfg.LogTarget {
case "":
// Best-case no target set or file already migrated...
case "stderr":
msg := "Agent setting %q is deprecated, please leave %q empty and remove this setting!"
deprecation := "The setting will be removed in v1.40.0."
log.Printf("W! "+msg+" "+deprecation, "logtarget", "logfile")
cfg.Logfile = ""
case "file":
msg := "Agent setting %q is deprecated, please just set %q and remove this setting!"
deprecation := "The setting will be removed in v1.40.0."
log.Printf("W! "+msg+" "+deprecation, "logtarget", "logfile")
case "eventlog":
msg := "Agent setting %q is deprecated, please set %q to %q and remove this setting!"
deprecation := "The setting will be removed in v1.40.0."
log.Printf("W! "+msg+" "+deprecation, "logtarget", "logformat", "eventlog")
if cfg.LogFormat != "" && cfg.LogFormat != "eventlog" {
return errors.New("contradicting setting between 'logtarget' and 'logformat'")
}
cfg.LogFormat = "eventlog"
default:
return fmt.Errorf("invalid deprecated 'logtarget' setting %q", cfg.LogTarget)
}
if cfg.Debug {
cfg.logLevel = telegraf.Debug
}
@ -236,8 +241,8 @@ func SetupLogging(cfg *Config) error {
cfg.InstanceName = "telegraf"
}
-if cfg.LogTarget == "" || cfg.LogTarget == "file" && cfg.Logfile == "" {
-cfg.LogTarget = "stderr"
+if cfg.LogFormat == "" {
+cfg.LogFormat = "text"
}
// Get configured timezone
@ -250,13 +255,12 @@ func SetupLogging(cfg *Config) error {
return fmt.Errorf("setting logging timezone failed: %w", err)
}
-// Get the logging factory
-creator, ok := registry[cfg.LogTarget]
-if !ok {
-return fmt.Errorf("unsupported log target: %s, using stderr", cfg.LogTarget)
+// Get the logging factory and create the root instance
+creator, found := registry[cfg.LogFormat]
+if !found {
+return fmt.Errorf("unsupported log-format: %s", cfg.LogFormat)
}
-// Create the root logging instance
l, err := creator(cfg)
if err != nil {
return err
@ -268,7 +272,8 @@ func SetupLogging(cfg *Config) error {
}
// Update the logging instance
-instance.switchSink(l, cfg.logLevel, tz, cfg.LogTarget == "stderr")
+skipEarlyLogs := cfg.LogFormat == "text" && cfg.Logfile == ""
+instance.switchSink(l, cfg.logLevel, tz, skipEarlyLogs)
return nil
}
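In short, the reworked `SetupLogging` selects the sink by `LogFormat`, treats an empty `Logfile` as stderr, and keeps the deprecated `LogTarget` only to translate old configurations. A minimal sketch of driving it programmatically, assuming the exported `logger` package as used by the agent; the values are illustrative:

```go
package main

import (
	"log"

	"github.com/influxdata/telegraf/logger"
)

func main() {
	cfg := &logger.Config{
		LogFormat:           "structured", // "text" (default), "structured" or, on Windows, "eventlog"
		Logfile:             "",           // empty means log to stderr
		RotationMaxArchives: 5,
	}
	if err := logger.SetupLogging(cfg); err != nil {
		log.Fatalf("E! Setting up logging failed: %v", err)
	}
	// The standard library logger is now routed through the configured sink.
	log.Printf("I! logging initialized")
}
```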

View File

@ -1,12 +1,24 @@
package logger
import (
+"os"
"testing"
"github.com/influxdata/telegraf/selfstat"
"github.com/stretchr/testify/require"
)
func TestTextLogTargetDefault(t *testing.T) {
instance = defaultHandler()
cfg := &Config{
Quiet: true,
}
require.NoError(t, SetupLogging(cfg))
logger, ok := instance.impl.(*textLogger)
require.Truef(t, ok, "logging instance is not a default-logger but %T", instance.impl)
require.Equal(t, logger.logger.Writer(), os.Stderr)
}
func TestErrorCounting(t *testing.T) {
reg := selfstat.Register(
"gather",

View File

@ -0,0 +1,79 @@
package logger
import (
"context"
"fmt"
"io"
"log"
"log/slog"
"os"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/rotate"
)
type structuredLogger struct {
handler slog.Handler
output io.Writer
errlog *log.Logger
}
func (l *structuredLogger) Close() error {
if closer, ok := l.output.(io.Closer); ok {
return closer.Close()
}
return nil
}
func (l *structuredLogger) Print(level telegraf.LogLevel, ts time.Time, _ string, attr map[string]interface{}, args ...interface{}) {
record := slog.Record{
Time: ts,
Message: fmt.Sprint(args...),
Level: slog.Level(level),
}
for k, v := range attr {
record.Add(k, v)
}
if err := l.handler.Handle(context.Background(), record); err != nil {
l.errlog.Printf("E! Writing log message failed: %v", err)
}
}
var defaultStructuredHandlerOptions = &slog.HandlerOptions{
Level: slog.Level(-99),
ReplaceAttr: func(_ []string, attr slog.Attr) slog.Attr {
// Translate the Telegraf log-levels to strings
if attr.Key == slog.LevelKey {
if level, ok := attr.Value.Any().(slog.Level); ok {
attr.Value = slog.StringValue(telegraf.LogLevel(level).String())
}
}
return attr
},
}
func init() {
add("structured", func(cfg *Config) (sink, error) {
var writer io.Writer = os.Stderr
if cfg.Logfile != "" {
w, err := rotate.NewFileWriter(
cfg.Logfile,
cfg.RotationInterval,
cfg.RotationMaxSize,
cfg.RotationMaxArchives,
)
if err != nil {
return nil, err
}
writer = w
}
return &structuredLogger{
handler: slog.NewJSONHandler(writer, defaultStructuredHandlerOptions),
output: writer,
errlog: log.New(os.Stderr, "", 0),
}, nil
})
}
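For reference, a derived plugin logger routed through this sink emits one JSON object per message, as the tests below verify. A small sketch of the resulting shape, written in the style of the package's own tests; the plugin name, alias and attribute are invented, and the exact timestamp and field order come from `slog`:

```go
package logger

// sketchStructuredOutput is illustrative only and not part of the commit.
func sketchStructuredOutput() {
	l := New("inputs", "modbus", "plant1")
	l.AddAttribute("device_id", 42)
	l.Info("connected")
	// With logformat = "structured" this yields a line similar to:
	//   {"time":"...","level":"INFO","msg":"connected",
	//    "category":"inputs","plugin":"modbus","alias":"plant1","device_id":42}
}
```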

View File

@ -0,0 +1,322 @@
package logger
import (
"encoding/json"
"io"
"log"
"log/slog"
"os"
"path/filepath"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/stretchr/testify/require"
)
func TestStructuredStderr(t *testing.T) {
instance = defaultHandler()
cfg := &Config{
LogFormat: "structured",
Quiet: true,
}
require.NoError(t, SetupLogging(cfg))
logger, ok := instance.impl.(*structuredLogger)
require.Truef(t, ok, "logging instance is not a structured-logger but %T", instance.impl)
require.Equal(t, logger.output, os.Stderr)
}
func TestStructuredFile(t *testing.T) {
tmpfile, err := os.CreateTemp("", "")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
cfg := &Config{
Logfile: tmpfile.Name(),
LogFormat: "structured",
RotationMaxArchives: -1,
}
require.NoError(t, SetupLogging(cfg))
log.Printf("I! TEST")
log.Printf("D! TEST") // <- should be ignored
buf, err := os.ReadFile(tmpfile.Name())
require.NoError(t, err)
expected := map[string]interface{}{
"level": "INFO",
"msg": "TEST",
}
var actual map[string]interface{}
require.NoError(t, json.Unmarshal(buf, &actual))
require.Contains(t, actual, "time")
require.NotEmpty(t, actual["time"])
delete(actual, "time")
require.Equal(t, expected, actual)
}
func TestStructuredFileDebug(t *testing.T) {
tmpfile, err := os.CreateTemp("", "")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
cfg := &Config{
Logfile: tmpfile.Name(),
LogFormat: "structured",
RotationMaxArchives: -1,
Debug: true,
}
require.NoError(t, SetupLogging(cfg))
log.Printf("D! TEST")
buf, err := os.ReadFile(tmpfile.Name())
require.NoError(t, err)
expected := map[string]interface{}{
"level": "DEBUG",
"msg": "TEST",
}
var actual map[string]interface{}
require.NoError(t, json.Unmarshal(buf, &actual))
require.Contains(t, actual, "time")
require.NotEmpty(t, actual["time"])
delete(actual, "time")
require.Equal(t, expected, actual)
}
func TestStructuredFileError(t *testing.T) {
tmpfile, err := os.CreateTemp("", "")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
cfg := &Config{
Logfile: tmpfile.Name(),
LogFormat: "structured",
RotationMaxArchives: -1,
Quiet: true,
}
require.NoError(t, SetupLogging(cfg))
log.Printf("E! TEST")
log.Printf("I! TEST") // <- should be ignored
buf, err := os.ReadFile(tmpfile.Name())
require.NoError(t, err)
require.Greater(t, len(buf), 19)
expected := map[string]interface{}{
"level": "ERROR",
"msg": "TEST",
}
var actual map[string]interface{}
require.NoError(t, json.Unmarshal(buf, &actual))
require.Contains(t, actual, "time")
require.NotEmpty(t, actual["time"])
delete(actual, "time")
require.Equal(t, expected, actual)
}
func TestStructuredAddDefaultLogLevel(t *testing.T) {
tmpfile, err := os.CreateTemp("", "")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
cfg := &Config{
Logfile: tmpfile.Name(),
LogFormat: "structured",
RotationMaxArchives: -1,
Debug: true,
}
require.NoError(t, SetupLogging(cfg))
log.Printf("TEST")
buf, err := os.ReadFile(tmpfile.Name())
require.NoError(t, err)
expected := map[string]interface{}{
"level": "INFO",
"msg": "TEST",
}
var actual map[string]interface{}
require.NoError(t, json.Unmarshal(buf, &actual))
require.Contains(t, actual, "time")
require.NotEmpty(t, actual["time"])
delete(actual, "time")
require.Equal(t, expected, actual)
}
func TestStructuredDerivedLogger(t *testing.T) {
instance = defaultHandler()
tmpfile, err := os.CreateTemp("", "")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
cfg := &Config{
Logfile: tmpfile.Name(),
LogFormat: "structured",
RotationMaxArchives: -1,
Debug: true,
}
require.NoError(t, SetupLogging(cfg))
l := New("testing", "test", "")
l.Info("TEST")
buf, err := os.ReadFile(tmpfile.Name())
require.NoError(t, err)
expected := map[string]interface{}{
"level": "INFO",
"msg": "TEST",
"category": "testing",
"plugin": "test",
}
var actual map[string]interface{}
require.NoError(t, json.Unmarshal(buf, &actual))
require.Contains(t, actual, "time")
require.NotEmpty(t, actual["time"])
delete(actual, "time")
require.Equal(t, expected, actual)
}
func TestStructuredDerivedLoggerWithAttributes(t *testing.T) {
instance = defaultHandler()
tmpfile, err := os.CreateTemp("", "")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
cfg := &Config{
Logfile: tmpfile.Name(),
LogFormat: "structured",
RotationMaxArchives: -1,
Debug: true,
}
require.NoError(t, SetupLogging(cfg))
l := New("testing", "test", "myalias")
l.AddAttribute("alias", "foo") // Should be ignored
l.AddAttribute("device_id", 123)
l.Info("TEST")
buf, err := os.ReadFile(tmpfile.Name())
require.NoError(t, err)
expected := map[string]interface{}{
"level": "INFO",
"msg": "TEST",
"category": "testing",
"plugin": "test",
"alias": "myalias",
"device_id": float64(123),
}
var actual map[string]interface{}
require.NoError(t, json.Unmarshal(buf, &actual))
require.Contains(t, actual, "time")
require.NotEmpty(t, actual["time"])
delete(actual, "time")
require.Equal(t, expected, actual)
}
func TestStructuredWriteToTruncatedFile(t *testing.T) {
tmpfile, err := os.CreateTemp("", "")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
cfg := &Config{
Logfile: tmpfile.Name(),
LogFormat: "structured",
RotationMaxArchives: -1,
Debug: true,
}
require.NoError(t, SetupLogging(cfg))
log.Printf("TEST")
buf, err := os.ReadFile(tmpfile.Name())
require.NoError(t, err)
expected := map[string]interface{}{
"level": "INFO",
"msg": "TEST",
}
var actual map[string]interface{}
require.NoError(t, json.Unmarshal(buf, &actual))
require.Contains(t, actual, "time")
require.NotEmpty(t, actual["time"])
delete(actual, "time")
require.Equal(t, expected, actual)
require.NoError(t, os.Truncate(tmpfile.Name(), 0))
log.Printf("SHOULD BE FIRST")
buf, err = os.ReadFile(tmpfile.Name())
require.NoError(t, err)
expected = map[string]interface{}{
"level": "INFO",
"msg": "SHOULD BE FIRST",
}
require.NoError(t, json.Unmarshal(buf, &actual))
require.Contains(t, actual, "time")
require.NotEmpty(t, actual["time"])
delete(actual, "time")
require.Equal(t, expected, actual)
}
func TestStructuredWriteToFileInRotation(t *testing.T) {
tempDir := t.TempDir()
cfg := &Config{
Logfile: filepath.Join(tempDir, "test.log"),
LogFormat: "structured",
RotationMaxArchives: -1,
RotationMaxSize: 30,
}
require.NoError(t, SetupLogging(cfg))
// Close the writer here, otherwise the temp folder cannot be deleted because the current log file is in use.
defer CloseLogging() //nolint:errcheck // We cannot do anything if this fails
log.Printf("I! TEST 1") // Writes 31 bytes, will rotate
log.Printf("I! TEST") // Writes 29 bytes, no rotation expected
files, err := os.ReadDir(tempDir)
require.NoError(t, err)
require.Len(t, files, 2)
}
func BenchmarkTelegrafStructuredLogWrite(b *testing.B) {
// Discard all logging output
l := &structuredLogger{
handler: slog.NewJSONHandler(io.Discard, defaultStructuredHandlerOptions),
output: io.Discard,
errlog: log.New(os.Stderr, "", 0),
}
ts := time.Now()
for i := 0; i < b.N; i++ {
l.Print(telegraf.Debug, ts, "", nil, "test")
}
}

View File

@ -16,11 +16,11 @@ const (
LogTargetStderr = "stderr"
)
-type defaultLogger struct {
+type textLogger struct {
logger *log.Logger
}
-func (l *defaultLogger) Close() error {
+func (l *textLogger) Close() error {
writer := l.logger.Writer()
// Close the writer if possible and avoid closing stderr
@ -34,18 +34,14 @@ func (l *defaultLogger) Close() error {
return errors.New("the underlying writer cannot be closed")
}
-func (l *defaultLogger) SetOutput(w io.Writer) {
-l.logger.SetOutput(w)
-}
-func (l *defaultLogger) Print(level telegraf.LogLevel, ts time.Time, prefix string, args ...interface{}) {
+func (l *textLogger) Print(level telegraf.LogLevel, ts time.Time, prefix string, _ map[string]interface{}, args ...interface{}) {
msg := append([]interface{}{ts.Format(time.RFC3339), " ", level.Indicator(), " ", prefix}, args...)
l.logger.Print(msg...)
}
-func createDefaultLogger(cfg *Config) (sink, error) {
+func createTextLogger(cfg *Config) (sink, error) {
var writer io.Writer = os.Stderr
-if cfg.LogTarget == "file" && cfg.Logfile != "" {
+if cfg.Logfile != "" {
w, err := rotate.NewFileWriter(
cfg.Logfile,
cfg.RotationInterval,
@ -58,10 +54,9 @@ func createDefaultLogger(cfg *Config) (sink, error) {
writer = w
}
-return &defaultLogger{logger: log.New(writer, "", 0)}, nil
+return &textLogger{logger: log.New(writer, "", 0)}, nil
}
func init() {
-add("stderr", createDefaultLogger)
-add("file", createDefaultLogger)
+add("text", createTextLogger)
}
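The former "stderr" and "file" registrations collapse into a single "text" format, looked up by name in the package registry. A hedged sketch of how another sink could be registered inside the `logger` package using the same `add()` pattern; the "plain" format and its implementation are invented for illustration:

```go
package logger

import (
	"fmt"
	"os"
	"time"

	"github.com/influxdata/telegraf"
)

// plainSink is an invented example; like the text sink it ignores attributes.
type plainSink struct{}

func (*plainSink) Print(level telegraf.LogLevel, ts time.Time, prefix string, _ map[string]interface{}, args ...interface{}) {
	fmt.Fprintln(os.Stderr, ts.Format(time.RFC3339), level.Indicator(), prefix+fmt.Sprint(args...))
}

func init() {
	// Would be selected via logformat = "plain" if it were actually registered.
	add("plain", func(*Config) (sink, error) {
		return &plainSink{}, nil
	})
}
```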

View File

@ -12,37 +12,26 @@ import (
"github.com/stretchr/testify/require"
)
-func TestLogTargetDefault(t *testing.T) {
+func TestTextStderr(t *testing.T) {
instance = defaultHandler()
cfg := &Config{
-Quiet: true,
-}
-require.NoError(t, SetupLogging(cfg))
-logger, ok := instance.impl.(*defaultLogger)
-require.True(t, ok, "logging instance is not a default-logger")
-require.Equal(t, logger.logger.Writer(), os.Stderr)
-}
-func TestLogTargetStderr(t *testing.T) {
-instance = defaultHandler()
-cfg := &Config{
-LogTarget: "stderr",
+LogFormat: "text",
Quiet: true,
}
require.NoError(t, SetupLogging(cfg))
-logger, ok := instance.impl.(*defaultLogger)
-require.True(t, ok, "logging instance is not a default-logger")
+logger, ok := instance.impl.(*textLogger)
+require.Truef(t, ok, "logging instance is not a text-logger but %T", instance.impl)
require.Equal(t, logger.logger.Writer(), os.Stderr)
}
-func TestLogTargetFile(t *testing.T) {
+func TestTextFile(t *testing.T) {
tmpfile, err := os.CreateTemp("", "")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
cfg := &Config{
Logfile: tmpfile.Name(),
-LogTarget: "file",
+LogFormat: "text",
RotationMaxArchives: -1,
}
require.NoError(t, SetupLogging(cfg))
@ -53,17 +42,17 @@ func TestLogTargetFile(t *testing.T) {
buf, err := os.ReadFile(tmpfile.Name())
require.NoError(t, err)
require.Greater(t, len(buf), 19)
-require.Equal(t, buf[19:], []byte("Z I! TEST\n"))
+require.Equal(t, "Z I! TEST\n", string(buf[19:]))
}
-func TestLogTargetFileDebug(t *testing.T) {
+func TestTextFileDebug(t *testing.T) {
tmpfile, err := os.CreateTemp("", "")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
cfg := &Config{
Logfile: tmpfile.Name(),
-LogTarget: "file",
+LogFormat: "text",
RotationMaxArchives: -1,
Debug: true,
}
@ -74,17 +63,17 @@ func TestLogTargetFileDebug(t *testing.T) {
buf, err := os.ReadFile(tmpfile.Name())
require.NoError(t, err)
require.Greater(t, len(buf), 19)
-require.Equal(t, buf[19:], []byte("Z D! TEST\n"))
+require.Equal(t, "Z D! TEST\n", string(buf[19:]))
}
-func TestLogTargetFileError(t *testing.T) {
+func TestTextFileError(t *testing.T) {
tmpfile, err := os.CreateTemp("", "")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
cfg := &Config{
Logfile: tmpfile.Name(),
-LogTarget: "file",
+LogFormat: "text",
RotationMaxArchives: -1,
Quiet: true,
}
@ -96,17 +85,17 @@ func TestLogTargetFileError(t *testing.T) {
buf, err := os.ReadFile(tmpfile.Name())
require.NoError(t, err)
require.Greater(t, len(buf), 19)
-require.Equal(t, buf[19:], []byte("Z E! TEST\n"))
+require.Equal(t, "Z E! TEST\n", string(buf[19:]))
}
-func TestAddDefaultLogLevel(t *testing.T) {
+func TestTextAddDefaultLogLevel(t *testing.T) {
tmpfile, err := os.CreateTemp("", "")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
cfg := &Config{
Logfile: tmpfile.Name(),
-LogTarget: "file",
+LogFormat: "text",
RotationMaxArchives: -1,
Debug: true,
}
@ -117,17 +106,17 @@ func TestAddDefaultLogLevel(t *testing.T) {
buf, err := os.ReadFile(tmpfile.Name())
require.NoError(t, err)
require.Greater(t, len(buf), 19)
-require.Equal(t, buf[19:], []byte("Z I! TEST\n"))
+require.Equal(t, "Z I! TEST\n", string(buf[19:]))
}
-func TestWriteToTruncatedFile(t *testing.T) {
+func TestTextWriteToTruncatedFile(t *testing.T) {
tmpfile, err := os.CreateTemp("", "")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
cfg := &Config{
Logfile: tmpfile.Name(),
-LogTarget: "file",
+LogFormat: "text",
RotationMaxArchives: -1,
Debug: true,
}
@ -138,24 +127,22 @@ func TestWriteToTruncatedFile(t *testing.T) {
buf, err := os.ReadFile(tmpfile.Name())
require.NoError(t, err)
require.Greater(t, len(buf), 19)
-require.Equal(t, buf[19:], []byte("Z I! TEST\n"))
+require.Equal(t, "Z I! TEST\n", string(buf[19:]))
-tmpf, err := os.OpenFile(tmpfile.Name(), os.O_RDWR|os.O_TRUNC, 0640)
-require.NoError(t, err)
-require.NoError(t, tmpf.Close())
+require.NoError(t, os.Truncate(tmpfile.Name(), 0))
log.Printf("SHOULD BE FIRST")
buf, err = os.ReadFile(tmpfile.Name())
require.NoError(t, err)
-require.Equal(t, buf[19:], []byte("Z I! SHOULD BE FIRST\n"))
+require.Equal(t, "Z I! SHOULD BE FIRST\n", string(buf[19:]))
}
-func TestWriteToFileInRotation(t *testing.T) {
+func TestTextWriteToFileInRotation(t *testing.T) {
tempDir := t.TempDir()
cfg := &Config{
Logfile: filepath.Join(tempDir, "test.log"),
-LogTarget: "file",
+LogFormat: "text",
RotationMaxArchives: -1,
RotationMaxSize: 30,
}
@ -172,16 +159,69 @@ func TestWriteToFileInRotation(t *testing.T) {
require.Len(t, files, 2)
}
-func BenchmarkTelegrafLogWrite(b *testing.B) {
-l, err := createDefaultLogger(&Config{})
+func TestTextWriteDerivedLogger(t *testing.T) {
+instance = defaultHandler()
tmpfile, err := os.CreateTemp("", "")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
cfg := &Config{
Logfile: tmpfile.Name(),
LogFormat: "text",
RotationMaxArchives: -1,
Debug: true,
}
require.NoError(t, SetupLogging(cfg))
l := New("testing", "test", "")
l.Info("TEST")
buf, err := os.ReadFile(tmpfile.Name())
require.NoError(t, err)
require.Greater(t, len(buf), 19)
require.Equal(t, "Z I! [testing.test] TEST\n", string(buf[19:]))
}
func TestTextWriteDerivedLoggerWithAttributes(t *testing.T) {
instance = defaultHandler()
tmpfile, err := os.CreateTemp("", "")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
cfg := &Config{
Logfile: tmpfile.Name(),
LogFormat: "text",
RotationMaxArchives: -1,
Debug: true,
}
require.NoError(t, SetupLogging(cfg))
l := New("testing", "test", "myalias")
// All attributes should be ignored
l.AddAttribute("alias", "foo")
l.AddAttribute("device_id", 123)
l.Info("TEST")
buf, err := os.ReadFile(tmpfile.Name())
require.NoError(t, err)
require.Greater(t, len(buf), 19)
require.Equal(t, "Z I! [testing.test::myalias] TEST\n", string(buf[19:]))
}
func BenchmarkTelegrafTextLogWrite(b *testing.B) {
l, err := createTextLogger(&Config{})
require.NoError(b, err)
// Discard all logging output
-dl := l.(*defaultLogger)
-dl.SetOutput(io.Discard)
+dl := l.(*textLogger)
+dl.logger.SetOutput(io.Discard)
ts := time.Now()
for i := 0; i < b.N; i++ {
-dl.Print(telegraf.Debug, ts, "", "test")
+dl.Print(telegraf.Debug, ts, "", nil, "test")
}
}

View File

@ -0,0 +1,5 @@
//go:build !custom || migrations
package all
import _ "github.com/influxdata/telegraf/migrations/global_agent" // register migration

View File

@ -0,0 +1,75 @@
package global_agent
import (
"errors"
"github.com/influxdata/toml"
"github.com/influxdata/toml/ast"
"github.com/influxdata/telegraf/migrations"
)
// Migration function
func migrate(name string, tbl *ast.Table) ([]byte, string, error) {
// Migrate the agent section only...
if name != "agent" {
return nil, "", migrations.ErrNotApplicable
}
// Decode the old data structure
var agent map[string]interface{}
if err := toml.UnmarshalTable(tbl, &agent); err != nil {
return nil, "", err
}
// Check for deprecated option(s) and migrate them
var applied bool
// Migrate log settings
var logtarget string
var logtargetFound bool
if raw, found := agent["logtarget"]; found {
if v, ok := raw.(string); ok {
logtarget = v
logtargetFound = true
}
}
var logformat string
var logformatFound bool
if raw, found := agent["logformat"]; found {
if v, ok := raw.(string); ok {
logformat = v
logformatFound = true
}
}
if logtargetFound {
switch logtarget {
case "stderr":
delete(agent, "logfile")
case "file":
case "eventlog":
if logformatFound && logformat != "eventlog" {
return nil, "", errors.New("contradicting setting for 'logtarget' and 'logformat'")
}
agent["logformat"] = "eventlog"
delete(agent, "logfile")
}
applied = true
delete(agent, "logtarget")
}
// No options migrated so we can exit early
if !applied {
return nil, "", migrations.ErrNotApplicable
}
output, err := toml.Marshal(map[string]map[string]interface{}{"agent": agent})
return output, "", err
}
// Register the migration function for the plugin type
func init() {
migrations.AddGlobalMigration(migrate)
}
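A small sketch of the migration's effect when driven through `config.ApplyMigrations`, mirroring the testcases that follow; the inline TOML is illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/telegraf/config"
	_ "github.com/influxdata/telegraf/migrations/global_agent" // register migration
)

func main() {
	// Deprecated form: logtarget selects the Windows event log sink.
	input := []byte("[agent]\n  logtarget = \"eventlog\"\n")

	output, applied, err := config.ApplyMigrations(input)
	if err != nil {
		log.Fatal(err)
	}
	// Reports one applied migration; the output now carries
	// logformat = "eventlog" and the deprecated logtarget entry is gone.
	fmt.Println(applied)
	fmt.Print(string(output))
}
```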

View File

@ -0,0 +1,104 @@
package global_agent_test
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/config"
_ "github.com/influxdata/telegraf/migrations/global_agent" // register migration
)
func TestNoMigration(t *testing.T) {
fn := filepath.Join("testcases", "default.conf")
// Read the input data
input, remote, err := config.LoadConfigFile(fn)
require.NoError(t, err)
require.False(t, remote)
require.NotEmpty(t, input)
// Expect the output to be equal to the input
expectedBuffer, err := os.ReadFile(fn)
require.NoError(t, err)
expected := config.NewConfig()
require.NoError(t, expected.LoadConfigData(expectedBuffer))
require.NotNil(t, expected.Agent)
// Migrate
output, n, err := config.ApplyMigrations(input)
require.NoError(t, err)
require.NotEmpty(t, output)
require.Zero(t, n)
actual := config.NewConfig()
require.NoError(t, actual.LoadConfigData(output))
require.NotNil(t, actual.Agent)
// Test the output
require.EqualValues(t, expected.Agent, actual.Agent, string(output))
require.Equal(t, string(expectedBuffer), string(output))
}
func TestLogTargetEventlogCollision(t *testing.T) {
fn := filepath.Join("testcases", "logtarget_eventlog_collision.conf")
// Read the input data
input, remote, err := config.LoadConfigFile(fn)
require.NoError(t, err)
require.False(t, remote)
require.NotEmpty(t, input)
// Migrate
_, n, err := config.ApplyMigrations(input)
require.ErrorContains(t, err, "contradicting setting for 'logtarget' and 'logformat'")
require.Zero(t, n)
}
func TestCases(t *testing.T) {
// Get all directories in testdata
folders, err := os.ReadDir("testcases")
require.NoError(t, err)
for _, f := range folders {
// Only handle folders
if !f.IsDir() {
continue
}
t.Run(f.Name(), func(t *testing.T) {
testcasePath := filepath.Join("testcases", f.Name())
inputFile := filepath.Join(testcasePath, "telegraf.conf")
expectedFile := filepath.Join(testcasePath, "expected.conf")
// Read the expected output
expected := config.NewConfig()
require.NoError(t, expected.LoadConfig(expectedFile))
require.NotNil(t, expected.Agent)
// Read the input data
input, remote, err := config.LoadConfigFile(inputFile)
require.NoError(t, err)
require.False(t, remote)
require.NotEmpty(t, input)
// Migrate
output, n, err := config.ApplyMigrations(input)
require.NoError(t, err)
require.NotEmpty(t, output)
require.Positive(t, n, "expected migration application but none applied")
actual := config.NewConfig()
require.NoError(t, actual.LoadConfigData(output))
require.NotNil(t, actual.Agent)
// Test the output
require.EqualValues(t, expected.Agent, actual.Agent, string(output))
expectedBuffer, err := os.ReadFile(expectedFile)
require.NoError(t, err)
require.Equal(t, string(expectedBuffer), string(output))
})
}
}

View File

@ -0,0 +1,100 @@
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## Maximum number of unwritten metrics per output. Increasing this value
## allows for longer periods of output downtime without dropping metrics at the
## cost of higher maximum memory usage.
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Collection offset is used to shift the collection by the given amount.
## This can be used to avoid many plugins querying constraint devices
## at the same time by manually scheduling them in time.
# collection_offset = "0s"
## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Collected metrics are rounded to the precision specified. Precision is
## specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s).
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
##
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s:
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
##
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
precision = "0s"
## Log at debug level.
# debug = false
## Log only error level messages.
# quiet = false
## Log format controls the way messages are logged and can be one of "text",
## "structured" or, on Windows, "eventlog".
# logformat = "text"
## Name of the file to be logged to or stderr if unset or empty. This
## setting is ignored for the "eventlog" format.
# logfile = ""
## The logfile will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed. Logs are rotated only when
## written to, if there is no log activity rotation may be delayed.
# logfile_rotation_interval = "0h"
## The logfile will be rotated when it becomes larger than the specified
## size. When set to 0 no size based rotation is performed.
# logfile_rotation_max_size = "0MB"
## Maximum number of rotated archives to keep, any older logs are deleted.
## If set to -1, no archives are removed.
# logfile_rotation_max_archives = 5
## Pick a timezone to use when logging or type 'local' for local time.
## Example: America/Chicago
# log_with_timezone = ""
## Override default hostname, if empty use os.Hostname()
# hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
# omit_hostname = false
## Method of translating SNMP objects. Can be "netsnmp" (deprecated) which
## translates by calling external programs snmptranslate and snmptable,
## or "gosmi" which translates using the built-in gosmi library.
# snmp_translator = "netsnmp"
## Name of the file to load the state of plugins from and store the state to.
## If uncommented and not empty, this file will be used to save the state of
## stateful plugins on termination of Telegraf. If the file exists on start,
## the state in the file will be restored for the plugins.
# statefile = ""
## Flag to skip running processors after aggregators
## By default, processors are run a second time after aggregators. Changing
## this setting to true will skip the second run of processors.
# skip_processors_after_aggregators = false

View File

@ -0,0 +1,10 @@
[agent]
collection_jitter = "0s"
flush_interval = "10s"
flush_jitter = "0s"
interval = "10s"
logformat = "eventlog"
metric_batch_size = 1000
metric_buffer_limit = 10000
precision = "0s"
round_interval = true

View File

@ -0,0 +1,102 @@
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## Maximum number of unwritten metrics per output. Increasing this value
## allows for longer periods of output downtime without dropping metrics at the
## cost of higher maximum memory usage.
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Collection offset is used to shift the collection by the given amount.
## This can be used to avoid many plugins querying constraint devices
## at the same time by manually scheduling them in time.
# collection_offset = "0s"
## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Collected metrics are rounded to the precision specified. Precision is
## specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s).
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
##
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s:
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
##
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
precision = "0s"
## Log at debug level.
# debug = false
## Log only error level messages.
# quiet = false
## Log format controls the way messages are logged and can be one of "text",
## "structured" or, on Windows, "eventlog".
# logformat = "text"
logtarget = "eventlog"
## Name of the file to be logged to or stderr if unset or empty. This
## setting is ignored for the "eventlog" format.
# logfile = "lala.log"
## The logfile will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed. Logs are rotated only when
## written to, if there is no log activity rotation may be delayed.
# logfile_rotation_interval = "0h"
## The logfile will be rotated when it becomes larger than the specified
## size. When set to 0 no size based rotation is performed.
# logfile_rotation_max_size = "0MB"
## Maximum number of rotated archives to keep, any older logs are deleted.
## If set to -1, no archives are removed.
# logfile_rotation_max_archives = 5
## Pick a timezone to use when logging or type 'local' for local time.
## Example: America/Chicago
# log_with_timezone = ""
## Override default hostname, if empty use os.Hostname()
# hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
# omit_hostname = false
## Method of translating SNMP objects. Can be "netsnmp" (deprecated) which
## translates by calling external programs snmptranslate and snmptable,
## or "gosmi" which translates using the built-in gosmi library.
# snmp_translator = "netsnmp"
## Name of the file to load the state of plugins from and store the state to.
## If uncommented and not empty, this file will be used to save the state of
## stateful plugins on termination of Telegraf. If the file exists on start,
## the state in the file will be restored for the plugins.
# statefile = ""
## Flag to skip running processors after aggregators
## By default, processors are run a second time after aggregators. Changing
## this setting to true will skip the second run of processors.
# skip_processors_after_aggregators = false

View File

@ -0,0 +1,102 @@
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## Maximum number of unwritten metrics per output. Increasing this value
## allows for longer periods of output downtime without dropping metrics at the
## cost of higher maximum memory usage.
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Collection offset is used to shift the collection by the given amount.
## This can be used to avoid many plugins querying constraint devices
## at the same time by manually scheduling them in time.
# collection_offset = "0s"
## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Collected metrics are rounded to the precision specified. Precision is
## specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s).
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
##
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s:
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
##
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
precision = "0s"
## Log at debug level.
# debug = false
## Log only error level messages.
# quiet = false
## Log format controls the way messages are logged and can be one of "text",
## "structured" or, on Windows, "eventlog".
logformat = "text"
logtarget = "eventlog"
## Name of the file to be logged to or stderr if unset or empty. This
## setting is ignored for the "eventlog" format.
# logfile = ""
## The logfile will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed. Logs are rotated only when
## written to, if there is no log activity rotation may be delayed.
# logfile_rotation_interval = "0h"
## The logfile will be rotated when it becomes larger than the specified
## size. When set to 0 no size based rotation is performed.
# logfile_rotation_max_size = "0MB"
## Maximum number of rotated archives to keep, any older logs are deleted.
## If set to -1, no archives are removed.
# logfile_rotation_max_archives = 5
## Pick a timezone to use when logging or type 'local' for local time.
## Example: America/Chicago
# log_with_timezone = ""
## Override default hostname, if empty use os.Hostname()
# hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
# omit_hostname = false
## Method of translating SNMP objects. Can be "netsnmp" (deprecated) which
## translates by calling external programs snmptranslate and snmptable,
## or "gosmi" which translates using the built-in gosmi library.
# snmp_translator = "netsnmp"
## Name of the file to load the state of plugins from and store the state to.
## If uncommented and not empty, this file will be used to save the state of
## stateful plugins on termination of Telegraf. If the file exists on start,
## the state in the file will be restored for the plugins.
# statefile = ""
## Flag to skip running processors after aggregators
## By default, processors are run a second time after aggregators. Changing
## this setting to true will skip the second run of processors.
# skip_processors_after_aggregators = false

View File

@ -0,0 +1,10 @@
[agent]
collection_jitter = "0s"
flush_interval = "10s"
flush_jitter = "0s"
interval = "10s"
logformat = "eventlog"
metric_batch_size = 1000
metric_buffer_limit = 10000
precision = "0s"
round_interval = true

View File

@ -0,0 +1,102 @@
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## Maximum number of unwritten metrics per output. Increasing this value
## allows for longer periods of output downtime without dropping metrics at the
## cost of higher maximum memory usage.
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Collection offset is used to shift the collection by the given amount.
## This can be used to avoid many plugins querying constrained devices
## at the same time by manually scheduling them in time.
# collection_offset = "0s"
## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Collected metrics are rounded to the precision specified. Precision is
## specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s).
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
##
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s:
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
##
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
precision = "0s"
## Log at debug level.
# debug = false
## Log only error level messages.
# quiet = false
## Log format controls the way messages are logged and can be one of "text",
## "structured" or, on Windows, "eventlog".
# logformat = "text"
logtarget = "eventlog"
## Name of the file to be logged to or stderr if unset or empty. This
## setting is ignored for the "eventlog" format.
logfile = "lala.log"
## The logfile will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed. Logs are rotated only when
## written to, if there is no log activity rotation may be delayed.
# logfile_rotation_interval = "0h"
## The logfile will be rotated when it becomes larger than the specified
## size. When set to 0 no size based rotation is performed.
# logfile_rotation_max_size = "0MB"
## Maximum number of rotated archives to keep, any older logs are deleted.
## If set to -1, no archives are removed.
# logfile_rotation_max_archives = 5
## Pick a timezone to use when logging or type 'local' for local time.
## Example: America/Chicago
# log_with_timezone = ""
## Override default hostname, if empty use os.Hostname()
# hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
# omit_hostname = false
## Method of translating SNMP objects. Can be "netsnmp" (deprecated) which
## translates by calling external programs snmptranslate and snmptable,
## or "gosmi" which translates using the built-in gosmi library.
# snmp_translator = "netsnmp"
## Name of the file to load the state of plugins from and store the state to.
## If uncommented and not empty, this file will be used to save the state of
## stateful plugins on termination of Telegraf. If the file exists on start,
## the state in the file will be restored for the plugins.
# statefile = ""
## Flag to skip running processors after aggregators
## By default, processors are run a second time after aggregators. Changing
## this setting to true will skip the second run of processors.
# skip_processors_after_aggregators = false

View File

@ -0,0 +1,10 @@
[agent]
collection_jitter = "0s"
flush_interval = "10s"
flush_jitter = "0s"
interval = "10s"
logfile = "lala.log"
metric_batch_size = 1000
metric_buffer_limit = 10000
precision = "0s"
round_interval = true

View File

@ -0,0 +1,102 @@
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## Maximum number of unwritten metrics per output. Increasing this value
## allows for longer periods of output downtime without dropping metrics at the
## cost of higher maximum memory usage.
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Collection offset is used to shift the collection by the given amount.
## This can be used to avoid many plugins querying constrained devices
## at the same time by manually scheduling them in time.
# collection_offset = "0s"
## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Collected metrics are rounded to the precision specified. Precision is
## specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s).
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
##
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s:
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
##
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
precision = "0s"
## Log at debug level.
# debug = false
## Log only error level messages.
# quiet = false
## Log format controls the way messages are logged and can be one of "text",
## "structured" or, on Windows, "eventlog".
# logformat = "text"
logtarget = "file"
## Name of the file to be logged to or stderr if unset or empty. This
## setting is ignored for the "eventlog" format.
logfile = "lala.log"
## The logfile will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed. Logs are rotated only when
## written to, if there is no log activity rotation may be delayed.
# logfile_rotation_interval = "0h"
## The logfile will be rotated when it becomes larger than the specified
## size. When set to 0 no size based rotation is performed.
# logfile_rotation_max_size = "0MB"
## Maximum number of rotated archives to keep, any older logs are deleted.
## If set to -1, no archives are removed.
# logfile_rotation_max_archives = 5
## Pick a timezone to use when logging or type 'local' for local time.
## Example: America/Chicago
# log_with_timezone = ""
## Override default hostname, if empty use os.Hostname()
# hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
# omit_hostname = false
## Method of translating SNMP objects. Can be "netsnmp" (deprecated) which
## translates by calling external programs snmptranslate and snmptable,
## or "gosmi" which translates using the built-in gosmi library.
# snmp_translator = "netsnmp"
## Name of the file to load the state of plugins from and store the state to.
## If uncommented and not empty, this file will be used to save the state of
## stateful plugins on termination of Telegraf. If the file exists on start,
## the state in the file will be restored for the plugins.
# statefile = ""
## Flag to skip running processors after aggregators
## By default, processors are run a second time after aggregators. Changing
## this setting to true will skip the second run of processors.
# skip_processors_after_aggregators = false

View File

@ -0,0 +1,9 @@
[agent]
collection_jitter = "0s"
flush_interval = "10s"
flush_jitter = "0s"
interval = "10s"
metric_batch_size = 1000
metric_buffer_limit = 10000
precision = "0s"
round_interval = true

View File

@ -0,0 +1,102 @@
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## Maximum number of unwritten metrics per output. Increasing this value
## allows for longer periods of output downtime without dropping metrics at the
## cost of higher maximum memory usage.
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Collection offset is used to shift the collection by the given amount.
## This can be used to avoid many plugins querying constrained devices
## at the same time by manually scheduling them in time.
# collection_offset = "0s"
## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Collected metrics are rounded to the precision specified. Precision is
## specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s).
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
##
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s:
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
##
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
precision = "0s"
## Log at debug level.
# debug = false
## Log only error level messages.
# quiet = false
## Log format controls the way messages are logged and can be one of "text",
## "structured" or, on Windows, "eventlog".
# logformat = "text"
logtarget = "file"
## Name of the file to be logged to or stderr if unset or empty. This
## setting is ignored for the "eventlog" format.
# logfile = "lala.log"
## The logfile will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed. Logs are rotated only when
## written to, if there is no log activity rotation may be delayed.
# logfile_rotation_interval = "0h"
## The logfile will be rotated when it becomes larger than the specified
## size. When set to 0 no size based rotation is performed.
# logfile_rotation_max_size = "0MB"
## Maximum number of rotated archives to keep, any older logs are deleted.
## If set to -1, no archives are removed.
# logfile_rotation_max_archives = 5
## Pick a timezone to use when logging or type 'local' for local time.
## Example: America/Chicago
# log_with_timezone = ""
## Override default hostname, if empty use os.Hostname()
# hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
# omit_hostname = false
## Method of translating SNMP objects. Can be "netsnmp" (deprecated) which
## translates by calling external programs snmptranslate and snmptable,
## or "gosmi" which translates using the built-in gosmi library.
# snmp_translator = "netsnmp"
## Name of the file to load the state of plugins from and store the state to.
## If uncommented and not empty, this file will be used to save the state of
## stateful plugins on termination of Telegraf. If the file exists on start,
## the state in the file will be restored for the plugins.
# statefile = ""
## Flag to skip running processors after aggregators
## By default, processors are run a second time after aggregators. Changing
## this setting to true will skip the second run of processors.
# skip_processors_after_aggregators = false

View File

@ -0,0 +1,9 @@
[agent]
collection_jitter = "0s"
flush_interval = "10s"
flush_jitter = "0s"
interval = "10s"
metric_batch_size = 1000
metric_buffer_limit = 10000
precision = "0s"
round_interval = true

View File

@ -0,0 +1,102 @@
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## Maximum number of unwritten metrics per output. Increasing this value
## allows for longer periods of output downtime without dropping metrics at the
## cost of higher maximum memory usage.
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Collection offset is used to shift the collection by the given amount.
## This can be used to avoid many plugins querying constrained devices
## at the same time by manually scheduling them in time.
# collection_offset = "0s"
## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Collected metrics are rounded to the precision specified. Precision is
## specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s).
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
##
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s:
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
##
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
precision = "0s"
## Log at debug level.
# debug = false
## Log only error level messages.
# quiet = false
## Log format controls the way messages are logged and can be one of "text",
## "structured" or, on Windows, "eventlog".
# logformat = "text"
logtarget = "stderr"
## Name of the file to be logged to or stderr if unset or empty. This
## setting is ignored for the "eventlog" format.
# logfile = "lala.log"
## The logfile will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed. Logs are rotated only when
## written to, if there is no log activity rotation may be delayed.
# logfile_rotation_interval = "0h"
## The logfile will be rotated when it becomes larger than the specified
## size. When set to 0 no size based rotation is performed.
# logfile_rotation_max_size = "0MB"
## Maximum number of rotated archives to keep, any older logs are deleted.
## If set to -1, no archives are removed.
# logfile_rotation_max_archives = 5
## Pick a timezone to use when logging or type 'local' for local time.
## Example: America/Chicago
# log_with_timezone = ""
## Override default hostname, if empty use os.Hostname()
# hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
# omit_hostname = false
## Method of translating SNMP objects. Can be "netsnmp" (deprecated) which
## translates by calling external programs snmptranslate and snmptable,
## or "gosmi" which translates using the built-in gosmi library.
# snmp_translator = "netsnmp"
## Name of the file to load the state of plugins from and store the state to.
## If uncommented and not empty, this file will be used to save the state of
## stateful plugins on termination of Telegraf. If the file exists on start,
## the state in the file will be restored for the plugins.
# statefile = ""
## Flag to skip running processors after aggregators
## By default, processors are run a second time after aggregators. Changing
## this setting to true will skip the second run of processors.
# skip_processors_after_aggregators = false

View File

@ -0,0 +1,9 @@
[agent]
collection_jitter = "0s"
flush_interval = "10s"
flush_jitter = "0s"
interval = "10s"
metric_batch_size = 1000
metric_buffer_limit = 10000
precision = "0s"
round_interval = true

View File

@ -0,0 +1,102 @@
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## Maximum number of unwritten metrics per output. Increasing this value
## allows for longer periods of output downtime without dropping metrics at the
## cost of higher maximum memory usage.
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Collection offset is used to shift the collection by the given amount.
## This can be used to avoid many plugins querying constrained devices
## at the same time by manually scheduling them in time.
# collection_offset = "0s"
## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Collected metrics are rounded to the precision specified. Precision is
## specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s).
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
##
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s:
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
##
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
precision = "0s"
## Log at debug level.
# debug = false
## Log only error level messages.
# quiet = false
## Log format controls the way messages are logged and can be one of "text",
## "structured" or, on Windows, "eventlog".
# logformat = "text"
logtarget = "stderr"
## Name of the file to be logged to or stderr if unset or empty. This
## setting is ignored for the "eventlog" format.
logfile = "lala.log"
## The logfile will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed. Logs are rotated only when
## written to, if there is no log activity rotation may be delayed.
# logfile_rotation_interval = "0h"
## The logfile will be rotated when it becomes larger than the specified
## size. When set to 0 no size based rotation is performed.
# logfile_rotation_max_size = "0MB"
## Maximum number of rotated archives to keep, any older logs are deleted.
## If set to -1, no archives are removed.
# logfile_rotation_max_archives = 5
## Pick a timezone to use when logging or type 'local' for local time.
## Example: America/Chicago
# log_with_timezone = ""
## Override default hostname, if empty use os.Hostname()
# hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
# omit_hostname = false
## Method of translating SNMP objects. Can be "netsnmp" (deprecated) which
## translates by calling external programs snmptranslate and snmptable,
## or "gosmi" which translates using the built-in gosmi library.
# snmp_translator = "netsnmp"
## Name of the file to load the state of plugins from and store the state to.
## If uncommented and not empty, this file will be used to save the state of
## stateful plugins on termination of Telegraf. If the file exists on start,
## the state in the file will be restored for the plugins.
# statefile = ""
## Flag to skip running processors after aggregators
## By default, processors are run a second time after aggregators. Changing
## this setting to true will skip the second run of processors.
# skip_processors_after_aggregators = false

View File

@ -39,17 +39,10 @@ func AddGeneralMigration(f GeneralMigrationFunc) {
	GeneralMigrations = append(GeneralMigrations, f)
}

-type pluginTOMLStruct map[string]map[string][]interface{}
-
-func CreateTOMLStruct(category, name string) pluginTOMLStruct {
-	return map[string]map[string][]interface{}{
-		category: {
-			name: make([]interface{}, 0),
-		},
-	}
-}
-
-func (p *pluginTOMLStruct) Add(category, name string, plugin interface{}) {
-	cfg := map[string]map[string][]interface{}(*p)
-	cfg[category][name] = append(cfg[category][name], plugin)
-}
+type GlobalMigrationFunc func(string, *ast.Table) ([]byte, string, error)
+
+var GlobalMigrations []GlobalMigrationFunc
+
+func AddGlobalMigration(f GlobalMigrationFunc) {
+	GlobalMigrations = append(GlobalMigrations, f)
+}
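The GlobalMigrationFunc hook added above is what section-level migrations such as the logtarget removal exercised by the test configs earlier in this diff plug into. As a rough, hypothetical sketch only (the shipped migration receives the parsed *ast.Table rather than plain strings), the logtarget handling described in the changelog boils down to a mapping like this:

package main

import "fmt"

// migrateLogSettings is a simplified, hypothetical sketch of the deprecated
// logtarget handling: "eventlog" turns into a logformat, "stderr" drops the
// logfile so Telegraf falls back to stderr, and "file" keeps the configured
// logfile. The real migration works on the parsed TOML, not on strings.
func migrateLogSettings(logtarget, logfile string) (logformat, outfile string) {
	switch logtarget {
	case "eventlog":
		// The Windows event log is now selected via the log format; the
		// logfile setting is ignored for that format, so it is dropped here.
		return "eventlog", ""
	case "stderr":
		// An empty logfile means messages go to stderr.
		return "", ""
	default: // "file" or unset
		return "", logfile
	}
}

func main() {
	format, file := migrateLogSettings("eventlog", "lala.log")
	fmt.Printf("logformat=%q logfile=%q\n", format, file)
}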

View File

@ -4,6 +4,21 @@ import (
	"fmt"
)

+type pluginTOMLStruct map[string]map[string][]interface{}
+
+func CreateTOMLStruct(category, name string) pluginTOMLStruct {
+	return map[string]map[string][]interface{}{
+		category: {
+			name: make([]interface{}, 0),
+		},
+	}
+}
+
+func (p *pluginTOMLStruct) Add(category, name string, plugin interface{}) {
+	cfg := map[string]map[string][]interface{}(*p)
+	cfg[category][name] = append(cfg[category][name], plugin)
+}
+
func AsStringSlice(raw interface{}) ([]string, error) {
	rawList, ok := raw.([]interface{})
	if !ok {
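CreateTOMLStruct and its Add method are only relocated here, unchanged: plugin migrations use them to assemble the replacement configuration they hand back. A hypothetical call site inside the migrations package might look like the following sketch (the plugin name and settings are invented for illustration, and serialization is assumed to go through the github.com/influxdata/toml package Telegraf already depends on):

package migrations

import "github.com/influxdata/toml"

// rewriteExamplePlugin rebuilds a (made-up) "outputs.example" plugin table
// and serializes it back to TOML, which is what a plugin migration returns.
func rewriteExamplePlugin(settings map[string]interface{}) ([]byte, error) {
	cfg := CreateTOMLStruct("outputs", "example")
	cfg.Add("outputs", "example", settings)
	return toml.Marshal(cfg)
}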

View File

@ -65,7 +65,8 @@ func (la *LogAccumulator) Level() telegraf.LogLevel {
	return telegraf.Debug
}

-func (*LogAccumulator) RegisterErrorCallback(func()) {}
+// Unused
+func (*LogAccumulator) AddAttribute(string, interface{}) {}

func (la *LogAccumulator) append(level pgx.LogLevel, format string, args []interface{}) {
	la.tb.Helper()
View File

@ -50,58 +50,50 @@ func (l *CaptureLogger) loga(level byte, args ...any) {
	l.print(Entry{level, l.Name, fmt.Sprint(args...)})
}

-func (l *CaptureLogger) Level() telegraf.LogLevel {
+// We always want to output at debug level during testing to find issues easier
+func (*CaptureLogger) Level() telegraf.LogLevel {
	return telegraf.Debug
}

-func (*CaptureLogger) RegisterErrorCallback(func()) {}
+// Adding attributes is not supported by the test-logger
+func (*CaptureLogger) AddAttribute(string, interface{}) {}

-// Errorf logs an error message, patterned after log.Printf.
func (l *CaptureLogger) Errorf(format string, args ...interface{}) {
	l.logf(LevelError, format, args...)
}

-// Error logs an error message, patterned after log.Print.
func (l *CaptureLogger) Error(args ...interface{}) {
	l.loga(LevelError, args...)
}

-// Warnf logs a warning message, patterned after log.Printf.
func (l *CaptureLogger) Warnf(format string, args ...interface{}) {
	l.logf(LevelWarn, format, args...)
}

-// Warn logs a warning message, patterned after log.Print.
func (l *CaptureLogger) Warn(args ...interface{}) {
	l.loga(LevelWarn, args...)
}

-// Infof logs an information message, patterned after log.Printf.
func (l *CaptureLogger) Infof(format string, args ...interface{}) {
	l.logf(LevelInfo, format, args...)
}

-// Info logs an information message, patterned after log.Print.
func (l *CaptureLogger) Info(args ...interface{}) {
	l.loga(LevelInfo, args...)
}

-// Debugf logs a debug message, patterned after log.Printf.
func (l *CaptureLogger) Debugf(format string, args ...interface{}) {
	l.logf(LevelDebug, format, args...)
}

-// Debug logs a debug message, patterned after log.Print.
func (l *CaptureLogger) Debug(args ...interface{}) {
	l.loga(LevelDebug, args...)
}

-// Tracef logs a trace message, patterned after log.Printf.
func (l *CaptureLogger) Tracef(format string, args ...interface{}) {
	l.logf(LevelTrace, format, args...)
}

-// Trace logs a trace message, patterned after log.Print.
func (l *CaptureLogger) Trace(args ...interface{}) {
	l.loga(LevelTrace, args...)
}
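The test loggers in this commit mirror the same interface change: RegisterErrorCallback is gone and AddAttribute(string, interface{}) takes its place as a no-op. In a real plugin that method is what feeds extra fields into the new structured log output; a minimal, hypothetical use (plugin type and fields invented here, and how the attribute is rendered depends on the configured logformat) could be:

package example

import "github.com/influxdata/telegraf"

// ExamplePlugin is a made-up plugin used only to illustrate AddAttribute.
type ExamplePlugin struct {
	DeviceID string
	Sensors  []string

	Log telegraf.Logger `toml:"-"`
}

// Init attaches a constant attribute to every message emitted through this
// plugin's logger; the test loggers above simply discard it.
func (p *ExamplePlugin) Init() error {
	p.Log.AddAttribute("device_id", p.DeviceID)
	p.Log.Debugf("initialized with %d sensors", len(p.Sensors))
	return nil
}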

View File

@ -8,67 +8,59 @@ import (
var _ telegraf.Logger = &Logger{}

+// Logger defines a logging structure for plugins.
type Logger struct {
	Name  string // Name is the plugin name, will be printed in the `[]`.
	Quiet bool
}

-func (l Logger) Level() telegraf.LogLevel {
+// We always want to output at debug level during testing to find issues easier
+func (Logger) Level() telegraf.LogLevel {
	return telegraf.Debug
}

-func (Logger) RegisterErrorCallback(func()) {}
+// Adding attributes is not supported by the test-logger
+func (Logger) AddAttribute(string, interface{}) {}

-// Errorf logs an error message, patterned after log.Printf.
func (l Logger) Errorf(format string, args ...interface{}) {
	log.Printf("E! ["+l.Name+"] "+format, args...)
}

-// Error logs an error message, patterned after log.Print.
func (l Logger) Error(args ...interface{}) {
	log.Print(append([]interface{}{"E! [" + l.Name + "] "}, args...)...)
}

-// Warnf logs a warning message, patterned after log.Printf.
func (l Logger) Warnf(format string, args ...interface{}) {
	log.Printf("W! ["+l.Name+"] "+format, args...)
}

-// Warn logs a warning message, patterned after log.Print.
func (l Logger) Warn(args ...interface{}) {
	log.Print(append([]interface{}{"W! [" + l.Name + "] "}, args...)...)
}

-// Infof logs an information message, patterned after log.Printf.
func (l Logger) Infof(format string, args ...interface{}) {
	if !l.Quiet {
		log.Printf("I! ["+l.Name+"] "+format, args...)
	}
}

-// Info logs an information message, patterned after log.Print.
func (l Logger) Info(args ...interface{}) {
	if !l.Quiet {
		log.Print(append([]interface{}{"I! [" + l.Name + "] "}, args...)...)
	}
}

-// Debugf logs a debug message, patterned after log.Printf.
func (l Logger) Debugf(format string, args ...interface{}) {
	if !l.Quiet {
		log.Printf("D! ["+l.Name+"] "+format, args...)
	}
}

-// Debug logs a debug message, patterned after log.Print.
func (l Logger) Debug(args ...interface{}) {
	if !l.Quiet {
		log.Print(append([]interface{}{"D! [" + l.Name + "] "}, args...)...)
	}
}

-// Tracef logs a trace message, patterned after log.Printf.
func (l Logger) Tracef(format string, args ...interface{}) {
	if !l.Quiet {
		log.Printf("T! ["+l.Name+"] "+format, args...)