feat(logging): Add 'trace' log-level (#15695)

Sven Rebhan authored on 2024-08-01 00:32:24 +02:00, committed by GitHub
parent a21d263c4c
commit d160276552
25 changed files with 207 additions and 152 deletions

View File

@@ -410,7 +410,7 @@ Parameters that can be used with any input plugin:
 - **name_suffix**: Specifies a suffix to attach to the measurement name.
 - **tags**: A map of tags to apply to a specific input's measurements.
 - **log_level**: Override the log-level for this plugin. Possible values are
-  `error`, `warn`, `info` and `debug`.
+  `error`, `warn`, `info`, `debug` and `trace`.

 The [metric filtering][] parameters can be used to limit what metrics are
 emitted from the input plugin.

View File

@@ -14,6 +14,8 @@ const (
 	Info
 	// Debug will log all of the above and debugging messages issued by plugins
 	Debug
+	// Trace will log all of the above and trace messages issued by plugins
+	Trace
 )

 func LogLevelFromString(name string) LogLevel {
@@ -26,6 +28,8 @@ func LogLevelFromString(name string) LogLevel {
 		return Info
 	case "DEBUG", "debug":
 		return Debug
+	case "TRACE", "trace":
+		return Trace
 	}
 	return None
 }
@@ -40,6 +44,8 @@ func (e LogLevel) String() string {
 		return "INFO"
 	case Debug:
 		return "DEBUG"
+	case Trace:
+		return "TRACE"
 	}
 	return "NONE"
 }
@@ -54,6 +60,8 @@ func (e LogLevel) Indicator() string {
 		return "I!"
 	case Debug:
 		return "D!"
+	case Trace:
+		return "T!"
 	}
 	return "U!"
 }
@@ -63,7 +71,7 @@ func (e LogLevel) Includes(level LogLevel) bool {
 }

 // Logger defines an plugin-related interface for logging.
-type Logger interface {
+type Logger interface { //nolint:interfacebloat // All functions are required
 	// Level returns the configured log-level of the logger
 	Level() LogLevel
@@ -71,10 +79,6 @@ type Logger interface {
 	Errorf(format string, args ...interface{})
 	// Error logs an error message, patterned after log.Print.
 	Error(args ...interface{})
-	// Debugf logs a debug message, patterned after log.Printf.
-	Debugf(format string, args ...interface{})
-	// Debug logs a debug message, patterned after log.Print.
-	Debug(args ...interface{})
 	// Warnf logs a warning message, patterned after log.Printf.
 	Warnf(format string, args ...interface{})
 	// Warn logs a warning message, patterned after log.Print.
@@ -83,4 +87,12 @@ type Logger interface {
 	Infof(format string, args ...interface{})
 	// Info logs an information message, patterned after log.Print.
 	Info(args ...interface{})
+	// Debugf logs a debug message, patterned after log.Printf.
+	Debugf(format string, args ...interface{})
+	// Debug logs a debug message, patterned after log.Print.
+	Debug(args ...interface{})
+	// Tracef logs a trace message, patterned after log.Printf.
+	Tracef(format string, args ...interface{})
+	// Trace logs a trace message, patterned after log.Print.
+	Trace(args ...interface{})
 }
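
For illustration, a minimal sketch (not part of this commit) of how a plugin could use the extended interface; the plugin type and its `lastPayload` field are hypothetical, but `Level()`, `Includes()`, `Tracef()` and `telegraf.Trace` are exactly the methods and constant added above:

```go
package myplugin // hypothetical example plugin

import "github.com/influxdata/telegraf"

type MyPlugin struct {
	Log         telegraf.Logger `toml:"-"`
	lastPayload []byte
}

func (p *MyPlugin) Gather(acc telegraf.Accumulator) error {
	// Only build the potentially expensive hex dump when the per-plugin
	// `log_level = "trace"` override (or a global trace level) is active.
	if p.Log.Level().Includes(telegraf.Trace) {
		p.Log.Tracef("raw payload: %x", p.lastPayload)
	}
	p.Log.Debug("gather finished")
	return nil
}
```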

View File

@@ -55,19 +55,6 @@ func New(category, name, alias string) *logger {
 	return l
 }

-// SetLevel changes the log-level to the given one
-func (l *logger) SetLogLevel(name string) error {
-	if name == "" {
-		return nil
-	}
-
-	level := telegraf.LogLevelFromString(name)
-	if level == telegraf.None {
-		return fmt.Errorf("invalid log-level %q", name)
-	}
-	l.level = &level
-	return nil
-}
-
 // SubLogger creates a new logger with the given name added as suffix
 func (l *logger) SubLogger(name string) telegraf.Logger {
 	suffix := l.suffix
@@ -118,6 +105,24 @@ func (l *logger) Level() telegraf.LogLevel {
 	return instance.level
 }

+// SetLevel overrides the current log-level of the logger
+func (l *logger) SetLevel(level telegraf.LogLevel) {
+	l.level = &level
+}
+
+// SetLevel changes the log-level to the given one
+func (l *logger) SetLogLevel(name string) error {
+	if name == "" {
+		return nil
+	}
+
+	level := telegraf.LogLevelFromString(name)
+	if level == telegraf.None {
+		return fmt.Errorf("invalid log-level %q", name)
+	}
+	l.SetLevel(level)
+	return nil
+}
+
 // Register a callback triggered when errors are about to be written to the log
 func (l *logger) RegisterErrorCallback(f func()) {
 	l.onError = append(l.onError, f)
@@ -162,6 +167,15 @@ func (l *logger) Debug(args ...interface{}) {
 	l.Print(telegraf.Debug, time.Now(), args...)
 }

+// Trace logging, this is suppressed on console
+func (l *logger) Tracef(format string, args ...interface{}) {
+	l.Trace(fmt.Sprintf(format, args...))
+}
+
+func (l *logger) Trace(args ...interface{}) {
+	l.Print(telegraf.Trace, time.Now(), args...)
+}
+
 func (l *logger) Print(level telegraf.LogLevel, ts time.Time, args ...interface{}) {
 	// Check if we are in early logging state and store the message in this case
 	if instance.impl == nil {

View File

@@ -24,6 +24,8 @@ func (s *stdlogRedirector) Write(b []byte) (n int, err error) {
 	// Log with the given level
 	switch level {
+	case 'T':
+		s.log.Trace(string(msg))
 	case 'D':
 		s.log.Debug(string(msg))
 	case 'I':
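
The redirector above lets messages that third-party code writes through Go's standard `log` package keep their Telegraf level, now including trace. Below is a small self-contained sketch of the same routing idea, assuming lines carry the usual `T!`/`D!`/`I!` indicator prefix; the real parsing in Telegraf may differ in detail:

```go
package main

import (
	"fmt"
	"strings"
)

// route dispatches a line on its level indicator, defaulting to info.
func route(line string) {
	level, msg := byte('I'), line
	if len(line) > 2 && line[1] == '!' {
		level, msg = line[0], strings.TrimSpace(line[2:])
	}
	switch level {
	case 'T':
		fmt.Println("trace:", msg)
	case 'D':
		fmt.Println("debug:", msg)
	default:
		fmt.Println("info:", msg)
	}
}

func main() {
	route("T! connection handshake: 0x02 0xf0 0x80")
	route("D! starting plugin")
}
```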

View File

@@ -1,33 +1,41 @@
 package kafka

 import (
+	"sync"
+
 	"github.com/IBM/sarama"
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/logger"
 )

-type Logger struct {
-}
-
-// DebugLogger logs messages from sarama at the debug level.
-type DebugLogger struct {
-	Log telegraf.Logger
-}
-
-func (l *DebugLogger) Print(v ...interface{}) {
-	l.Log.Debug(v...)
-}
-
-func (l *DebugLogger) Printf(format string, v ...interface{}) {
-	l.Log.Debugf(format, v...)
-}
-
-func (l *DebugLogger) Println(v ...interface{}) {
+var (
+	log  = logger.New("sarama", "", "")
+	once sync.Once
+)
+
+type debugLogger struct{}
+
+func (l *debugLogger) Print(v ...interface{}) {
+	log.Trace(v...)
+}
+
+func (l *debugLogger) Printf(format string, v ...interface{}) {
+	log.Tracef(format, v...)
+}
+
+func (l *debugLogger) Println(v ...interface{}) {
 	l.Print(v...)
 }

 // SetLogger configures a debug logger for kafka (sarama)
-func (k *Logger) SetLogger() {
-	sarama.Logger = &DebugLogger{Log: logger.New("sarama", "", "")}
+func SetLogger(level telegraf.LogLevel) {
+	// Set-up the sarama logger only once
+	once.Do(func() {
+		sarama.Logger = &debugLogger{}
+	})
+
+	// Increase the log-level if needed.
+	if !log.Level().Includes(level) {
+		log.SetLevel(level)
+	}
 }
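
With the shared, package-level sarama logger above, every sarama-based plugin simply reports its own level during `Init()`; the helper raises the common logger to the most verbose level requested, so one plugin configured with `log_level = "trace"` is enough to surface sarama's output. A hedged sketch of the call pattern (the plugin type is illustrative and the import path is assumed to be the common kafka package):

```go
package example // illustrative only, not part of the commit

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/common/kafka"
)

type SomeKafkaPlugin struct {
	Log telegraf.Logger `toml:"-"`
}

func (k *SomeKafkaPlugin) Init() error {
	// Install the sarama logger (once per process) and raise its level to
	// this plugin's level if that is more verbose than the current one.
	kafka.SetLogger(k.Log.Level())
	return nil
}
```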

View File

@@ -52,11 +52,8 @@ type KafkaConsumer struct {
 	ConsumerFetchDefault config.Size `toml:"consumer_fetch_default"`
 	ConnectionStrategy string `toml:"connection_strategy"`
 	ResolveCanonicalBootstrapServersOnly bool `toml:"resolve_canonical_bootstrap_servers_only"`
+	Log telegraf.Logger `toml:"-"`

 	kafka.ReadConfig
-	kafka.Logger
-
-	Log telegraf.Logger `toml:"-"`

 	ConsumerCreator ConsumerGroupCreator `toml:"-"`
 	consumer ConsumerGroup
@@ -99,7 +96,7 @@ func (k *KafkaConsumer) SetParser(parser telegraf.Parser) {
 }

 func (k *KafkaConsumer) Init() error {
-	k.SetLogger()
+	kafka.SetLogger(k.Log.Level())

 	if k.MaxUndeliveredMessages == 0 {
 		k.MaxUndeliveredMessages = defaultMaxUndeliveredMessages

View File

@@ -67,7 +67,7 @@ func TestInit(t *testing.T) {
 	}{
 		{
 			name:   "default config",
-			plugin: &KafkaConsumer{},
+			plugin: &KafkaConsumer{Log: testutil.Logger{}},
 			check: func(t *testing.T, plugin *KafkaConsumer) {
 				require.Equal(t, defaultConsumerGroup, plugin.ConsumerGroup)
 				require.Equal(t, defaultMaxUndeliveredMessages, plugin.MaxUndeliveredMessages)

View File

@@ -60,9 +60,8 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
   ## and "RTU" for serial connections.
   # transmission_mode = "auto"

-  ## Trace the connection to the modbus device as debug messages
-  ## Note: You have to enable telegraf's debug mode to see those messages!
-  # debug_connection = false
+  ## Trace the connection to the modbus device
+  # log_level = "trace"

   ## Define the configuration schema
   ## |---register -- define fields per register type in the original style (only supports one slave ID)

View File

@@ -58,7 +58,7 @@ type Modbus struct {
 	Timeout config.Duration `toml:"timeout"`
 	Retries int `toml:"busy_retries"`
 	RetriesWaitTime config.Duration `toml:"busy_retries_wait"`
-	DebugConnection bool `toml:"debug_connection"`
+	DebugConnection bool `toml:"debug_connection" deprecated:"1.35.0;use 'log_level' 'trace' instead"`
 	Workarounds ModbusWorkarounds `toml:"workarounds"`
 	ConfigurationType string `toml:"configuration_type"`
 	Log telegraf.Logger `toml:"-"`
@@ -261,6 +261,11 @@ func (m *Modbus) initClient() error {
 		return err
 	}

+	var tracelog mb.Logger
+	if m.Log.Level().Includes(telegraf.Trace) || m.DebugConnection { // for backward compatibility
+		tracelog = m
+	}
+
 	switch u.Scheme {
 	case "tcp":
 		host, port, err := net.SplitHostPort(u.Host)
@@ -271,23 +276,17 @@ func (m *Modbus) initClient() error {
 		case "", "auto", "TCP":
 			handler := mb.NewTCPClientHandler(host + ":" + port)
 			handler.Timeout = time.Duration(m.Timeout)
-			if m.DebugConnection {
-				handler.Logger = m
-			}
+			handler.Logger = tracelog
 			m.handler = handler
 		case "RTUoverTCP":
 			handler := mb.NewRTUOverTCPClientHandler(host + ":" + port)
 			handler.Timeout = time.Duration(m.Timeout)
-			if m.DebugConnection {
-				handler.Logger = m
-			}
+			handler.Logger = tracelog
 			m.handler = handler
 		case "ASCIIoverTCP":
 			handler := mb.NewASCIIOverTCPClientHandler(host + ":" + port)
 			handler.Timeout = time.Duration(m.Timeout)
-			if m.DebugConnection {
-				handler.Logger = m
-			}
+			handler.Logger = tracelog
 			m.handler = handler
 		default:
 			return fmt.Errorf("invalid transmission mode %q for %q", m.TransmissionMode, u.Scheme)
@@ -305,9 +304,7 @@ func (m *Modbus) initClient() error {
 		handler.DataBits = m.DataBits
 		handler.Parity = m.Parity
 		handler.StopBits = m.StopBits
-		if m.DebugConnection {
-			handler.Logger = m
-		}
+		handler.Logger = tracelog
 		if m.RS485 != nil {
 			handler.RS485.Enabled = true
 			handler.RS485.DelayRtsBeforeSend = time.Duration(m.RS485.DelayRtsBeforeSend)
@@ -324,9 +321,7 @@ func (m *Modbus) initClient() error {
 		handler.DataBits = m.DataBits
 		handler.Parity = m.Parity
 		handler.StopBits = m.StopBits
-		if m.DebugConnection {
-			handler.Logger = m
-		}
+		handler.Logger = tracelog
 		if m.RS485 != nil {
 			handler.RS485.Enabled = true
 			handler.RS485.DelayRtsBeforeSend = time.Duration(m.RS485.DelayRtsBeforeSend)
@@ -541,7 +536,7 @@ func (m *Modbus) collectFields(acc telegraf.Accumulator, timestamp time.Time, ta
 // Implement the logger interface of the modbus client
 func (m *Modbus) Printf(format string, v ...interface{}) {
-	m.Log.Debugf(format, v...)
+	m.Log.Tracef(format, v...)
 }

 // Add this plugin to telegraf
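
The modbus change also shows the backward-compatibility bridge used throughout this commit: the deprecated boolean option keeps working, while the new per-plugin trace level enables the same output. A small sketch of that check, using only the `Level()`/`Includes()` API added above (the helper name is illustrative, not a Telegraf function):

```go
package example // illustrative helper, not part of the commit

import "github.com/influxdata/telegraf"

// connectionTracingEnabled returns true when the plugin runs with
// `log_level = "trace"` or when a deprecated option such as
// `debug_connection` is still set in an existing configuration.
func connectionTracingEnabled(log telegraf.Logger, deprecatedFlag bool) bool {
	return log.Level().Includes(telegraf.Trace) || deprecatedFlag
}
```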

View File

@@ -42,9 +42,8 @@
   ## and "RTU" for serial connections.
   # transmission_mode = "auto"

-  ## Trace the connection to the modbus device as debug messages
-  ## Note: You have to enable telegraf's debug mode to see those messages!
-  # debug_connection = false
+  ## Trace the connection to the modbus device
+  # log_level = "trace"

   ## Define the configuration schema
   ## |---register -- define fields per register type in the original style (only supports one slave ID)

View File

@@ -64,10 +64,8 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
   ## decoding.
   # private_enterprise_number_files = []

-  ## Dump incoming packets to the log
-  ## This can be helpful to debug parsing issues. Only active if
-  ## Telegraf is in debug mode.
-  # dump_packets = false
+  ## Log incoming packets for tracing issues
+  # log_level = "trace"
 ```

 ## Private Enterprise Number mapping

View File

@@ -28,7 +28,7 @@ type NetFlow struct {
 	ServiceAddress string `toml:"service_address"`
 	ReadBufferSize config.Size `toml:"read_buffer_size"`
 	Protocol string `toml:"protocol"`
-	DumpPackets bool `toml:"dump_packets"`
+	DumpPackets bool `toml:"dump_packets" deprecated:"1.35.0;use 'log_level' 'trace' instead"`
 	PENFiles []string `toml:"private_enterprise_number_files"`

 	Log telegraf.Logger `toml:"-"`
@@ -135,8 +135,8 @@ func (n *NetFlow) read(acc telegraf.Accumulator) {
 		if count < 1 {
 			continue
 		}
-		if n.DumpPackets {
-			n.Log.Debugf("raw data: %s", hex.EncodeToString(buf[:count]))
+		if n.Log.Level().Includes(telegraf.Trace) || n.DumpPackets { // for backward compatibility
+			n.Log.Tracef("raw data: %s", hex.EncodeToString(buf[:count]))
 		}
 		metrics, err := n.decoder.Decode(src.IP, buf[:count])
 		if err != nil {

View File

@@ -24,7 +24,5 @@
   ## decoding.
   # private_enterprise_number_files = []

-  ## Dump incoming packets to the log
-  ## This can be helpful to debug parsing issues. Only active if
-  ## Telegraf is in debug mode.
-  # dump_packets = false
+  ## Log incoming packets for tracing issues
+  # log_level = "trace"

View File

@@ -47,9 +47,8 @@ using the `startup_error_behavior` setting. Available values are:
   ## Timeout for requests
   # timeout = "10s"

-  ## Log detailed connection messages for debugging
-  ## This option only has an effect when Telegraf runs in debug mode
-  # debug_connection = false
+  ## Log detailed connection messages for tracing issues
+  # log_level = "trace"

   ## Metric definition(s)
   [[inputs.s7comm.metric]]

View File

@@ -8,7 +8,6 @@ import (
 	"hash/maphash"
 	"log" //nolint:depguard // Required for tracing connection issues
 	"net"
-	"os"
 	"regexp"
 	"strconv"
 	"strings"
@@ -94,7 +93,7 @@ type S7comm struct {
 	ConnectionType string `toml:"connection_type"`
 	BatchMaxSize int `toml:"pdu_size"`
 	Timeout config.Duration `toml:"timeout"`
-	DebugConnection bool `toml:"debug_connection"`
+	DebugConnection bool `toml:"debug_connection" deprecated:"1.35.0;use 'log_level' 'trace' instead"`
 	Configs []metricDefinition `toml:"metric"`

 	Log telegraf.Logger `toml:"-"`
@@ -143,8 +142,8 @@ func (s *S7comm) Init() error {
 	// Create handler for the connection
 	s.handler = gos7.NewTCPClientHandlerWithConnectType(s.Server, s.Rack, s.Slot, connectionTypeMap[s.ConnectionType])
 	s.handler.Timeout = time.Duration(s.Timeout)
-	if s.DebugConnection {
-		s.handler.Logger = log.New(os.Stderr, "D! [inputs.s7comm] ", log.LstdFlags)
+	if s.Log.Level().Includes(telegraf.Trace) || s.DebugConnection { // for backward compatibility
+		s.handler.Logger = log.New(&tracelogger{log: s.Log}, "", 0)
 	}

 	// Create the requests
@@ -417,6 +416,16 @@ func fieldID(seed maphash.Seed, def metricDefinition, field metricFieldDefinitio
 	return mh.Sum64()
 }

+// Logger for tracing internal messages
+type tracelogger struct {
+	log telegraf.Logger
+}
+
+func (l *tracelogger) Write(b []byte) (n int, err error) {
+	l.log.Trace(string(b))
+	return len(b), nil
+}
+
 // Add this plugin to telegraf
 func init() {
 	inputs.Add("s7comm", func() telegraf.Input {
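
The `tracelogger` added here adapts Telegraf's logger to `io.Writer`, so the stdlib `*log.Logger` that gos7 expects can feed the trace level instead of writing straight to stderr. Below is a standalone toy of the same adapter idea; the sink here is just stderr with a `T!` prefix, whereas in the plugin it is `s.Log.Trace`:

```go
package main

import (
	"log"
	"os"
)

// lineWriter implements io.Writer and forwards each formatted line to a
// trace-style sink, mirroring the tracelogger in the diff above.
type lineWriter struct{ sink *log.Logger }

func (w lineWriter) Write(b []byte) (int, error) {
	w.sink.Print("T! [inputs.s7comm] " + string(b))
	return len(b), nil
}

func main() {
	sink := log.New(os.Stderr, "", log.LstdFlags)
	tracer := log.New(lineWriter{sink: sink}, "", 0) // no prefix/flags; the sink adds them
	tracer.Printf("connected to %s rack=%d slot=%d", "192.0.2.10:102", 0, 1)
}
```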

View File

@@ -17,9 +17,8 @@
   ## Timeout for requests
   # timeout = "10s"

-  ## Log detailed connection messages for debugging
-  ## This option only has an effect when Telegraf runs in debug mode
-  # debug_connection = false
+  ## Log detailed connection messages for tracing issues
+  # log_level = "trace"

   ## Metric definition(s)
   [[inputs.s7comm.metric]]

View File

@@ -42,10 +42,9 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
   ## Dump information for debugging
   ## Allows to print the raw variables (and corresponding types) as received
-  ## from the NUT server ONCE for each UPS. The output is only available when
-  ## running Telegraf in debug-mode.
+  ## from the NUT server ONCE for each UPS.
   ## Please attach this information when reporting issues!
-  # dump_raw_variables = false
+  # log_level = "trace"
 ```

 ## Pitfalls

View File

@@ -21,7 +21,6 @@
   ## Dump information for debugging
   ## Allows to print the raw variables (and corresponding types) as received
-  ## from the NUT server ONCE for each UPS. The output is only available when
-  ## running Telegraf in debug-mode.
+  ## from the NUT server ONCE for each UPS.
   ## Please attach this information when reporting issues!
-  # dump_raw_variables = false
+  # log_level = "trace"

View File

@@ -60,7 +60,7 @@ type Upsd struct {
 	Password string `toml:"password"`
 	ForceFloat bool `toml:"force_float"`
 	Additional []string `toml:"additional_fields"`
-	DumpRaw bool `toml:"dump_raw_variables"`
+	DumpRaw bool `toml:"dump_raw_variables" deprecated:"1.35.0;use 'log_level' 'trace' instead"`
 	Log telegraf.Logger `toml:"-"`

 	filter filter.Filter
@@ -89,7 +89,7 @@ func (u *Upsd) Gather(acc telegraf.Accumulator) error {
 	if err != nil {
 		return err
 	}
-	if u.DumpRaw {
+	if u.Log.Level().Includes(telegraf.Trace) || u.DumpRaw { // for backward compatibility
 		for name, variables := range upsList {
 			// Only dump the information once per UPS
 			if u.dumped[name] {
@@ -101,7 +101,7 @@ func (u *Upsd) Gather(acc telegraf.Accumulator) error {
 				values = append(values, fmt.Sprintf("%s: %v", v.Name, v.Value))
 				types = append(types, fmt.Sprintf("%s: %v", v.Name, v.OriginalType))
 			}
-			u.Log.Debugf("Variables dump for UPS %q:\n%s\n-----\n%s", name, strings.Join(values, "\n"), strings.Join(types, "\n"))
+			u.Log.Tracef("Variables dump for UPS %q:\n%s\n-----\n%s", name, strings.Join(values, "\n"), strings.Join(types, "\n"))
 		}
 	}

 	for name, variables := range upsList {

View File

@@ -31,16 +31,17 @@ var ValidTopicSuffixMethods = []string{
 var zeroTime = time.Unix(0, 0)

 type Kafka struct {
 	Brokers []string `toml:"brokers"`
 	Topic string `toml:"topic"`
 	TopicTag string `toml:"topic_tag"`
 	ExcludeTopicTag bool `toml:"exclude_topic_tag"`
 	TopicSuffix TopicSuffix `toml:"topic_suffix"`
 	RoutingTag string `toml:"routing_tag"`
 	RoutingKey string `toml:"routing_key"`
 	ProducerTimestamp string `toml:"producer_timestamp"`
+	Log telegraf.Logger `toml:"-"`

 	proxy.Socks5ProxyConfig
+	kafka.WriteConfig

 	// Legacy TLS config options
 	// TLS client certificate
@@ -50,12 +51,6 @@ type Kafka struct {
 	// TLS certificate authority
 	CA string

-	kafka.WriteConfig
-	kafka.Logger
-
-	Log telegraf.Logger `toml:"-"`
-
 	saramaConfig *sarama.Config
 	producerFunc func(addrs []string, config *sarama.Config) (sarama.SyncProducer, error)
 	producer sarama.SyncProducer
@@ -123,10 +118,9 @@ func (k *Kafka) SetSerializer(serializer serializers.Serializer) {
 }

 func (k *Kafka) Init() error {
-	k.SetLogger()
+	kafka.SetLogger(k.Log.Level())

-	err := ValidateTopicSuffixMethod(k.TopicSuffix.Method)
-	if err != nil {
+	if err := ValidateTopicSuffixMethod(k.TopicSuffix.Method); err != nil {
 		return err
 	}

 	config := sarama.NewConfig()

View File

@@ -182,16 +182,6 @@ func (la *LogAccumulator) Error(args ...interface{}) {
 	la.append(pgx.LogLevelError, "%v", args)
 }

-func (la *LogAccumulator) Debugf(format string, args ...interface{}) {
-	la.tb.Helper()
-	la.append(pgx.LogLevelDebug, format, args)
-}
-
-func (la *LogAccumulator) Debug(args ...interface{}) {
-	la.tb.Helper()
-	la.append(pgx.LogLevelDebug, "%v", args)
-}
-
 func (la *LogAccumulator) Warnf(format string, args ...interface{}) {
 	la.tb.Helper()
 	la.append(pgx.LogLevelWarn, format, args)
@@ -212,6 +202,26 @@ func (la *LogAccumulator) Info(args ...interface{}) {
 	la.append(pgx.LogLevelInfo, "%v", args)
 }

+func (la *LogAccumulator) Debugf(format string, args ...interface{}) {
+	la.tb.Helper()
+	la.append(pgx.LogLevelDebug, format, args)
+}
+
+func (la *LogAccumulator) Debug(args ...interface{}) {
+	la.tb.Helper()
+	la.append(pgx.LogLevelDebug, "%v", args)
+}
+
+func (la *LogAccumulator) Tracef(format string, args ...interface{}) {
+	la.tb.Helper()
+	la.append(pgx.LogLevelDebug, format, args)
+}
+
+func (la *LogAccumulator) Trace(args ...interface{}) {
+	la.tb.Helper()
+	la.append(pgx.LogLevelDebug, "%v", args)
+}
+
 var ctx = context.Background()
 type PostgresqlTest struct {

View File

@@ -136,8 +136,7 @@ XPath expressions.
   # xpath_native_types = false

   ## Trace empty node selections for debugging
-  ## This will only produce output in debugging mode.
-  # xpath_trace = false
+  # log_level = "trace"

   ## Multiple parsing sections are allowed
   [[inputs.file.xpath]]

View File

@@ -45,7 +45,7 @@ type Parser struct {
 	PrintDocument bool `toml:"xpath_print_document"`
 	AllowEmptySelection bool `toml:"xpath_allow_empty_selection"`
 	NativeTypes bool `toml:"xpath_native_types"`
-	Trace bool `toml:"xpath_trace"`
+	Trace bool `toml:"xpath_trace" deprecated:"1.35.0;use 'log_level' 'trace' instead"`
 	Configs []Config `toml:"xpath"`
 	DefaultMetricName string `toml:"-"`
 	DefaultTags map[string]string `toml:"-"`
@@ -629,14 +629,14 @@ func (p *Parser) constructFieldName(root, node dataNode, name string, expand boo
 }

 func (p *Parser) debugEmptyQuery(operation string, root dataNode, initialquery string) {
-	if p.Log == nil || !p.Trace {
+	if p.Log == nil || !(p.Log.Level().Includes(telegraf.Trace) || p.Trace) { // for backward compatibility
 		return
 	}

 	query := initialquery

 	// We already know that the
-	p.Log.Debugf("got 0 nodes for query %q in %s", query, operation)
+	p.Log.Tracef("got 0 nodes for query %q in %s", query, operation)
 	for {
 		parts := splitLastPathElement(query)
 		if len(parts) < 1 {
@@ -646,10 +646,10 @@ func (p *Parser) debugEmptyQuery(operation string, root dataNode, initialquery s
 			q := parts[i]
 			nodes, err := p.document.QueryAll(root, q)
 			if err != nil {
-				p.Log.Debugf("executing query %q in %s failed: %v", q, operation, err)
+				p.Log.Tracef("executing query %q in %s failed: %v", q, operation, err)
 				return
 			}
-			p.Log.Debugf("got %d nodes for query %q in %s", len(nodes), q, operation)
+			p.Log.Tracef("got %d nodes for query %q in %s", len(nodes), q, operation)
 			if len(nodes) > 0 && nodes[0] != nil {
 				return
 			}

View File

@@ -15,6 +15,7 @@ const (
 	LevelWarn = 'W'
 	LevelInfo = 'I'
 	LevelDebug = 'D'
+	LevelTrace = 'T'
 )

 type Entry struct {
@@ -65,16 +66,6 @@ func (l *CaptureLogger) Error(args ...interface{}) {
 	l.loga(LevelError, args...)
 }

-// Debugf logs a debug message, patterned after log.Printf.
-func (l *CaptureLogger) Debugf(format string, args ...interface{}) {
-	l.logf(LevelDebug, format, args...)
-}
-
-// Debug logs a debug message, patterned after log.Print.
-func (l *CaptureLogger) Debug(args ...interface{}) {
-	l.loga(LevelDebug, args...)
-}
-
 // Warnf logs a warning message, patterned after log.Printf.
 func (l *CaptureLogger) Warnf(format string, args ...interface{}) {
 	l.logf(LevelWarn, format, args...)
@@ -95,6 +86,26 @@ func (l *CaptureLogger) Info(args ...interface{}) {
 	l.loga(LevelInfo, args...)
 }

+// Debugf logs a debug message, patterned after log.Printf.
+func (l *CaptureLogger) Debugf(format string, args ...interface{}) {
+	l.logf(LevelDebug, format, args...)
+}
+
+// Debug logs a debug message, patterned after log.Print.
+func (l *CaptureLogger) Debug(args ...interface{}) {
+	l.loga(LevelDebug, args...)
+}
+
+// Tracef logs a trace message, patterned after log.Printf.
+func (l *CaptureLogger) Tracef(format string, args ...interface{}) {
+	l.logf(LevelTrace, format, args...)
+}
+
+// Trace logs a trace message, patterned after log.Print.
+func (l *CaptureLogger) Trace(args ...interface{}) {
+	l.loga(LevelTrace, args...)
+}
+
 func (l *CaptureLogger) NMessages() int {
 	l.Lock()
 	defer l.Unlock()

View File

@@ -30,20 +30,6 @@ func (l Logger) Error(args ...interface{}) {
 	log.Print(append([]interface{}{"E! [" + l.Name + "] "}, args...)...)
 }

-// Debugf logs a debug message, patterned after log.Printf.
-func (l Logger) Debugf(format string, args ...interface{}) {
-	if !l.Quiet {
-		log.Printf("D! ["+l.Name+"] "+format, args...)
-	}
-}
-
-// Debug logs a debug message, patterned after log.Print.
-func (l Logger) Debug(args ...interface{}) {
-	if !l.Quiet {
-		log.Print(append([]interface{}{"D! [" + l.Name + "] "}, args...)...)
-	}
-}
-
 // Warnf logs a warning message, patterned after log.Printf.
 func (l Logger) Warnf(format string, args ...interface{}) {
 	log.Printf("W! ["+l.Name+"] "+format, args...)
@@ -67,3 +53,31 @@ func (l Logger) Info(args ...interface{}) {
 		log.Print(append([]interface{}{"I! [" + l.Name + "] "}, args...)...)
 	}
 }
+
+// Debugf logs a debug message, patterned after log.Printf.
+func (l Logger) Debugf(format string, args ...interface{}) {
+	if !l.Quiet {
+		log.Printf("D! ["+l.Name+"] "+format, args...)
+	}
+}
+
+// Debug logs a debug message, patterned after log.Print.
+func (l Logger) Debug(args ...interface{}) {
+	if !l.Quiet {
+		log.Print(append([]interface{}{"D! [" + l.Name + "] "}, args...)...)
+	}
+}
+
+// Tracef logs a trace message, patterned after log.Printf.
+func (l Logger) Tracef(format string, args ...interface{}) {
+	if !l.Quiet {
+		log.Printf("T! ["+l.Name+"] "+format, args...)
+	}
+}
+
+// Trace logs a trace message, patterned after log.Print.
+func (l Logger) Trace(args ...interface{}) {
+	if !l.Quiet {
+		log.Print(append([]interface{}{"T! [" + l.Name + "] "}, args...)...)
+	}
+}