chore: Check logging functions like printf (#13743)

Sven Rebhan 2023-08-09 23:02:36 +02:00 committed by GitHub
parent 0cf7d23090
commit 61cf18c821
27 changed files with 115 additions and 101 deletions


@@ -152,6 +152,19 @@ linters-settings:
       # Maximum allowed permissions mode for os.WriteFile and ioutil.WriteFile
       # Default: "0600"
       G306: "0640"
+  govet:
+    settings:
+      ## Check the logging function like it would be a printf
+      printf:
+        funcs:
+          - (github.com/influxdata/telegraf.Logger).Debugf
+          - (github.com/influxdata/telegraf.Logger).Infof
+          - (github.com/influxdata/telegraf.Logger).Warnf
+          - (github.com/influxdata/telegraf.Logger).Errorf
+          - (github.com/influxdata/telegraf.Logger).Debug
+          - (github.com/influxdata/telegraf.Logger).Info
+          - (github.com/influxdata/telegraf.Logger).Warn
+          - (github.com/influxdata/telegraf.Logger).Error
   lll:
     # Max line length, lines longer will be reported.
     # '\t' is counted as 1 character by default, and can be changed with the tab-width option.


@@ -169,7 +169,7 @@ func (o *OpcUAClient) Connect() error {
     }
     if o.Client != nil {
-        o.Log.Warnf("Closing connection due to Connect called while already instantiated", u)
+        o.Log.Warnf("Closing connection to %q as already connected", u)
         if err := o.Client.Close(); err != nil {
             // Only log the error but to not bail-out here as this prevents
             // reconnections for multiple parties (see e.g. #9523).


@@ -147,7 +147,7 @@ func (c *CtrlXDataLayer) addMetric(se *sseclient.SseEvent, sub *Subscription) {
         c.Log.Debug("Received keepalive event")
     default:
         // Received a yet unsupported event type
-        c.Log.Debug("Received unsupported event: %q", se.Event)
+        c.Log.Debugf("Received unsupported event: %q", se.Event)
     }
 }
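The original call compiled because `Debug` is variadic, but print-style functions simply concatenate their operands, so the `%q` was emitted literally and `se.Event` appended after it. With `Debug` declared as a print wrapper in the govet config above, vet now reports the stray directive. A small illustration with the standard library:

```go
package main

import "log"

func main() {
	// Print-style: the directive is not expanded; since both operands are
	// strings no space is inserted, printing "unsupported event: %qkeepalive"
	log.Print("unsupported event: %q", "keepalive")

	// Printf-style: prints `unsupported event: "keepalive"`
	log.Printf("unsupported event: %q", "keepalive")
}
```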


@@ -98,10 +98,10 @@ func (h *handler) subscribeGNMI(ctx context.Context, acc telegraf.Accumulator, t
     if h.trace {
         buf, err := protojson.Marshal(reply)
         if err != nil {
-            h.log.Debugf("marshal failed: %v", err)
+            h.log.Debugf("Marshal failed: %v", err)
         } else {
             t := reply.GetUpdate().GetTimestamp()
-            h.log.Debugf("update_%v: %s", t, string(buf))
+            h.log.Debugf("Got update_%v: %s", t, string(buf))
         }
     }
     if response, ok := reply.Response.(*gnmiLib.SubscribeResponse_Update); ok {
@@ -137,7 +137,7 @@ func (h *handler) handleSubscribeResponseUpdate(acc telegraf.Accumulator, respon
     // unmarshal extention
     err := proto.Unmarshal(currentExt, juniperHeader)
     if err != nil {
-        h.log.Errorf("unmarshal gnmi Juniper Header extention failed: %w", err)
+        h.log.Errorf("unmarshal gnmi Juniper Header extension failed: %v", err)
         break
     }
     // Add only relevant Tags from the Juniper Header extention.
@@ -156,7 +156,7 @@ func (h *handler) handleSubscribeResponseUpdate(acc telegraf.Accumulator, respon
     var origin string
     var err error
     if origin, prefix, prefixAliasPath, err = handlePath(response.Update.Prefix, prefixTags, h.aliases, ""); err != nil {
-        h.log.Errorf("handling path %q failed: %v", response.Update.Prefix, err)
+        h.log.Errorf("Handling path %q failed: %v", response.Update.Prefix, err)
     }
     prefix = origin + prefix
 }
@@ -187,7 +187,7 @@ func (h *handler) handleSubscribeResponseUpdate(acc telegraf.Accumulator, respon
 }
 h.log.Debugf("Tag-subscription update for %q: %+v", tagSub.Name, update)
 if err := h.tagStore.insert(tagSub, fullPath, fields, tags); err != nil {
-    h.log.Errorf("inserting tag failed: %w", err)
+    h.log.Errorf("Inserting tag failed: %v", err)
 }
 tagUpdate = true
 break
@@ -259,7 +259,7 @@ func (h *handler) handleSubscribeResponseUpdate(acc telegraf.Accumulator, respon
     key = strings.TrimLeft(key, "/.")
 }
 if key == "" {
-    h.log.Errorf("invalid empty path: %q", k)
+    h.log.Errorf("Invalid empty path: %q", k)
     continue
 }
 grouper.Add(name, tags, timestamp, key, v)
@@ -276,11 +276,11 @@ func (h *handler) handleSubscribeResponseUpdate(acc telegraf.Accumulator, respon
 func (h *handler) handleTelemetryField(update *gnmiLib.Update, tags map[string]string, prefix string) (string, map[string]interface{}) {
     _, gpath, aliasPath, err := handlePath(update.Path, tags, h.aliases, prefix)
     if err != nil {
-        h.log.Errorf("handling path %q failed: %v", update.Path, err)
+        h.log.Errorf("Handling path %q failed: %v", update.Path, err)
     }
     fields, err := gnmiToFields(strings.Replace(gpath, "-", "_", -1), update.Val)
     if err != nil {
-        h.log.Errorf("error parsing update value %q: %v", update.Val, err)
+        h.log.Errorf("Error parsing update value %q: %v", update.Val, err)
     }
     return aliasPath, fields
 }
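The recurring `%w` → `%v` substitution in this commit reflects how `fmt` treats the wrapping verb: `%w` is recognized only by `fmt.Errorf`, where it records the wrapped error for `errors.Is`/`errors.As`; handed to any other printf-style call, including these logger methods, it falls through to the bad-verb path. A runnable sketch:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("connection refused")

	// In a plain printf-style call %w is not a real verb:
	// prints "gather failed: %!w(*errors.errorString=&{connection refused})"
	fmt.Printf("gather failed: %w\n", err)

	// %v renders the error message: "gather failed: connection refused"
	fmt.Printf("gather failed: %v\n", err)

	// %w belongs in fmt.Errorf, where it wraps the error for errors.Is/As:
	wrapped := fmt.Errorf("gather failed: %w", err)
	fmt.Println(errors.Is(wrapped, err)) // true
}
```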


@@ -88,7 +88,7 @@ func (gcs *GCS) Gather(acc telegraf.Accumulator) error {
     }
     if err != nil {
-        gcs.Log.Errorf("Error during iteration of keys", err)
+        gcs.Log.Errorf("Error during iteration of keys: %v", err)
         return err
     }
@@ -279,6 +279,6 @@ func init() {
 func (gcs *GCS) closeReader(r *storage.Reader) {
     if err := r.Close(); err != nil {
-        gcs.Log.Errorf("Could not close reader", err)
+        gcs.Log.Errorf("Could not close reader: %v", err)
     }
 }


@@ -190,7 +190,7 @@ func (i *IntelPMU) checkFileDescriptors() error {
     // maximum file descriptors enforced on a kernel level
     maxFd, err := readMaxFD(i.fileInfo)
     if err != nil {
-        i.Log.Warnf("cannot obtain number of available file descriptors: %v", err)
+        i.Log.Warnf("Cannot obtain number of available file descriptors: %v", err)
     } else if allFd > maxFd {
         return fmt.Errorf("required file descriptors number `%d` exceeds maximum number of available file descriptors `%d`"+
             ": consider increasing the maximum number", allFd, maxFd)
@@ -199,7 +199,7 @@ func (i *IntelPMU) checkFileDescriptors() error {
     // soft limit for current process
     limit, err := i.fileInfo.fileLimit()
     if err != nil {
-        i.Log.Warnf("cannot obtain limit value of open files: %v", err)
+        i.Log.Warnf("Cannot obtain limit value of open files: %v", err)
     } else if allFd > limit {
         return fmt.Errorf("required file descriptors number `%d` exceeds soft limit of open files `%d`"+
             ": consider increasing the limit", allFd, limit)
@@ -249,7 +249,7 @@ func (i *IntelPMU) Stop() {
         }
         err := event.Deactivate()
         if err != nil {
-            i.Log.Warnf("failed to deactivate core event %q: %w", event, err)
+            i.Log.Warnf("Failed to deactivate core event %q: %v", event, err)
         }
     }
 }
@@ -264,7 +264,7 @@ func (i *IntelPMU) Stop() {
         }
         err := event.Deactivate()
         if err != nil {
-            i.Log.Warnf("failed to deactivate uncore event %q: %w", event, err)
+            i.Log.Warnf("Failed to deactivate uncore event %q: %v", event, err)
         }
     }
 }


@@ -133,7 +133,7 @@ func (p *PowerStat) Gather(acc telegraf.Accumulator) error {
     p.logOnce["msr"] = nil
     p.addPerCoreMetrics(acc)
 } else {
-    err := errors.New("error while trying to read MSR (probably msr module was not loaded)")
+    err := errors.New("Error while trying to read MSR (probably msr module was not loaded)")
     if val := p.logOnce["msr"]; val == nil || val.Error() != err.Error() {
         p.Log.Errorf("%v", err)
         // Remember that specific error occurs to omit logging next time
@@ -171,7 +171,7 @@ func (p *PowerStat) addGlobalMetrics(acc telegraf.Accumulator) {
 if err != nil {
     // In case of an error skip calculating metrics for this socket
     if val := p.logOnce[socketID+"rapl"]; val == nil || val.Error() != err.Error() {
-        p.Log.Errorf("error fetching rapl data for socket %s, err: %v", socketID, err)
+        p.Log.Errorf("Error fetching rapl data for socket %s, err: %v", socketID, err)
         // Remember that specific error occurs for socketID to omit logging next time
         p.logOnce[socketID+"rapl"] = err
     }
@@ -208,7 +208,7 @@ func maxDiePerSocket(_ string) int {
 func (p *PowerStat) addUncoreFreq(socketID string, die string, acc telegraf.Accumulator) {
     err := checkFile("/sys/devices/system/cpu/intel_uncore_frequency")
     if err != nil {
-        err := fmt.Errorf("error while checking existing intel_uncore_frequency (probably intel-uncore-frequency module was not loaded)")
+        err := fmt.Errorf("Error while checking existing intel_uncore_frequency (probably intel-uncore-frequency module was not loaded)")
         if val := p.logOnce["intel_uncore_frequency"]; val == nil || val.Error() != err.Error() {
             p.Log.Errorf("%v", err)
             // Remember that specific error occurs to omit logging next time
@@ -228,18 +228,18 @@ func (p *PowerStat) readUncoreFreq(typeFreq string, socketID string, die string,
     p.logOnce[socketID+"msr"] = nil
     cpuID, err := p.GetCPUIDFromSocketID(socketID)
     if err != nil {
-        p.Log.Debugf("error while reading socket ID: %v", err)
+        p.Log.Debugf("Error while reading socket ID: %v", err)
         return
     }
     actualUncoreFreq, err := p.msr.readSingleMsr(cpuID, msrUncorePerfStatusString)
     if err != nil {
-        p.Log.Debugf("error while reading %s: %v", msrUncorePerfStatusString, err)
+        p.Log.Debugf("Error while reading %s: %v", msrUncorePerfStatusString, err)
         return
     }
     actualUncoreFreq = (actualUncoreFreq & 0x3F) * 100
     fields["uncore_frequency_mhz_cur"] = actualUncoreFreq
 } else {
-    err := errors.New("error while trying to read MSR (probably msr module was not loaded), uncore_frequency_mhz_cur metric will not be collected")
+    err := errors.New("Error while trying to read MSR (probably msr module was not loaded), uncore_frequency_mhz_cur metric will not be collected")
     if val := p.logOnce[socketID+"msr"]; val == nil || val.Error() != err.Error() {
         p.Log.Errorf("%v", err)
         // Remember that specific error occurs for socketID to omit logging next time
@@ -249,12 +249,12 @@ func (p *PowerStat) readUncoreFreq(typeFreq string, socketID string, die string,
 }
 initMinFreq, err := p.msr.retrieveUncoreFrequency(socketID, typeFreq, "min", die)
 if err != nil {
-    p.Log.Errorf("error while retrieving minimum uncore frequency of the socket %s, err: %v", socketID, err)
+    p.Log.Errorf("Error while retrieving minimum uncore frequency of the socket %s, err: %v", socketID, err)
     return
 }
 initMaxFreq, err := p.msr.retrieveUncoreFrequency(socketID, typeFreq, "max", die)
 if err != nil {
-    p.Log.Errorf("error while retrieving maximum uncore frequency of the socket %s, err: %v", socketID, err)
+    p.Log.Errorf("Error while retrieving maximum uncore frequency of the socket %s, err: %v", socketID, err)
     return
 }
@@ -272,7 +272,7 @@ func (p *PowerStat) readUncoreFreq(typeFreq string, socketID string, die string,
 func (p *PowerStat) addThermalDesignPowerMetric(socketID string, acc telegraf.Accumulator) {
     maxPower, err := p.rapl.getConstraintMaxPowerWatts(socketID)
     if err != nil {
-        p.Log.Errorf("error while retrieving TDP of the socket %s, err: %v", socketID, err)
+        p.Log.Errorf("Error while retrieving TDP of the socket %s, err: %v", socketID, err)
         return
     }
@@ -334,7 +334,7 @@ func (p *PowerStat) addMetricsForSingleCore(cpuID string, acc telegraf.Accumulat
     err := p.msr.openAndReadMsr(cpuID)
     if err != nil {
         // In case of an error exit the function. All metrics past this point are dependent on MSR
-        p.Log.Debugf("error while reading msr: %v", err)
+        p.Log.Debugf("Error while reading msr: %v", err)
         return
     }
 }
@@ -368,7 +368,7 @@ func (p *PowerStat) addCPUFrequencyMetric(cpuID string, acc telegraf.Accumulator
 // In case of an error leave func
 if err != nil {
-    p.Log.Debugf("error while reading file: %v", err)
+    p.Log.Debugf("Error while reading file: %v", err)
     return
 }
@@ -435,7 +435,7 @@ func (p *PowerStat) addTurboRatioLimit(socketID string, acc telegraf.Accumulator
     }
 }
 if cpuID == "" || model == "" {
-    p.Log.Debug("error while reading socket ID")
+    p.Log.Debug("Error while reading socket ID")
     return
 }
 // dump_hsw_turbo_ratio_limit
@@ -443,7 +443,7 @@ func (p *PowerStat) addTurboRatioLimit(socketID string, acc telegraf.Accumulator
 coreCounts := uint64(0x1211) // counting the number of active cores 17 and 18
 msrTurboRatioLimit2, err := p.msr.readSingleMsr(cpuID, msrTurboRatioLimit2String)
 if err != nil {
-    p.Log.Debugf("error while reading %s: %v", msrTurboRatioLimit2String, err)
+    p.Log.Debugf("Error while reading %s: %v", msrTurboRatioLimit2String, err)
     return
 }
@@ -456,7 +456,7 @@ func (p *PowerStat) addTurboRatioLimit(socketID string, acc telegraf.Accumulator
 coreCounts := uint64(0x100F0E0D0C0B0A09) // counting the number of active cores 9 to 16
 msrTurboRatioLimit1, err := p.msr.readSingleMsr(cpuID, msrTurboRatioLimit1String)
 if err != nil {
-    p.Log.Debugf("error while reading %s: %v", msrTurboRatioLimit1String, err)
+    p.Log.Debugf("Error while reading %s: %v", msrTurboRatioLimit1String, err)
     return
 }
 calculateTurboRatioGroup(coreCounts, msrTurboRatioLimit1, turboRatioLimitGroups)
@@ -478,14 +478,14 @@ func (p *PowerStat) addTurboRatioLimit(socketID string, acc telegraf.Accumulator
 coreCounts, err = p.msr.readSingleMsr(cpuID, msrTurboRatioLimit1String)
 if err != nil {
-    p.Log.Debugf("error while reading %s: %v", msrTurboRatioLimit1String, err)
+    p.Log.Debugf("Error while reading %s: %v", msrTurboRatioLimit1String, err)
     return
 }
 }
 msrTurboRatioLimit, err := p.msr.readSingleMsr(cpuID, msrTurboRatioLimitString)
 if err != nil {
-    p.Log.Debugf("error while reading %s: %v", msrTurboRatioLimitString, err)
+    p.Log.Debugf("Error while reading %s: %v", msrTurboRatioLimitString, err)
     return
 }
 calculateTurboRatioGroup(coreCounts, msrTurboRatioLimit, turboRatioLimitGroups)
@@ -498,7 +498,7 @@ func (p *PowerStat) addTurboRatioLimit(socketID string, acc telegraf.Accumulator
 msrTurboRatioLimit, err := p.msr.readSingleMsr(cpuID, msrAtomCoreTurboRatiosString)
 if err != nil {
-    p.Log.Debugf("error while reading %s: %v", msrAtomCoreTurboRatiosString, err)
+    p.Log.Debugf("Error while reading %s: %v", msrAtomCoreTurboRatiosString, err)
     return
 }
 value := uint64(0)
@@ -515,7 +515,7 @@ func (p *PowerStat) addTurboRatioLimit(socketID string, acc telegraf.Accumulator
 if model == strconv.FormatInt(0x57, 10) { // INTEL_FAM6_XEON_PHI_KNL
     msrTurboRatioLimit, err := p.msr.readSingleMsr(cpuID, msrTurboRatioLimitString)
     if err != nil {
-        p.Log.Debugf("error while reading %s: %v", msrTurboRatioLimitString, err)
+        p.Log.Debugf("Error while reading %s: %v", msrTurboRatioLimitString, err)
         return
     }
@@ -556,7 +556,7 @@ func (p *PowerStat) addCPUBusyFrequencyMetric(cpuID string, acc telegraf.Accumul
 mperfDelta := coresData[cpuID].mperfDelta
 // Avoid division by 0
 if mperfDelta == 0 {
-    p.Log.Errorf("mperf delta should not equal 0 on core %s", cpuID)
+    p.Log.Errorf("Value of mperf delta should not equal 0 on core %s", cpuID)
     return
 }
 aperfMperf := float64(coresData[cpuID].aperfDelta) / float64(mperfDelta)
@@ -570,7 +570,7 @@ func (p *PowerStat) addCPUBusyFrequencyMetric(cpuID string, acc telegraf.Accumul
 }
 if interval == 0 {
-    p.Log.Errorf("interval between last two Telegraf cycles is 0")
+    p.Log.Errorf("Interval between last two Telegraf cycles is 0")
     return
 }
@@ -594,7 +594,7 @@ func (p *PowerStat) addCPUC1StateResidencyMetric(cpuID string, acc telegraf.Accu
 timestampDeltaBig := new(big.Int).SetUint64(coresData[cpuID].timeStampCounterDelta)
 // Avoid division by 0
 if timestampDeltaBig.Sign() < 1 {
-    p.Log.Errorf("timestamp delta value %v should not be lower than 1", timestampDeltaBig)
+    p.Log.Errorf("Timestamp delta value %v should not be lower than 1", timestampDeltaBig)
     return
 }
@@ -633,7 +633,7 @@ func (p *PowerStat) addCPUC6StateResidencyMetric(cpuID string, acc telegraf.Accu
 coresData := p.msr.getCPUCoresData()
 // Avoid division by 0
 if coresData[cpuID].timeStampCounterDelta == 0 {
-    p.Log.Errorf("timestamp counter on offset %s should not equal 0 on cpuID %s",
+    p.Log.Errorf("Timestamp counter on offset %d should not equal 0 on cpuID %s",
         timestampCounterLocation, cpuID)
     return
 }
@@ -657,7 +657,7 @@ func (p *PowerStat) addCPUC0StateResidencyMetric(cpuID string, acc telegraf.Accu
 coresData := p.msr.getCPUCoresData()
 // Avoid division by 0
 if coresData[cpuID].timeStampCounterDelta == 0 {
-    p.Log.Errorf("timestamp counter on offset %s should not equal 0 on cpuID %s",
+    p.Log.Errorf("Timestamp counter on offset %d should not equal 0 on cpuID %s",
         timestampCounterLocation, cpuID)
     return
 }
@@ -686,13 +686,13 @@ func (p *PowerStat) addCPUC0StateResidencyMetric(cpuID string, acc telegraf.Accu
 func (p *PowerStat) addCPUBaseFreq(socketID string, acc telegraf.Accumulator) {
     cpuID, err := p.GetCPUIDFromSocketID(socketID)
     if err != nil {
-        p.Log.Debugf("error while getting CPU ID from Socket ID: %v", err)
+        p.Log.Debugf("Error while getting CPU ID from Socket ID: %v", err)
         return
     }
     msrPlatformInfoMsr, err := p.msr.readSingleMsr(cpuID, msrPlatformInfoString)
     if err != nil {
-        p.Log.Debugf("error while reading %s: %v", msrPlatformInfoString, err)
+        p.Log.Debugf("Error while reading %s: %v", msrPlatformInfoString, err)
         return
     }
@@ -700,7 +700,7 @@ func (p *PowerStat) addCPUBaseFreq(socketID string, acc telegraf.Accumulator) {
 // to get the freq -> ratio * busClock
 cpuBaseFreq := float64((msrPlatformInfoMsr>>8)&0xFF) * p.cpuBusClockValue
 if cpuBaseFreq == 0 {
-    p.Log.Debugf("error while adding CPU base frequency, cpuBaseFreq is zero for the socket: %s", socketID)
+    p.Log.Debugf("Error while adding CPU base frequency, cpuBaseFreq is zero for the socket: %s", socketID)
     return
 }
@@ -716,7 +716,7 @@ func (p *PowerStat) addCPUBaseFreq(socketID string, acc telegraf.Accumulator) {
 func (p *PowerStat) getBusClock(cpuID string) float64 {
     cpuInfo, ok := p.cpuInfo[cpuID]
     if !ok {
-        p.Log.Debugf("cannot find cpuInfo for cpu: %s", cpuID)
+        p.Log.Debugf("Cannot find cpuInfo for cpu: %s", cpuID)
         return 0
     }
@@ -734,7 +734,7 @@ func (p *PowerStat) getBusClock(cpuID string) float64 {
     return p.getSilvermontBusClock(cpuID)
 }
-p.Log.Debugf("couldn't find the freq for the model: %d", model)
+p.Log.Debugf("Couldn't find the freq for the model: %s", model)
 return 0.0
 }
@@ -742,13 +742,13 @@ func (p *PowerStat) getSilvermontBusClock(cpuID string) float64 {
 silvermontFreqTable := []float64{83.3, 100.0, 133.3, 116.7, 80.0}
 msr, err := p.msr.readSingleMsr(cpuID, msrFSBFreqString)
 if err != nil {
-    p.Log.Debugf("error while reading %s: %v", msrFSBFreqString, err)
+    p.Log.Debugf("Error while reading %s: %v", msrFSBFreqString, err)
     return 0.0
 }
 i := int(msr & 0xf)
 if i >= len(silvermontFreqTable) {
-    p.Log.Debugf("unknown msr value: %d, using default bus clock value: %d", i, silvermontFreqTable[3])
+    p.Log.Debugf("Unknown msr value: %d, using default bus clock value: %f", i, silvermontFreqTable[3])
     //same behaviour as in turbostat
     i = 3
 }
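Beyond `%w`, the printf analyzer also checks each verb against its argument's type, which is what caught the `%s` applied to the integer `timestampCounterLocation` and the `%d` applied to a float table entry above. A mismatch never panics at runtime; it just pollutes the log with placeholders, so these bugs tend to linger. A sketch, with made-up values standing in for the MSR constants:

```go
package main

import "fmt"

func main() {
	offset := 0x3FC // an integer MSR offset (hypothetical value)
	freq := 83.3    // a float bus-clock value (hypothetical value)

	// vet: format %s has arg of wrong type int;
	// at runtime this prints "offset %!s(int=1020)"
	fmt.Printf("offset %s\n", offset)

	// vet: format %d has arg of wrong type float64;
	// at runtime this prints "clock %!d(float64=83.3)"
	fmt.Printf("clock %d\n", freq)

	// matching verbs print "offset 1020, clock 83.300000"
	fmt.Printf("offset %d, clock %f\n", offset, freq)
}
```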


@@ -327,7 +327,7 @@ func (m *OpenConfigTelemetry) collectData(
     if ok {
         timestamp = time.UnixMilli(int64(ts))
     } else {
-        m.Log.Warnf("invalid type %T for _timestamp %v", group.data["_timestamp"], group.data["_timestamp"])
+        m.Log.Warnf("Invalid type %T for _timestamp %v", group.data["_timestamp"], group.data["_timestamp"])
     }
 }
@@ -438,7 +438,7 @@ func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error {
     if m.Username != "" && m.Password != "" && m.ClientID != "" {
         if err := m.authenticate(ctx, server, grpcClientConn); err != nil {
-            m.Log.Errorf("error authenticating to %s: %w", grpcServer, err)
+            m.Log.Errorf("Error authenticating to %s: %v", grpcServer, err)
             continue
         }
     }


@@ -145,14 +145,14 @@ func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error {
     err := k.onMessage(k.acc, r)
     if err != nil {
         <-k.sem
-        k.Log.Errorf("Scan parser error: %s", err.Error())
+        k.Log.Errorf("Scan parser error: %v", err)
     }
     return nil
 })
 if err != nil {
     k.cancel()
-    k.Log.Errorf("Scan encountered an error: %s", err.Error())
+    k.Log.Errorf("Scan encountered an error: %v", err)
     k.cons = nil
 }
 }()
@@ -220,7 +220,7 @@ func (k *KinesisConsumer) onDelivery(ctx context.Context) {
 k.lastSeqNum = strToBint(sequenceNum)
 if err := k.checkpoint.SetCheckpoint(chk.streamName, chk.shardID, sequenceNum); err != nil {
-    k.Log.Debug("Setting checkpoint failed: %v", err)
+    k.Log.Debugf("Setting checkpoint failed: %v", err)
 }
 } else {
     k.Log.Debug("Metric group failed to process")
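The change from `err.Error()` with `%s` to the bare `err` with `%v` is more than cosmetic: `%v` formats a nil error as `<nil>`, whereas calling `.Error()` on a nil error panics, and passing the error value keeps the argument typed for vet's checks. For instance:

```go
package main

import "log"

func main() {
	var err error // nil: e.g. no scan error actually occurred

	// %v handles a nil error gracefully: prints "scan error: <nil>"
	log.Printf("scan error: %v", err)

	// .Error() on a nil error panics with a nil-pointer dereference
	log.Printf("scan error: %s", err.Error())
}
```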


@@ -139,7 +139,7 @@ func (m *MongoDB) setupConnection(connURL string) error {
         return fmt.Errorf("unable to ping MongoDB: %w", err)
     }
-    m.Log.Errorf("unable to ping MongoDB: %s", err)
+    m.Log.Errorf("Unable to ping MongoDB: %s", err)
 }
 server := &Server{
@@ -156,7 +156,7 @@ func (m *MongoDB) Stop() {
 for _, server := range m.clients {
     ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
     if err := server.client.Disconnect(ctx); err != nil {
-        m.Log.Errorf("disconnecting from %q failed: %s", server, err)
+        m.Log.Errorf("Disconnecting from %q failed: %v", server.hostname, err)
     }
     cancel()
 }
@@ -172,14 +172,14 @@ func (m *MongoDB) Gather(acc telegraf.Accumulator) error {
 defer wg.Done()
 if m.DisconnectedServersBehavior == "skip" {
     if err := srv.ping(); err != nil {
-        m.Log.Debugf("failed to ping server: %s", err)
+        m.Log.Debugf("Failed to ping server: %s", err)
         return
     }
 }
 err := srv.gatherData(acc, m.GatherClusterStatus, m.GatherPerdbStats, m.GatherColStats, m.GatherTopStat, m.ColStatsDbs)
 if err != nil {
-    m.Log.Errorf("failed to gather data: %s", err)
+    m.Log.Errorf("Failed to gather data: %s", err)
 }
 }(client)
 }


@@ -298,7 +298,7 @@ func (*NFSClient) SampleConfig() string {
 func (n *NFSClient) Gather(acc telegraf.Accumulator) error {
     file, err := os.Open(n.mountstatsPath)
     if err != nil {
-        n.Log.Errorf("Failed opening the [%s] file: %s ", file, err)
+        n.Log.Errorf("Failed opening the %q file: %v ", file.Name(), err)
         return err
     }
     defer file.Close()


@@ -70,7 +70,7 @@ func (o *OpensearchQuery) Init() error {
     err := o.newClient()
     if err != nil {
-        o.Log.Errorf("error creating OpenSearch client: %w", err)
+        o.Log.Errorf("Error creating OpenSearch client: %v", err)
     }
     for i, agg := range o.Aggregations {


@@ -117,7 +117,7 @@ func gatherQemuData(px *Proxmox, acc telegraf.Accumulator) {
 func gatherVMData(px *Proxmox, acc telegraf.Accumulator, rt ResourceType) {
     vmStats, err := getVMStats(px, rt)
     if err != nil {
-        px.Log.Error("Error getting VM stats: %v", err)
+        px.Log.Errorf("Error getting VM stats: %v", err)
         return
     }


@@ -117,15 +117,15 @@ func (l *streamListener) setupConnection(conn net.Conn) error {
 }
 if *l.KeepAlivePeriod == 0 {
     if err := tcpConn.SetKeepAlive(false); err != nil {
-        l.Log.Warnf("Cannot set keep-alive: %w", err)
+        l.Log.Warnf("Cannot set keep-alive: %v", err)
     }
 } else {
     if err := tcpConn.SetKeepAlive(true); err != nil {
-        l.Log.Warnf("Cannot set keep-alive: %w", err)
+        l.Log.Warnf("Cannot set keep-alive: %v", err)
     }
     err := tcpConn.SetKeepAlivePeriod(time.Duration(*l.KeepAlivePeriod))
     if err != nil {
-        l.Log.Warnf("Cannot set keep-alive period: %w", err)
+        l.Log.Warnf("Cannot set keep-alive period: %v", err)
     }
 }
 }


@@ -67,12 +67,12 @@ func (u *Upsd) gatherUps(acc telegraf.Accumulator, name string, variables []nut.
 timeLeftS, err := internal.ToFloat64(metrics["battery.runtime"])
 if err != nil {
-    u.Log.Warnf("'battery.runtime' type is not supported: %w", err)
+    u.Log.Warnf("Type for 'battery.runtime' is not supported: %v", err)
 }
 timeLeftNS, err := internal.ToInt64(timeLeftS * 1_000_000_000)
 if err != nil {
-    u.Log.Warnf("converting 'battery.runtime' to 'time_left_ns' failed: %w", err)
+    u.Log.Warnf("Converting 'battery.runtime' to 'time_left_ns' failed: %v", err)
 }
 fields := map[string]interface{}{


@@ -89,7 +89,7 @@ func (w *WinEventLog) Stop() {
 func (w *WinEventLog) GetState() interface{} {
     bookmarkXML, err := w.renderBookmark(w.bookmark)
     if err != nil {
-        w.Log.Errorf("State-persistence failed, cannot render bookmark: %w", err)
+        w.Log.Errorf("State-persistence failed, cannot render bookmark: %v", err)
         return ""
     }
     return bookmarkXML


@@ -380,7 +380,7 @@ func (m *WinPerfCounters) ParseConfig() error {
     PerfObject.Measurement, PerfObject.IncludeTotal, PerfObject.UseRawValues)
 if err != nil {
     if PerfObject.FailOnMissing || PerfObject.WarnOnMissing {
-        m.Log.Errorf("invalid counterPath %q: %s", counterPath, err.Error())
+        m.Log.Errorf("Invalid counterPath %q: %s", counterPath, err.Error())
     }
     if PerfObject.FailOnMissing {
         return err
@@ -449,10 +449,10 @@ func (m *WinPerfCounters) Gather(acc telegraf.Accumulator) error {
 for _, hostCounterInfo := range m.hostCounters {
     wg.Add(1)
     go func(hostInfo *hostCountersInfo) {
-        m.Log.Debugf("gathering from %s", hostInfo.computer)
+        m.Log.Debugf("Gathering from %s", hostInfo.computer)
         start := time.Now()
         err := m.gatherComputerCounters(hostInfo, acc)
-        m.Log.Debugf("gathering from %s finished in %.3fs", hostInfo.computer, time.Since(start))
+        m.Log.Debugf("Gathering from %s finished in %v", hostInfo.computer, time.Since(start))
         if err != nil {
             acc.AddError(fmt.Errorf("error during collecting data on host %q: %w", hostInfo.computer, err))
         }
@@ -482,7 +482,7 @@ func (m *WinPerfCounters) gatherComputerCounters(hostCounterInfo *hostCountersIn
 if !isKnownCounterDataError(err) {
     return fmt.Errorf("error while getting value for counter %q: %w", metric.counterPath, err)
 }
-m.Log.Warnf("error while getting value for counter %q, instance: %s, will skip metric: %v", metric.counterPath, metric.instance, err)
+m.Log.Warnf("Error while getting value for counter %q, instance: %s, will skip metric: %v", metric.counterPath, metric.instance, err)
 continue
 }
 addCounterMeasurement(metric, metric.instance, value, collectedFields)
@@ -498,7 +498,7 @@ func (m *WinPerfCounters) gatherComputerCounters(hostCounterInfo *hostCountersIn
 if !isKnownCounterDataError(err) {
     return fmt.Errorf("error while getting value for counter %q: %w", metric.counterPath, err)
 }
-m.Log.Warnf("error while getting value for counter %q, instance: %s, will skip metric: %v", metric.counterPath, metric.instance, err)
+m.Log.Warnf("Error while getting value for counter %q, instance: %s, will skip metric: %v", metric.counterPath, metric.instance, err)
 continue
 }
 for _, cValue := range counterValues {
@@ -589,14 +589,14 @@ func (m *WinPerfCounters) Init() error {
 for _, wildcard := range wildcards {
     if strings.Contains(object.ObjectName, wildcard) {
         found = true
-        m.Log.Errorf("object: %s, contains wildcard %s", object.ObjectName, wildcard)
+        m.Log.Errorf("Object: %s, contains wildcard %s", object.ObjectName, wildcard)
     }
 }
 for _, counter := range object.Counters {
     for _, wildcard := range wildcards {
         if strings.Contains(counter, wildcard) {
             found = true
-            m.Log.Errorf("object: %s, counter: %s contains wildcard %s", object.ObjectName, counter, wildcard)
+            m.Log.Errorf("Object: %s, counter: %s contains wildcard %s", object.ObjectName, counter, wildcard)
         }
     }
 }
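The `%.3fs` fix above is another type-agreement catch: `time.Since` returns a `time.Duration` (an integer type), not a float, so `%f` can only produce a placeholder, while `%v` delegates to `Duration`'s `String` method. For example:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	time.Sleep(10 * time.Millisecond)

	// time.Duration is not a float: prints a placeholder along the lines of
	// "finished in %!f(time.Duration=10541000)s"
	fmt.Printf("finished in %.3fs\n", time.Since(start))

	// %v uses Duration's String method: "finished in 10.541ms"
	fmt.Printf("finished in %v\n", time.Since(start))
}
```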


@@ -189,7 +189,7 @@ func (ps *PubSub) toMessages(metrics []telegraf.Metric) ([]*pubsub.Message, erro
     b, err = ps.compressData(b)
     if err != nil {
-        ps.Log.Errorf("unable to compress message with %s: %w", ps.ContentEncoding, err)
+        ps.Log.Errorf("Unable to compress message with %s: %v", ps.ContentEncoding, err)
         continue
     }


@@ -167,7 +167,7 @@ func (m *MQTT) Write(metrics []telegraf.Metric) error {
 for _, msg := range topicMessages {
     if err := m.client.Publish(msg.topic, msg.payload); err != nil {
-        m.Log.Warn("Could not publish message to MQTT server, %s", err)
+        m.Log.Warnf("Could not publish message to MQTT server: %v", err)
     }
 }
@@ -179,7 +179,7 @@ func (m *MQTT) collectNonBatch(hostname string, metrics []telegraf.Metric) []mes
 for _, metric := range metrics {
     topic, err := m.generator.Generate(hostname, metric)
     if err != nil {
-        m.Log.Warnf("Generating topic name failed: %w", err)
+        m.Log.Warnf("Generating topic name failed: %v", err)
         m.Log.Debugf("metric was: %v", metric)
         continue
     }
@@ -201,7 +201,7 @@ func (m *MQTT) collectBatch(hostname string, metrics []telegraf.Metric) []messag
 for _, metric := range metrics {
     topic, err := m.generator.Generate(hostname, metric)
     if err != nil {
-        m.Log.Warnf("Generating topic name failed: %w", err)
+        m.Log.Warnf("Generating topic name failed: %v", err)
         m.Log.Debugf("metric was: %v", metric)
         continue
     }
@@ -225,7 +225,7 @@ func (m *MQTT) collectField(hostname string, metrics []telegraf.Metric) []messag
 for _, metric := range metrics {
     topic, err := m.generator.Generate(hostname, metric)
     if err != nil {
-        m.Log.Warnf("Generating topic name failed: %w", err)
+        m.Log.Warnf("Generating topic name failed: %v", err)
         m.Log.Debugf("metric was: %v", metric)
         continue
     }
@@ -249,14 +249,14 @@ func (m *MQTT) collectHomieV4(hostname string, metrics []telegraf.Metric) []mess
 for _, metric := range metrics {
     topic, err := m.generator.Generate(hostname, metric)
     if err != nil {
-        m.Log.Warnf("Generating topic name failed: %w", err)
+        m.Log.Warnf("Generating topic name failed: %v", err)
         m.Log.Debugf("metric was: %v", metric)
         continue
     }
     msgs, nodeID, err := m.collectHomieDeviceMessages(topic, metric)
     if err != nil {
-        m.Log.Warnf(err.Error())
+        m.Log.Warn(err.Error())
         m.Log.Debugf("metric was: %v", metric)
         continue
     }
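The last change above (`Warnf(err.Error())` to `Warn(err.Error())`) removes a non-constant format string: any `%` that happens to appear in the error text would otherwise be reinterpreted as a directive. A standard-library illustration:

```go
package main

import "log"

func main() {
	msg := `no handler for topic "%v"` // an error text that contains a verb

	// As a format string the %v is consumed without an argument:
	// prints `no handler for topic "%!v(MISSING)"`
	log.Printf(msg)

	// Passed as a plain value, the text survives intact
	log.Print(msg)
}
```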


@@ -134,7 +134,7 @@ func (a *NebiusCloudMonitoring) Write(metrics []telegraf.Metric) error {
 for _, field := range m.FieldList() {
     value, err := internal.ToFloat64(field.Value)
     if err != nil {
-        a.Log.Errorf("skipping value: %w", err.Error())
+        a.Log.Errorf("Skipping value: %v", err)
         continue
     }


@@ -139,7 +139,7 @@ func (o *OpenTelemetry) Write(metrics []telegraf.Metric) error {
 // sort the timestamps we collected
 sort.Slice(timestamps, func(i, j int) bool { return timestamps[i] < timestamps[j] })
-o.Log.Debugf("received %d metrics and split into %d groups by timestamp", len(metrics), len(metricBatch))
+o.Log.Debugf("Received %d metrics and split into %d groups by timestamp", len(metrics), len(metricBatch))
 for _, timestamp := range timestamps {
     if err := o.sendBatch(metricBatch[timestamp]); err != nil {
         return err
@@ -165,12 +165,12 @@ func (o *OpenTelemetry) sendBatch(metrics []telegraf.Metric) error {
 case telegraf.Summary:
     vType = common.InfluxMetricValueTypeSummary
 default:
-    o.Log.Warnf("unrecognized metric type %Q", metric.Type())
+    o.Log.Warnf("Unrecognized metric type %v", metric.Type())
     continue
 }
 err := batch.AddPoint(metric.Name(), metric.Tags(), metric.Fields(), metric.Time(), vType)
 if err != nil {
-    o.Log.Warnf("failed to add point: %s", err)
+    o.Log.Warnf("Failed to add point: %v", err)
     continue
 }
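`%Q` here was simply a typo for a verb `fmt` does not define; vet reports the unrecognized verb, and at runtime the output degrades to a `%!Q(...)` placeholder, e.g.:

```go
package main

import "fmt"

func main() {
	// %Q is not a verb fmt knows: prints "type %!Q(string=summary)"
	fmt.Printf("type %Q\n", "summary")

	// %q (quoted string) and the generic %v both work:
	// prints `type "summary" / summary`
	fmt.Printf("type %q / %v\n", "summary", "summary")
}
```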


@@ -89,7 +89,7 @@ func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *Tabl
 if isTempError(err) {
     return err
 }
-tm.Postgresql.Logger.Errorf("permanent error updating schema for %s: %w", tagTable.name, err)
+tm.Postgresql.Logger.Errorf("Permanent error updating schema for %s: %v", tagTable.name, err)
 }
 if len(missingCols) > 0 {
@@ -100,7 +100,7 @@ func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *Tabl
     }
     colDefs = append(colDefs, col.Name+" "+col.Type)
 }
-tm.Logger.Errorf("table %q is missing tag columns (dropping metrics): %s",
+tm.Logger.Errorf("Table %q is missing tag columns (dropping metrics): %s",
     tagTable.name,
     strings.Join(colDefs, ", "))
 }
@@ -120,7 +120,7 @@ func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *Tabl
 if isTempError(err) {
     return err
 }
-tm.Postgresql.Logger.Errorf("permanent error updating schema for %s: %w", metricTable.name, err)
+tm.Postgresql.Logger.Errorf("Permanent error updating schema for %s: %v", metricTable.name, err)
 }
 if len(missingCols) > 0 {
@@ -131,7 +131,7 @@ func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *Tabl
     }
     colDefs = append(colDefs, col.Name+" "+col.Type)
 }
-tm.Logger.Errorf("table %q is missing columns (omitting fields): %s",
+tm.Logger.Errorf("Table %q is missing columns (omitting fields): %s",
     metricTable.name,
     strings.Join(colDefs, ", "))
 }
@@ -189,7 +189,7 @@ func (tm *TableManager) EnsureStructure(
 if col.Role == utils.TagColType {
     return nil, fmt.Errorf("column name too long: %q", col.Name)
 }
-tm.Postgresql.Logger.Errorf("column name too long: %q", col.Name)
+tm.Postgresql.Logger.Errorf("Column name too long: %q", col.Name)
 invalidColumns = append(invalidColumns, col)
 }


@@ -200,7 +200,7 @@ func (t *Timestream) Connect() error {
     t.Log.Errorf("Couldn't describe database %q. Check error, fix permissions, connectivity, create database.", t.DatabaseName)
     return err
 }
-t.Log.Infof("Describe database %q returned %q.", t.DatabaseName, describeDatabaseOutput)
+t.Log.Infof("Describe database %q returned %v", t.DatabaseName, describeDatabaseOutput)
 }
 t.svc = svc


@@ -88,15 +88,15 @@ func (w *Wavefront) Connect() error {
 }
 var connectionURL string
 if w.URL != "" {
-    w.Log.Debug("connecting over http/https using Url: %s", w.URL)
+    w.Log.Debugf("Connecting over http/https using url: %s", w.URL)
     connectionURLWithToken, err := w.senderURLFromURLAndToken()
     if err != nil {
         return err
     }
     connectionURL = connectionURLWithToken
 } else {
-    w.Log.Warnf("configuration with host/port is deprecated. Please use url.")
-    w.Log.Debugf("connecting over http using Host: %q and Port: %d", w.Host, w.Port)
+    w.Log.Warn("Configuration with host/port is deprecated. Please use url.")
+    w.Log.Debugf("Connecting over http using Host: %q and Port: %d", w.Host, w.Port)
     connectionURL = senderURLFromHostAndPort(w.Host, w.Port)
 }
@@ -147,8 +147,8 @@ func (w *Wavefront) Write(metrics []telegraf.Metric) error {
         }
     }
 }
-w.Log.Errorf("non-retryable error during Wavefront.Write: %v", err)
-w.Log.Debugf("non-retryable metric data: %+v", point)
+w.Log.Errorf("Non-retryable error during Wavefront.Write: %v", err)
+w.Log.Debugf("Non-retryable metric data: %+v", point)
 }
 }
 }


@@ -126,7 +126,7 @@ func (a *YandexCloudMonitoring) Write(metrics []telegraf.Metric) error {
 for _, field := range m.FieldList() {
     value, err := internal.ToFloat64(field.Value)
     if err != nil {
-        a.Log.Errorf("skipping value: %w", err.Error())
+        a.Log.Errorf("Skipping value: %v", err)
         continue
     }
@@ -180,7 +180,7 @@ func getResponseFromMetadata(c *http.Client, metadataURL string) ([]byte, error)
 }
 func (a *YandexCloudMonitoring) getFolderIDFromMetadata() (string, error) {
-    a.Log.Infof("getting folder ID in %s", a.MetadataFolderURL)
+    a.Log.Infof("Getting folder ID in %s", a.MetadataFolderURL)
     body, err := getResponseFromMetadata(a.client, a.MetadataFolderURL)
     if err != nil {
         return "", err
@@ -193,7 +193,7 @@ func (a *YandexCloudMonitoring) getFolderIDFromMetadata() (string, error) {
 }
 func (a *YandexCloudMonitoring) getIAMTokenFromMetadata() (string, int, error) {
-    a.Log.Debugf("getting new IAM token in %s", a.MetadataTokenURL)
+    a.Log.Debugf("Getting new IAM token in %s", a.MetadataTokenURL)
     body, err := getResponseFromMetadata(a.client, a.MetadataTokenURL)
     if err != nil {
         return "", 0, err
@@ -230,7 +230,7 @@ func (a *YandexCloudMonitoring) send(body []byte) error {
 }
 req.Header.Set("Authorization", "Bearer "+a.IAMToken)
-a.Log.Debugf("sending metrics to %s", req.URL.String())
+a.Log.Debugf("Sending metrics to %s", req.URL.String())
 a.Log.Debugf("body: %s", body)
 resp, err := a.client.Do(req)
 if err != nil {


@@ -46,7 +46,8 @@ func (s *Scaling) Init() error {
 anyMinMaxSet := s.OutMax != nil || s.OutMin != nil || s.InMax != nil || s.InMin != nil
 factorSet := s.Factor != nil || s.Offset != nil
 if anyMinMaxSet && factorSet {
-    return fmt.Errorf("cannot use factor/offset and minimum/maximum at the same time for fields %s", strings.Join(s.Fields, ","))
+    return fmt.Errorf("cannot use factor/offset and minimum/maximum at the same time for fields %s",
+        strings.Join(s.Fields, ","))
 } else if anyMinMaxSet && !allMinMaxSet {
     return fmt.Errorf("all minimum and maximum values need to be set for fields %s", strings.Join(s.Fields, ","))
 } else if !anyMinMaxSet && !factorSet {
@@ -96,7 +97,7 @@ func (s *Scale) Init() error {
 for _, field := range s.Scalings[i].Fields {
     // only generate a warning for the first duplicate field filter
     if warn, ok := allFields[field]; ok && warn {
-        s.Log.Warnf("filter field %q used twice in scalings", field)
+        s.Log.Warnf("Filter field %q used twice in scalings", field)
         allFields[field] = false
     } else {
         allFields[field] = true
@@ -122,7 +123,7 @@ func (s *Scale) scaleValues(metric telegraf.Metric) {
 v, err := internal.ToFloat64(field.Value)
 if err != nil {
-    s.Log.Errorf("error converting %q to float: %w\n", field.Key, err)
+    s.Log.Errorf("Error converting %q to float: %v", field.Key, err)
     continue
 }


@@ -139,7 +139,7 @@ func (s *Serializer) batchEvents(metrics []telegraf.Metric) ([]byte, error) {
 for _, m := range metrics {
     e, err := s.createEvent(m)
     if err != nil {
-        s.Log.Errorf("creating event for %v failed: %w", m, err)
+        s.Log.Errorf("Creating event for %v failed: %v", m, err)
         continue
     }
     events = append(events, e)