Revive fixes - part 2 (#8835)

* Revive fixes regarding the following set of rules:
[rule.if-return]
[rule.increment-decrement]
[rule.var-declaration]
[rule.package-comments]
[rule.receiver-naming]
[rule.unexported-return]
Paweł Żak 2021-02-17 00:19:50 +01:00 committed by GitHub
parent 5606a9531a
commit d9736d543f
81 changed files with 730 additions and 859 deletions
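
For context, a minimal, hypothetical Go sketch of what each listed rule flags and the shape of the rewrite applied throughout the diffs below (the package, type, and function names here are illustrative only, not taken from the Telegraf codebase):

// Package example illustrates the revive rules addressed in this commit
// (rule.package-comments: a package should carry a "Package <name> ..." comment).
package example

import "errors"

// Counter is an illustrative type. Its methods use the short receiver "c"
// consistently (rule.receiver-naming flags receivers named "this", "self",
// names unrelated to the type, and inconsistent names across methods).
type Counter struct {
	n int
}

// Inc bumps the counter by one.
func (c *Counter) Inc() {
	c.n++ // rule.increment-decrement: prefer c.n++ over "c.n += 1"
}

// validate stands in for any fallible call.
func validate(n int) error {
	if n < 0 {
		return errors.New("negative count")
	}
	return nil
}

// Check shows rule.if-return: instead of
//
//	if err := validate(c.n); err != nil {
//		return err
//	}
//	return nil
//
// return the call's error directly.
func (c *Counter) Check() error {
	return validate(c.n)
}

// Reset shows rule.var-declaration: write "var n int", not "var n int = 0";
// the zero value is already implicit.
func (c *Counter) Reset() {
	var n int
	c.n = n
}

// New shows rule.unexported-return: an exported constructor should return an
// exported type, so this returns *Counter rather than a hidden "*counter".
func New() *Counter {
	return &Counter{}
}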

View File

@ -10,8 +10,6 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
var format = "2006-01-02T15:04:05.999Z07:00"
func TestAlignedTicker(t *testing.T) { func TestAlignedTicker(t *testing.T) {
interval := 10 * time.Second interval := 10 * time.Second
jitter := 0 * time.Second jitter := 0 * time.Second
@ -249,7 +247,7 @@ func simulatedDist(ticker Ticker, clock *clock.Mock) Distribution {
for !clock.Now().After(until) { for !clock.Now().After(until) {
select { select {
case tm := <-ticker.Elapsed(): case tm := <-ticker.Elapsed():
dist.Buckets[tm.Second()] += 1 dist.Buckets[tm.Second()]++
dist.Count++ dist.Count++
dist.Waittime += tm.Sub(last).Seconds() dist.Waittime += tm.Sub(last).Seconds()
last = tm last = tm

View File

@ -123,10 +123,7 @@ func (w *FileWriter) openCurrent() (err error) {
w.bytesWritten = fileInfo.Size() w.bytesWritten = fileInfo.Size()
} }
if err = w.rotateIfNeeded(); err != nil { return w.rotateIfNeeded()
return err
}
return nil
} }
func (w *FileWriter) rotateIfNeeded() error { func (w *FileWriter) rotateIfNeeded() error {
@ -153,11 +150,7 @@ func (w *FileWriter) rotate() (err error) {
return err return err
} }
if err = w.purgeArchivesIfNeeded(); err != nil { return w.purgeArchivesIfNeeded()
return err
}
return nil
} }
func (w *FileWriter) purgeArchivesIfNeeded() (err error) { func (w *FileWriter) purgeArchivesIfNeeded() (err error) {

View File

@ -15,27 +15,27 @@ type GosnmpWrapper struct {
} }
// Host returns the value of GoSNMP.Target. // Host returns the value of GoSNMP.Target.
func (gsw GosnmpWrapper) Host() string { func (gs GosnmpWrapper) Host() string {
return gsw.Target return gs.Target
} }
// Walk wraps GoSNMP.Walk() or GoSNMP.BulkWalk(), depending on whether the // Walk wraps GoSNMP.Walk() or GoSNMP.BulkWalk(), depending on whether the
// connection is using SNMPv1 or newer. // connection is using SNMPv1 or newer.
// Also, if any error is encountered, it will just once reconnect and try again. // Also, if any error is encountered, it will just once reconnect and try again.
func (gsw GosnmpWrapper) Walk(oid string, fn gosnmp.WalkFunc) error { func (gs GosnmpWrapper) Walk(oid string, fn gosnmp.WalkFunc) error {
var err error var err error
// On error, retry once. // On error, retry once.
// Unfortunately we can't distinguish between an error returned by gosnmp, and one returned by the walk function. // Unfortunately we can't distinguish between an error returned by gosnmp, and one returned by the walk function.
for i := 0; i < 2; i++ { for i := 0; i < 2; i++ {
if gsw.Version == gosnmp.Version1 { if gs.Version == gosnmp.Version1 {
err = gsw.GoSNMP.Walk(oid, fn) err = gs.GoSNMP.Walk(oid, fn)
} else { } else {
err = gsw.GoSNMP.BulkWalk(oid, fn) err = gs.GoSNMP.BulkWalk(oid, fn)
} }
if err == nil { if err == nil {
return nil return nil
} }
if err := gsw.GoSNMP.Connect(); err != nil { if err := gs.GoSNMP.Connect(); err != nil {
return fmt.Errorf("reconnecting: %w", err) return fmt.Errorf("reconnecting: %w", err)
} }
} }
@ -44,15 +44,15 @@ func (gsw GosnmpWrapper) Walk(oid string, fn gosnmp.WalkFunc) error {
// Get wraps GoSNMP.GET(). // Get wraps GoSNMP.GET().
// If any error is encountered, it will just once reconnect and try again. // If any error is encountered, it will just once reconnect and try again.
func (gsw GosnmpWrapper) Get(oids []string) (*gosnmp.SnmpPacket, error) { func (gs GosnmpWrapper) Get(oids []string) (*gosnmp.SnmpPacket, error) {
var err error var err error
var pkt *gosnmp.SnmpPacket var pkt *gosnmp.SnmpPacket
for i := 0; i < 2; i++ { for i := 0; i < 2; i++ {
pkt, err = gsw.GoSNMP.Get(oids) pkt, err = gs.GoSNMP.Get(oids)
if err == nil { if err == nil {
return pkt, nil return pkt, nil
} }
if err := gsw.GoSNMP.Connect(); err != nil { if err := gs.GoSNMP.Connect(); err != nil {
return nil, fmt.Errorf("reconnecting: %w", err) return nil, fmt.Errorf("reconnecting: %w", err)
} }
} }

View File

@ -11,10 +11,10 @@ import (
const ( const (
// Default size of metrics batch size. // Default size of metrics batch size.
DEFAULT_METRIC_BATCH_SIZE = 1000 DefaultMetricBatchSize = 1000
// Default number of metrics kept. It should be a multiple of batch size. // Default number of metrics kept. It should be a multiple of batch size.
DEFAULT_METRIC_BUFFER_LIMIT = 10000 DefaultMetricBufferLimit = 10000
) )
// OutputConfig containing name and filter // OutputConfig containing name and filter
@ -78,13 +78,13 @@ func NewRunningOutput(
bufferLimit = config.MetricBufferLimit bufferLimit = config.MetricBufferLimit
} }
if bufferLimit == 0 { if bufferLimit == 0 {
bufferLimit = DEFAULT_METRIC_BUFFER_LIMIT bufferLimit = DefaultMetricBufferLimit
} }
if config.MetricBatchSize > 0 { if config.MetricBatchSize > 0 {
batchSize = config.MetricBatchSize batchSize = config.MetricBatchSize
} }
if batchSize == 0 { if batchSize == 0 {
batchSize = DEFAULT_METRIC_BATCH_SIZE batchSize = DefaultMetricBatchSize
} }
ro := &RunningOutput{ ro := &RunningOutput{
@ -114,8 +114,8 @@ func (r *RunningOutput) LogName() string {
return logName("outputs", r.Config.Name, r.Config.Alias) return logName("outputs", r.Config.Name, r.Config.Alias)
} }
func (ro *RunningOutput) metricFiltered(metric telegraf.Metric) { func (r *RunningOutput) metricFiltered(metric telegraf.Metric) {
ro.MetricsFiltered.Incr(1) r.MetricsFiltered.Incr(1)
metric.Drop() metric.Drop()
} }
@ -133,45 +133,45 @@ func (r *RunningOutput) Init() error {
// AddMetric adds a metric to the output. // AddMetric adds a metric to the output.
// //
// Takes ownership of metric // Takes ownership of metric
func (ro *RunningOutput) AddMetric(metric telegraf.Metric) { func (r *RunningOutput) AddMetric(metric telegraf.Metric) {
if ok := ro.Config.Filter.Select(metric); !ok { if ok := r.Config.Filter.Select(metric); !ok {
ro.metricFiltered(metric) r.metricFiltered(metric)
return return
} }
ro.Config.Filter.Modify(metric) r.Config.Filter.Modify(metric)
if len(metric.FieldList()) == 0 { if len(metric.FieldList()) == 0 {
ro.metricFiltered(metric) r.metricFiltered(metric)
return return
} }
if output, ok := ro.Output.(telegraf.AggregatingOutput); ok { if output, ok := r.Output.(telegraf.AggregatingOutput); ok {
ro.aggMutex.Lock() r.aggMutex.Lock()
output.Add(metric) output.Add(metric)
ro.aggMutex.Unlock() r.aggMutex.Unlock()
return return
} }
if len(ro.Config.NameOverride) > 0 { if len(r.Config.NameOverride) > 0 {
metric.SetName(ro.Config.NameOverride) metric.SetName(r.Config.NameOverride)
} }
if len(ro.Config.NamePrefix) > 0 { if len(r.Config.NamePrefix) > 0 {
metric.AddPrefix(ro.Config.NamePrefix) metric.AddPrefix(r.Config.NamePrefix)
} }
if len(ro.Config.NameSuffix) > 0 { if len(r.Config.NameSuffix) > 0 {
metric.AddSuffix(ro.Config.NameSuffix) metric.AddSuffix(r.Config.NameSuffix)
} }
dropped := ro.buffer.Add(metric) dropped := r.buffer.Add(metric)
atomic.AddInt64(&ro.droppedMetrics, int64(dropped)) atomic.AddInt64(&r.droppedMetrics, int64(dropped))
count := atomic.AddInt64(&ro.newMetricsCount, 1) count := atomic.AddInt64(&r.newMetricsCount, 1)
if count == int64(ro.MetricBatchSize) { if count == int64(r.MetricBatchSize) {
atomic.StoreInt64(&ro.newMetricsCount, 0) atomic.StoreInt64(&r.newMetricsCount, 0)
select { select {
case ro.BatchReady <- time.Now(): case r.BatchReady <- time.Now():
default: default:
} }
} }
@ -179,50 +179,50 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
// Write writes all metrics to the output, stopping when all have been sent on // Write writes all metrics to the output, stopping when all have been sent on
// or error. // or error.
func (ro *RunningOutput) Write() error { func (r *RunningOutput) Write() error {
if output, ok := ro.Output.(telegraf.AggregatingOutput); ok { if output, ok := r.Output.(telegraf.AggregatingOutput); ok {
ro.aggMutex.Lock() r.aggMutex.Lock()
metrics := output.Push() metrics := output.Push()
ro.buffer.Add(metrics...) r.buffer.Add(metrics...)
output.Reset() output.Reset()
ro.aggMutex.Unlock() r.aggMutex.Unlock()
} }
atomic.StoreInt64(&ro.newMetricsCount, 0) atomic.StoreInt64(&r.newMetricsCount, 0)
// Only process the metrics in the buffer now. Metrics added while we are // Only process the metrics in the buffer now. Metrics added while we are
// writing will be sent on the next call. // writing will be sent on the next call.
nBuffer := ro.buffer.Len() nBuffer := r.buffer.Len()
nBatches := nBuffer/ro.MetricBatchSize + 1 nBatches := nBuffer/r.MetricBatchSize + 1
for i := 0; i < nBatches; i++ { for i := 0; i < nBatches; i++ {
batch := ro.buffer.Batch(ro.MetricBatchSize) batch := r.buffer.Batch(r.MetricBatchSize)
if len(batch) == 0 { if len(batch) == 0 {
break break
} }
err := ro.write(batch) err := r.write(batch)
if err != nil { if err != nil {
ro.buffer.Reject(batch) r.buffer.Reject(batch)
return err return err
} }
ro.buffer.Accept(batch) r.buffer.Accept(batch)
} }
return nil return nil
} }
// WriteBatch writes a single batch of metrics to the output. // WriteBatch writes a single batch of metrics to the output.
func (ro *RunningOutput) WriteBatch() error { func (r *RunningOutput) WriteBatch() error {
batch := ro.buffer.Batch(ro.MetricBatchSize) batch := r.buffer.Batch(r.MetricBatchSize)
if len(batch) == 0 { if len(batch) == 0 {
return nil return nil
} }
err := ro.write(batch) err := r.write(batch)
if err != nil { if err != nil {
ro.buffer.Reject(batch) r.buffer.Reject(batch)
return err return err
} }
ro.buffer.Accept(batch) r.buffer.Accept(batch)
return nil return nil
} }

View File

@ -52,8 +52,8 @@ func (rp *RunningProcessor) metricFiltered(metric telegraf.Metric) {
metric.Drop() metric.Drop()
} }
func (r *RunningProcessor) Init() error { func (rp *RunningProcessor) Init() error {
if p, ok := r.Processor.(telegraf.Initializer); ok { if p, ok := rp.Processor.(telegraf.Initializer); ok {
err := p.Init() err := p.Init()
if err != nil { if err != nil {
return err return err
@ -62,39 +62,39 @@ func (r *RunningProcessor) Init() error {
return nil return nil
} }
func (r *RunningProcessor) Log() telegraf.Logger { func (rp *RunningProcessor) Log() telegraf.Logger {
return r.log return rp.log
} }
func (r *RunningProcessor) LogName() string { func (rp *RunningProcessor) LogName() string {
return logName("processors", r.Config.Name, r.Config.Alias) return logName("processors", rp.Config.Name, rp.Config.Alias)
} }
func (r *RunningProcessor) MakeMetric(metric telegraf.Metric) telegraf.Metric { func (rp *RunningProcessor) MakeMetric(metric telegraf.Metric) telegraf.Metric {
return metric return metric
} }
func (r *RunningProcessor) Start(acc telegraf.Accumulator) error { func (rp *RunningProcessor) Start(acc telegraf.Accumulator) error {
return r.Processor.Start(acc) return rp.Processor.Start(acc)
} }
func (r *RunningProcessor) Add(m telegraf.Metric, acc telegraf.Accumulator) error { func (rp *RunningProcessor) Add(m telegraf.Metric, acc telegraf.Accumulator) error {
if ok := r.Config.Filter.Select(m); !ok { if ok := rp.Config.Filter.Select(m); !ok {
// pass downstream // pass downstream
acc.AddMetric(m) acc.AddMetric(m)
return nil return nil
} }
r.Config.Filter.Modify(m) rp.Config.Filter.Modify(m)
if len(m.FieldList()) == 0 { if len(m.FieldList()) == 0 {
// drop metric // drop metric
r.metricFiltered(m) rp.metricFiltered(m)
return nil return nil
} }
return r.Processor.Add(m, acc) return rp.Processor.Add(m, acc)
} }
func (r *RunningProcessor) Stop() { func (rp *RunningProcessor) Stop() {
r.Processor.Stop() rp.Processor.Stop()
} }

View File

@ -86,9 +86,5 @@ func (k *Config) SetConfig(config *sarama.Config) error {
config.Net.TLS.Enable = true config.Net.TLS.Enable = true
} }
if err := k.SetSASLConfig(config); err != nil { return k.SetSASLConfig(config)
return err
}
return nil
} }

View File

@ -34,15 +34,15 @@ func (s *Shim) LoadConfig(filePath *string) error {
} }
if conf.Input != nil { if conf.Input != nil {
if err = s.AddInput(conf.Input); err != nil { if err = s.AddInput(conf.Input); err != nil {
return fmt.Errorf("Failed to add Input: %w", err) return fmt.Errorf("failed to add Input: %w", err)
} }
} else if conf.Processor != nil { } else if conf.Processor != nil {
if err = s.AddStreamingProcessor(conf.Processor); err != nil { if err = s.AddStreamingProcessor(conf.Processor); err != nil {
return fmt.Errorf("Failed to add Processor: %w", err) return fmt.Errorf("failed to add Processor: %w", err)
} }
} else if conf.Output != nil { } else if conf.Output != nil {
if err = s.AddOutput(conf.Output); err != nil { if err = s.AddOutput(conf.Output); err != nil {
return fmt.Errorf("Failed to add Output: %w", err) return fmt.Errorf("failed to add Output: %w", err)
} }
} }
return nil return nil

View File

@ -83,7 +83,7 @@ func TestSelectNamepsacesIntegration(t *testing.T) {
count := 0 count := 0
for _, p := range acc.Metrics { for _, p := range acc.Metrics {
if p.Measurement == "aerospike_namespace" { if p.Measurement == "aerospike_namespace" {
count += 1 count++
} }
} }
assert.Equal(t, count, 1) assert.Equal(t, count, 1)

View File

@ -157,10 +157,8 @@ func (bond *Bond) gatherSlavePart(bondName string, rawFile string, acc telegraf.
acc.AddFields("bond_slave", fields, tags) acc.AddFields("bond_slave", fields, tags)
} }
} }
if err := scanner.Err(); err != nil {
return err return scanner.Err()
}
return nil
} }
// loadPath can be used to read path firstly from config // loadPath can be used to read path firstly from config

View File

@ -170,7 +170,7 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
} }
} }
func (j *Cassandra) SampleConfig() string { func (c *Cassandra) SampleConfig() string {
return ` return `
## DEPRECATED: The cassandra plugin has been deprecated. Please use the ## DEPRECATED: The cassandra plugin has been deprecated. Please use the
## jolokia2 plugin instead. ## jolokia2 plugin instead.
@ -193,18 +193,18 @@ func (j *Cassandra) SampleConfig() string {
` `
} }
func (j *Cassandra) Description() string { func (c *Cassandra) Description() string {
return "Read Cassandra metrics through Jolokia" return "Read Cassandra metrics through Jolokia"
} }
func (j *Cassandra) getAttr(requestUrl *url.URL) (map[string]interface{}, error) { func (c *Cassandra) getAttr(requestUrl *url.URL) (map[string]interface{}, error) {
// Create + send request // Create + send request
req, err := http.NewRequest("GET", requestUrl.String(), nil) req, err := http.NewRequest("GET", requestUrl.String(), nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
resp, err := j.jClient.MakeRequest(req) resp, err := c.jClient.MakeRequest(req)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -378,7 +378,7 @@ func (c *CloudWatch) fetchNamespaceMetrics() ([]*cloudwatch.Metric, error) {
var token *string var token *string
var params *cloudwatch.ListMetricsInput var params *cloudwatch.ListMetricsInput
var recentlyActive *string = nil var recentlyActive *string
switch c.RecentlyActive { switch c.RecentlyActive {
case "PT3H": case "PT3H":
@ -597,11 +597,6 @@ func snakeCase(s string) string {
return s return s
} }
type dimension struct {
name string
value string
}
// ctod converts cloudwatch dimensions to regular dimensions. // ctod converts cloudwatch dimensions to regular dimensions.
func ctod(cDimensions []*cloudwatch.Dimension) *map[string]string { func ctod(cDimensions []*cloudwatch.Dimension) *map[string]string {
dimensions := map[string]string{} dimensions := map[string]string{}

View File

@ -28,7 +28,7 @@ func NewCPUStats(ps system.PS) *CPUStats {
} }
} }
func (_ *CPUStats) Description() string { func (c *CPUStats) Description() string {
return "Read metrics about cpu usage" return "Read metrics about cpu usage"
} }
@ -43,12 +43,12 @@ var sampleConfig = `
report_active = false report_active = false
` `
func (_ *CPUStats) SampleConfig() string { func (c *CPUStats) SampleConfig() string {
return sampleConfig return sampleConfig
} }
func (s *CPUStats) Gather(acc telegraf.Accumulator) error { func (c *CPUStats) Gather(acc telegraf.Accumulator) error {
times, err := s.ps.CPUTimes(s.PerCPU, s.TotalCPU) times, err := c.ps.CPUTimes(c.PerCPU, c.TotalCPU)
if err != nil { if err != nil {
return fmt.Errorf("error getting CPU info: %s", err) return fmt.Errorf("error getting CPU info: %s", err)
} }
@ -62,7 +62,7 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error {
total := totalCpuTime(cts) total := totalCpuTime(cts)
active := activeCpuTime(cts) active := activeCpuTime(cts)
if s.CollectCPUTime { if c.CollectCPUTime {
// Add cpu time metrics // Add cpu time metrics
fieldsC := map[string]interface{}{ fieldsC := map[string]interface{}{
"time_user": cts.User, "time_user": cts.User,
@ -76,19 +76,19 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error {
"time_guest": cts.Guest, "time_guest": cts.Guest,
"time_guest_nice": cts.GuestNice, "time_guest_nice": cts.GuestNice,
} }
if s.ReportActive { if c.ReportActive {
fieldsC["time_active"] = activeCpuTime(cts) fieldsC["time_active"] = activeCpuTime(cts)
} }
acc.AddCounter("cpu", fieldsC, tags, now) acc.AddCounter("cpu", fieldsC, tags, now)
} }
// Add in percentage // Add in percentage
if len(s.lastStats) == 0 { if len(c.lastStats) == 0 {
// If it's the 1st gather, can't get CPU Usage stats yet // If it's the 1st gather, can't get CPU Usage stats yet
continue continue
} }
lastCts, ok := s.lastStats[cts.CPU] lastCts, ok := c.lastStats[cts.CPU]
if !ok { if !ok {
continue continue
} }
@ -97,7 +97,7 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error {
totalDelta := total - lastTotal totalDelta := total - lastTotal
if totalDelta < 0 { if totalDelta < 0 {
err = fmt.Errorf("Error: current total CPU time is less than previous total CPU time") err = fmt.Errorf("current total CPU time is less than previous total CPU time")
break break
} }
@ -117,15 +117,15 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error {
"usage_guest": 100 * (cts.Guest - lastCts.Guest) / totalDelta, "usage_guest": 100 * (cts.Guest - lastCts.Guest) / totalDelta,
"usage_guest_nice": 100 * (cts.GuestNice - lastCts.GuestNice) / totalDelta, "usage_guest_nice": 100 * (cts.GuestNice - lastCts.GuestNice) / totalDelta,
} }
if s.ReportActive { if c.ReportActive {
fieldsG["usage_active"] = 100 * (active - lastActive) / totalDelta fieldsG["usage_active"] = 100 * (active - lastActive) / totalDelta
} }
acc.AddGauge("cpu", fieldsG, tags, now) acc.AddGauge("cpu", fieldsG, tags, now)
} }
s.lastStats = make(map[string]cpu.TimesStat) c.lastStats = make(map[string]cpu.TimesStat)
for _, cts := range times { for _, cts := range times {
s.lastStats[cts.CPU] = cts c.lastStats[cts.CPU] = cts
} }
return err return err

View File

@ -19,7 +19,7 @@ type DiskStats struct {
IgnoreFS []string `toml:"ignore_fs"` IgnoreFS []string `toml:"ignore_fs"`
} }
func (_ *DiskStats) Description() string { func (ds *DiskStats) Description() string {
return "Read metrics about disk usage by mount point" return "Read metrics about disk usage by mount point"
} }
@ -32,17 +32,17 @@ var diskSampleConfig = `
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
` `
func (_ *DiskStats) SampleConfig() string { func (ds *DiskStats) SampleConfig() string {
return diskSampleConfig return diskSampleConfig
} }
func (s *DiskStats) Gather(acc telegraf.Accumulator) error { func (ds *DiskStats) Gather(acc telegraf.Accumulator) error {
// Legacy support: // Legacy support:
if len(s.Mountpoints) != 0 { if len(ds.Mountpoints) != 0 {
s.MountPoints = s.Mountpoints ds.MountPoints = ds.Mountpoints
} }
disks, partitions, err := s.ps.DiskUsage(s.MountPoints, s.IgnoreFS) disks, partitions, err := ds.ps.DiskUsage(ds.MountPoints, ds.IgnoreFS)
if err != nil { if err != nil {
return fmt.Errorf("error getting disk usage info: %s", err) return fmt.Errorf("error getting disk usage info: %s", err)
} }
@ -59,9 +59,9 @@ func (s *DiskStats) Gather(acc telegraf.Accumulator) error {
"fstype": du.Fstype, "fstype": du.Fstype,
"mode": mountOpts.Mode(), "mode": mountOpts.Mode(),
} }
var used_percent float64 var usedPercent float64
if du.Used+du.Free > 0 { if du.Used+du.Free > 0 {
used_percent = float64(du.Used) / usedPercent = float64(du.Used) /
(float64(du.Used) + float64(du.Free)) * 100 (float64(du.Used) + float64(du.Free)) * 100
} }
@ -69,7 +69,7 @@ func (s *DiskStats) Gather(acc telegraf.Accumulator) error {
"total": du.Total, "total": du.Total,
"free": du.Free, "free": du.Free,
"used": du.Used, "used": du.Used,
"used_percent": used_percent, "used_percent": usedPercent,
"inodes_total": du.InodesTotal, "inodes_total": du.InodesTotal,
"inodes_free": du.InodesFree, "inodes_free": du.InodesFree,
"inodes_used": du.InodesUsed, "inodes_used": du.InodesUsed,

View File

@ -30,7 +30,7 @@ type DiskIO struct {
initialized bool initialized bool
} }
func (_ *DiskIO) Description() string { func (d *DiskIO) Description() string {
return "Read metrics about disk IO by device" return "Read metrics about disk IO by device"
} }
@ -62,7 +62,7 @@ var diskIOsampleConfig = `
# name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
` `
func (_ *DiskIO) SampleConfig() string { func (d *DiskIO) SampleConfig() string {
return diskIOsampleConfig return diskIOsampleConfig
} }
@ -71,34 +71,34 @@ func hasMeta(s string) bool {
return strings.IndexAny(s, "*?[") >= 0 return strings.IndexAny(s, "*?[") >= 0
} }
func (s *DiskIO) init() error { func (d *DiskIO) init() error {
for _, device := range s.Devices { for _, device := range d.Devices {
if hasMeta(device) { if hasMeta(device) {
filter, err := filter.Compile(s.Devices) filter, err := filter.Compile(d.Devices)
if err != nil { if err != nil {
return fmt.Errorf("error compiling device pattern: %s", err.Error()) return fmt.Errorf("error compiling device pattern: %s", err.Error())
} }
s.deviceFilter = filter d.deviceFilter = filter
} }
} }
s.initialized = true d.initialized = true
return nil return nil
} }
func (s *DiskIO) Gather(acc telegraf.Accumulator) error { func (d *DiskIO) Gather(acc telegraf.Accumulator) error {
if !s.initialized { if !d.initialized {
err := s.init() err := d.init()
if err != nil { if err != nil {
return err return err
} }
} }
devices := []string{} devices := []string{}
if s.deviceFilter == nil { if d.deviceFilter == nil {
devices = s.Devices devices = d.Devices
} }
diskio, err := s.ps.DiskIO(devices) diskio, err := d.ps.DiskIO(devices)
if err != nil { if err != nil {
return fmt.Errorf("error getting disk io info: %s", err.Error()) return fmt.Errorf("error getting disk io info: %s", err.Error())
} }
@ -106,17 +106,17 @@ func (s *DiskIO) Gather(acc telegraf.Accumulator) error {
for _, io := range diskio { for _, io := range diskio {
match := false match := false
if s.deviceFilter != nil && s.deviceFilter.Match(io.Name) { if d.deviceFilter != nil && d.deviceFilter.Match(io.Name) {
match = true match = true
} }
tags := map[string]string{} tags := map[string]string{}
var devLinks []string var devLinks []string
tags["name"], devLinks = s.diskName(io.Name) tags["name"], devLinks = d.diskName(io.Name)
if s.deviceFilter != nil && !match { if d.deviceFilter != nil && !match {
for _, devLink := range devLinks { for _, devLink := range devLinks {
if s.deviceFilter.Match(devLink) { if d.deviceFilter.Match(devLink) {
match = true match = true
break break
} }
@ -126,11 +126,11 @@ func (s *DiskIO) Gather(acc telegraf.Accumulator) error {
} }
} }
for t, v := range s.diskTags(io.Name) { for t, v := range d.diskTags(io.Name) {
tags[t] = v tags[t] = v
} }
if !s.SkipSerialNumber { if !d.SkipSerialNumber {
if len(io.SerialNumber) != 0 { if len(io.SerialNumber) != 0 {
tags["serial"] = io.SerialNumber tags["serial"] = io.SerialNumber
} else { } else {
@ -157,23 +157,23 @@ func (s *DiskIO) Gather(acc telegraf.Accumulator) error {
return nil return nil
} }
func (s *DiskIO) diskName(devName string) (string, []string) { func (d *DiskIO) diskName(devName string) (string, []string) {
di, err := s.diskInfo(devName) di, err := d.diskInfo(devName)
devLinks := strings.Split(di["DEVLINKS"], " ") devLinks := strings.Split(di["DEVLINKS"], " ")
for i, devLink := range devLinks { for i, devLink := range devLinks {
devLinks[i] = strings.TrimPrefix(devLink, "/dev/") devLinks[i] = strings.TrimPrefix(devLink, "/dev/")
} }
if len(s.NameTemplates) == 0 { if len(d.NameTemplates) == 0 {
return devName, devLinks return devName, devLinks
} }
if err != nil { if err != nil {
s.Log.Warnf("Error gathering disk info: %s", err) d.Log.Warnf("Error gathering disk info: %s", err)
return devName, devLinks return devName, devLinks
} }
for _, nt := range s.NameTemplates { for _, nt := range d.NameTemplates {
miss := false miss := false
name := varRegex.ReplaceAllStringFunc(nt, func(sub string) string { name := varRegex.ReplaceAllStringFunc(nt, func(sub string) string {
sub = sub[1:] // strip leading '$' sub = sub[1:] // strip leading '$'
@ -195,19 +195,19 @@ func (s *DiskIO) diskName(devName string) (string, []string) {
return devName, devLinks return devName, devLinks
} }
func (s *DiskIO) diskTags(devName string) map[string]string { func (d *DiskIO) diskTags(devName string) map[string]string {
if len(s.DeviceTags) == 0 { if len(d.DeviceTags) == 0 {
return nil return nil
} }
di, err := s.diskInfo(devName) di, err := d.diskInfo(devName)
if err != nil { if err != nil {
s.Log.Warnf("Error gathering disk info: %s", err) d.Log.Warnf("Error gathering disk info: %s", err)
return nil return nil
} }
tags := map[string]string{} tags := map[string]string{}
for _, dt := range s.DeviceTags { for _, dt := range d.DeviceTags {
if v, ok := di[dt]; ok { if v, ok := di[dt]; ok {
tags[dt] = v tags[dt] = v
} }

View File

@ -18,7 +18,7 @@ type diskInfoCache struct {
var udevPath = "/run/udev/data" var udevPath = "/run/udev/data"
func (s *DiskIO) diskInfo(devName string) (map[string]string, error) { func (d *DiskIO) diskInfo(devName string) (map[string]string, error) {
var err error var err error
var stat unix.Stat_t var stat unix.Stat_t
@ -28,10 +28,10 @@ func (s *DiskIO) diskInfo(devName string) (map[string]string, error) {
return nil, err return nil, err
} }
if s.infoCache == nil { if d.infoCache == nil {
s.infoCache = map[string]diskInfoCache{} d.infoCache = map[string]diskInfoCache{}
} }
ic, ok := s.infoCache[devName] ic, ok := d.infoCache[devName]
if ok && stat.Mtim.Nano() == ic.modifiedAt { if ok && stat.Mtim.Nano() == ic.modifiedAt {
return ic.values, nil return ic.values, nil
@ -43,7 +43,7 @@ func (s *DiskIO) diskInfo(devName string) (map[string]string, error) {
di := map[string]string{} di := map[string]string{}
s.infoCache[devName] = diskInfoCache{ d.infoCache[devName] = diskInfoCache{
modifiedAt: stat.Mtim.Nano(), modifiedAt: stat.Mtim.Nano(),
udevDataPath: udevDataPath, udevDataPath: udevDataPath,
values: di, values: di,

View File

@ -4,6 +4,6 @@ package diskio
type diskInfoCache struct{} type diskInfoCache struct{}
func (s *DiskIO) diskInfo(devName string) (map[string]string, error) { func (d *DiskIO) diskInfo(devName string) (map[string]string, error) {
return nil, nil return nil, nil
} }

View File

@ -32,11 +32,11 @@ var sampleConfig = `
var defaultTimeout = 5 * time.Second var defaultTimeout = 5 * time.Second
func (r *Disque) SampleConfig() string { func (d *Disque) SampleConfig() string {
return sampleConfig return sampleConfig
} }
func (r *Disque) Description() string { func (d *Disque) Description() string {
return "Read metrics from one or many disque servers" return "Read metrics from one or many disque servers"
} }
@ -64,21 +64,21 @@ var ErrProtocolError = errors.New("disque protocol error")
// Reads stats from all configured servers accumulates stats. // Reads stats from all configured servers accumulates stats.
// Returns one of the errors encountered while gather stats (if any). // Returns one of the errors encountered while gather stats (if any).
func (g *Disque) Gather(acc telegraf.Accumulator) error { func (d *Disque) Gather(acc telegraf.Accumulator) error {
if len(g.Servers) == 0 { if len(d.Servers) == 0 {
url := &url.URL{ url := &url.URL{
Host: ":7711", Host: ":7711",
} }
g.gatherServer(url, acc) d.gatherServer(url, acc)
return nil return nil
} }
var wg sync.WaitGroup var wg sync.WaitGroup
for _, serv := range g.Servers { for _, serv := range d.Servers {
u, err := url.Parse(serv) u, err := url.Parse(serv)
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("Unable to parse to address '%s': %s", serv, err)) acc.AddError(fmt.Errorf("unable to parse to address '%s': %s", serv, err))
continue continue
} else if u.Scheme == "" { } else if u.Scheme == "" {
// fallback to simple string based address (i.e. "10.0.0.1:10000") // fallback to simple string based address (i.e. "10.0.0.1:10000")
@ -89,7 +89,7 @@ func (g *Disque) Gather(acc telegraf.Accumulator) error {
wg.Add(1) wg.Add(1)
go func(serv string) { go func(serv string) {
defer wg.Done() defer wg.Done()
acc.AddError(g.gatherServer(u, acc)) acc.AddError(d.gatherServer(u, acc))
}(serv) }(serv)
} }
@ -100,8 +100,8 @@ func (g *Disque) Gather(acc telegraf.Accumulator) error {
const defaultPort = "7711" const defaultPort = "7711"
func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error { func (d *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
if g.c == nil { if d.c == nil {
_, _, err := net.SplitHostPort(addr.Host) _, _, err := net.SplitHostPort(addr.Host)
if err != nil { if err != nil {
@ -110,7 +110,7 @@ func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
c, err := net.DialTimeout("tcp", addr.Host, defaultTimeout) c, err := net.DialTimeout("tcp", addr.Host, defaultTimeout)
if err != nil { if err != nil {
return fmt.Errorf("Unable to connect to disque server '%s': %s", addr.Host, err) return fmt.Errorf("unable to connect to disque server '%s': %s", addr.Host, err)
} }
if addr.User != nil { if addr.User != nil {
@ -130,15 +130,15 @@ func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
} }
} }
g.c = c d.c = c
} }
// Extend connection // Extend connection
g.c.SetDeadline(time.Now().Add(defaultTimeout)) d.c.SetDeadline(time.Now().Add(defaultTimeout))
g.c.Write([]byte("info\r\n")) d.c.Write([]byte("info\r\n"))
r := bufio.NewReader(g.c) r := bufio.NewReader(d.c)
line, err := r.ReadString('\n') line, err := r.ReadString('\n')
if err != nil { if err != nil {
@ -176,7 +176,7 @@ func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
parts := strings.SplitN(line, ":", 2) parts := strings.SplitN(line, ":", 2)
name := string(parts[0]) name := parts[0]
metric, ok := Tracking[name] metric, ok := Tracking[name]
if !ok { if !ok {

View File

@ -1,4 +1,4 @@
// Helper functions copied from // Package docker contains few helper functions copied from
// https://github.com/docker/cli/blob/master/cli/command/container/stats_helpers.go // https://github.com/docker/cli/blob/master/cli/command/container/stats_helpers.go
package docker package docker

View File

@ -664,7 +664,7 @@ func (e *Elasticsearch) gatherSingleIndexStats(name string, index indexStat, now
shardTags := map[string]string{ shardTags := map[string]string{
"index_name": name, "index_name": name,
"node_id": routingNode, "node_id": routingNode,
"shard_name": string(shardNumber), "shard_name": shardNumber,
"type": shardType, "type": shardType,
} }
@ -741,11 +741,7 @@ func (e *Elasticsearch) gatherJSONData(url string, v interface{}) error {
r.StatusCode, http.StatusOK) r.StatusCode, http.StatusOK)
} }
if err = json.NewDecoder(r.Body).Decode(v); err != nil { return json.NewDecoder(r.Body).Decode(v)
return err
}
return nil
} }
func (e *Elasticsearch) compileIndexMatchers() (map[string]filter.Filter, error) { func (e *Elasticsearch) compileIndexMatchers() (map[string]filter.Filter, error) {

View File

@ -69,12 +69,12 @@ func (c CommandRunner) Run(
command string, command string,
timeout time.Duration, timeout time.Duration,
) ([]byte, []byte, error) { ) ([]byte, []byte, error) {
split_cmd, err := shellquote.Split(command) splitCmd, err := shellquote.Split(command)
if err != nil || len(split_cmd) == 0 { if err != nil || len(splitCmd) == 0 {
return nil, nil, fmt.Errorf("exec: unable to parse command, %s", err) return nil, nil, fmt.Errorf("exec: unable to parse command, %s", err)
} }
cmd := exec.Command(split_cmd[0], split_cmd[1:]...) cmd := exec.Command(splitCmd[0], splitCmd[1:]...)
var ( var (
out bytes.Buffer out bytes.Buffer
@ -123,7 +123,7 @@ func removeCarriageReturns(b bytes.Buffer) bytes.Buffer {
byt, er := b.ReadBytes(0x0D) byt, er := b.ReadBytes(0x0D)
end := len(byt) end := len(byt)
if nil == er { if nil == er {
end -= 1 end--
} }
if nil != byt { if nil != byt {
buf.Write(byt[:end]) buf.Write(byt[:end])

View File

@ -65,11 +65,11 @@ type FileCount struct {
Log telegraf.Logger Log telegraf.Logger
} }
func (_ *FileCount) Description() string { func (fc *FileCount) Description() string {
return "Count files in a directory" return "Count files in a directory"
} }
func (_ *FileCount) SampleConfig() string { return sampleConfig } func (fc *FileCount) SampleConfig() string { return sampleConfig }
type fileFilterFunc func(os.FileInfo) (bool, error) type fileFilterFunc func(os.FileInfo) (bool, error)

View File

@ -61,24 +61,24 @@ var sampleConfig = `
# insecure_skip_verify = false # insecure_skip_verify = false
` `
func (r *haproxy) SampleConfig() string { func (h *haproxy) SampleConfig() string {
return sampleConfig return sampleConfig
} }
func (r *haproxy) Description() string { func (h *haproxy) Description() string {
return "Read metrics of haproxy, via socket or csv stats page" return "Read metrics of haproxy, via socket or csv stats page"
} }
// Reads stats from all configured servers accumulates stats. // Reads stats from all configured servers accumulates stats.
// Returns one of the errors encountered while gather stats (if any). // Returns one of the errors encountered while gather stats (if any).
func (g *haproxy) Gather(acc telegraf.Accumulator) error { func (h *haproxy) Gather(acc telegraf.Accumulator) error {
if len(g.Servers) == 0 { if len(h.Servers) == 0 {
return g.gatherServer("http://127.0.0.1:1936/haproxy?stats", acc) return h.gatherServer("http://127.0.0.1:1936/haproxy?stats", acc)
} }
endpoints := make([]string, 0, len(g.Servers)) endpoints := make([]string, 0, len(h.Servers))
for _, endpoint := range g.Servers { for _, endpoint := range h.Servers {
if strings.HasPrefix(endpoint, "http") { if strings.HasPrefix(endpoint, "http") {
endpoints = append(endpoints, endpoint) endpoints = append(endpoints, endpoint)
@ -107,7 +107,7 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error {
for _, server := range endpoints { for _, server := range endpoints {
go func(serv string) { go func(serv string) {
defer wg.Done() defer wg.Done()
if err := g.gatherServer(serv, acc); err != nil { if err := h.gatherServer(serv, acc); err != nil {
acc.AddError(err) acc.AddError(err)
} }
}(server) }(server)
@ -117,7 +117,7 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error {
return nil return nil
} }
func (g *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) error { func (h *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) error {
socketPath := getSocketAddr(addr) socketPath := getSocketAddr(addr)
c, err := net.Dial("unix", socketPath) c, err := net.Dial("unix", socketPath)
@ -132,28 +132,28 @@ func (g *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) erro
return fmt.Errorf("could not write to socket '%s': %s", addr, errw) return fmt.Errorf("could not write to socket '%s': %s", addr, errw)
} }
return g.importCsvResult(c, acc, socketPath) return h.importCsvResult(c, acc, socketPath)
} }
func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error { func (h *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
if !strings.HasPrefix(addr, "http") { if !strings.HasPrefix(addr, "http") {
return g.gatherServerSocket(addr, acc) return h.gatherServerSocket(addr, acc)
} }
if g.client == nil { if h.client == nil {
tlsCfg, err := g.ClientConfig.TLSConfig() tlsCfg, err := h.ClientConfig.TLSConfig()
if err != nil { if err != nil {
return err return err
} }
tr := &http.Transport{ tr := &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second), ResponseHeaderTimeout: 3 * time.Second,
TLSClientConfig: tlsCfg, TLSClientConfig: tlsCfg,
} }
client := &http.Client{ client := &http.Client{
Transport: tr, Transport: tr,
Timeout: time.Duration(4 * time.Second), Timeout: 4 * time.Second,
} }
g.client = client h.client = client
} }
if !strings.HasSuffix(addr, ";csv") { if !strings.HasSuffix(addr, ";csv") {
@ -176,11 +176,11 @@ func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
addr = u.String() addr = u.String()
} }
if g.Username != "" || g.Password != "" { if h.Username != "" || h.Password != "" {
req.SetBasicAuth(g.Username, g.Password) req.SetBasicAuth(h.Username, h.Password)
} }
res, err := g.client.Do(req) res, err := h.client.Do(req)
if err != nil { if err != nil {
return fmt.Errorf("unable to connect to haproxy server '%s': %s", addr, err) return fmt.Errorf("unable to connect to haproxy server '%s': %s", addr, err)
} }
@ -190,7 +190,7 @@ func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
return fmt.Errorf("unable to get valid stat result from '%s', http response code : %d", addr, res.StatusCode) return fmt.Errorf("unable to get valid stat result from '%s', http response code : %d", addr, res.StatusCode)
} }
if err := g.importCsvResult(res.Body, acc, u.Host); err != nil { if err := h.importCsvResult(res.Body, acc, u.Host); err != nil {
return fmt.Errorf("unable to parse stat result from '%s': %s", addr, err) return fmt.Errorf("unable to parse stat result from '%s': %s", addr, err)
} }
@ -222,7 +222,7 @@ var fieldRenames = map[string]string{
"hrsp_other": "http_response.other", "hrsp_other": "http_response.other",
} }
func (g *haproxy) importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error { func (h *haproxy) importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error {
csvr := csv.NewReader(r) csvr := csv.NewReader(r)
now := time.Now() now := time.Now()
@ -259,7 +259,7 @@ func (g *haproxy) importCsvResult(r io.Reader, acc telegraf.Accumulator, host st
colName := headers[i] colName := headers[i]
fieldName := colName fieldName := colName
if !g.KeepFieldNames { if !h.KeepFieldNames {
if fieldRename, ok := fieldRenames[colName]; ok { if fieldRename, ok := fieldRenames[colName]; ok {
fieldName = fieldRename fieldName = fieldRename
} }

View File

@ -20,7 +20,7 @@ type Fetcher interface {
Fetch(address string) ([]gohddtemp.Disk, error) Fetch(address string) ([]gohddtemp.Disk, error)
} }
func (_ *HDDTemp) Description() string { func (h *HDDTemp) Description() string {
return "Monitor disks' temperatures using hddtemp" return "Monitor disks' temperatures using hddtemp"
} }
@ -36,7 +36,7 @@ var hddtempSampleConfig = `
# devices = ["sda", "*"] # devices = ["sda", "*"]
` `
func (_ *HDDTemp) SampleConfig() string { func (h *HDDTemp) SampleConfig() string {
return hddtempSampleConfig return hddtempSampleConfig
} }

View File

@ -13,10 +13,10 @@ type Infiniband struct {
// Sample configuration for plugin // Sample configuration for plugin
var InfinibandConfig = `` var InfinibandConfig = ``
func (_ *Infiniband) SampleConfig() string { func (i *Infiniband) SampleConfig() string {
return InfinibandConfig return InfinibandConfig
} }
func (_ *Infiniband) Description() string { func (i *Infiniband) Description() string {
return "Gets counters from all InfiniBand cards and ports installed" return "Gets counters from all InfiniBand cards and ports installed"
} }

View File

@ -11,8 +11,7 @@ import (
) )
// Gather statistics from our infiniband cards // Gather statistics from our infiniband cards
func (_ *Infiniband) Gather(acc telegraf.Accumulator) error { func (i *Infiniband) Gather(acc telegraf.Accumulator) error {
rdmaDevices := rdmamap.GetRdmaDeviceList() rdmaDevices := rdmamap.GetRdmaDeviceList()
if len(rdmaDevices) == 0 { if len(rdmaDevices) == 0 {
@ -41,7 +40,6 @@ func (_ *Infiniband) Gather(acc telegraf.Accumulator) error {
// Add the statistics to the accumulator // Add the statistics to the accumulator
func addStats(dev string, port string, stats []rdmamap.RdmaStatEntry, acc telegraf.Accumulator) { func addStats(dev string, port string, stats []rdmamap.RdmaStatEntry, acc telegraf.Accumulator) {
// Allow users to filter by card and port // Allow users to filter by card and port
tags := map[string]string{"device": dev, "port": port} tags := map[string]string{"device": dev, "port": port}
fields := make(map[string]interface{}) fields := make(map[string]interface{})

View File

@ -288,7 +288,7 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc {
var m telegraf.Metric var m telegraf.Metric
var err error var err error
var parseErrorCount int var parseErrorCount int
var lastPos int = 0 var lastPos int
var firstParseErrorStr string var firstParseErrorStr string
for { for {
select { select {
@ -306,7 +306,7 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc {
// Continue parsing metrics even if some are malformed // Continue parsing metrics even if some are malformed
if parseErr, ok := err.(*influx.ParseError); ok { if parseErr, ok := err.(*influx.ParseError); ok {
parseErrorCount += 1 parseErrorCount++
errStr := parseErr.Error() errStr := parseErr.Error()
if firstParseErrorStr == "" { if firstParseErrorStr == "" {
firstParseErrorStr = errStr firstParseErrorStr = errStr

View File

@ -47,27 +47,27 @@ func NewConnection(server, privilege, hexKey string) *Connection {
return conn return conn
} }
func (t *Connection) options() []string { func (c *Connection) options() []string {
intf := t.Interface intf := c.Interface
if intf == "" { if intf == "" {
intf = "lan" intf = "lan"
} }
options := []string{ options := []string{
"-H", t.Hostname, "-H", c.Hostname,
"-U", t.Username, "-U", c.Username,
"-P", t.Password, "-P", c.Password,
"-I", intf, "-I", intf,
} }
if t.HexKey != "" { if c.HexKey != "" {
options = append(options, "-y", t.HexKey) options = append(options, "-y", c.HexKey)
} }
if t.Port != 0 { if c.Port != 0 {
options = append(options, "-p", strconv.Itoa(t.Port)) options = append(options, "-p", strconv.Itoa(c.Port))
} }
if t.Privilege != "" { if c.Privilege != "" {
options = append(options, "-L", t.Privilege) options = append(options, "-L", c.Privilege)
} }
return options return options
} }

View File

@ -29,12 +29,12 @@ const measurement = "ipset"
var defaultTimeout = internal.Duration{Duration: time.Second} var defaultTimeout = internal.Duration{Duration: time.Second}
// Description returns a short description of the plugin // Description returns a short description of the plugin
func (ipset *Ipset) Description() string { func (i *Ipset) Description() string {
return "Gather packets and bytes counters from Linux ipsets" return "Gather packets and bytes counters from Linux ipsets"
} }
// SampleConfig returns sample configuration options. // SampleConfig returns sample configuration options.
func (ipset *Ipset) SampleConfig() string { func (i *Ipset) SampleConfig() string {
return ` return `
## By default, we only show sets which have already matched at least 1 packet. ## By default, we only show sets which have already matched at least 1 packet.
## set include_unmatched_sets = true to gather them all. ## set include_unmatched_sets = true to gather them all.
@ -46,8 +46,8 @@ func (ipset *Ipset) SampleConfig() string {
` `
} }
func (ips *Ipset) Gather(acc telegraf.Accumulator) error { func (i *Ipset) Gather(acc telegraf.Accumulator) error {
out, e := ips.lister(ips.Timeout, ips.UseSudo) out, e := i.lister(i.Timeout, i.UseSudo)
if e != nil { if e != nil {
acc.AddError(e) acc.AddError(e)
} }
@ -64,25 +64,25 @@ func (ips *Ipset) Gather(acc telegraf.Accumulator) error {
data := strings.Fields(line) data := strings.Fields(line)
if len(data) < 7 { if len(data) < 7 {
acc.AddError(fmt.Errorf("Error parsing line (expected at least 7 fields): %s", line)) acc.AddError(fmt.Errorf("error parsing line (expected at least 7 fields): %s", line))
continue continue
} }
if data[0] == "add" && (data[4] != "0" || ips.IncludeUnmatchedSets) { if data[0] == "add" && (data[4] != "0" || i.IncludeUnmatchedSets) {
tags := map[string]string{ tags := map[string]string{
"set": data[1], "set": data[1],
"rule": data[2], "rule": data[2],
} }
packets_total, err := strconv.ParseUint(data[4], 10, 64) packetsTotal, err := strconv.ParseUint(data[4], 10, 64)
if err != nil { if err != nil {
acc.AddError(err) acc.AddError(err)
} }
bytes_total, err := strconv.ParseUint(data[6], 10, 64) bytesTotal, err := strconv.ParseUint(data[6], 10, 64)
if err != nil { if err != nil {
acc.AddError(err) acc.AddError(err)
} }
fields := map[string]interface{}{ fields := map[string]interface{}{
"packets_total": packets_total, "packets_total": packetsTotal,
"bytes_total": bytes_total, "bytes_total": bytesTotal,
} }
acc.AddCounter(measurement, fields, tags) acc.AddCounter(measurement, fields, tags)
} }

View File

@ -40,7 +40,7 @@ func TestIpset(t *testing.T) {
value: `create hash:net family inet hashsize 1024 maxelem 65536 counters value: `create hash:net family inet hashsize 1024 maxelem 65536 counters
add myset 4.5.6.7 packets 123 bytes add myset 4.5.6.7 packets 123 bytes
`, `,
err: fmt.Errorf("Error parsing line (expected at least 7 fields): \t\t\t\tadd myset 4.5.6.7 packets 123 bytes"), err: fmt.Errorf("error parsing line (expected at least 7 fields): \t\t\t\tadd myset 4.5.6.7 packets 123 bytes"),
}, },
{ {
name: "Non-empty sets, counters, no comment", name: "Non-empty sets, counters, no comment",

View File

@ -47,11 +47,9 @@ func (c *client) init() error {
break break
} }
} }
// first api fetch // first api fetch
if err := c.doGet(context.Background(), jobPath, new(jobResponse)); err != nil { return c.doGet(context.Background(), jobPath, new(jobResponse))
return err
}
return nil
} }
func (c *client) doGet(ctx context.Context, url string, v interface{}) error { func (c *client) doGet(ctx context.Context, url string, v interface{}) error {
@ -97,10 +95,8 @@ func (c *client) doGet(ctx context.Context, url string, v interface{}) error {
Title: resp.Status, Title: resp.Status,
} }
} }
if err = json.NewDecoder(resp.Body).Decode(v); err != nil {
return err return json.NewDecoder(resp.Body).Decode(v)
}
return nil
} }
type APIError struct { type APIError struct {

View File

@ -20,10 +20,10 @@ type SysctlFS struct {
var sysctlFSDescription = `Provides Linux sysctl fs metrics` var sysctlFSDescription = `Provides Linux sysctl fs metrics`
var sysctlFSSampleConfig = `` var sysctlFSSampleConfig = ``
func (_ SysctlFS) Description() string { func (sfs SysctlFS) Description() string {
return sysctlFSDescription return sysctlFSDescription
} }
func (_ SysctlFS) SampleConfig() string { func (sfs SysctlFS) SampleConfig() string {
return sysctlFSSampleConfig return sysctlFSSampleConfig
} }

View File

@ -149,8 +149,8 @@ const processStats = "/_node/stats/process"
const pipelinesStats = "/_node/stats/pipelines" const pipelinesStats = "/_node/stats/pipelines"
const pipelineStats = "/_node/stats/pipeline" const pipelineStats = "/_node/stats/pipeline"
func (i *Logstash) Init() error { func (logstash *Logstash) Init() error {
err := choice.CheckSlice(i.Collect, []string{"pipelines", "process", "jvm"}) err := choice.CheckSlice(logstash.Collect, []string{"pipelines", "process", "jvm"})
if err != nil { if err != nil {
return fmt.Errorf(`cannot verify "collect" setting: %v`, err) return fmt.Errorf(`cannot verify "collect" setting: %v`, err)
} }

View File

@ -1,15 +1,9 @@
// +build !windows // +build !windows
// lustre2 doesn't aim for Windows // Package lustre2 (doesn't aim for Windows)
// Lustre 2.x Telegraf plugin
/* // Lustre (http://lustre.org/) is an open-source, parallel file system
Lustre 2.x Telegraf plugin // for HPC environments. It stores statistics about its activity in /proc
Lustre (http://lustre.org/) is an open-source, parallel file system
for HPC environments. It stores statistics about its activity in
/proc
*/
package lustre2 package lustre2
import ( import (
@ -30,8 +24,8 @@ type tags struct {
// Lustre proc files can change between versions, so we want to future-proof // Lustre proc files can change between versions, so we want to future-proof
// by letting people choose what to look at. // by letting people choose what to look at.
type Lustre2 struct { type Lustre2 struct {
Ost_procfiles []string `toml:"ost_procfiles"` OstProcfiles []string `toml:"ost_procfiles"`
Mds_procfiles []string `toml:"mds_procfiles"` MdsProcfiles []string `toml:"mds_procfiles"`
// allFields maps and OST name to the metric fields associated with that OST // allFields maps and OST name to the metric fields associated with that OST
allFields map[tags]map[string]interface{} allFields map[tags]map[string]interface{}
@ -63,7 +57,7 @@ type mapping struct {
tag string // Additional tag to add for this metric tag string // Additional tag to add for this metric
} }
var wanted_ost_fields = []*mapping{ var wantedOstFields = []*mapping{
{ {
inProc: "write_bytes", inProc: "write_bytes",
field: 6, field: 6,
@ -95,7 +89,7 @@ var wanted_ost_fields = []*mapping{
}, },
} }
var wanted_ost_jobstats_fields = []*mapping{ var wantedOstJobstatsFields = []*mapping{
{ // The read line has several fields, so we need to differentiate what they are { // The read line has several fields, so we need to differentiate what they are
inProc: "read", inProc: "read",
field: 3, field: 3,
@ -228,7 +222,7 @@ var wanted_ost_jobstats_fields = []*mapping{
}, },
} }
var wanted_mds_fields = []*mapping{ var wantedMdsFields = []*mapping{
{ {
inProc: "open", inProc: "open",
}, },
@ -279,7 +273,7 @@ var wanted_mds_fields = []*mapping{
}, },
} }
var wanted_mdt_jobstats_fields = []*mapping{ var wantedMdtJobstatsFields = []*mapping{
{ {
inProc: "open", inProc: "open",
field: 3, field: 3,
@ -362,7 +356,7 @@ var wanted_mdt_jobstats_fields = []*mapping{
}, },
} }
func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, acc telegraf.Accumulator) error { func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping) error {
files, err := filepath.Glob(fileglob) files, err := filepath.Glob(fileglob)
if err != nil { if err != nil {
return err return err
@ -386,7 +380,7 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, a
} }
jobs := strings.Split(string(wholeFile), "- ") jobs := strings.Split(string(wholeFile), "- ")
for _, job := range jobs { for _, job := range jobs {
lines := strings.Split(string(job), "\n") lines := strings.Split(job, "\n")
jobid := "" jobid := ""
// figure out if the data should be tagged with job_id here // figure out if the data should be tagged with job_id here
@ -422,7 +416,7 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, a
if wantedField == 0 { if wantedField == 0 {
wantedField = 1 wantedField = 1
} }
data, err = strconv.ParseUint(strings.TrimSuffix((parts[wantedField]), ","), 10, 64) data, err = strconv.ParseUint(strings.TrimSuffix(parts[wantedField], ","), 10, 64)
if err != nil { if err != nil {
return err return err
} }
@ -454,66 +448,60 @@ func (l *Lustre2) Gather(acc telegraf.Accumulator) error {
//l.allFields = make(map[string]map[string]interface{}) //l.allFields = make(map[string]map[string]interface{})
l.allFields = make(map[tags]map[string]interface{}) l.allFields = make(map[tags]map[string]interface{})
if len(l.Ost_procfiles) == 0 { if len(l.OstProcfiles) == 0 {
// read/write bytes are in obdfilter/<ost_name>/stats // read/write bytes are in obdfilter/<ost_name>/stats
err := l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/stats", err := l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/stats", wantedOstFields)
wanted_ost_fields, acc)
if err != nil { if err != nil {
return err return err
} }
// cache counters are in osd-ldiskfs/<ost_name>/stats // cache counters are in osd-ldiskfs/<ost_name>/stats
err = l.GetLustreProcStats("/proc/fs/lustre/osd-ldiskfs/*/stats", err = l.GetLustreProcStats("/proc/fs/lustre/osd-ldiskfs/*/stats", wantedOstFields)
wanted_ost_fields, acc)
if err != nil { if err != nil {
return err return err
} }
// per job statistics are in obdfilter/<ost_name>/job_stats // per job statistics are in obdfilter/<ost_name>/job_stats
err = l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/job_stats", err = l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/job_stats", wantedOstJobstatsFields)
wanted_ost_jobstats_fields, acc)
if err != nil { if err != nil {
return err return err
} }
} }
if len(l.Mds_procfiles) == 0 { if len(l.MdsProcfiles) == 0 {
// Metadata server stats // Metadata server stats
err := l.GetLustreProcStats("/proc/fs/lustre/mdt/*/md_stats", err := l.GetLustreProcStats("/proc/fs/lustre/mdt/*/md_stats", wantedMdsFields)
wanted_mds_fields, acc)
if err != nil { if err != nil {
return err return err
} }
// Metadata target job stats // Metadata target job stats
err = l.GetLustreProcStats("/proc/fs/lustre/mdt/*/job_stats", err = l.GetLustreProcStats("/proc/fs/lustre/mdt/*/job_stats", wantedMdtJobstatsFields)
wanted_mdt_jobstats_fields, acc)
if err != nil { if err != nil {
return err return err
} }
} }
for _, procfile := range l.Ost_procfiles { for _, procfile := range l.OstProcfiles {
ost_fields := wanted_ost_fields ostFields := wantedOstFields
if strings.HasSuffix(procfile, "job_stats") { if strings.HasSuffix(procfile, "job_stats") {
ost_fields = wanted_ost_jobstats_fields ostFields = wantedOstJobstatsFields
} }
err := l.GetLustreProcStats(procfile, ost_fields, acc) err := l.GetLustreProcStats(procfile, ostFields)
if err != nil { if err != nil {
return err return err
} }
} }
for _, procfile := range l.Mds_procfiles { for _, procfile := range l.MdsProcfiles {
mdt_fields := wanted_mds_fields mdtFields := wantedMdsFields
if strings.HasSuffix(procfile, "job_stats") { if strings.HasSuffix(procfile, "job_stats") {
mdt_fields = wanted_mdt_jobstats_fields mdtFields = wantedMdtJobstatsFields
} }
err := l.GetLustreProcStats(procfile, mdt_fields, acc) err := l.GetLustreProcStats(procfile, mdtFields)
if err != nil { if err != nil {
return err return err
} }
} }
for tgs, fields := range l.allFields { for tgs, fields := range l.allFields {
tags := map[string]string{ tags := map[string]string{
"name": tgs.name, "name": tgs.name,
} }

View File

@ -135,33 +135,33 @@ const mdtJobStatsContents = `job_stats:
func TestLustre2GeneratesMetrics(t *testing.T) { func TestLustre2GeneratesMetrics(t *testing.T) {
tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/" tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/"
ost_name := "OST0001" ostName := "OST0001"
mdtdir := tempdir + "/mdt/" mdtdir := tempdir + "/mdt/"
err := os.MkdirAll(mdtdir+"/"+ost_name, 0755) err := os.MkdirAll(mdtdir+"/"+ostName, 0755)
require.NoError(t, err) require.NoError(t, err)
osddir := tempdir + "/osd-ldiskfs/" osddir := tempdir + "/osd-ldiskfs/"
err = os.MkdirAll(osddir+"/"+ost_name, 0755) err = os.MkdirAll(osddir+"/"+ostName, 0755)
require.NoError(t, err) require.NoError(t, err)
obddir := tempdir + "/obdfilter/" obddir := tempdir + "/obdfilter/"
err = os.MkdirAll(obddir+"/"+ost_name, 0755) err = os.MkdirAll(obddir+"/"+ostName, 0755)
require.NoError(t, err) require.NoError(t, err)
err = ioutil.WriteFile(mdtdir+"/"+ost_name+"/md_stats", []byte(mdtProcContents), 0644) err = ioutil.WriteFile(mdtdir+"/"+ostName+"/md_stats", []byte(mdtProcContents), 0644)
require.NoError(t, err) require.NoError(t, err)
err = ioutil.WriteFile(osddir+"/"+ost_name+"/stats", []byte(osdldiskfsProcContents), 0644) err = ioutil.WriteFile(osddir+"/"+ostName+"/stats", []byte(osdldiskfsProcContents), 0644)
require.NoError(t, err) require.NoError(t, err)
err = ioutil.WriteFile(obddir+"/"+ost_name+"/stats", []byte(obdfilterProcContents), 0644) err = ioutil.WriteFile(obddir+"/"+ostName+"/stats", []byte(obdfilterProcContents), 0644)
require.NoError(t, err) require.NoError(t, err)
// Begin by testing standard Lustre stats // Begin by testing standard Lustre stats
m := &Lustre2{ m := &Lustre2{
Ost_procfiles: []string{obddir + "/*/stats", osddir + "/*/stats"}, OstProcfiles: []string{obddir + "/*/stats", osddir + "/*/stats"},
Mds_procfiles: []string{mdtdir + "/*/md_stats"}, MdsProcfiles: []string{mdtdir + "/*/md_stats"},
} }
var acc testutil.Accumulator var acc testutil.Accumulator
@ -170,7 +170,7 @@ func TestLustre2GeneratesMetrics(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
tags := map[string]string{ tags := map[string]string{
"name": ost_name, "name": ostName,
} }
fields := map[string]interface{}{ fields := map[string]interface{}{
@ -208,27 +208,27 @@ func TestLustre2GeneratesMetrics(t *testing.T) {
func TestLustre2GeneratesJobstatsMetrics(t *testing.T) { func TestLustre2GeneratesJobstatsMetrics(t *testing.T) {
tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/" tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/"
ost_name := "OST0001" ostName := "OST0001"
job_names := []string{"cluster-testjob1", "testjob2"} jobNames := []string{"cluster-testjob1", "testjob2"}
mdtdir := tempdir + "/mdt/" mdtdir := tempdir + "/mdt/"
err := os.MkdirAll(mdtdir+"/"+ost_name, 0755) err := os.MkdirAll(mdtdir+"/"+ostName, 0755)
require.NoError(t, err) require.NoError(t, err)
obddir := tempdir + "/obdfilter/" obddir := tempdir + "/obdfilter/"
err = os.MkdirAll(obddir+"/"+ost_name, 0755) err = os.MkdirAll(obddir+"/"+ostName, 0755)
require.NoError(t, err) require.NoError(t, err)
err = ioutil.WriteFile(mdtdir+"/"+ost_name+"/job_stats", []byte(mdtJobStatsContents), 0644) err = ioutil.WriteFile(mdtdir+"/"+ostName+"/job_stats", []byte(mdtJobStatsContents), 0644)
require.NoError(t, err) require.NoError(t, err)
err = ioutil.WriteFile(obddir+"/"+ost_name+"/job_stats", []byte(obdfilterJobStatsContents), 0644) err = ioutil.WriteFile(obddir+"/"+ostName+"/job_stats", []byte(obdfilterJobStatsContents), 0644)
require.NoError(t, err) require.NoError(t, err)
// Test Lustre Jobstats // Test Lustre Jobstats
m := &Lustre2{ m := &Lustre2{
Ost_procfiles: []string{obddir + "/*/job_stats"}, OstProcfiles: []string{obddir + "/*/job_stats"},
Mds_procfiles: []string{mdtdir + "/*/job_stats"}, MdsProcfiles: []string{mdtdir + "/*/job_stats"},
} }
var acc testutil.Accumulator var acc testutil.Accumulator
@ -240,12 +240,12 @@ func TestLustre2GeneratesJobstatsMetrics(t *testing.T) {
// and even further make this dependent on summing per OST // and even further make this dependent on summing per OST
tags := []map[string]string{ tags := []map[string]string{
{ {
"name": ost_name, "name": ostName,
"jobid": job_names[0], "jobid": jobNames[0],
}, },
{ {
"name": ost_name, "name": ostName,
"jobid": job_names[1], "jobid": jobNames[1],
}, },
} }
@ -347,7 +347,7 @@ func TestLustre2CanParseConfiguration(t *testing.T) {
"/proc/fs/lustre/mdt/*/md_stats", "/proc/fs/lustre/mdt/*/md_stats",
]`) ]`)
table, err := toml.Parse([]byte(config)) table, err := toml.Parse(config)
require.NoError(t, err) require.NoError(t, err)
inputs, ok := table.Fields["inputs"] inputs, ok := table.Fields["inputs"]
@ -361,11 +361,11 @@ func TestLustre2CanParseConfiguration(t *testing.T) {
require.NoError(t, toml.UnmarshalTable(lustre2.([]*ast.Table)[0], &plugin)) require.NoError(t, toml.UnmarshalTable(lustre2.([]*ast.Table)[0], &plugin))
assert.Equal(t, Lustre2{ assert.Equal(t, Lustre2{
Ost_procfiles: []string{ OstProcfiles: []string{
"/proc/fs/lustre/obdfilter/*/stats", "/proc/fs/lustre/obdfilter/*/stats",
"/proc/fs/lustre/osd-ldiskfs/*/stats", "/proc/fs/lustre/osd-ldiskfs/*/stats",
}, },
Mds_procfiles: []string{ MdsProcfiles: []string{
"/proc/fs/lustre/mdt/*/md_stats", "/proc/fs/lustre/mdt/*/md_stats",
}, },
}, plugin) }, plugin)

View File

@ -220,7 +220,7 @@ func (c *Marklogic) createHTTPClient() (*http.Client, error) {
Transport: &http.Transport{ Transport: &http.Transport{
TLSClientConfig: tlsCfg, TLSClientConfig: tlsCfg,
}, },
Timeout: time.Duration(5 * time.Second), Timeout: 5 * time.Second,
} }
return client, nil return client, nil
@ -246,11 +246,7 @@ func (c *Marklogic) gatherJSONData(url string, v interface{}) error {
response.StatusCode, http.StatusOK) response.StatusCode, http.StatusOK)
} }
if err = json.NewDecoder(response.Body).Decode(v); err != nil { return json.NewDecoder(response.Body).Decode(v)
return err
}
return nil
} }
func init() { func init() {
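
The gatherJSONData change above is a typical [rule.if-return] fix: a trailing `if err != nil { return err }` followed by `return nil` collapses into returning the call directly. A minimal sketch of the before/after shape, with made-up names (decodeInto is not the plugin's real function):

package sketch

import (
	"encoding/json"
	"io"
)

// decodeIntoOld shows the shape revive flags: a final error check that
// only returns err, followed by `return nil`.
func decodeIntoOld(r io.Reader, v interface{}) error {
	if err := json.NewDecoder(r).Decode(v); err != nil {
		return err
	}
	return nil
}

// decodeInto is the simplified form after the fix.
func decodeInto(r io.Reader, v interface{}) error {
	return json.NewDecoder(r).Decode(v)
}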

View File

@ -14,19 +14,19 @@ type MemStats struct {
platform string platform string
} }
func (_ *MemStats) Description() string { func (ms *MemStats) Description() string {
return "Read metrics about memory usage" return "Read metrics about memory usage"
} }
func (_ *MemStats) SampleConfig() string { return "" } func (ms *MemStats) SampleConfig() string { return "" }
func (m *MemStats) Init() error { func (ms *MemStats) Init() error {
m.platform = runtime.GOOS ms.platform = runtime.GOOS
return nil return nil
} }
func (s *MemStats) Gather(acc telegraf.Accumulator) error { func (ms *MemStats) Gather(acc telegraf.Accumulator) error {
vm, err := s.ps.VMStat() vm, err := ms.ps.VMStat()
if err != nil { if err != nil {
return fmt.Errorf("error getting virtual memory info: %s", err) return fmt.Errorf("error getting virtual memory info: %s", err)
} }
@ -39,7 +39,7 @@ func (s *MemStats) Gather(acc telegraf.Accumulator) error {
"available_percent": 100 * float64(vm.Available) / float64(vm.Total), "available_percent": 100 * float64(vm.Available) / float64(vm.Total),
} }
switch s.platform { switch ms.platform {
case "darwin": case "darwin":
fields["active"] = vm.Active fields["active"] = vm.Active
fields["free"] = vm.Free fields["free"] = vm.Free
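
The MemStats methods above illustrate [rule.receiver-naming]: all methods of a type should share one short, non-blank receiver name (here `ms`) instead of mixing `_`, `m` and `s`. A minimal sketch with a made-up gauge type:

package sketch

// gauge is a stand-in type; the rule only cares that every method uses
// the same, non-blank receiver name.
type gauge struct{ platform string }

func (g *gauge) Description() string  { return "example gauge" }
func (g *gauge) SampleConfig() string { return "" }

func (g *gauge) Init() error {
	g.platform = "linux"
	return nil
}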

View File

@ -2,7 +2,6 @@ package mesos
import ( import (
"encoding/json" "encoding/json"
"fmt"
"math/rand" "math/rand"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
@ -19,17 +18,10 @@ var masterMetrics map[string]interface{}
var masterTestServer *httptest.Server var masterTestServer *httptest.Server
var slaveMetrics map[string]interface{} var slaveMetrics map[string]interface{}
// var slaveTaskMetrics map[string]interface{}
var slaveTestServer *httptest.Server var slaveTestServer *httptest.Server
func randUUID() string {
b := make([]byte, 16)
rand.Read(b)
return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
}
// master metrics that will be returned by generateMetrics() // master metrics that will be returned by generateMetrics()
var masterMetricNames []string = []string{ var masterMetricNames = []string{
// resources // resources
"master/cpus_percent", "master/cpus_percent",
"master/cpus_used", "master/cpus_used",
@ -214,7 +206,7 @@ var masterMetricNames []string = []string{
} }
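
The masterMetricNames declaration just above shows [rule.var-declaration]: the explicit `[]string` type is dropped because the composite literal already determines it. A sketch reusing two of the metric names from the hunk:

package sketch

// Before: `var names []string = []string{...}` spells out a type the
// literal already fixes. After: the type is inferred.
var names = []string{
	"master/cpus_percent",
	"master/cpus_used",
}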
// slave metrics that will be returned by generateMetrics() // slave metrics that will be returned by generateMetrics()
var slaveMetricNames []string = []string{ var slaveMetricNames = []string{
// resources // resources
"slave/cpus_percent", "slave/cpus_percent",
"slave/cpus_used", "slave/cpus_used",

View File

@ -25,7 +25,7 @@ type Connector interface {
Connect() (Connection, error) Connect() (Connection, error)
} }
func NewConnector(hostname, port, password string) (*connector, error) { func newConnector(hostname, port, password string) (*connector, error) {
return &connector{ return &connector{
hostname: hostname, hostname: hostname,
port: port, port: port,
@ -58,7 +58,7 @@ func (c *connector) Connect() (Connection, error) {
return &connection{rcon: rcon}, nil return &connection{rcon: rcon}, nil
} }
func NewClient(connector Connector) (*client, error) { func newClient(connector Connector) (*client, error) {
return &client{connector: connector}, nil return &client{connector: connector}, nil
} }
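
Renaming NewConnector and NewClient to newConnector and newClient addresses [rule.unexported-return]: an exported constructor should not return an unexported type, so here the constructors are made package-private instead. A minimal sketch, trimmed to the relevant shape:

package sketch

type connector struct {
	hostname string
	port     string
}

// newConnector stays unexported because *connector is unexported; code
// outside the package is expected to work through an exported interface
// (the Connector interface in the diff plays that role).
func newConnector(hostname, port string) (*connector, error) {
	return &connector{hostname: hostname, port: port}, nil
}

Keeping the constructor unexported, rather than exporting the struct, preserves the existing interface-based API surface.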

View File

@ -98,7 +98,7 @@ func TestClient_Player(t *testing.T) {
conn: &MockConnection{commands: tt.commands}, conn: &MockConnection{commands: tt.commands},
} }
client, err := NewClient(connector) client, err := newClient(connector)
require.NoError(t, err) require.NoError(t, err)
actual, err := client.Players() actual, err := client.Players()
@ -183,7 +183,7 @@ func TestClient_Scores(t *testing.T) {
conn: &MockConnection{commands: tt.commands}, conn: &MockConnection{commands: tt.commands},
} }
client, err := NewClient(connector) client, err := newClient(connector)
require.NoError(t, err) require.NoError(t, err)
actual, err := client.Scores(tt.player) actual, err := client.Scores(tt.player)

View File

@ -62,7 +62,7 @@ type Packet struct {
// Write method fails to write the header bytes in their little // Write method fails to write the header bytes in their little
// endian byte order. // endian byte order.
func (p Packet) Compile() (payload []byte, err error) { func (p Packet) Compile() (payload []byte, err error) {
var size int32 = p.Header.Size var size = p.Header.Size
var buffer bytes.Buffer var buffer bytes.Buffer
var padding [PacketPaddingSize]byte var padding [PacketPaddingSize]byte

View File

@ -50,12 +50,12 @@ func (s *Minecraft) SampleConfig() string {
func (s *Minecraft) Gather(acc telegraf.Accumulator) error { func (s *Minecraft) Gather(acc telegraf.Accumulator) error {
if s.client == nil { if s.client == nil {
connector, err := NewConnector(s.Server, s.Port, s.Password) connector, err := newConnector(s.Server, s.Port, s.Password)
if err != nil { if err != nil {
return err return err
} }
client, err := NewClient(connector) client, err := newClient(connector)
if err != nil { if err != nil {
return err return err
} }

View File

@ -338,11 +338,11 @@ func validateFieldContainers(t []fieldContainer, n string) error {
} }
//search name duplicate //search name duplicate
canonical_name := item.Measurement + "." + item.Name canonicalName := item.Measurement + "." + item.Name
if nameEncountered[canonical_name] { if nameEncountered[canonicalName] {
return fmt.Errorf("name '%s' is duplicated in measurement '%s' '%s' - '%s'", item.Name, item.Measurement, n, item.Name) return fmt.Errorf("name '%s' is duplicated in measurement '%s' '%s' - '%s'", item.Name, item.Measurement, n, item.Name)
} }
nameEncountered[canonical_name] = true nameEncountered[canonicalName] = true
if n == cInputRegisters || n == cHoldingRegisters { if n == cInputRegisters || n == cHoldingRegisters {
// search byte order // search byte order
@ -405,13 +405,13 @@ func removeDuplicates(elements []uint16) []uint16 {
func readRegisterValues(m *Modbus, rt string, rr registerRange) ([]byte, error) { func readRegisterValues(m *Modbus, rt string, rr registerRange) ([]byte, error) {
if rt == cDiscreteInputs { if rt == cDiscreteInputs {
return m.client.ReadDiscreteInputs(uint16(rr.address), uint16(rr.length)) return m.client.ReadDiscreteInputs(rr.address, rr.length)
} else if rt == cCoils { } else if rt == cCoils {
return m.client.ReadCoils(uint16(rr.address), uint16(rr.length)) return m.client.ReadCoils(rr.address, rr.length)
} else if rt == cInputRegisters { } else if rt == cInputRegisters {
return m.client.ReadInputRegisters(uint16(rr.address), uint16(rr.length)) return m.client.ReadInputRegisters(rr.address, rr.length)
} else if rt == cHoldingRegisters { } else if rt == cHoldingRegisters {
return m.client.ReadHoldingRegisters(uint16(rr.address), uint16(rr.length)) return m.client.ReadHoldingRegisters(rr.address, rr.length)
} else { } else {
return []byte{}, fmt.Errorf("not Valid function") return []byte{}, fmt.Errorf("not Valid function")
} }
@ -462,16 +462,16 @@ func (m *Modbus) getFields() error {
if register.Type == cInputRegisters || register.Type == cHoldingRegisters { if register.Type == cInputRegisters || register.Type == cHoldingRegisters {
for i := 0; i < len(register.Fields); i++ { for i := 0; i < len(register.Fields); i++ {
var values_t []byte var valuesT []byte
for j := 0; j < len(register.Fields[i].Address); j++ { for j := 0; j < len(register.Fields[i].Address); j++ {
tempArray := rawValues[register.Fields[i].Address[j]] tempArray := rawValues[register.Fields[i].Address[j]]
for x := 0; x < len(tempArray); x++ { for x := 0; x < len(tempArray); x++ {
values_t = append(values_t, tempArray[x]) valuesT = append(valuesT, tempArray[x])
} }
} }
register.Fields[i].value = convertDataType(register.Fields[i], values_t) register.Fields[i].value = convertDataType(register.Fields[i], valuesT)
} }
} }
@ -587,30 +587,6 @@ func convertEndianness64(o string, b []byte) uint64 {
} }
} }
func format16(f string, r uint16) interface{} {
switch f {
case "UINT16":
return r
case "INT16":
return int16(r)
default:
return r
}
}
func format32(f string, r uint32) interface{} {
switch f {
case "UINT32":
return r
case "INT32":
return int32(r)
case "FLOAT32-IEEE":
return math.Float32frombits(r)
default:
return r
}
}
func format64(f string, r uint64) interface{} { func format64(f string, r uint64) interface{} {
switch f { switch f {
case "UINT64": case "UINT64":
@ -689,7 +665,7 @@ func (m *Modbus) Gather(acc telegraf.Accumulator) error {
} }
timestamp := time.Now() timestamp := time.Now()
for retry := 0; retry <= m.Retries; retry += 1 { for retry := 0; retry <= m.Retries; retry++ {
timestamp = time.Now() timestamp = time.Now()
err := m.getFields() err := m.getFields()
if err != nil { if err != nil {

View File

@ -679,7 +679,7 @@ func TestRetrySuccessful(t *testing.T) {
if retries >= maxretries { if retries >= maxretries {
except = &mbserver.Success except = &mbserver.Success
} }
retries += 1 retries++
return data, except return data, except
}) })
@ -756,7 +756,7 @@ func TestRetryFail(t *testing.T) {
counter := 0 counter := 0
serv.RegisterFunctionHandler(1, serv.RegisterFunctionHandler(1,
func(s *mbserver.Server, frame mbserver.Framer) ([]byte, *mbserver.Exception) { func(s *mbserver.Server, frame mbserver.Framer) ([]byte, *mbserver.Exception) {
counter += 1 counter++
data := make([]byte, 2) data := make([]byte, 2)
data[0] = byte(1) data[0] = byte(1)
data[1] = byte(0) data[1] = byte(0)
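
The retry counters above are [rule.increment-decrement] fixes: `x += 1` is written `x++` (and `x -= 1` as `x--`). Sketch:

package sketch

// countUp uses the spelling the rule asks for.
func countUp(limit int) int {
	n := 0
	for n < limit {
		n++ // instead of `n += 1`
	}
	return n
}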

View File

@ -102,7 +102,7 @@ func (m *MultiFile) Gather(acc telegraf.Accumulator) error {
var value interface{} var value interface{}
var d int = 0 var d int
if _, errfmt := fmt.Sscanf(file.Conversion, "float(%d)", &d); errfmt == nil || file.Conversion == "float" { if _, errfmt := fmt.Sscanf(file.Conversion, "float(%d)", &d); errfmt == nil || file.Conversion == "float" {
var v float64 var v float64
v, err = strconv.ParseFloat(vStr, 64) v, err = strconv.ParseFloat(vStr, 64)

View File

@ -143,7 +143,6 @@ const sampleConfig = `
` `
const ( const (
defaultTimeout = 5 * time.Second
defaultPerfEventsStatementsDigestTextLimit = 120 defaultPerfEventsStatementsDigestTextLimit = 120
defaultPerfEventsStatementsLimit = 250 defaultPerfEventsStatementsLimit = 250
defaultPerfEventsStatementsTimeLimit = 86400 defaultPerfEventsStatementsTimeLimit = 86400
@ -712,8 +711,8 @@ func (m *Mysql) gatherBinaryLogs(db *sql.DB, serv string, acc telegraf.Accumulat
servtag := getDSNTag(serv) servtag := getDSNTag(serv)
tags := map[string]string{"server": servtag} tags := map[string]string{"server": servtag}
var ( var (
size uint64 = 0 size uint64
count uint64 = 0 count uint64
fileSize uint64 fileSize uint64
fileName string fileName string
) )
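
The var block above is the zero-value side of [rule.var-declaration]: initializing a declaration to 0 is redundant because Go zero-values it anyway. A sketch reusing the counter names from the hunk:

package sketch

// Before: `var size uint64 = 0` — the explicit zero adds nothing.
// After: the declarations rely on Go's zero values.
var (
	size  uint64
	count uint64
)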
@ -893,16 +892,16 @@ func (m *Mysql) GatherProcessListStatuses(db *sql.DB, serv string, acc telegraf.
} }
// get count of connections from each user // get count of connections from each user
conn_rows, err := db.Query("SELECT user, sum(1) AS connections FROM INFORMATION_SCHEMA.PROCESSLIST GROUP BY user") connRows, err := db.Query("SELECT user, sum(1) AS connections FROM INFORMATION_SCHEMA.PROCESSLIST GROUP BY user")
if err != nil { if err != nil {
return err return err
} }
for conn_rows.Next() { for connRows.Next() {
var user string var user string
var connections int64 var connections int64
err = conn_rows.Scan(&user, &connections) err = connRows.Scan(&user, &connections)
if err != nil { if err != nil {
return err return err
} }
@ -990,141 +989,141 @@ func getColSlice(l int) ([]interface{}, error) {
// list of all possible column names // list of all possible column names
var ( var (
user string user string
total_connections int64 totalConnections int64
concurrent_connections int64 concurrentConnections int64
connected_time int64 connectedTime int64
busy_time int64 busyTime int64
cpu_time int64 cpuTime int64
bytes_received int64 bytesReceived int64
bytes_sent int64 bytesSent int64
binlog_bytes_written int64 binlogBytesWritten int64
rows_read int64 rowsRead int64
rows_sent int64 rowsSent int64
rows_deleted int64 rowsDeleted int64
rows_inserted int64 rowsInserted int64
rows_updated int64 rowsUpdated int64
select_commands int64 selectCommands int64
update_commands int64 updateCommands int64
other_commands int64 otherCommands int64
commit_transactions int64 commitTransactions int64
rollback_transactions int64 rollbackTransactions int64
denied_connections int64 deniedConnections int64
lost_connections int64 lostConnections int64
access_denied int64 accessDenied int64
empty_queries int64 emptyQueries int64
total_ssl_connections int64 totalSslConnections int64
max_statement_time_exceeded int64 maxStatementTimeExceeded int64
// maria specific // maria specific
fbusy_time float64 fbusyTime float64
fcpu_time float64 fcpuTime float64
// percona specific // percona specific
rows_fetched int64 rowsFetched int64
table_rows_read int64 tableRowsRead int64
) )
switch l { switch l {
case 23: // maria5 case 23: // maria5
return []interface{}{ return []interface{}{
&user, &user,
&total_connections, &totalConnections,
&concurrent_connections, &concurrentConnections,
&connected_time, &connectedTime,
&fbusy_time, &fbusyTime,
&fcpu_time, &fcpuTime,
&bytes_received, &bytesReceived,
&bytes_sent, &bytesSent,
&binlog_bytes_written, &binlogBytesWritten,
&rows_read, &rowsRead,
&rows_sent, &rowsSent,
&rows_deleted, &rowsDeleted,
&rows_inserted, &rowsInserted,
&rows_updated, &rowsUpdated,
&select_commands, &selectCommands,
&update_commands, &updateCommands,
&other_commands, &otherCommands,
&commit_transactions, &commitTransactions,
&rollback_transactions, &rollbackTransactions,
&denied_connections, &deniedConnections,
&lost_connections, &lostConnections,
&access_denied, &accessDenied,
&empty_queries, &emptyQueries,
}, nil }, nil
case 25: // maria10 case 25: // maria10
return []interface{}{ return []interface{}{
&user, &user,
&total_connections, &totalConnections,
&concurrent_connections, &concurrentConnections,
&connected_time, &connectedTime,
&fbusy_time, &fbusyTime,
&fcpu_time, &fcpuTime,
&bytes_received, &bytesReceived,
&bytes_sent, &bytesSent,
&binlog_bytes_written, &binlogBytesWritten,
&rows_read, &rowsRead,
&rows_sent, &rowsSent,
&rows_deleted, &rowsDeleted,
&rows_inserted, &rowsInserted,
&rows_updated, &rowsUpdated,
&select_commands, &selectCommands,
&update_commands, &updateCommands,
&other_commands, &otherCommands,
&commit_transactions, &commitTransactions,
&rollback_transactions, &rollbackTransactions,
&denied_connections, &deniedConnections,
&lost_connections, &lostConnections,
&access_denied, &accessDenied,
&empty_queries, &emptyQueries,
&total_ssl_connections, &totalSslConnections,
&max_statement_time_exceeded, &maxStatementTimeExceeded,
}, nil }, nil
case 21: // mysql 5.5 case 21: // mysql 5.5
return []interface{}{ return []interface{}{
&user, &user,
&total_connections, &totalConnections,
&concurrent_connections, &concurrentConnections,
&connected_time, &connectedTime,
&busy_time, &busyTime,
&cpu_time, &cpuTime,
&bytes_received, &bytesReceived,
&bytes_sent, &bytesSent,
&binlog_bytes_written, &binlogBytesWritten,
&rows_fetched, &rowsFetched,
&rows_updated, &rowsUpdated,
&table_rows_read, &tableRowsRead,
&select_commands, &selectCommands,
&update_commands, &updateCommands,
&other_commands, &otherCommands,
&commit_transactions, &commitTransactions,
&rollback_transactions, &rollbackTransactions,
&denied_connections, &deniedConnections,
&lost_connections, &lostConnections,
&access_denied, &accessDenied,
&empty_queries, &emptyQueries,
}, nil }, nil
case 22: // percona case 22: // percona
return []interface{}{ return []interface{}{
&user, &user,
&total_connections, &totalConnections,
&concurrent_connections, &concurrentConnections,
&connected_time, &connectedTime,
&busy_time, &busyTime,
&cpu_time, &cpuTime,
&bytes_received, &bytesReceived,
&bytes_sent, &bytesSent,
&binlog_bytes_written, &binlogBytesWritten,
&rows_fetched, &rowsFetched,
&rows_updated, &rowsUpdated,
&table_rows_read, &tableRowsRead,
&select_commands, &selectCommands,
&update_commands, &updateCommands,
&other_commands, &otherCommands,
&commit_transactions, &commitTransactions,
&rollback_transactions, &rollbackTransactions,
&denied_connections, &deniedConnections,
&lost_connections, &lostConnections,
&access_denied, &accessDenied,
&empty_queries, &emptyQueries,
&total_ssl_connections, &totalSslConnections,
}, nil }, nil
} }
@ -1685,7 +1684,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, serv string, acc telegraf
defer rows.Close() defer rows.Close()
var ( var (
schemaName, digest, digest_text string schemaName, digest, digestText string
count, queryTime, errors, warnings float64 count, queryTime, errors, warnings float64
rowsAffected, rowsSent, rowsExamined float64 rowsAffected, rowsSent, rowsExamined float64
tmpTables, tmpDiskTables float64 tmpTables, tmpDiskTables float64
@ -1700,7 +1699,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, serv string, acc telegraf
for rows.Next() { for rows.Next() {
err = rows.Scan( err = rows.Scan(
&schemaName, &digest, &digest_text, &schemaName, &digest, &digestText,
&count, &queryTime, &errors, &warnings, &count, &queryTime, &errors, &warnings,
&rowsAffected, &rowsSent, &rowsExamined, &rowsAffected, &rowsSent, &rowsExamined,
&tmpTables, &tmpDiskTables, &tmpTables, &tmpDiskTables,
@ -1713,7 +1712,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, serv string, acc telegraf
} }
tags["schema"] = schemaName tags["schema"] = schemaName
tags["digest"] = digest tags["digest"] = digest
tags["digest_text"] = digest_text tags["digest_text"] = digestText
fields := map[string]interface{}{ fields := map[string]interface{}{
"events_statements_total": count, "events_statements_total": count,

View File

@ -20,7 +20,7 @@ type NetIOStats struct {
Interfaces []string Interfaces []string
} }
func (_ *NetIOStats) Description() string { func (n *NetIOStats) Description() string {
return "Read metrics about network interface usage" return "Read metrics about network interface usage"
} }
@ -38,18 +38,18 @@ var netSampleConfig = `
## ##
` `
func (_ *NetIOStats) SampleConfig() string { func (n *NetIOStats) SampleConfig() string {
return netSampleConfig return netSampleConfig
} }
func (s *NetIOStats) Gather(acc telegraf.Accumulator) error { func (n *NetIOStats) Gather(acc telegraf.Accumulator) error {
netio, err := s.ps.NetIO() netio, err := n.ps.NetIO()
if err != nil { if err != nil {
return fmt.Errorf("error getting net io info: %s", err) return fmt.Errorf("error getting net io info: %s", err)
} }
if s.filter == nil { if n.filter == nil {
if s.filter, err = filter.Compile(s.Interfaces); err != nil { if n.filter, err = filter.Compile(n.Interfaces); err != nil {
return fmt.Errorf("error compiling filter: %s", err) return fmt.Errorf("error compiling filter: %s", err)
} }
} }
@ -64,17 +64,17 @@ func (s *NetIOStats) Gather(acc telegraf.Accumulator) error {
} }
for _, io := range netio { for _, io := range netio {
if len(s.Interfaces) != 0 { if len(n.Interfaces) != 0 {
var found bool var found bool
if s.filter.Match(io.Name) { if n.filter.Match(io.Name) {
found = true found = true
} }
if !found { if !found {
continue continue
} }
} else if !s.skipChecks { } else if !n.skipChecks {
iface, ok := interfacesByName[io.Name] iface, ok := interfacesByName[io.Name]
if !ok { if !ok {
continue continue
@ -108,8 +108,8 @@ func (s *NetIOStats) Gather(acc telegraf.Accumulator) error {
// Get system wide stats for different network protocols // Get system wide stats for different network protocols
// (ignore these stats if the call fails) // (ignore these stats if the call fails)
if !s.IgnoreProtocolStats { if !n.IgnoreProtocolStats {
netprotos, _ := s.ps.NetProto() netprotos, _ := n.ps.NetProto()
fields := make(map[string]interface{}) fields := make(map[string]interface{})
for _, proto := range netprotos { for _, proto := range netprotos {
for stat, value := range proto.Stats { for stat, value := range proto.Stats {

View File

@ -13,18 +13,18 @@ type NetStats struct {
ps system.PS ps system.PS
} }
func (_ *NetStats) Description() string { func (ns *NetStats) Description() string {
return "Read TCP metrics such as established, time wait and sockets counts." return "Read TCP metrics such as established, time wait and sockets counts."
} }
var tcpstatSampleConfig = "" var tcpstatSampleConfig = ""
func (_ *NetStats) SampleConfig() string { func (ns *NetStats) SampleConfig() string {
return tcpstatSampleConfig return tcpstatSampleConfig
} }
func (s *NetStats) Gather(acc telegraf.Accumulator) error { func (ns *NetStats) Gather(acc telegraf.Accumulator) error {
netconns, err := s.ps.NetConnections() netconns, err := ns.ps.NetConnections()
if err != nil { if err != nil {
return fmt.Errorf("error getting net connections info: %s", err) return fmt.Errorf("error getting net connections info: %s", err)
} }
@ -35,7 +35,7 @@ func (s *NetStats) Gather(acc telegraf.Accumulator) error {
tags := map[string]string{} tags := map[string]string{}
for _, netcon := range netconns { for _, netcon := range netconns {
if netcon.Type == syscall.SOCK_DGRAM { if netcon.Type == syscall.SOCK_DGRAM {
counts["UDP"] += 1 counts["UDP"]++
continue // UDP has no status continue // UDP has no status
} }
c, ok := counts[netcon.Status] c, ok := counts[netcon.Status]

View File

@ -14,7 +14,7 @@ import (
) )
// Mapping of ntpq header names to tag keys // Mapping of ntpq header names to tag keys
var tagHeaders map[string]string = map[string]string{ var tagHeaders = map[string]string{
"remote": "remote", "remote": "remote",
"refid": "refid", "refid": "refid",
"st": "stratum", "st": "stratum",
@ -128,7 +128,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
case strings.HasSuffix(when, "h"): case strings.HasSuffix(when, "h"):
m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "h")) m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "h"))
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("E! Error ntpq: parsing int: %s", fields[index])) acc.AddError(fmt.Errorf("error ntpq: parsing int: %s", fields[index]))
continue continue
} }
// seconds in an hour // seconds in an hour
@ -137,7 +137,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
case strings.HasSuffix(when, "d"): case strings.HasSuffix(when, "d"):
m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "d")) m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "d"))
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("E! Error ntpq: parsing int: %s", fields[index])) acc.AddError(fmt.Errorf("error ntpq: parsing int: %s", fields[index]))
continue continue
} }
// seconds in a day // seconds in a day
@ -146,7 +146,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
case strings.HasSuffix(when, "m"): case strings.HasSuffix(when, "m"):
m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "m")) m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "m"))
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("E! Error ntpq: parsing int: %s", fields[index])) acc.AddError(fmt.Errorf("error ntpq: parsing int: %s", fields[index]))
continue continue
} }
// seconds in a day // seconds in a day
@ -157,7 +157,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
m, err := strconv.Atoi(fields[index]) m, err := strconv.Atoi(fields[index])
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("E! Error ntpq: parsing int: %s", fields[index])) acc.AddError(fmt.Errorf("error ntpq: parsing int: %s", fields[index]))
continue continue
} }
mFields[key] = int64(m) mFields[key] = int64(m)
@ -174,7 +174,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
m, err := strconv.ParseFloat(fields[index], 64) m, err := strconv.ParseFloat(fields[index], 64)
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("E! Error ntpq: parsing float: %s", fields[index])) acc.AddError(fmt.Errorf("error ntpq: parsing float: %s", fields[index]))
continue continue
} }
mFields[key] = m mFields[key] = m
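
The ntpq error messages drop the `E!` prefix and the capital letter: by Go convention error strings are lowercase and unprefixed, because callers (here the accumulator) decide how to log them. A minimal sketch with a made-up helper:

package sketch

import "fmt"

// wrapParseError is a hypothetical helper: the message stays lowercase
// and carries no log-level prefix, since the caller chooses how to
// report it.
func wrapParseError(field string, err error) error {
	return fmt.Errorf("error ntpq: parsing %q: %w", field, err)
}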

View File

@ -32,26 +32,26 @@ func (p *passenger) parseCommand() (string, []string) {
} }
type info struct { type info struct {
Passenger_version string `xml:"passenger_version"` PassengerVersion string `xml:"passenger_version"`
Process_count int `xml:"process_count"` ProcessCount int `xml:"process_count"`
Capacity_used int `xml:"capacity_used"` CapacityUsed int `xml:"capacity_used"`
Get_wait_list_size int `xml:"get_wait_list_size"` GetWaitListSize int `xml:"get_wait_list_size"`
Max int `xml:"max"` Max int `xml:"max"`
Supergroups struct { Supergroups struct {
Supergroup []struct { Supergroup []struct {
Name string `xml:"name"` Name string `xml:"name"`
Get_wait_list_size int `xml:"get_wait_list_size"` GetWaitListSize int `xml:"get_wait_list_size"`
Capacity_used int `xml:"capacity_used"` CapacityUsed int `xml:"capacity_used"`
Group []struct { Group []struct {
Name string `xml:"name"` Name string `xml:"name"`
AppRoot string `xml:"app_root"` AppRoot string `xml:"app_root"`
AppType string `xml:"app_type"` AppType string `xml:"app_type"`
Enabled_process_count int `xml:"enabled_process_count"` EnabledProcessCount int `xml:"enabled_process_count"`
Disabling_process_count int `xml:"disabling_process_count"` DisablingProcessCount int `xml:"disabling_process_count"`
Disabled_process_count int `xml:"disabled_process_count"` DisabledProcessCount int `xml:"disabled_process_count"`
Capacity_used int `xml:"capacity_used"` CapacityUsed int `xml:"capacity_used"`
Get_wait_list_size int `xml:"get_wait_list_size"` GetWaitListSize int `xml:"get_wait_list_size"`
Processes_being_spawned int `xml:"processes_being_spawned"` ProcessesBeingSpawned int `xml:"processes_being_spawned"`
Processes struct { Processes struct {
Process []*process `xml:"process"` Process []*process `xml:"process"`
} `xml:"processes"` } `xml:"processes"`
@ -66,23 +66,23 @@ type process struct {
Sessions int `xml:"sessions"` Sessions int `xml:"sessions"`
Busyness int `xml:"busyness"` Busyness int `xml:"busyness"`
Processed int `xml:"processed"` Processed int `xml:"processed"`
Spawner_creation_time int64 `xml:"spawner_creation_time"` SpawnerCreationTime int64 `xml:"spawner_creation_time"`
Spawn_start_time int64 `xml:"spawn_start_time"` SpawnStartTime int64 `xml:"spawn_start_time"`
Spawn_end_time int64 `xml:"spawn_end_time"` SpawnEndTime int64 `xml:"spawn_end_time"`
Last_used int64 `xml:"last_used"` LastUsed int64 `xml:"last_used"`
Uptime string `xml:"uptime"` Uptime string `xml:"uptime"`
Code_revision string `xml:"code_revision"` CodeRevision string `xml:"code_revision"`
Life_status string `xml:"life_status"` LifeStatus string `xml:"life_status"`
Enabled string `xml:"enabled"` Enabled string `xml:"enabled"`
Has_metrics bool `xml:"has_metrics"` HasMetrics bool `xml:"has_metrics"`
Cpu int64 `xml:"cpu"` Cpu int64 `xml:"cpu"`
Rss int64 `xml:"rss"` Rss int64 `xml:"rss"`
Pss int64 `xml:"pss"` Pss int64 `xml:"pss"`
Private_dirty int64 `xml:"private_dirty"` PrivateDirty int64 `xml:"private_dirty"`
Swap int64 `xml:"swap"` Swap int64 `xml:"swap"`
Real_memory int64 `xml:"real_memory"` RealMemory int64 `xml:"real_memory"`
Vmsize int64 `xml:"vmsize"` Vmsize int64 `xml:"vmsize"`
Process_group_id string `xml:"process_group_id"` ProcessGroupId string `xml:"process_group_id"`
} }
func (p *process) getUptime() int64 { func (p *process) getUptime() int64 {
@ -137,31 +137,27 @@ var sampleConfig = `
command = "passenger-status -v --show=xml" command = "passenger-status -v --show=xml"
` `
func (r *passenger) SampleConfig() string { func (p *passenger) SampleConfig() string {
return sampleConfig return sampleConfig
} }
func (r *passenger) Description() string { func (p *passenger) Description() string {
return "Read metrics of passenger using passenger-status" return "Read metrics of passenger using passenger-status"
} }
func (g *passenger) Gather(acc telegraf.Accumulator) error { func (p *passenger) Gather(acc telegraf.Accumulator) error {
if g.Command == "" { if p.Command == "" {
g.Command = "passenger-status -v --show=xml" p.Command = "passenger-status -v --show=xml"
} }
cmd, args := g.parseCommand() cmd, args := p.parseCommand()
out, err := exec.Command(cmd, args...).Output() out, err := exec.Command(cmd, args...).Output()
if err != nil { if err != nil {
return err return err
} }
if err = importMetric(out, acc); err != nil { return importMetric(out, acc)
return err
}
return nil
} }
func importMetric(stat []byte, acc telegraf.Accumulator) error { func importMetric(stat []byte, acc telegraf.Accumulator) error {
@ -174,13 +170,13 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error {
} }
tags := map[string]string{ tags := map[string]string{
"passenger_version": p.Passenger_version, "passenger_version": p.PassengerVersion,
} }
fields := map[string]interface{}{ fields := map[string]interface{}{
"process_count": p.Process_count, "process_count": p.ProcessCount,
"max": p.Max, "max": p.Max,
"capacity_used": p.Capacity_used, "capacity_used": p.CapacityUsed,
"get_wait_list_size": p.Get_wait_list_size, "get_wait_list_size": p.GetWaitListSize,
} }
acc.AddFields("passenger", fields, tags) acc.AddFields("passenger", fields, tags)
@ -189,8 +185,8 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error {
"name": sg.Name, "name": sg.Name,
} }
fields := map[string]interface{}{ fields := map[string]interface{}{
"get_wait_list_size": sg.Get_wait_list_size, "get_wait_list_size": sg.GetWaitListSize,
"capacity_used": sg.Capacity_used, "capacity_used": sg.CapacityUsed,
} }
acc.AddFields("passenger_supergroup", fields, tags) acc.AddFields("passenger_supergroup", fields, tags)
@ -201,9 +197,9 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error {
"app_type": group.AppType, "app_type": group.AppType,
} }
fields := map[string]interface{}{ fields := map[string]interface{}{
"get_wait_list_size": group.Get_wait_list_size, "get_wait_list_size": group.GetWaitListSize,
"capacity_used": group.Capacity_used, "capacity_used": group.CapacityUsed,
"processes_being_spawned": group.Processes_being_spawned, "processes_being_spawned": group.ProcessesBeingSpawned,
} }
acc.AddFields("passenger_group", fields, tags) acc.AddFields("passenger_group", fields, tags)
@ -213,26 +209,26 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error {
"app_root": group.AppRoot, "app_root": group.AppRoot,
"supergroup_name": sg.Name, "supergroup_name": sg.Name,
"pid": fmt.Sprintf("%d", process.Pid), "pid": fmt.Sprintf("%d", process.Pid),
"code_revision": process.Code_revision, "code_revision": process.CodeRevision,
"life_status": process.Life_status, "life_status": process.LifeStatus,
"process_group_id": process.Process_group_id, "process_group_id": process.ProcessGroupId,
} }
fields := map[string]interface{}{ fields := map[string]interface{}{
"concurrency": process.Concurrency, "concurrency": process.Concurrency,
"sessions": process.Sessions, "sessions": process.Sessions,
"busyness": process.Busyness, "busyness": process.Busyness,
"processed": process.Processed, "processed": process.Processed,
"spawner_creation_time": process.Spawner_creation_time, "spawner_creation_time": process.SpawnerCreationTime,
"spawn_start_time": process.Spawn_start_time, "spawn_start_time": process.SpawnStartTime,
"spawn_end_time": process.Spawn_end_time, "spawn_end_time": process.SpawnEndTime,
"last_used": process.Last_used, "last_used": process.LastUsed,
"uptime": process.getUptime(), "uptime": process.getUptime(),
"cpu": process.Cpu, "cpu": process.Cpu,
"rss": process.Rss, "rss": process.Rss,
"pss": process.Pss, "pss": process.Pss,
"private_dirty": process.Private_dirty, "private_dirty": process.PrivateDirty,
"swap": process.Swap, "swap": process.Swap,
"real_memory": process.Real_memory, "real_memory": process.RealMemory,
"vmsize": process.Vmsize, "vmsize": process.Vmsize,
} }
acc.AddFields("passenger_process", fields, tags) acc.AddFields("passenger_process", fields, tags)

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// Package fcgi implements the FastCGI protocol. // Package phpfpm implements the FastCGI protocol.
// Currently only the responder role is supported. // Currently only the responder role is supported.
// The protocol is defined at http://www.fastcgi.com/drupal/node/6?q=node/22 // The protocol is defined at http://www.fastcgi.com/drupal/node/6?q=node/22
package phpfpm package phpfpm
@ -135,8 +135,8 @@ func (rec *record) read(r io.Reader) (err error) {
return nil return nil
} }
func (r *record) content() []byte { func (rec *record) content() []byte {
return r.buf[:r.h.ContentLength] return rec.buf[:rec.h.ContentLength]
} }
// writeRecord writes and sends a single record. // writeRecord writes and sends a single record.
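
The comment rewrite at the top of this file is a [rule.package-comments] fix: a package doc comment must begin with `Package <name>`, and the vendored fcgi code still named its old package. The expected shape, with a placeholder package name:

// Package sketch is a placeholder showing the expected doc-comment
// shape: it starts with "Package <name>" and sits directly above the
// package clause it documents.
package sketch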

View File

@ -33,25 +33,25 @@ func newFcgiClient(h string, args ...interface{}) (*conn, error) {
return fcgi, err return fcgi, err
} }
func (client *conn) Request( func (c *conn) Request(
env map[string]string, env map[string]string,
requestData string, requestData string,
) (retout []byte, reterr []byte, err error) { ) (retout []byte, reterr []byte, err error) {
defer client.rwc.Close() defer c.rwc.Close()
var reqId uint16 = 1 var reqId uint16 = 1
err = client.writeBeginRequest(reqId, uint16(roleResponder), 0) err = c.writeBeginRequest(reqId, uint16(roleResponder), 0)
if err != nil { if err != nil {
return return
} }
err = client.writePairs(typeParams, reqId, env) err = c.writePairs(typeParams, reqId, env)
if err != nil { if err != nil {
return return
} }
if len(requestData) > 0 { if len(requestData) > 0 {
if err = client.writeRecord(typeStdin, reqId, []byte(requestData)); err != nil { if err = c.writeRecord(typeStdin, reqId, []byte(requestData)); err != nil {
return return
} }
} }
@ -62,7 +62,7 @@ func (client *conn) Request(
// receive until EOF or FCGI_END_REQUEST // receive until EOF or FCGI_END_REQUEST
READ_LOOP: READ_LOOP:
for { for {
err1 = rec.read(client.rwc) err1 = rec.read(c.rwc)
if err1 != nil && strings.Contains(err1.Error(), "use of closed network connection") { if err1 != nil && strings.Contains(err1.Error(), "use of closed network connection") {
if err1 != io.EOF { if err1 != io.EOF {
err = err1 err = err1

View File

@ -92,11 +92,11 @@ var sampleConfig = `
# pid_finder = "pgrep" # pid_finder = "pgrep"
` `
func (_ *Procstat) SampleConfig() string { func (p *Procstat) SampleConfig() string {
return sampleConfig return sampleConfig
} }
func (_ *Procstat) Description() string { func (p *Procstat) Description() string {
return "Monitor process cpu and memory usage" return "Monitor process cpu and memory usage"
} }
@ -117,7 +117,7 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error {
p.createProcess = defaultProcess p.createProcess = defaultProcess
} }
pids, tags, err := p.findPids(acc) pids, tags, err := p.findPids()
now := time.Now() now := time.Now()
if err != nil { if err != nil {
@ -136,7 +136,7 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error {
procs, err := p.updateProcesses(pids, tags, p.procs) procs, err := p.updateProcesses(pids, tags, p.procs)
if err != nil { if err != nil {
acc.AddError(fmt.Errorf("E! Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s", acc.AddError(fmt.Errorf("procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s",
p.Exe, p.PidFile, p.Pattern, p.User, err.Error())) p.Exe, p.PidFile, p.Pattern, p.User, err.Error()))
} }
p.procs = procs p.procs = procs
@ -234,26 +234,26 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator, t time.Time
fields[prefix+"created_at"] = createdAt * 1000000 //Convert ms to ns fields[prefix+"created_at"] = createdAt * 1000000 //Convert ms to ns
} }
cpu_time, err := proc.Times() cpuTime, err := proc.Times()
if err == nil { if err == nil {
fields[prefix+"cpu_time_user"] = cpu_time.User fields[prefix+"cpu_time_user"] = cpuTime.User
fields[prefix+"cpu_time_system"] = cpu_time.System fields[prefix+"cpu_time_system"] = cpuTime.System
fields[prefix+"cpu_time_idle"] = cpu_time.Idle fields[prefix+"cpu_time_idle"] = cpuTime.Idle
fields[prefix+"cpu_time_nice"] = cpu_time.Nice fields[prefix+"cpu_time_nice"] = cpuTime.Nice
fields[prefix+"cpu_time_iowait"] = cpu_time.Iowait fields[prefix+"cpu_time_iowait"] = cpuTime.Iowait
fields[prefix+"cpu_time_irq"] = cpu_time.Irq fields[prefix+"cpu_time_irq"] = cpuTime.Irq
fields[prefix+"cpu_time_soft_irq"] = cpu_time.Softirq fields[prefix+"cpu_time_soft_irq"] = cpuTime.Softirq
fields[prefix+"cpu_time_steal"] = cpu_time.Steal fields[prefix+"cpu_time_steal"] = cpuTime.Steal
fields[prefix+"cpu_time_guest"] = cpu_time.Guest fields[prefix+"cpu_time_guest"] = cpuTime.Guest
fields[prefix+"cpu_time_guest_nice"] = cpu_time.GuestNice fields[prefix+"cpu_time_guest_nice"] = cpuTime.GuestNice
} }
cpu_perc, err := proc.Percent(time.Duration(0)) cpuPerc, err := proc.Percent(time.Duration(0))
if err == nil { if err == nil {
if p.solarisMode { if p.solarisMode {
fields[prefix+"cpu_usage"] = cpu_perc / float64(runtime.NumCPU()) fields[prefix+"cpu_usage"] = cpuPerc / float64(runtime.NumCPU())
} else { } else {
fields[prefix+"cpu_usage"] = cpu_perc fields[prefix+"cpu_usage"] = cpuPerc
} }
} }
@ -267,9 +267,9 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator, t time.Time
fields[prefix+"memory_locked"] = mem.Locked fields[prefix+"memory_locked"] = mem.Locked
} }
mem_perc, err := proc.MemoryPercent() memPerc, err := proc.MemoryPercent()
if err == nil { if err == nil {
fields[prefix+"memory_usage"] = mem_perc fields[prefix+"memory_usage"] = memPerc
} }
rlims, err := proc.RlimitUsage(true) rlims, err := proc.RlimitUsage(true)
@ -368,7 +368,7 @@ func (p *Procstat) getPIDFinder() (PIDFinder, error) {
} }
// Get matching PIDs and their initial tags // Get matching PIDs and their initial tags
func (p *Procstat) findPids(acc telegraf.Accumulator) ([]PID, map[string]string, error) { func (p *Procstat) findPids() ([]PID, map[string]string, error) {
var pids []PID var pids []PID
tags := make(map[string]string) tags := make(map[string]string)
var err error var err error
@ -400,7 +400,7 @@ func (p *Procstat) findPids(acc telegraf.Accumulator) ([]PID, map[string]string,
pids, err = p.winServicePIDs() pids, err = p.winServicePIDs()
tags = map[string]string{"win_service": p.WinService} tags = map[string]string{"win_service": p.WinService}
} else { } else {
err = fmt.Errorf("Either exe, pid_file, user, pattern, systemd_unit, cgroup, or win_service must be specified") err = fmt.Errorf("either exe, pid_file, user, pattern, systemd_unit, cgroup, or win_service must be specified")
} }
return pids, tags, err return pids, tags, err

View File

@ -30,14 +30,14 @@ func mockExecCommand(arg0 string, args ...string) *exec.Cmd {
func TestMockExecCommand(t *testing.T) { func TestMockExecCommand(t *testing.T) {
var cmd []string var cmd []string
for _, arg := range os.Args { for _, arg := range os.Args {
if string(arg) == "--" { if arg == "--" {
cmd = []string{} cmd = []string{}
continue continue
} }
if cmd == nil { if cmd == nil {
continue continue
} }
cmd = append(cmd, string(arg)) cmd = append(cmd, arg)
} }
if cmd == nil { if cmd == nil {
return return
@ -72,7 +72,7 @@ func pidFinder(pids []PID, err error) func() (PIDFinder, error) {
} }
} }
func (pg *testPgrep) PidFile(path string) ([]PID, error) { func (pg *testPgrep) PidFile(_ string) ([]PID, error) {
return pg.pids, pg.err return pg.pids, pg.err
} }
@ -80,15 +80,15 @@ func (p *testProc) Cmdline() (string, error) {
return "test_proc", nil return "test_proc", nil
} }
func (pg *testPgrep) Pattern(pattern string) ([]PID, error) { func (pg *testPgrep) Pattern(_ string) ([]PID, error) {
return pg.pids, pg.err return pg.pids, pg.err
} }
func (pg *testPgrep) Uid(user string) ([]PID, error) { func (pg *testPgrep) Uid(_ string) ([]PID, error) {
return pg.pids, pg.err return pg.pids, pg.err
} }
func (pg *testPgrep) FullPattern(pattern string) ([]PID, error) { func (pg *testPgrep) FullPattern(_ string) ([]PID, error) {
return pg.pids, pg.err return pg.pids, pg.err
} }
@ -97,7 +97,7 @@ type testProc struct {
tags map[string]string tags map[string]string
} }
func newTestProc(pid PID) (Process, error) { func newTestProc(_ PID) (Process, error) {
proc := &testProc{ proc := &testProc{
tags: make(map[string]string), tags: make(map[string]string),
} }
@ -144,7 +144,7 @@ func (p *testProc) NumThreads() (int32, error) {
return 0, nil return 0, nil
} }
func (p *testProc) Percent(interval time.Duration) (float64, error) { func (p *testProc) Percent(_ time.Duration) (float64, error) {
return 0, nil return 0, nil
} }
@ -160,12 +160,12 @@ func (p *testProc) Times() (*cpu.TimesStat, error) {
return &cpu.TimesStat{}, nil return &cpu.TimesStat{}, nil
} }
func (p *testProc) RlimitUsage(gatherUsage bool) ([]process.RlimitStat, error) { func (p *testProc) RlimitUsage(_ bool) ([]process.RlimitStat, error) {
return []process.RlimitStat{}, nil return []process.RlimitStat{}, nil
} }
var pid PID = PID(42) var pid = PID(42)
var exe string = "foo" var exe = "foo"
func TestGather_CreateProcessErrorOk(t *testing.T) { func TestGather_CreateProcessErrorOk(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
@ -363,8 +363,7 @@ func TestGather_systemdUnitPIDs(t *testing.T) {
createPIDFinder: pidFinder([]PID{}, nil), createPIDFinder: pidFinder([]PID{}, nil),
SystemdUnit: "TestGather_systemdUnitPIDs", SystemdUnit: "TestGather_systemdUnitPIDs",
} }
var acc testutil.Accumulator pids, tags, err := p.findPids()
pids, tags, err := p.findPids(&acc)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, []PID{11408}, pids) assert.Equal(t, []PID{11408}, pids)
assert.Equal(t, "TestGather_systemdUnitPIDs", tags["systemd_unit"]) assert.Equal(t, "TestGather_systemdUnitPIDs", tags["systemd_unit"])
@ -385,8 +384,7 @@ func TestGather_cgroupPIDs(t *testing.T) {
createPIDFinder: pidFinder([]PID{}, nil), createPIDFinder: pidFinder([]PID{}, nil),
CGroup: td, CGroup: td,
} }
var acc testutil.Accumulator pids, tags, err := p.findPids()
pids, tags, err := p.findPids(&acc)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, []PID{1234, 5678}, pids) assert.Equal(t, []PID{1234, 5678}, pids)
assert.Equal(t, td, tags["cgroup"]) assert.Equal(t, td, tags["cgroup"])
@ -415,7 +413,7 @@ func TestGather_SameTimestamps(t *testing.T) {
require.NoError(t, acc.GatherError(p.Gather)) require.NoError(t, acc.GatherError(p.Gather))
procstat, _ := acc.Get("procstat") procstat, _ := acc.Get("procstat")
procstat_lookup, _ := acc.Get("procstat_lookup") procstatLookup, _ := acc.Get("procstat_lookup")
require.Equal(t, procstat.Time, procstat_lookup.Time) require.Equal(t, procstat.Time, procstatLookup.Time)
} }
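
The test doubles above replace parameters they never read (path, pattern, user, interval, gatherUsage) with the blank identifier, keeping the interfaces satisfied while making the unused inputs explicit. A small sketch with a made-up fake:

package sketch

type fakeFinder struct{ pids []int }

// Pattern never reads its argument, so the parameter is declared as `_`,
// which keeps the signature while signalling the unused input.
func (f *fakeFinder) Pattern(_ string) ([]int, error) {
	return f.pids, nil
}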

View File

@ -230,7 +230,7 @@ var Tracking = map[string]string{
"role": "replication_role", "role": "replication_role",
} }
func (r *Redis) init(acc telegraf.Accumulator) error { func (r *Redis) init() error {
if r.initialized { if r.initialized {
return nil return nil
} }
@ -307,7 +307,7 @@ func (r *Redis) init(acc telegraf.Accumulator) error {
// Returns one of the errors encountered while gather stats (if any). // Returns one of the errors encountered while gather stats (if any).
func (r *Redis) Gather(acc telegraf.Accumulator) error { func (r *Redis) Gather(acc telegraf.Accumulator) error {
if !r.initialized { if !r.initialized {
err := r.init(acc) err := r.init()
if err != nil { if err != nil {
return err return err
} }
@ -361,7 +361,7 @@ func gatherInfoOutput(
tags map[string]string, tags map[string]string,
) error { ) error {
var section string var section string
var keyspace_hits, keyspace_misses int64 var keyspaceHits, keyspaceMisses int64
scanner := bufio.NewScanner(rdr) scanner := bufio.NewScanner(rdr)
fields := make(map[string]interface{}) fields := make(map[string]interface{})
@ -383,7 +383,7 @@ func gatherInfoOutput(
if len(parts) < 2 { if len(parts) < 2 {
continue continue
} }
name := string(parts[0]) name := parts[0]
if section == "Server" { if section == "Server" {
if name != "lru_clock" && name != "uptime_in_seconds" && name != "redis_version" { if name != "lru_clock" && name != "uptime_in_seconds" && name != "redis_version" {
@ -406,7 +406,7 @@ func gatherInfoOutput(
metric, ok := Tracking[name] metric, ok := Tracking[name]
if !ok { if !ok {
if section == "Keyspace" { if section == "Keyspace" {
kline := strings.TrimSpace(string(parts[1])) kline := strings.TrimSpace(parts[1])
gatherKeyspaceLine(name, kline, acc, tags) gatherKeyspaceLine(name, kline, acc, tags)
continue continue
} }
@ -433,9 +433,9 @@ func gatherInfoOutput(
if ival, err := strconv.ParseInt(val, 10, 64); err == nil { if ival, err := strconv.ParseInt(val, 10, 64); err == nil {
switch name { switch name {
case "keyspace_hits": case "keyspace_hits":
keyspace_hits = ival keyspaceHits = ival
case "keyspace_misses": case "keyspace_misses":
keyspace_misses = ival keyspaceMisses = ival
case "rdb_last_save_time": case "rdb_last_save_time":
// influxdb can't calculate this, so we have to do it // influxdb can't calculate this, so we have to do it
fields["rdb_last_save_time_elapsed"] = time.Now().Unix() - ival fields["rdb_last_save_time_elapsed"] = time.Now().Unix() - ival
@ -459,11 +459,11 @@ func gatherInfoOutput(
fields[metric] = val fields[metric] = val
} }
var keyspace_hitrate float64 = 0.0 var keyspaceHitrate float64
if keyspace_hits != 0 || keyspace_misses != 0 { if keyspaceHits != 0 || keyspaceMisses != 0 {
keyspace_hitrate = float64(keyspace_hits) / float64(keyspace_hits+keyspace_misses) keyspaceHitrate = float64(keyspaceHits) / float64(keyspaceHits+keyspaceMisses)
} }
fields["keyspace_hitrate"] = keyspace_hitrate fields["keyspace_hitrate"] = keyspaceHitrate
o := RedisFieldTypes{} o := RedisFieldTypes{}
@ -482,12 +482,12 @@ func gatherKeyspaceLine(
name string, name string,
line string, line string,
acc telegraf.Accumulator, acc telegraf.Accumulator,
global_tags map[string]string, globalTags map[string]string,
) { ) {
if strings.Contains(line, "keys=") { if strings.Contains(line, "keys=") {
fields := make(map[string]interface{}) fields := make(map[string]interface{})
tags := make(map[string]string) tags := make(map[string]string)
for k, v := range global_tags { for k, v := range globalTags {
tags[k] = v tags[k] = v
} }
tags["database"] = name tags["database"] = name
@ -511,7 +511,7 @@ func gatherCommandstateLine(
name string, name string,
line string, line string,
acc telegraf.Accumulator, acc telegraf.Accumulator,
global_tags map[string]string, globalTags map[string]string,
) { ) {
if !strings.HasPrefix(name, "cmdstat") { if !strings.HasPrefix(name, "cmdstat") {
return return
@ -519,7 +519,7 @@ func gatherCommandstateLine(
fields := make(map[string]interface{}) fields := make(map[string]interface{})
tags := make(map[string]string) tags := make(map[string]string)
for k, v := range global_tags { for k, v := range globalTags {
tags[k] = v tags[k] = v
} }
tags["command"] = strings.TrimPrefix(name, "cmdstat_") tags["command"] = strings.TrimPrefix(name, "cmdstat_")
@ -556,11 +556,11 @@ func gatherReplicationLine(
name string, name string,
line string, line string,
acc telegraf.Accumulator, acc telegraf.Accumulator,
global_tags map[string]string, globalTags map[string]string,
) { ) {
fields := make(map[string]interface{}) fields := make(map[string]interface{})
tags := make(map[string]string) tags := make(map[string]string)
for k, v := range global_tags { for k, v := range globalTags {
tags[k] = v tags[k] = v
} }

View File

@ -459,7 +459,7 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) {
i := f.OidIndexLength + 1 // leading separator i := f.OidIndexLength + 1 // leading separator
idx = strings.Map(func(r rune) rune { idx = strings.Map(func(r rune) rune {
if r == '.' { if r == '.' {
i -= 1 i--
} }
if i < 1 { if i < 1 {
return -1 return -1
@ -641,7 +641,7 @@ func fieldConvert(conv string, v interface{}) (interface{}, error) {
case int32: case int32:
v = int64(vt) v = int64(vt)
case int64: case int64:
v = int64(vt) v = vt
case uint: case uint:
v = int64(vt) v = int64(vt)
case uint8: case uint8:
@ -864,28 +864,6 @@ func SnmpTranslate(oid string) (mibName string, oidNum string, oidText string, c
return stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err return stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err
} }
func SnmpTranslateForce(oid string, mibName string, oidNum string, oidText string, conversion string) {
snmpTranslateCachesLock.Lock()
defer snmpTranslateCachesLock.Unlock()
if snmpTranslateCaches == nil {
snmpTranslateCaches = map[string]snmpTranslateCache{}
}
var stc snmpTranslateCache
stc.mibName = mibName
stc.oidNum = oidNum
stc.oidText = oidText
stc.conversion = conversion
stc.err = nil
snmpTranslateCaches[oid] = stc
}
func SnmpTranslateClear() {
snmpTranslateCachesLock.Lock()
defer snmpTranslateCachesLock.Unlock()
snmpTranslateCaches = map[string]snmpTranslateCache{}
}
func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) {
var out []byte var out []byte
if strings.ContainsAny(oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") { if strings.ContainsAny(oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") {

View File

@ -18,10 +18,6 @@ import (
const mbeansPath = "/admin/mbeans?stats=true&wt=json&cat=CORE&cat=QUERYHANDLER&cat=UPDATEHANDLER&cat=CACHE" const mbeansPath = "/admin/mbeans?stats=true&wt=json&cat=CORE&cat=QUERYHANDLER&cat=UPDATEHANDLER&cat=CACHE"
const adminCoresPath = "/solr/admin/cores?action=STATUS&wt=json" const adminCoresPath = "/solr/admin/cores?action=STATUS&wt=json"
type node struct {
Host string `json:"host"`
}
const sampleConfig = ` const sampleConfig = `
## specify a list of one or more Solr servers ## specify a list of one or more Solr servers
servers = ["http://localhost:8983"] servers = ["http://localhost:8983"]
@ -497,10 +493,8 @@ func (s *Solr) gatherData(url string, v interface{}) error {
return fmt.Errorf("solr: API responded with status-code %d, expected %d, url %s", return fmt.Errorf("solr: API responded with status-code %d, expected %d, url %s",
r.StatusCode, http.StatusOK, url) r.StatusCode, http.StatusOK, url)
} }
if err = json.NewDecoder(r.Body).Decode(v); err != nil {
return err return json.NewDecoder(r.Body).Decode(v)
}
return nil
} }
func init() { func init() {

View File

@ -201,24 +201,24 @@ func (g *lockedSeriesGrouper) Add(
} }
// ListMetricDescriptors implements metricClient interface // ListMetricDescriptors implements metricClient interface
func (c *stackdriverMetricClient) ListMetricDescriptors( func (smc *stackdriverMetricClient) ListMetricDescriptors(
ctx context.Context, ctx context.Context,
req *monitoringpb.ListMetricDescriptorsRequest, req *monitoringpb.ListMetricDescriptorsRequest,
) (<-chan *metricpb.MetricDescriptor, error) { ) (<-chan *metricpb.MetricDescriptor, error) {
mdChan := make(chan *metricpb.MetricDescriptor, 1000) mdChan := make(chan *metricpb.MetricDescriptor, 1000)
go func() { go func() {
c.log.Debugf("List metric descriptor request filter: %s", req.Filter) smc.log.Debugf("List metric descriptor request filter: %s", req.Filter)
defer close(mdChan) defer close(mdChan)
// Iterate over metric descriptors and send them to buffered channel // Iterate over metric descriptors and send them to buffered channel
mdResp := c.conn.ListMetricDescriptors(ctx, req) mdResp := smc.conn.ListMetricDescriptors(ctx, req)
c.listMetricDescriptorsCalls.Incr(1) smc.listMetricDescriptorsCalls.Incr(1)
for { for {
mdDesc, mdErr := mdResp.Next() mdDesc, mdErr := mdResp.Next()
if mdErr != nil { if mdErr != nil {
if mdErr != iterator.Done { if mdErr != iterator.Done {
c.log.Errorf("Failed iterating metric descriptor responses: %q: %v", req.String(), mdErr) smc.log.Errorf("Failed iterating metric descriptor responses: %q: %v", req.String(), mdErr)
} }
break break
} }
@ -230,24 +230,24 @@ func (c *stackdriverMetricClient) ListMetricDescriptors(
} }
// ListTimeSeries implements metricClient interface // ListTimeSeries implements metricClient interface
func (c *stackdriverMetricClient) ListTimeSeries( func (smc *stackdriverMetricClient) ListTimeSeries(
ctx context.Context, ctx context.Context,
req *monitoringpb.ListTimeSeriesRequest, req *monitoringpb.ListTimeSeriesRequest,
) (<-chan *monitoringpb.TimeSeries, error) { ) (<-chan *monitoringpb.TimeSeries, error) {
tsChan := make(chan *monitoringpb.TimeSeries, 1000) tsChan := make(chan *monitoringpb.TimeSeries, 1000)
go func() { go func() {
c.log.Debugf("List time series request filter: %s", req.Filter) smc.log.Debugf("List time series request filter: %s", req.Filter)
defer close(tsChan) defer close(tsChan)
// Iterate over timeseries and send them to buffered channel // Iterate over timeseries and send them to buffered channel
tsResp := c.conn.ListTimeSeries(ctx, req) tsResp := smc.conn.ListTimeSeries(ctx, req)
c.listTimeSeriesCalls.Incr(1) smc.listTimeSeriesCalls.Incr(1)
for { for {
tsDesc, tsErr := tsResp.Next() tsDesc, tsErr := tsResp.Next()
if tsErr != nil { if tsErr != nil {
if tsErr != iterator.Done { if tsErr != iterator.Done {
c.log.Errorf("Failed iterating time series responses: %q: %v", req.String(), tsErr) smc.log.Errorf("Failed iterating time series responses: %q: %v", req.String(), tsErr)
} }
break break
} }
@ -259,8 +259,8 @@ func (c *stackdriverMetricClient) ListTimeSeries(
} }
// Close implements metricClient interface // Close implements metricClient interface
func (s *stackdriverMetricClient) Close() error { func (smc *stackdriverMetricClient) Close() error {
return s.conn.Close() return smc.conn.Close()
} }
// Description implements telegraf.Input interface // Description implements telegraf.Input interface
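
The stackdriver hunks above apply revive's receiver-naming rule: every method of a type uses the same descriptive receiver name (here smc) instead of mixing c and s. A minimal sketch with a made-up type:

package example

// counter is illustrative; the point is that all three methods share the
// receiver name c, so readers never wonder whether two names refer to the
// same value.
type counter struct{ n int }

func (c *counter) Add(delta int) { c.n += delta }
func (c *counter) Reset()        { c.n = 0 }
func (c *counter) Value() int    { return c.n }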

View File

@ -21,9 +21,9 @@ import (
) )
const ( const (
// UDP_MAX_PACKET_SIZE is the UDP packet limit, see // UdpMaxPacketSize is the UDP packet limit, see
// https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure // https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure
UDP_MAX_PACKET_SIZE int = 64 * 1024 UdpMaxPacketSize int = 64 * 1024
defaultFieldName = "value" defaultFieldName = "value"
@ -31,7 +31,6 @@ const (
defaultSeparator = "_" defaultSeparator = "_"
defaultAllowPendingMessage = 10000 defaultAllowPendingMessage = 10000
MaxTCPConnections = 250
parserGoRoutines = 5 parserGoRoutines = 5
) )
@ -203,7 +202,7 @@ type cacheddistributions struct {
tags map[string]string tags map[string]string
} }
func (_ *Statsd) Description() string { func (s *Statsd) Description() string {
return "Statsd UDP/TCP Server" return "Statsd UDP/TCP Server"
} }
@ -273,7 +272,7 @@ const sampleConfig = `
#max_ttl = "1000h" #max_ttl = "1000h"
` `
func (_ *Statsd) SampleConfig() string { func (s *Statsd) SampleConfig() string {
return sampleConfig return sampleConfig
} }
@ -499,7 +498,7 @@ func (s *Statsd) udpListen(conn *net.UDPConn) error {
s.UDPlistener.SetReadBuffer(s.ReadBufferSize) s.UDPlistener.SetReadBuffer(s.ReadBufferSize)
} }
buf := make([]byte, UDP_MAX_PACKET_SIZE) buf := make([]byte, UdpMaxPacketSize)
for { for {
select { select {
case <-s.done: case <-s.done:

View File

@ -12,14 +12,14 @@ type SwapStats struct {
ps system.PS ps system.PS
} }
func (_ *SwapStats) Description() string { func (ss *SwapStats) Description() string {
return "Read metrics about swap memory usage" return "Read metrics about swap memory usage"
} }
func (_ *SwapStats) SampleConfig() string { return "" } func (ss *SwapStats) SampleConfig() string { return "" }
func (s *SwapStats) Gather(acc telegraf.Accumulator) error { func (ss *SwapStats) Gather(acc telegraf.Accumulator) error {
swap, err := s.ps.SwapStat() swap, err := ss.ps.SwapStat()
if err != nil { if err != nil {
return fmt.Errorf("error getting swap memory info: %s", err) return fmt.Errorf("error getting swap memory info: %s", err)
} }

View File

@ -2,7 +2,6 @@ package webhooks
import ( import (
"fmt" "fmt"
"log"
"net" "net"
"net/http" "net/http"
"reflect" "reflect"
@ -28,14 +27,16 @@ func init() {
} }
type Webhooks struct { type Webhooks struct {
ServiceAddress string ServiceAddress string `toml:"service_address"`
Github *github.GithubWebhook Github *github.GithubWebhook `toml:"github"`
Filestack *filestack.FilestackWebhook Filestack *filestack.FilestackWebhook `toml:"filestack"`
Mandrill *mandrill.MandrillWebhook Mandrill *mandrill.MandrillWebhook `toml:"mandrill"`
Rollbar *rollbar.RollbarWebhook Rollbar *rollbar.RollbarWebhook `toml:"rollbar"`
Papertrail *papertrail.PapertrailWebhook Papertrail *papertrail.PapertrailWebhook `toml:"papertrail"`
Particle *particle.ParticleWebhook Particle *particle.ParticleWebhook `toml:"particle"`
Log telegraf.Logger `toml:"-"`
srv *http.Server srv *http.Server
} }
@ -110,25 +111,24 @@ func (wb *Webhooks) Start(acc telegraf.Accumulator) error {
ln, err := net.Listen("tcp", fmt.Sprintf("%s", wb.ServiceAddress)) ln, err := net.Listen("tcp", fmt.Sprintf("%s", wb.ServiceAddress))
if err != nil { if err != nil {
log.Fatalf("E! Error starting server: %v", err) return fmt.Errorf("error starting server: %v", err)
return err
} }
go func() { go func() {
if err := wb.srv.Serve(ln); err != nil { if err := wb.srv.Serve(ln); err != nil {
if err != http.ErrServerClosed { if err != http.ErrServerClosed {
acc.AddError(fmt.Errorf("E! Error listening: %v", err)) acc.AddError(fmt.Errorf("error listening: %v", err))
} }
} }
}() }()
log.Printf("I! Started the webhooks service on %s\n", wb.ServiceAddress) wb.Log.Infof("Started the webhooks service on %s", wb.ServiceAddress)
return nil return nil
} }
func (rb *Webhooks) Stop() { func (wb *Webhooks) Stop() {
rb.srv.Close() wb.srv.Close()
log.Println("I! Stopping the Webhooks service") wb.Log.Infof("Stopping the Webhooks service")
} }
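
The webhooks hunks also swap the global log package for Telegraf's injected per-plugin logger: the framework fills the field tagged toml:"-" and the plugin logs through it, so the hand-written "E!"/"I!" level prefixes go away. A rough sketch of the pattern, with demoPlugin as a made-up plugin type:

package example

import "github.com/influxdata/telegraf"

// demoPlugin only illustrates the shape of the Log field and its use.
type demoPlugin struct {
	ServiceAddress string          `toml:"service_address"`
	Log            telegraf.Logger `toml:"-"`
}

func (p *demoPlugin) start() error {
	p.Log.Infof("Started the service on %s", p.ServiceAddress)
	return nil
}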

View File

@ -1,4 +1,4 @@
// Code generated by mockery v1.0.0 // Code generated by mockery v1.0.0. DO NOT EDIT.
package mocks package mocks
import appinsights "github.com/Microsoft/ApplicationInsights-Go/appinsights" import appinsights "github.com/Microsoft/ApplicationInsights-Go/appinsights"

View File

@ -1,4 +1,4 @@
// Code generated by mockery v1.0.0 // Code generated by mockery v1.0.0. DO NOT EDIT.
package mocks package mocks
import appinsights "github.com/Microsoft/ApplicationInsights-Go/appinsights" import appinsights "github.com/Microsoft/ApplicationInsights-Go/appinsights"

View File

@ -1,7 +1,6 @@
package cloudwatch package cloudwatch
import ( import (
"log"
"math" "math"
"sort" "sort"
"strings" "strings"
@ -30,6 +29,8 @@ type CloudWatch struct {
svc *cloudwatch.CloudWatch svc *cloudwatch.CloudWatch
WriteStatistics bool `toml:"write_statistics"` WriteStatistics bool `toml:"write_statistics"`
Log telegraf.Logger `toml:"-"`
} }
type statisticType int type statisticType int
@ -253,7 +254,7 @@ func (c *CloudWatch) WriteToCloudWatch(datums []*cloudwatch.MetricDatum) error {
_, err := c.svc.PutMetricData(params) _, err := c.svc.PutMetricData(params)
if err != nil { if err != nil {
log.Printf("E! CloudWatch: Unable to write to CloudWatch : %+v \n", err.Error()) c.Log.Errorf("Unable to write to CloudWatch : %+v", err.Error())
} }
return err return err
@ -265,7 +266,7 @@ func PartitionDatums(size int, datums []*cloudwatch.MetricDatum) [][]*cloudwatch
numberOfPartitions := len(datums) / size numberOfPartitions := len(datums) / size
if len(datums)%size != 0 { if len(datums)%size != 0 {
numberOfPartitions += 1 numberOfPartitions++
} }
partitions := make([][]*cloudwatch.MetricDatum, numberOfPartitions) partitions := make([][]*cloudwatch.MetricDatum, numberOfPartitions)

View File

@ -31,7 +31,7 @@ func TestBuildDimensions(t *testing.T) {
i := 0 i := 0
for k := range testPoint.Tags() { for k := range testPoint.Tags() {
tagKeys[i] = k tagKeys[i] = k
i += 1 i++
} }
sort.Strings(tagKeys) sort.Strings(tagKeys)
@ -151,7 +151,6 @@ func TestBuildMetricDatums_SkipEmptyTags(t *testing.T) {
} }
func TestPartitionDatums(t *testing.T) { func TestPartitionDatums(t *testing.T) {
assert := assert.New(t) assert := assert.New(t)
testDatum := cloudwatch.MetricDatum{ testDatum := cloudwatch.MetricDatum{

View File

@ -47,7 +47,7 @@ type Metric struct {
type Point [2]float64 type Point [2]float64
const datadog_api = "https://app.datadoghq.com/api/v1/series" const datadogApi = "https://app.datadoghq.com/api/v1/series"
func (d *Datadog) Connect() error { func (d *Datadog) Connect() error {
if d.Apikey == "" { if d.Apikey == "" {
@ -166,7 +166,7 @@ func buildTags(tagList []*telegraf.Tag) []string {
index := 0 index := 0
for _, tag := range tagList { for _, tag := range tagList {
tags[index] = fmt.Sprintf("%s:%s", tag.Key, tag.Value) tags[index] = fmt.Sprintf("%s:%s", tag.Key, tag.Value)
index += 1 index++
} }
return tags return tags
} }
@ -208,7 +208,7 @@ func (d *Datadog) Close() error {
func init() { func init() {
outputs.Add("datadog", func() telegraf.Output { outputs.Add("datadog", func() telegraf.Output {
return &Datadog{ return &Datadog{
URL: datadog_api, URL: datadogApi,
} }
}) })
} }
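
datadog_api becoming datadogApi follows Go's MixedCaps naming: constants are ordinary camel-case identifiers, not underscore or ALL_CAPS names. A small sketch; the identifier below is illustrative only:

package example

// seriesURL stands in for the renamed constant; unexported constants read the
// same as any other Go identifier.
const seriesURL = "https://app.datadoghq.com/api/v1/series"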

View File

@ -196,7 +196,7 @@ func (d *Dynatrace) Write(metrics []telegraf.Metric) error {
// write metric id,tags and value // write metric id,tags and value
switch metric.Type() { switch metric.Type() {
case telegraf.Counter: case telegraf.Counter:
var delta float64 = 0 var delta float64
// Check if LastValue exists // Check if LastValue exists
if lastvalue, ok := d.State[metricID+tagb.String()]; ok { if lastvalue, ok := d.State[metricID+tagb.String()]; ok {
@ -236,7 +236,7 @@ func (d *Dynatrace) send(msg []byte) error {
req, err := http.NewRequest("POST", d.URL, bytes.NewBuffer(msg)) req, err := http.NewRequest("POST", d.URL, bytes.NewBuffer(msg))
if err != nil { if err != nil {
d.Log.Errorf("Dynatrace error: %s", err.Error()) d.Log.Errorf("Dynatrace error: %s", err.Error())
return fmt.Errorf("Dynatrace error while creating HTTP request:, %s", err.Error()) return fmt.Errorf("error while creating HTTP request:, %s", err.Error())
} }
req.Header.Add("Content-Type", "text/plain; charset=UTF-8") req.Header.Add("Content-Type", "text/plain; charset=UTF-8")
@ -250,7 +250,7 @@ func (d *Dynatrace) send(msg []byte) error {
if err != nil { if err != nil {
d.Log.Errorf("Dynatrace error: %s", err.Error()) d.Log.Errorf("Dynatrace error: %s", err.Error())
fmt.Println(req) fmt.Println(req)
return fmt.Errorf("Dynatrace error while sending HTTP request:, %s", err.Error()) return fmt.Errorf("error while sending HTTP request:, %s", err.Error())
} }
defer resp.Body.Close() defer resp.Body.Close()
@ -263,7 +263,7 @@ func (d *Dynatrace) send(msg []byte) error {
bodyString := string(bodyBytes) bodyString := string(bodyBytes)
d.Log.Debugf("Dynatrace returned: %s", bodyString) d.Log.Debugf("Dynatrace returned: %s", bodyString)
} else { } else {
return fmt.Errorf("Dynatrace request failed with response code:, %d", resp.StatusCode) return fmt.Errorf("request failed with response code:, %d", resp.StatusCode)
} }
return nil return nil

View File

@ -102,7 +102,7 @@ func (f *File) Description() string {
} }
func (f *File) Write(metrics []telegraf.Metric) error { func (f *File) Write(metrics []telegraf.Metric) error {
var writeErr error = nil var writeErr error
if f.UseBatchFormat { if f.UseBatchFormat {
octets, err := f.serializer.SerializeBatch(metrics) octets, err := f.serializer.SerializeBatch(metrics)
@ -123,7 +123,7 @@ func (f *File) Write(metrics []telegraf.Metric) error {
_, err = f.writer.Write(b) _, err = f.writer.Write(b)
if err != nil { if err != nil {
writeErr = fmt.Errorf("E! [outputs.file] failed to write message: %v", err) writeErr = fmt.Errorf("failed to write message: %v", err)
} }
} }
} }
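
var writeErr error = nil here (and var delta float64 = 0 in the dynatrace hunk) trip revive's var-declaration rule: Go already zero-values declared variables, so the explicit initializer is noise. A small sketch, with writeAll as a hypothetical function:

package example

import "fmt"

// writeAll keeps the last write error, declared without the redundant "= nil".
func writeAll(lines []string) error {
	var writeErr error
	for _, l := range lines {
		if _, err := fmt.Println(l); err != nil {
			writeErr = err
		}
	}
	return writeErr
}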

View File

@ -171,11 +171,7 @@ func (h *HTTP) Write(metrics []telegraf.Metric) error {
return err return err
} }
if err := h.write(reqBody); err != nil { return h.write(reqBody)
return err
}
return nil
} }
func (h *HTTP) write(reqBody []byte) error { func (h *HTTP) write(reqBody []byte) error {

View File

@ -38,7 +38,6 @@ func (e APIError) Error() string {
const ( const (
defaultRequestTimeout = time.Second * 5 defaultRequestTimeout = time.Second * 5
defaultMaxWait = 60 // seconds defaultMaxWait = 60 // seconds
defaultDatabase = "telegraf"
) )
type HTTPConfig struct { type HTTPConfig struct {
@ -171,7 +170,7 @@ func (g genericRespError) Error() string {
func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error { func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error {
if c.retryTime.After(time.Now()) { if c.retryTime.After(time.Now()) {
return errors.New("Retry time has not elapsed") return errors.New("retry time has not elapsed")
} }
batches := make(map[string][]telegraf.Metric) batches := make(map[string][]telegraf.Metric)
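
The lowercased message above ("retry time has not elapsed") follows the usual Go convention that error strings start lowercase and omit trailing punctuation, because they get wrapped into longer messages. Sketch:

package example

import (
	"errors"
	"fmt"
)

var errRetryNotElapsed = errors.New("retry time has not elapsed")

// wrap shows why the convention matters: the string reads naturally when it
// is embedded in a larger message.
func wrap(err error) error {
	return fmt.Errorf("writing batch: %w", err)
}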

View File

@ -4,7 +4,6 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log"
"math/rand" "math/rand"
"net/url" "net/url"
"time" "time"
@ -96,12 +95,12 @@ type InfluxDB struct {
UintSupport bool `toml:"influx_uint_support"` UintSupport bool `toml:"influx_uint_support"`
tls.ClientConfig tls.ClientConfig
Log telegraf.Logger `toml:"-"`
clients []Client clients []Client
} }
func (i *InfluxDB) Connect() error { func (i *InfluxDB) Connect() error {
ctx := context.Background()
if len(i.URLs) == 0 { if len(i.URLs) == 0 {
i.URLs = append(i.URLs, defaultURL) i.URLs = append(i.URLs, defaultURL)
} }
@ -122,7 +121,7 @@ func (i *InfluxDB) Connect() error {
switch parts.Scheme { switch parts.Scheme {
case "http", "https", "unix": case "http", "https", "unix":
c, err := i.getHTTPClient(ctx, parts, proxy) c, err := i.getHTTPClient(parts, proxy)
if err != nil { if err != nil {
return err return err
} }
@ -165,13 +164,13 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
return nil return nil
} }
log.Printf("E! [outputs.influxdb_v2] when writing to [%s]: %v", client.URL(), err) i.Log.Errorf("When writing to [%s]: %v", client.URL(), err)
} }
return err return err
} }
func (i *InfluxDB) getHTTPClient(ctx context.Context, url *url.URL, proxy *url.URL) (Client, error) { func (i *InfluxDB) getHTTPClient(url *url.URL, proxy *url.URL) (Client, error) {
tlsConfig, err := i.ClientConfig.TLSConfig() tlsConfig, err := i.ClientConfig.TLSConfig()
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -165,11 +165,7 @@ func (o *OpenTSDB) WriteHttp(metrics []telegraf.Metric, u *url.URL) error {
} }
} }
if err := http.flush(); err != nil { return http.flush()
return err
}
return nil
} }
func (o *OpenTSDB) WriteTelnet(metrics []telegraf.Metric, u *url.URL) error { func (o *OpenTSDB) WriteTelnet(metrics []telegraf.Metric, u *url.URL) error {
@ -235,9 +231,9 @@ func buildValue(v interface{}) (string, error) {
var retv string var retv string
switch p := v.(type) { switch p := v.(type) {
case int64: case int64:
retv = IntToString(int64(p)) retv = IntToString(p)
case uint64: case uint64:
retv = UIntToString(uint64(p)) retv = UIntToString(p)
case float64: case float64:
retv = FloatToString(float64(p)) retv = FloatToString(float64(p))
default: default:
@ -246,16 +242,16 @@ func buildValue(v interface{}) (string, error) {
return retv, nil return retv, nil
} }
func IntToString(input_num int64) string { func IntToString(inputNum int64) string {
return strconv.FormatInt(input_num, 10) return strconv.FormatInt(inputNum, 10)
} }
func UIntToString(input_num uint64) string { func UIntToString(inputNum uint64) string {
return strconv.FormatUint(input_num, 10) return strconv.FormatUint(inputNum, 10)
} }
func FloatToString(input_num float64) string { func FloatToString(inputNum float64) string {
return strconv.FormatFloat(input_num, 'f', 6, 64) return strconv.FormatFloat(inputNum, 'f', 6, 64)
} }
func (o *OpenTSDB) SampleConfig() string { func (o *OpenTSDB) SampleConfig() string {

View File

@ -2,7 +2,6 @@ package riemann_legacy
import ( import (
"fmt" "fmt"
"log"
"os" "os"
"sort" "sort"
"strings" "strings"
@ -12,12 +11,13 @@ import (
"github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/outputs"
) )
const deprecationMsg = "E! Error: this Riemann output plugin will be deprecated in a future release, see https://github.com/influxdata/telegraf/issues/1878 for more details & discussion." const deprecationMsg = "Error: this Riemann output plugin will be deprecated in a future release, see https://github.com/influxdata/telegraf/issues/1878 for more details & discussion."
type Riemann struct { type Riemann struct {
URL string URL string `toml:"url"`
Transport string Transport string `toml:"transport"`
Separator string Separator string `toml:"separator"`
Log telegraf.Logger `toml:"-"`
client *raidman.Client client *raidman.Client
} }
@ -32,7 +32,7 @@ var sampleConfig = `
` `
func (r *Riemann) Connect() error { func (r *Riemann) Connect() error {
log.Printf(deprecationMsg) r.Log.Error(deprecationMsg)
c, err := raidman.Dial(r.Transport, r.URL) c, err := raidman.Dial(r.Transport, r.URL)
if err != nil { if err != nil {
@ -62,7 +62,7 @@ func (r *Riemann) Description() string {
} }
func (r *Riemann) Write(metrics []telegraf.Metric) error { func (r *Riemann) Write(metrics []telegraf.Metric) error {
log.Printf(deprecationMsg) r.Log.Error(deprecationMsg)
if len(metrics) == 0 { if len(metrics) == 0 {
return nil return nil
} }
@ -140,7 +140,7 @@ func serviceName(s string, n string, t map[string]string, f string) string {
tagStrings = append(tagStrings, t[tagName]) tagStrings = append(tagStrings, t[tagName])
} }
} }
var tagString string = strings.Join(tagStrings, s) var tagString = strings.Join(tagStrings, s)
if tagString != "" { if tagString != "" {
serviceStrings = append(serviceStrings, tagString) serviceStrings = append(serviceStrings, tagString)
} }

View File

@ -19,11 +19,7 @@ type Config struct {
// Validate validates the config's templates and tags. // Validate validates the config's templates and tags.
func (c *Config) Validate() error { func (c *Config) Validate() error {
if err := c.validateTemplates(); err != nil { return c.validateTemplates()
return err
}
return nil
} }
func (c *Config) validateTemplates() error { func (c *Config) validateTemplates() error {

View File

@ -82,8 +82,8 @@ func NewSeriesParser(handler *MetricHandler) *Parser {
} }
} }
func (h *Parser) SetTimeFunc(f TimeFunc) { func (p *Parser) SetTimeFunc(f TimeFunc) {
h.handler.SetTimeFunc(f) p.handler.SetTimeFunc(f)
} }
func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) {
@ -178,18 +178,18 @@ func NewStreamParser(r io.Reader) *StreamParser {
// SetTimeFunc changes the function used to determine the time of metrics // SetTimeFunc changes the function used to determine the time of metrics
// without a timestamp. The default TimeFunc is time.Now. Useful mostly for // without a timestamp. The default TimeFunc is time.Now. Useful mostly for
// testing, or perhaps if you want all metrics to have the same timestamp. // testing, or perhaps if you want all metrics to have the same timestamp.
func (h *StreamParser) SetTimeFunc(f TimeFunc) { func (sp *StreamParser) SetTimeFunc(f TimeFunc) {
h.handler.SetTimeFunc(f) sp.handler.SetTimeFunc(f)
} }
func (h *StreamParser) SetTimePrecision(u time.Duration) { func (sp *StreamParser) SetTimePrecision(u time.Duration) {
h.handler.SetTimePrecision(u) sp.handler.SetTimePrecision(u)
} }
// Next parses the next item from the stream. You can repeat calls to this // Next parses the next item from the stream. You can repeat calls to this
// function if it returns ParseError to get the next metric or error. // function if it returns ParseError to get the next metric or error.
func (p *StreamParser) Next() (telegraf.Metric, error) { func (sp *StreamParser) Next() (telegraf.Metric, error) {
err := p.machine.Next() err := sp.machine.Next()
if err == EOF { if err == EOF {
return nil, err return nil, err
} }
@ -200,16 +200,16 @@ func (p *StreamParser) Next() (telegraf.Metric, error) {
if err != nil { if err != nil {
return nil, &ParseError{ return nil, &ParseError{
Offset: p.machine.Position(), Offset: sp.machine.Position(),
LineOffset: p.machine.LineOffset(), LineOffset: sp.machine.LineOffset(),
LineNumber: p.machine.LineNumber(), LineNumber: sp.machine.LineNumber(),
Column: p.machine.Column(), Column: sp.machine.Column(),
msg: err.Error(), msg: err.Error(),
buf: p.machine.LineText(), buf: sp.machine.LineText(),
} }
} }
metric, err := p.handler.Metric() metric, err := sp.handler.Metric()
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -218,27 +218,27 @@ func (p *StreamParser) Next() (telegraf.Metric, error) {
} }
// Position returns the current byte offset into the data. // Position returns the current byte offset into the data.
func (p *StreamParser) Position() int { func (sp *StreamParser) Position() int {
return p.machine.Position() return sp.machine.Position()
} }
// LineOffset returns the byte offset of the current line. // LineOffset returns the byte offset of the current line.
func (p *StreamParser) LineOffset() int { func (sp *StreamParser) LineOffset() int {
return p.machine.LineOffset() return sp.machine.LineOffset()
} }
// LineNumber returns the current line number. Lines are counted based on the // LineNumber returns the current line number. Lines are counted based on the
// regular expression `\r?\n`. // regular expression `\r?\n`.
func (p *StreamParser) LineNumber() int { func (sp *StreamParser) LineNumber() int {
return p.machine.LineNumber() return sp.machine.LineNumber()
} }
// Column returns the current column. // Column returns the current column.
func (p *StreamParser) Column() int { func (sp *StreamParser) Column() int {
return p.machine.Column() return sp.machine.Column()
} }
// LineText returns the text of the current line that has been parsed so far. // LineText returns the text of the current line that has been parsed so far.
func (p *StreamParser) LineText() string { func (sp *StreamParser) LineText() string {
return p.machine.LineText() return sp.machine.LineText()
} }

View File

@ -13,7 +13,7 @@ import (
"github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/metric"
) )
const MAX_BUFFER_SIZE = 2 const MaxBufferSize = 2
type Point struct { type Point struct {
Name string Name string
@ -170,9 +170,9 @@ func (p *PointParser) convertPointToTelegrafMetric(points []Point) ([]telegraf.M
func (p *PointParser) scan() (Token, string) { func (p *PointParser) scan() (Token, string) {
// If we have a token on the buffer, then return it. // If we have a token on the buffer, then return it.
if p.buf.n != 0 { if p.buf.n != 0 {
idx := p.buf.n % MAX_BUFFER_SIZE idx := p.buf.n % MaxBufferSize
tok, lit := p.buf.tok[idx], p.buf.lit[idx] tok, lit := p.buf.tok[idx], p.buf.lit[idx]
p.buf.n -= 1 p.buf.n--
return tok, lit return tok, lit
} }
@ -188,8 +188,8 @@ func (p *PointParser) scan() (Token, string) {
func (p *PointParser) buffer(tok Token, lit string) { func (p *PointParser) buffer(tok Token, lit string) {
// create the buffer if it is empty // create the buffer if it is empty
if len(p.buf.tok) == 0 { if len(p.buf.tok) == 0 {
p.buf.tok = make([]Token, MAX_BUFFER_SIZE) p.buf.tok = make([]Token, MaxBufferSize)
p.buf.lit = make([]string, MAX_BUFFER_SIZE) p.buf.lit = make([]string, MaxBufferSize)
} }
// for now assume a simple circular buffer of length two // for now assume a simple circular buffer of length two
@ -203,9 +203,9 @@ func (p *PointParser) unscan() {
} }
func (p *PointParser) unscanTokens(n int) { func (p *PointParser) unscanTokens(n int) {
if n > MAX_BUFFER_SIZE { if n > MaxBufferSize {
// just log for now // just log for now
log.Printf("cannot unscan more than %d tokens", MAX_BUFFER_SIZE) log.Printf("cannot unscan more than %d tokens", MaxBufferSize)
} }
p.buf.n += n p.buf.n += n
} }

View File

@ -46,11 +46,11 @@ type PortName struct {
Log telegraf.Logger `toml:"-"` Log telegraf.Logger `toml:"-"`
} }
func (d *PortName) SampleConfig() string { func (pn *PortName) SampleConfig() string {
return sampleConfig return sampleConfig
} }
func (d *PortName) Description() string { func (pn *PortName) Description() string {
return "Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file" return "Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file"
} }
@ -106,22 +106,22 @@ func readServices(r io.Reader) sMap {
return services return services
} }
func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric { func (pn *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
for _, m := range metrics { for _, m := range metrics {
var portProto string var portProto string
var fromField bool var fromField bool
if len(d.SourceTag) > 0 { if len(pn.SourceTag) > 0 {
if tag, ok := m.GetTag(d.SourceTag); ok { if tag, ok := m.GetTag(pn.SourceTag); ok {
portProto = string([]byte(tag)) portProto = tag
} }
} }
if len(d.SourceField) > 0 { if len(pn.SourceField) > 0 {
if field, ok := m.GetField(d.SourceField); ok { if field, ok := m.GetField(pn.SourceField); ok {
switch v := field.(type) { switch v := field.(type) {
default: default:
d.Log.Errorf("Unexpected type %t in source field; must be string or int", v) pn.Log.Errorf("Unexpected type %t in source field; must be string or int", v)
continue continue
case int64: case int64:
portProto = strconv.FormatInt(v, 10) portProto = strconv.FormatInt(v, 10)
@ -143,7 +143,7 @@ func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
if l == 0 { if l == 0 {
// Empty tag // Empty tag
d.Log.Errorf("empty port tag: %v", d.SourceTag) pn.Log.Errorf("empty port tag: %v", pn.SourceTag)
continue continue
} }
@ -154,25 +154,25 @@ func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
port, err = strconv.Atoi(val) port, err = strconv.Atoi(val)
if err != nil { if err != nil {
// Can't convert port to string // Can't convert port to string
d.Log.Errorf("error converting port to integer: %v", val) pn.Log.Errorf("error converting port to integer: %v", val)
continue continue
} }
} }
proto := d.DefaultProtocol proto := pn.DefaultProtocol
if l > 1 && len(portProtoSlice[1]) > 0 { if l > 1 && len(portProtoSlice[1]) > 0 {
proto = portProtoSlice[1] proto = portProtoSlice[1]
} }
if len(d.ProtocolTag) > 0 { if len(pn.ProtocolTag) > 0 {
if tag, ok := m.GetTag(d.ProtocolTag); ok { if tag, ok := m.GetTag(pn.ProtocolTag); ok {
proto = tag proto = tag
} }
} }
if len(d.ProtocolField) > 0 { if len(pn.ProtocolField) > 0 {
if field, ok := m.GetField(d.ProtocolField); ok { if field, ok := m.GetField(pn.ProtocolField); ok {
switch v := field.(type) { switch v := field.(type) {
default: default:
d.Log.Errorf("Unexpected type %t in protocol field; must be string", v) pn.Log.Errorf("Unexpected type %t in protocol field; must be string", v)
continue continue
case string: case string:
proto = v proto = v
@ -190,7 +190,7 @@ func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
// normally has entries for both, so our map does too. If // normally has entries for both, so our map does too. If
// not, it's very likely the source tag or the services // not, it's very likely the source tag or the services
// file doesn't make sense. // file doesn't make sense.
d.Log.Errorf("protocol not found in services map: %v", proto) pn.Log.Errorf("protocol not found in services map: %v", proto)
continue continue
} }
@ -200,21 +200,21 @@ func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
// //
// Not all ports are named so this isn't an error, but // Not all ports are named so this isn't an error, but
// it's helpful to know when debugging. // it's helpful to know when debugging.
d.Log.Debugf("port not found in services map: %v", port) pn.Log.Debugf("port not found in services map: %v", port)
continue continue
} }
if fromField { if fromField {
m.AddField(d.Dest, service) m.AddField(pn.Dest, service)
} else { } else {
m.AddTag(d.Dest, service) m.AddTag(pn.Dest, service)
} }
} }
return metrics return metrics
} }
func (h *PortName) Init() error { func (pn *PortName) Init() error {
services = make(sMap) services = make(sMap)
readServicesFile() readServicesFile()
return nil return nil

View File

@ -276,8 +276,7 @@ func (t *TopK) push() []telegraf.Metric {
} }
// The return value that will hold the returned metrics // The return value that will hold the returned metrics
var ret []telegraf.Metric = make([]telegraf.Metric, 0, 0) var ret = make([]telegraf.Metric, 0, 0)
// Get the top K metrics for each field and add them to the return value // Get the top K metrics for each field and add them to the return value
addedKeys := make(map[string]bool) addedKeys := make(map[string]bool)
for _, field := range t.Fields { for _, field := range t.Fields {
@ -317,11 +316,11 @@ func (t *TopK) push() []telegraf.Metric {
result := make([]telegraf.Metric, 0, len(ret)) result := make([]telegraf.Metric, 0, len(ret))
for _, m := range ret { for _, m := range ret {
copy, err := metric.New(m.Name(), m.Tags(), m.Fields(), m.Time(), m.Type()) newMetric, err := metric.New(m.Name(), m.Tags(), m.Fields(), m.Time(), m.Type())
if err != nil { if err != nil {
continue continue
} }
result = append(result, copy) result = append(result, newMetric)
} }
return result return result
@ -412,7 +411,7 @@ func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metr
continue continue
} }
mean[field] += val mean[field] += val
meanCounters[field] += 1 meanCounters[field]++
} }
} }
// Divide by the number of recorded measurements collected for every field // Divide by the number of recorded measurements collected for every field
@ -423,7 +422,7 @@ func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metr
continue continue
} }
mean[k] = mean[k] / meanCounters[k] mean[k] = mean[k] / meanCounters[k]
noMeasurementsFound = noMeasurementsFound && false noMeasurementsFound = false
} }
if noMeasurementsFound { if noMeasurementsFound {
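
Renaming copy to newMetric in the topk hunk above avoids shadowing Go's built-in copy inside the loop. A minimal illustration of why that matters, with duplicate as a made-up helper:

package example

// duplicate can still call the builtin copy because no local variable hides it.
func duplicate(src []int) []int {
	dst := make([]int, len(src))
	copy(dst, src)
	return dst
}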

View File

@ -237,7 +237,7 @@ func (s *Serializer) writeMetric(w io.Writer, m telegraf.Metric) error {
// Additional length needed for field separator `,` // Additional length needed for field separator `,`
if !firstField { if !firstField {
bytesNeeded += 1 bytesNeeded++
} }
if s.maxLineBytes > 0 && bytesNeeded > s.maxLineBytes { if s.maxLineBytes > 0 && bytesNeeded > s.maxLineBytes {

View File

@ -50,7 +50,7 @@ func (r *reader) Read(p []byte) (int, error) {
for _, metric := range r.metrics[r.offset:] { for _, metric := range r.metrics[r.offset:] {
_, err := r.serializer.Write(r.buf, metric) _, err := r.serializer.Write(r.buf, metric)
r.offset += 1 r.offset++
if err != nil { if err != nil {
r.buf.Reset() r.buf.Reset()
if _, ok := err.(*MetricError); ok { if _, ok := err.(*MetricError); ok {

View File

@ -187,7 +187,7 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
metrickey, promts = getPromTS(metricName, labels, value, metric.Time()) metrickey, promts = getPromTS(metricName, labels, value, metric.Time())
} }
default: default:
return nil, fmt.Errorf("Unknown type %v", metric.Type()) return nil, fmt.Errorf("unknown type %v", metric.Type())
} }
// A batch of metrics can contain multiple values for a single // A batch of metrics can contain multiple values for a single
@ -205,7 +205,7 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
} }
var promTS = make([]*prompb.TimeSeries, len(entries)) var promTS = make([]*prompb.TimeSeries, len(entries))
var i int64 = 0 var i int64
for _, promts := range entries { for _, promts := range entries {
promTS[i] = promts promTS[i] = promts
i++ i++

View File

@ -1,4 +1,4 @@
// selfstat is a package for tracking and collecting internal statistics // Package selfstat is a package for tracking and collecting internal statistics
// about telegraf. Metrics can be registered using this package, and then // about telegraf. Metrics can be registered using this package, and then
// incremented or set within your code. If the inputs.internal plugin is enabled, // incremented or set within your code. If the inputs.internal plugin is enabled,
// then all registered stats will be collected as they would by any other input // then all registered stats will be collected as they would by any other input