Revive fixes - part 2 (#8835)
* Revive fixes for the following set of rules: [rule.if-return] [rule.increment-decrement] [rule.var-declaration] [rule.package-comments] [rule.receiver-naming] [rule.unexported-return] (a short sketch of each rule follows below)
This commit is contained in:
parent
5606a9531a
commit
d9736d543f
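
Each of these six rules maps to a small, mechanical rewrite. The sketch below is illustrative only — a hypothetical `counter` package, not code from this repository — showing the before/after shape each rule asks for; the diff hunks that follow apply the same transformations across the Telegraf tree.

// Package counter is a hypothetical example used only to illustrate the six
// revive rules fixed in this commit (rule.package-comments itself asks every
// package to open with a comment of this form).
package counter

import "errors"

// errLimit is returned when a counter would exceed its limit.
var errLimit = errors.New("limit reached")

// rule.var-declaration: "var initialTotal int = 0" would be flagged as
// redundant; the zero value makes the plain declaration sufficient.
var initialTotal int

// counter accumulates a total up to a limit.
type counter struct {
	total int
	limit int
}

// newCounter stays unexported because it returns the unexported *counter;
// an exported NewCounter would trip rule.unexported-return (the same reason
// this commit renames NewConnector to newConnector).
func newCounter(limit int) *counter {
	return &counter{limit: limit, total: initialTotal}
}

// incr adds one to the total and checks the limit.
// rule.receiver-naming: use one short, consistent receiver name per type;
// the commit applies this as renames such as gsw→gs, ro→r, and _→c.
func (c *counter) incr() error {
	// rule.increment-decrement: prefer c.total++ over c.total += 1.
	c.total++

	// rule.if-return: "if err := f(); err != nil { return err }; return nil"
	// collapses into returning the call directly.
	return c.check()
}

func (c *counter) check() error {
	if c.total > c.limit {
		return errLimit
	}
	return nil
}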
@@ -10,8 +10,6 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-var format = "2006-01-02T15:04:05.999Z07:00"
-
 func TestAlignedTicker(t *testing.T) {
 	interval := 10 * time.Second
 	jitter := 0 * time.Second
@@ -249,7 +247,7 @@ func simulatedDist(ticker Ticker, clock *clock.Mock) Distribution {
 	for !clock.Now().After(until) {
 		select {
 		case tm := <-ticker.Elapsed():
-			dist.Buckets[tm.Second()] += 1
+			dist.Buckets[tm.Second()]++
 			dist.Count++
 			dist.Waittime += tm.Sub(last).Seconds()
 			last = tm
@@ -123,10 +123,7 @@ func (w *FileWriter) openCurrent() (err error) {
 		w.bytesWritten = fileInfo.Size()
 	}
 
-	if err = w.rotateIfNeeded(); err != nil {
-		return err
-	}
-	return nil
+	return w.rotateIfNeeded()
 }
 
 func (w *FileWriter) rotateIfNeeded() error {
@@ -153,11 +150,7 @@ func (w *FileWriter) rotate() (err error) {
 		return err
 	}
 
-	if err = w.purgeArchivesIfNeeded(); err != nil {
-		return err
-	}
-
-	return nil
+	return w.purgeArchivesIfNeeded()
 }
 
 func (w *FileWriter) purgeArchivesIfNeeded() (err error) {
@@ -15,27 +15,27 @@ type GosnmpWrapper struct {
 }
 
 // Host returns the value of GoSNMP.Target.
-func (gsw GosnmpWrapper) Host() string {
-	return gsw.Target
+func (gs GosnmpWrapper) Host() string {
+	return gs.Target
 }
 
 // Walk wraps GoSNMP.Walk() or GoSNMP.BulkWalk(), depending on whether the
 // connection is using SNMPv1 or newer.
 // Also, if any error is encountered, it will just once reconnect and try again.
-func (gsw GosnmpWrapper) Walk(oid string, fn gosnmp.WalkFunc) error {
+func (gs GosnmpWrapper) Walk(oid string, fn gosnmp.WalkFunc) error {
 	var err error
 	// On error, retry once.
 	// Unfortunately we can't distinguish between an error returned by gosnmp, and one returned by the walk function.
 	for i := 0; i < 2; i++ {
-		if gsw.Version == gosnmp.Version1 {
-			err = gsw.GoSNMP.Walk(oid, fn)
+		if gs.Version == gosnmp.Version1 {
+			err = gs.GoSNMP.Walk(oid, fn)
 		} else {
-			err = gsw.GoSNMP.BulkWalk(oid, fn)
+			err = gs.GoSNMP.BulkWalk(oid, fn)
 		}
 		if err == nil {
 			return nil
 		}
-		if err := gsw.GoSNMP.Connect(); err != nil {
+		if err := gs.GoSNMP.Connect(); err != nil {
 			return fmt.Errorf("reconnecting: %w", err)
 		}
 	}
@@ -44,15 +44,15 @@ func (gsw GosnmpWrapper) Walk(oid string, fn gosnmp.WalkFunc) error {
 
 // Get wraps GoSNMP.GET().
 // If any error is encountered, it will just once reconnect and try again.
-func (gsw GosnmpWrapper) Get(oids []string) (*gosnmp.SnmpPacket, error) {
+func (gs GosnmpWrapper) Get(oids []string) (*gosnmp.SnmpPacket, error) {
 	var err error
 	var pkt *gosnmp.SnmpPacket
 	for i := 0; i < 2; i++ {
-		pkt, err = gsw.GoSNMP.Get(oids)
+		pkt, err = gs.GoSNMP.Get(oids)
 		if err == nil {
 			return pkt, nil
 		}
-		if err := gsw.GoSNMP.Connect(); err != nil {
+		if err := gs.GoSNMP.Connect(); err != nil {
 			return nil, fmt.Errorf("reconnecting: %w", err)
 		}
 	}
@@ -11,10 +11,10 @@ import (
 
 const (
 	// Default size of metrics batch size.
-	DEFAULT_METRIC_BATCH_SIZE = 1000
+	DefaultMetricBatchSize = 1000
 
 	// Default number of metrics kept. It should be a multiple of batch size.
-	DEFAULT_METRIC_BUFFER_LIMIT = 10000
+	DefaultMetricBufferLimit = 10000
 )
 
 // OutputConfig containing name and filter
@@ -78,13 +78,13 @@ func NewRunningOutput(
 		bufferLimit = config.MetricBufferLimit
 	}
 	if bufferLimit == 0 {
-		bufferLimit = DEFAULT_METRIC_BUFFER_LIMIT
+		bufferLimit = DefaultMetricBufferLimit
 	}
 	if config.MetricBatchSize > 0 {
 		batchSize = config.MetricBatchSize
 	}
 	if batchSize == 0 {
-		batchSize = DEFAULT_METRIC_BATCH_SIZE
+		batchSize = DefaultMetricBatchSize
 	}
 
 	ro := &RunningOutput{
@@ -114,8 +114,8 @@ func (r *RunningOutput) LogName() string {
 	return logName("outputs", r.Config.Name, r.Config.Alias)
 }
 
-func (ro *RunningOutput) metricFiltered(metric telegraf.Metric) {
-	ro.MetricsFiltered.Incr(1)
+func (r *RunningOutput) metricFiltered(metric telegraf.Metric) {
+	r.MetricsFiltered.Incr(1)
 	metric.Drop()
 }
 
@@ -133,45 +133,45 @@ func (r *RunningOutput) Init() error {
 // AddMetric adds a metric to the output.
 //
 // Takes ownership of metric
-func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
-	if ok := ro.Config.Filter.Select(metric); !ok {
-		ro.metricFiltered(metric)
+func (r *RunningOutput) AddMetric(metric telegraf.Metric) {
+	if ok := r.Config.Filter.Select(metric); !ok {
+		r.metricFiltered(metric)
 		return
 	}
 
-	ro.Config.Filter.Modify(metric)
+	r.Config.Filter.Modify(metric)
 	if len(metric.FieldList()) == 0 {
-		ro.metricFiltered(metric)
+		r.metricFiltered(metric)
 		return
 	}
 
-	if output, ok := ro.Output.(telegraf.AggregatingOutput); ok {
-		ro.aggMutex.Lock()
+	if output, ok := r.Output.(telegraf.AggregatingOutput); ok {
+		r.aggMutex.Lock()
 		output.Add(metric)
-		ro.aggMutex.Unlock()
+		r.aggMutex.Unlock()
 		return
 	}
 
-	if len(ro.Config.NameOverride) > 0 {
-		metric.SetName(ro.Config.NameOverride)
+	if len(r.Config.NameOverride) > 0 {
+		metric.SetName(r.Config.NameOverride)
 	}
 
-	if len(ro.Config.NamePrefix) > 0 {
-		metric.AddPrefix(ro.Config.NamePrefix)
+	if len(r.Config.NamePrefix) > 0 {
+		metric.AddPrefix(r.Config.NamePrefix)
 	}
 
-	if len(ro.Config.NameSuffix) > 0 {
-		metric.AddSuffix(ro.Config.NameSuffix)
+	if len(r.Config.NameSuffix) > 0 {
+		metric.AddSuffix(r.Config.NameSuffix)
 	}
 
-	dropped := ro.buffer.Add(metric)
-	atomic.AddInt64(&ro.droppedMetrics, int64(dropped))
+	dropped := r.buffer.Add(metric)
+	atomic.AddInt64(&r.droppedMetrics, int64(dropped))
 
-	count := atomic.AddInt64(&ro.newMetricsCount, 1)
-	if count == int64(ro.MetricBatchSize) {
-		atomic.StoreInt64(&ro.newMetricsCount, 0)
+	count := atomic.AddInt64(&r.newMetricsCount, 1)
+	if count == int64(r.MetricBatchSize) {
+		atomic.StoreInt64(&r.newMetricsCount, 0)
 		select {
-		case ro.BatchReady <- time.Now():
+		case r.BatchReady <- time.Now():
 		default:
 		}
 	}
@@ -179,50 +179,50 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
 
 // Write writes all metrics to the output, stopping when all have been sent on
 // or error.
-func (ro *RunningOutput) Write() error {
-	if output, ok := ro.Output.(telegraf.AggregatingOutput); ok {
-		ro.aggMutex.Lock()
+func (r *RunningOutput) Write() error {
+	if output, ok := r.Output.(telegraf.AggregatingOutput); ok {
+		r.aggMutex.Lock()
 		metrics := output.Push()
-		ro.buffer.Add(metrics...)
+		r.buffer.Add(metrics...)
 		output.Reset()
-		ro.aggMutex.Unlock()
+		r.aggMutex.Unlock()
 	}
 
-	atomic.StoreInt64(&ro.newMetricsCount, 0)
+	atomic.StoreInt64(&r.newMetricsCount, 0)
 
 	// Only process the metrics in the buffer now. Metrics added while we are
 	// writing will be sent on the next call.
-	nBuffer := ro.buffer.Len()
-	nBatches := nBuffer/ro.MetricBatchSize + 1
+	nBuffer := r.buffer.Len()
+	nBatches := nBuffer/r.MetricBatchSize + 1
 	for i := 0; i < nBatches; i++ {
-		batch := ro.buffer.Batch(ro.MetricBatchSize)
+		batch := r.buffer.Batch(r.MetricBatchSize)
 		if len(batch) == 0 {
 			break
 		}
 
-		err := ro.write(batch)
+		err := r.write(batch)
 		if err != nil {
-			ro.buffer.Reject(batch)
+			r.buffer.Reject(batch)
 			return err
 		}
-		ro.buffer.Accept(batch)
+		r.buffer.Accept(batch)
 	}
 	return nil
 }
 
 // WriteBatch writes a single batch of metrics to the output.
-func (ro *RunningOutput) WriteBatch() error {
-	batch := ro.buffer.Batch(ro.MetricBatchSize)
+func (r *RunningOutput) WriteBatch() error {
+	batch := r.buffer.Batch(r.MetricBatchSize)
 	if len(batch) == 0 {
 		return nil
 	}
 
-	err := ro.write(batch)
+	err := r.write(batch)
 	if err != nil {
-		ro.buffer.Reject(batch)
+		r.buffer.Reject(batch)
 		return err
 	}
-	ro.buffer.Accept(batch)
+	r.buffer.Accept(batch)
 
 	return nil
 }
@@ -52,8 +52,8 @@ func (rp *RunningProcessor) metricFiltered(metric telegraf.Metric) {
 	metric.Drop()
 }
 
-func (r *RunningProcessor) Init() error {
-	if p, ok := r.Processor.(telegraf.Initializer); ok {
+func (rp *RunningProcessor) Init() error {
+	if p, ok := rp.Processor.(telegraf.Initializer); ok {
 		err := p.Init()
 		if err != nil {
 			return err
@@ -62,39 +62,39 @@ func (r *RunningProcessor) Init() error {
 	return nil
 }
 
-func (r *RunningProcessor) Log() telegraf.Logger {
-	return r.log
+func (rp *RunningProcessor) Log() telegraf.Logger {
+	return rp.log
 }
 
-func (r *RunningProcessor) LogName() string {
-	return logName("processors", r.Config.Name, r.Config.Alias)
+func (rp *RunningProcessor) LogName() string {
+	return logName("processors", rp.Config.Name, rp.Config.Alias)
 }
 
-func (r *RunningProcessor) MakeMetric(metric telegraf.Metric) telegraf.Metric {
+func (rp *RunningProcessor) MakeMetric(metric telegraf.Metric) telegraf.Metric {
 	return metric
 }
 
-func (r *RunningProcessor) Start(acc telegraf.Accumulator) error {
-	return r.Processor.Start(acc)
+func (rp *RunningProcessor) Start(acc telegraf.Accumulator) error {
+	return rp.Processor.Start(acc)
 }
 
-func (r *RunningProcessor) Add(m telegraf.Metric, acc telegraf.Accumulator) error {
-	if ok := r.Config.Filter.Select(m); !ok {
+func (rp *RunningProcessor) Add(m telegraf.Metric, acc telegraf.Accumulator) error {
+	if ok := rp.Config.Filter.Select(m); !ok {
 		// pass downstream
 		acc.AddMetric(m)
 		return nil
 	}
 
-	r.Config.Filter.Modify(m)
+	rp.Config.Filter.Modify(m)
 	if len(m.FieldList()) == 0 {
 		// drop metric
-		r.metricFiltered(m)
+		rp.metricFiltered(m)
 		return nil
 	}
 
-	return r.Processor.Add(m, acc)
+	return rp.Processor.Add(m, acc)
 }
 
-func (r *RunningProcessor) Stop() {
-	r.Processor.Stop()
+func (rp *RunningProcessor) Stop() {
+	rp.Processor.Stop()
 }
@@ -86,9 +86,5 @@ func (k *Config) SetConfig(config *sarama.Config) error {
 		config.Net.TLS.Enable = true
 	}
 
-	if err := k.SetSASLConfig(config); err != nil {
-		return err
-	}
-
-	return nil
+	return k.SetSASLConfig(config)
 }
@@ -34,15 +34,15 @@ func (s *Shim) LoadConfig(filePath *string) error {
 	}
 	if conf.Input != nil {
 		if err = s.AddInput(conf.Input); err != nil {
-			return fmt.Errorf("Failed to add Input: %w", err)
+			return fmt.Errorf("failed to add Input: %w", err)
 		}
 	} else if conf.Processor != nil {
 		if err = s.AddStreamingProcessor(conf.Processor); err != nil {
-			return fmt.Errorf("Failed to add Processor: %w", err)
+			return fmt.Errorf("failed to add Processor: %w", err)
 		}
 	} else if conf.Output != nil {
 		if err = s.AddOutput(conf.Output); err != nil {
-			return fmt.Errorf("Failed to add Output: %w", err)
+			return fmt.Errorf("failed to add Output: %w", err)
 		}
 	}
 	return nil
@@ -83,7 +83,7 @@ func TestSelectNamepsacesIntegration(t *testing.T) {
 	count := 0
 	for _, p := range acc.Metrics {
 		if p.Measurement == "aerospike_namespace" {
-			count += 1
+			count++
 		}
 	}
 	assert.Equal(t, count, 1)
@@ -157,10 +157,8 @@ func (bond *Bond) gatherSlavePart(bondName string, rawFile string, acc telegraf.
 			acc.AddFields("bond_slave", fields, tags)
 		}
 	}
-	if err := scanner.Err(); err != nil {
-		return err
-	}
-	return nil
+
+	return scanner.Err()
 }
 
 // loadPath can be used to read path firstly from config
@@ -170,7 +170,7 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
 	}
 }
 
-func (j *Cassandra) SampleConfig() string {
+func (c *Cassandra) SampleConfig() string {
 	return `
 ## DEPRECATED: The cassandra plugin has been deprecated. Please use the
 ## jolokia2 plugin instead.
@@ -193,18 +193,18 @@ func (j *Cassandra) SampleConfig() string {
 `
 }
 
-func (j *Cassandra) Description() string {
+func (c *Cassandra) Description() string {
 	return "Read Cassandra metrics through Jolokia"
 }
 
-func (j *Cassandra) getAttr(requestUrl *url.URL) (map[string]interface{}, error) {
+func (c *Cassandra) getAttr(requestUrl *url.URL) (map[string]interface{}, error) {
 	// Create + send request
 	req, err := http.NewRequest("GET", requestUrl.String(), nil)
 	if err != nil {
 		return nil, err
 	}
 
-	resp, err := j.jClient.MakeRequest(req)
+	resp, err := c.jClient.MakeRequest(req)
 	if err != nil {
 		return nil, err
 	}
@@ -378,7 +378,7 @@ func (c *CloudWatch) fetchNamespaceMetrics() ([]*cloudwatch.Metric, error) {
 
 	var token *string
 	var params *cloudwatch.ListMetricsInput
-	var recentlyActive *string = nil
+	var recentlyActive *string
 
 	switch c.RecentlyActive {
 	case "PT3H":
@@ -597,11 +597,6 @@ func snakeCase(s string) string {
 	return s
 }
 
-type dimension struct {
-	name  string
-	value string
-}
-
 // ctod converts cloudwatch dimensions to regular dimensions.
 func ctod(cDimensions []*cloudwatch.Dimension) *map[string]string {
 	dimensions := map[string]string{}
@@ -28,7 +28,7 @@ func NewCPUStats(ps system.PS) *CPUStats {
 	}
 }
 
-func (_ *CPUStats) Description() string {
+func (c *CPUStats) Description() string {
 	return "Read metrics about cpu usage"
 }
 
@@ -43,12 +43,12 @@ var sampleConfig = `
   report_active = false
 `
 
-func (_ *CPUStats) SampleConfig() string {
+func (c *CPUStats) SampleConfig() string {
 	return sampleConfig
 }
 
-func (s *CPUStats) Gather(acc telegraf.Accumulator) error {
-	times, err := s.ps.CPUTimes(s.PerCPU, s.TotalCPU)
+func (c *CPUStats) Gather(acc telegraf.Accumulator) error {
+	times, err := c.ps.CPUTimes(c.PerCPU, c.TotalCPU)
 	if err != nil {
 		return fmt.Errorf("error getting CPU info: %s", err)
 	}
@@ -62,7 +62,7 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error {
 		total := totalCpuTime(cts)
 		active := activeCpuTime(cts)
 
-		if s.CollectCPUTime {
+		if c.CollectCPUTime {
 			// Add cpu time metrics
 			fieldsC := map[string]interface{}{
 				"time_user": cts.User,
@@ -76,19 +76,19 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error {
 				"time_guest":      cts.Guest,
 				"time_guest_nice": cts.GuestNice,
 			}
-			if s.ReportActive {
+			if c.ReportActive {
 				fieldsC["time_active"] = activeCpuTime(cts)
 			}
 			acc.AddCounter("cpu", fieldsC, tags, now)
 		}
 
 		// Add in percentage
-		if len(s.lastStats) == 0 {
+		if len(c.lastStats) == 0 {
 			// If it's the 1st gather, can't get CPU Usage stats yet
 			continue
 		}
 
-		lastCts, ok := s.lastStats[cts.CPU]
+		lastCts, ok := c.lastStats[cts.CPU]
 		if !ok {
 			continue
 		}
@@ -97,7 +97,7 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error {
 		totalDelta := total - lastTotal
 
 		if totalDelta < 0 {
-			err = fmt.Errorf("Error: current total CPU time is less than previous total CPU time")
+			err = fmt.Errorf("current total CPU time is less than previous total CPU time")
 			break
 		}
 
@@ -117,15 +117,15 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error {
 			"usage_guest":      100 * (cts.Guest - lastCts.Guest) / totalDelta,
 			"usage_guest_nice": 100 * (cts.GuestNice - lastCts.GuestNice) / totalDelta,
 		}
-		if s.ReportActive {
+		if c.ReportActive {
 			fieldsG["usage_active"] = 100 * (active - lastActive) / totalDelta
 		}
 		acc.AddGauge("cpu", fieldsG, tags, now)
 	}
 
-	s.lastStats = make(map[string]cpu.TimesStat)
+	c.lastStats = make(map[string]cpu.TimesStat)
 	for _, cts := range times {
-		s.lastStats[cts.CPU] = cts
+		c.lastStats[cts.CPU] = cts
 	}
 
 	return err
@@ -19,7 +19,7 @@ type DiskStats struct {
 	IgnoreFS []string `toml:"ignore_fs"`
 }
 
-func (_ *DiskStats) Description() string {
+func (ds *DiskStats) Description() string {
 	return "Read metrics about disk usage by mount point"
 }
 
@@ -32,17 +32,17 @@ var diskSampleConfig = `
   ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
 `
 
-func (_ *DiskStats) SampleConfig() string {
+func (ds *DiskStats) SampleConfig() string {
 	return diskSampleConfig
 }
 
-func (s *DiskStats) Gather(acc telegraf.Accumulator) error {
+func (ds *DiskStats) Gather(acc telegraf.Accumulator) error {
 	// Legacy support:
-	if len(s.Mountpoints) != 0 {
-		s.MountPoints = s.Mountpoints
+	if len(ds.Mountpoints) != 0 {
+		ds.MountPoints = ds.Mountpoints
 	}
 
-	disks, partitions, err := s.ps.DiskUsage(s.MountPoints, s.IgnoreFS)
+	disks, partitions, err := ds.ps.DiskUsage(ds.MountPoints, ds.IgnoreFS)
 	if err != nil {
 		return fmt.Errorf("error getting disk usage info: %s", err)
 	}
@@ -59,9 +59,9 @@ func (s *DiskStats) Gather(acc telegraf.Accumulator) error {
 			"fstype": du.Fstype,
 			"mode":   mountOpts.Mode(),
 		}
-		var used_percent float64
+		var usedPercent float64
 		if du.Used+du.Free > 0 {
-			used_percent = float64(du.Used) /
+			usedPercent = float64(du.Used) /
 				(float64(du.Used) + float64(du.Free)) * 100
 		}
 
@@ -69,7 +69,7 @@ func (s *DiskStats) Gather(acc telegraf.Accumulator) error {
 			"total":        du.Total,
 			"free":         du.Free,
 			"used":         du.Used,
-			"used_percent": used_percent,
+			"used_percent": usedPercent,
 			"inodes_total": du.InodesTotal,
 			"inodes_free":  du.InodesFree,
 			"inodes_used":  du.InodesUsed,
@@ -30,7 +30,7 @@ type DiskIO struct {
 	initialized bool
 }
 
-func (_ *DiskIO) Description() string {
+func (d *DiskIO) Description() string {
 	return "Read metrics about disk IO by device"
 }
 
@@ -62,7 +62,7 @@ var diskIOsampleConfig = `
   # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
 `
 
-func (_ *DiskIO) SampleConfig() string {
+func (d *DiskIO) SampleConfig() string {
 	return diskIOsampleConfig
 }
 
@@ -71,34 +71,34 @@ func hasMeta(s string) bool {
 	return strings.IndexAny(s, "*?[") >= 0
 }
 
-func (s *DiskIO) init() error {
-	for _, device := range s.Devices {
+func (d *DiskIO) init() error {
+	for _, device := range d.Devices {
 		if hasMeta(device) {
-			filter, err := filter.Compile(s.Devices)
+			filter, err := filter.Compile(d.Devices)
 			if err != nil {
 				return fmt.Errorf("error compiling device pattern: %s", err.Error())
 			}
-			s.deviceFilter = filter
+			d.deviceFilter = filter
 		}
 	}
-	s.initialized = true
+	d.initialized = true
 	return nil
 }
 
-func (s *DiskIO) Gather(acc telegraf.Accumulator) error {
-	if !s.initialized {
-		err := s.init()
+func (d *DiskIO) Gather(acc telegraf.Accumulator) error {
+	if !d.initialized {
+		err := d.init()
 		if err != nil {
 			return err
 		}
 	}
 
 	devices := []string{}
-	if s.deviceFilter == nil {
-		devices = s.Devices
+	if d.deviceFilter == nil {
+		devices = d.Devices
 	}
 
-	diskio, err := s.ps.DiskIO(devices)
+	diskio, err := d.ps.DiskIO(devices)
 	if err != nil {
 		return fmt.Errorf("error getting disk io info: %s", err.Error())
 	}
@@ -106,17 +106,17 @@ func (s *DiskIO) Gather(acc telegraf.Accumulator) error {
 	for _, io := range diskio {
 
 		match := false
-		if s.deviceFilter != nil && s.deviceFilter.Match(io.Name) {
+		if d.deviceFilter != nil && d.deviceFilter.Match(io.Name) {
 			match = true
 		}
 
 		tags := map[string]string{}
 		var devLinks []string
-		tags["name"], devLinks = s.diskName(io.Name)
+		tags["name"], devLinks = d.diskName(io.Name)
 
-		if s.deviceFilter != nil && !match {
+		if d.deviceFilter != nil && !match {
 			for _, devLink := range devLinks {
-				if s.deviceFilter.Match(devLink) {
+				if d.deviceFilter.Match(devLink) {
 					match = true
 					break
 				}
@@ -126,11 +126,11 @@ func (s *DiskIO) Gather(acc telegraf.Accumulator) error {
 			}
 		}
 
-		for t, v := range s.diskTags(io.Name) {
+		for t, v := range d.diskTags(io.Name) {
 			tags[t] = v
 		}
 
-		if !s.SkipSerialNumber {
+		if !d.SkipSerialNumber {
 			if len(io.SerialNumber) != 0 {
 				tags["serial"] = io.SerialNumber
 			} else {
@@ -157,23 +157,23 @@ func (s *DiskIO) Gather(acc telegraf.Accumulator) error {
 	return nil
 }
 
-func (s *DiskIO) diskName(devName string) (string, []string) {
-	di, err := s.diskInfo(devName)
+func (d *DiskIO) diskName(devName string) (string, []string) {
+	di, err := d.diskInfo(devName)
 	devLinks := strings.Split(di["DEVLINKS"], " ")
 	for i, devLink := range devLinks {
 		devLinks[i] = strings.TrimPrefix(devLink, "/dev/")
 	}
 
-	if len(s.NameTemplates) == 0 {
+	if len(d.NameTemplates) == 0 {
 		return devName, devLinks
 	}
 
 	if err != nil {
-		s.Log.Warnf("Error gathering disk info: %s", err)
+		d.Log.Warnf("Error gathering disk info: %s", err)
 		return devName, devLinks
 	}
 
-	for _, nt := range s.NameTemplates {
+	for _, nt := range d.NameTemplates {
 		miss := false
 		name := varRegex.ReplaceAllStringFunc(nt, func(sub string) string {
 			sub = sub[1:] // strip leading '$'
@@ -195,19 +195,19 @@ func (s *DiskIO) diskName(devName string) (string, []string) {
 	return devName, devLinks
 }
 
-func (s *DiskIO) diskTags(devName string) map[string]string {
-	if len(s.DeviceTags) == 0 {
+func (d *DiskIO) diskTags(devName string) map[string]string {
+	if len(d.DeviceTags) == 0 {
 		return nil
 	}
 
-	di, err := s.diskInfo(devName)
+	di, err := d.diskInfo(devName)
 	if err != nil {
-		s.Log.Warnf("Error gathering disk info: %s", err)
+		d.Log.Warnf("Error gathering disk info: %s", err)
 		return nil
 	}
 
 	tags := map[string]string{}
-	for _, dt := range s.DeviceTags {
+	for _, dt := range d.DeviceTags {
 		if v, ok := di[dt]; ok {
 			tags[dt] = v
 		}
@@ -18,7 +18,7 @@ type diskInfoCache struct {
 
 var udevPath = "/run/udev/data"
 
-func (s *DiskIO) diskInfo(devName string) (map[string]string, error) {
+func (d *DiskIO) diskInfo(devName string) (map[string]string, error) {
 	var err error
 	var stat unix.Stat_t
 
@@ -28,10 +28,10 @@ func (s *DiskIO) diskInfo(devName string) (map[string]string, error) {
 		return nil, err
 	}
 
-	if s.infoCache == nil {
-		s.infoCache = map[string]diskInfoCache{}
+	if d.infoCache == nil {
+		d.infoCache = map[string]diskInfoCache{}
 	}
-	ic, ok := s.infoCache[devName]
+	ic, ok := d.infoCache[devName]
 
 	if ok && stat.Mtim.Nano() == ic.modifiedAt {
 		return ic.values, nil
@@ -43,7 +43,7 @@ func (s *DiskIO) diskInfo(devName string) (map[string]string, error) {
 
 	di := map[string]string{}
 
-	s.infoCache[devName] = diskInfoCache{
+	d.infoCache[devName] = diskInfoCache{
 		modifiedAt:   stat.Mtim.Nano(),
 		udevDataPath: udevDataPath,
 		values:       di,
@@ -4,6 +4,6 @@ package diskio
 
 type diskInfoCache struct{}
 
-func (s *DiskIO) diskInfo(devName string) (map[string]string, error) {
+func (d *DiskIO) diskInfo(devName string) (map[string]string, error) {
 	return nil, nil
 }
@@ -32,11 +32,11 @@ var sampleConfig = `
 
 var defaultTimeout = 5 * time.Second
 
-func (r *Disque) SampleConfig() string {
+func (d *Disque) SampleConfig() string {
 	return sampleConfig
 }
 
-func (r *Disque) Description() string {
+func (d *Disque) Description() string {
 	return "Read metrics from one or many disque servers"
 }
 
@@ -64,21 +64,21 @@ var ErrProtocolError = errors.New("disque protocol error")
 
 // Reads stats from all configured servers accumulates stats.
 // Returns one of the errors encountered while gather stats (if any).
-func (g *Disque) Gather(acc telegraf.Accumulator) error {
-	if len(g.Servers) == 0 {
+func (d *Disque) Gather(acc telegraf.Accumulator) error {
+	if len(d.Servers) == 0 {
 		url := &url.URL{
 			Host: ":7711",
 		}
-		g.gatherServer(url, acc)
+		d.gatherServer(url, acc)
 		return nil
 	}
 
 	var wg sync.WaitGroup
 
-	for _, serv := range g.Servers {
+	for _, serv := range d.Servers {
 		u, err := url.Parse(serv)
 		if err != nil {
-			acc.AddError(fmt.Errorf("Unable to parse to address '%s': %s", serv, err))
+			acc.AddError(fmt.Errorf("unable to parse to address '%s': %s", serv, err))
 			continue
 		} else if u.Scheme == "" {
 			// fallback to simple string based address (i.e. "10.0.0.1:10000")
@@ -89,7 +89,7 @@ func (g *Disque) Gather(acc telegraf.Accumulator) error {
 		wg.Add(1)
 		go func(serv string) {
 			defer wg.Done()
-			acc.AddError(g.gatherServer(u, acc))
+			acc.AddError(d.gatherServer(u, acc))
 		}(serv)
 	}
 
@@ -100,8 +100,8 @@ func (g *Disque) Gather(acc telegraf.Accumulator) error {
 
 const defaultPort = "7711"
 
-func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
-	if g.c == nil {
+func (d *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
+	if d.c == nil {
 
 		_, _, err := net.SplitHostPort(addr.Host)
 		if err != nil {
@@ -110,7 +110,7 @@ func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
 
 		c, err := net.DialTimeout("tcp", addr.Host, defaultTimeout)
 		if err != nil {
-			return fmt.Errorf("Unable to connect to disque server '%s': %s", addr.Host, err)
+			return fmt.Errorf("unable to connect to disque server '%s': %s", addr.Host, err)
 		}
 
 		if addr.User != nil {
@@ -130,15 +130,15 @@ func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
 			}
 		}
 
-		g.c = c
+		d.c = c
 	}
 
 	// Extend connection
-	g.c.SetDeadline(time.Now().Add(defaultTimeout))
+	d.c.SetDeadline(time.Now().Add(defaultTimeout))
 
-	g.c.Write([]byte("info\r\n"))
+	d.c.Write([]byte("info\r\n"))
 
-	r := bufio.NewReader(g.c)
+	r := bufio.NewReader(d.c)
 
 	line, err := r.ReadString('\n')
 	if err != nil {
@@ -176,7 +176,7 @@ func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
 
 		parts := strings.SplitN(line, ":", 2)
 
-		name := string(parts[0])
+		name := parts[0]
 
 		metric, ok := Tracking[name]
 		if !ok {
@@ -1,4 +1,4 @@
-// Helper functions copied from
+// Package docker contains few helper functions copied from
 // https://github.com/docker/cli/blob/master/cli/command/container/stats_helpers.go
 package docker
 
@@ -664,7 +664,7 @@ func (e *Elasticsearch) gatherSingleIndexStats(name string, index indexStat, now
 			shardTags := map[string]string{
 				"index_name": name,
 				"node_id":    routingNode,
-				"shard_name": string(shardNumber),
+				"shard_name": shardNumber,
 				"type":       shardType,
 			}
 
@@ -741,11 +741,7 @@ func (e *Elasticsearch) gatherJSONData(url string, v interface{}) error {
 			r.StatusCode, http.StatusOK)
 	}
 
-	if err = json.NewDecoder(r.Body).Decode(v); err != nil {
-		return err
-	}
-
-	return nil
+	return json.NewDecoder(r.Body).Decode(v)
 }
 
 func (e *Elasticsearch) compileIndexMatchers() (map[string]filter.Filter, error) {
@@ -69,12 +69,12 @@ func (c CommandRunner) Run(
 	command string,
 	timeout time.Duration,
 ) ([]byte, []byte, error) {
-	split_cmd, err := shellquote.Split(command)
-	if err != nil || len(split_cmd) == 0 {
+	splitCmd, err := shellquote.Split(command)
+	if err != nil || len(splitCmd) == 0 {
 		return nil, nil, fmt.Errorf("exec: unable to parse command, %s", err)
 	}
 
-	cmd := exec.Command(split_cmd[0], split_cmd[1:]...)
+	cmd := exec.Command(splitCmd[0], splitCmd[1:]...)
 
 	var (
 		out bytes.Buffer
@@ -123,7 +123,7 @@ func removeCarriageReturns(b bytes.Buffer) bytes.Buffer {
 			byt, er := b.ReadBytes(0x0D)
 			end := len(byt)
 			if nil == er {
-				end -= 1
+				end--
 			}
 			if nil != byt {
 				buf.Write(byt[:end])
@@ -65,11 +65,11 @@ type FileCount struct {
 	Log telegraf.Logger
 }
 
-func (_ *FileCount) Description() string {
+func (fc *FileCount) Description() string {
 	return "Count files in a directory"
 }
 
-func (_ *FileCount) SampleConfig() string { return sampleConfig }
+func (fc *FileCount) SampleConfig() string { return sampleConfig }
 
 type fileFilterFunc func(os.FileInfo) (bool, error)
 
@@ -61,24 +61,24 @@ var sampleConfig = `
   # insecure_skip_verify = false
 `
 
-func (r *haproxy) SampleConfig() string {
+func (h *haproxy) SampleConfig() string {
 	return sampleConfig
 }
 
-func (r *haproxy) Description() string {
+func (h *haproxy) Description() string {
 	return "Read metrics of haproxy, via socket or csv stats page"
 }
 
 // Reads stats from all configured servers accumulates stats.
 // Returns one of the errors encountered while gather stats (if any).
-func (g *haproxy) Gather(acc telegraf.Accumulator) error {
-	if len(g.Servers) == 0 {
-		return g.gatherServer("http://127.0.0.1:1936/haproxy?stats", acc)
+func (h *haproxy) Gather(acc telegraf.Accumulator) error {
+	if len(h.Servers) == 0 {
+		return h.gatherServer("http://127.0.0.1:1936/haproxy?stats", acc)
 	}
 
-	endpoints := make([]string, 0, len(g.Servers))
+	endpoints := make([]string, 0, len(h.Servers))
 
-	for _, endpoint := range g.Servers {
+	for _, endpoint := range h.Servers {
 
 		if strings.HasPrefix(endpoint, "http") {
 			endpoints = append(endpoints, endpoint)
@@ -107,7 +107,7 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error {
 	for _, server := range endpoints {
 		go func(serv string) {
 			defer wg.Done()
-			if err := g.gatherServer(serv, acc); err != nil {
+			if err := h.gatherServer(serv, acc); err != nil {
 				acc.AddError(err)
 			}
 		}(server)
@@ -117,7 +117,7 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error {
 	return nil
 }
 
-func (g *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) error {
+func (h *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) error {
 	socketPath := getSocketAddr(addr)
 
 	c, err := net.Dial("unix", socketPath)
@@ -132,28 +132,28 @@ func (g *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) erro
 		return fmt.Errorf("could not write to socket '%s': %s", addr, errw)
 	}
 
-	return g.importCsvResult(c, acc, socketPath)
+	return h.importCsvResult(c, acc, socketPath)
 }
 
-func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
+func (h *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
 	if !strings.HasPrefix(addr, "http") {
-		return g.gatherServerSocket(addr, acc)
+		return h.gatherServerSocket(addr, acc)
 	}
 
-	if g.client == nil {
-		tlsCfg, err := g.ClientConfig.TLSConfig()
+	if h.client == nil {
+		tlsCfg, err := h.ClientConfig.TLSConfig()
 		if err != nil {
 			return err
 		}
 		tr := &http.Transport{
-			ResponseHeaderTimeout: time.Duration(3 * time.Second),
+			ResponseHeaderTimeout: 3 * time.Second,
 			TLSClientConfig:       tlsCfg,
 		}
 		client := &http.Client{
 			Transport: tr,
-			Timeout:   time.Duration(4 * time.Second),
+			Timeout:   4 * time.Second,
 		}
-		g.client = client
+		h.client = client
 	}
 
 	if !strings.HasSuffix(addr, ";csv") {
@@ -176,11 +176,11 @@ func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
 		addr = u.String()
 	}
 
-	if g.Username != "" || g.Password != "" {
-		req.SetBasicAuth(g.Username, g.Password)
+	if h.Username != "" || h.Password != "" {
+		req.SetBasicAuth(h.Username, h.Password)
 	}
 
-	res, err := g.client.Do(req)
+	res, err := h.client.Do(req)
 	if err != nil {
 		return fmt.Errorf("unable to connect to haproxy server '%s': %s", addr, err)
 	}
@@ -190,7 +190,7 @@ func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
 		return fmt.Errorf("unable to get valid stat result from '%s', http response code : %d", addr, res.StatusCode)
 	}
 
-	if err := g.importCsvResult(res.Body, acc, u.Host); err != nil {
+	if err := h.importCsvResult(res.Body, acc, u.Host); err != nil {
 		return fmt.Errorf("unable to parse stat result from '%s': %s", addr, err)
 	}
 
@@ -222,7 +222,7 @@ var fieldRenames = map[string]string{
 	"hrsp_other": "http_response.other",
 }
 
-func (g *haproxy) importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error {
+func (h *haproxy) importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error {
 	csvr := csv.NewReader(r)
 	now := time.Now()
 
@@ -259,7 +259,7 @@ func (g *haproxy) importCsvResult(r io.Reader, acc telegraf.Accumulator, host st
 
 			colName := headers[i]
 			fieldName := colName
-			if !g.KeepFieldNames {
+			if !h.KeepFieldNames {
 				if fieldRename, ok := fieldRenames[colName]; ok {
 					fieldName = fieldRename
 				}
@@ -20,7 +20,7 @@ type Fetcher interface {
 	Fetch(address string) ([]gohddtemp.Disk, error)
 }
 
-func (_ *HDDTemp) Description() string {
+func (h *HDDTemp) Description() string {
 	return "Monitor disks' temperatures using hddtemp"
 }
 
@@ -36,7 +36,7 @@ var hddtempSampleConfig = `
   # devices = ["sda", "*"]
 `
 
-func (_ *HDDTemp) SampleConfig() string {
+func (h *HDDTemp) SampleConfig() string {
 	return hddtempSampleConfig
 }
 
@@ -13,10 +13,10 @@ type Infiniband struct {
 // Sample configuration for plugin
 var InfinibandConfig = ``
 
-func (_ *Infiniband) SampleConfig() string {
+func (i *Infiniband) SampleConfig() string {
 	return InfinibandConfig
 }
 
-func (_ *Infiniband) Description() string {
+func (i *Infiniband) Description() string {
 	return "Gets counters from all InfiniBand cards and ports installed"
 }
@@ -11,8 +11,7 @@ import (
 )
 
 // Gather statistics from our infiniband cards
-func (_ *Infiniband) Gather(acc telegraf.Accumulator) error {
-
+func (i *Infiniband) Gather(acc telegraf.Accumulator) error {
 	rdmaDevices := rdmamap.GetRdmaDeviceList()
 
 	if len(rdmaDevices) == 0 {
@@ -41,7 +40,6 @@ func (_ *Infiniband) Gather(acc telegraf.Accumulator) error {
 
 // Add the statistics to the accumulator
 func addStats(dev string, port string, stats []rdmamap.RdmaStatEntry, acc telegraf.Accumulator) {
-
 	// Allow users to filter by card and port
 	tags := map[string]string{"device": dev, "port": port}
 	fields := make(map[string]interface{})
@@ -288,7 +288,7 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc {
 		var m telegraf.Metric
 		var err error
 		var parseErrorCount int
-		var lastPos int = 0
+		var lastPos int
 		var firstParseErrorStr string
 		for {
 			select {
@@ -306,7 +306,7 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc {
 
 			// Continue parsing metrics even if some are malformed
 			if parseErr, ok := err.(*influx.ParseError); ok {
-				parseErrorCount += 1
+				parseErrorCount++
 				errStr := parseErr.Error()
 				if firstParseErrorStr == "" {
 					firstParseErrorStr = errStr
@@ -47,27 +47,27 @@ func NewConnection(server, privilege, hexKey string) *Connection {
 	return conn
 }
 
-func (t *Connection) options() []string {
-	intf := t.Interface
+func (c *Connection) options() []string {
+	intf := c.Interface
 	if intf == "" {
 		intf = "lan"
 	}
 
 	options := []string{
-		"-H", t.Hostname,
-		"-U", t.Username,
-		"-P", t.Password,
+		"-H", c.Hostname,
+		"-U", c.Username,
+		"-P", c.Password,
 		"-I", intf,
 	}
 
-	if t.HexKey != "" {
-		options = append(options, "-y", t.HexKey)
+	if c.HexKey != "" {
+		options = append(options, "-y", c.HexKey)
 	}
-	if t.Port != 0 {
-		options = append(options, "-p", strconv.Itoa(t.Port))
+	if c.Port != 0 {
+		options = append(options, "-p", strconv.Itoa(c.Port))
 	}
-	if t.Privilege != "" {
-		options = append(options, "-L", t.Privilege)
+	if c.Privilege != "" {
+		options = append(options, "-L", c.Privilege)
 	}
 	return options
 }
@@ -29,12 +29,12 @@ const measurement = "ipset"
 var defaultTimeout = internal.Duration{Duration: time.Second}
 
 // Description returns a short description of the plugin
-func (ipset *Ipset) Description() string {
+func (i *Ipset) Description() string {
 	return "Gather packets and bytes counters from Linux ipsets"
 }
 
 // SampleConfig returns sample configuration options.
-func (ipset *Ipset) SampleConfig() string {
+func (i *Ipset) SampleConfig() string {
 	return `
   ## By default, we only show sets which have already matched at least 1 packet.
   ## set include_unmatched_sets = true to gather them all.
@@ -46,8 +46,8 @@ func (ipset *Ipset) SampleConfig() string {
 `
 }
 
-func (ips *Ipset) Gather(acc telegraf.Accumulator) error {
-	out, e := ips.lister(ips.Timeout, ips.UseSudo)
+func (i *Ipset) Gather(acc telegraf.Accumulator) error {
+	out, e := i.lister(i.Timeout, i.UseSudo)
 	if e != nil {
 		acc.AddError(e)
 	}
@@ -64,25 +64,25 @@ func (ips *Ipset) Gather(acc telegraf.Accumulator) error {
 
 		data := strings.Fields(line)
 		if len(data) < 7 {
-			acc.AddError(fmt.Errorf("Error parsing line (expected at least 7 fields): %s", line))
+			acc.AddError(fmt.Errorf("error parsing line (expected at least 7 fields): %s", line))
 			continue
 		}
-		if data[0] == "add" && (data[4] != "0" || ips.IncludeUnmatchedSets) {
+		if data[0] == "add" && (data[4] != "0" || i.IncludeUnmatchedSets) {
 			tags := map[string]string{
 				"set":  data[1],
 				"rule": data[2],
 			}
-			packets_total, err := strconv.ParseUint(data[4], 10, 64)
+			packetsTotal, err := strconv.ParseUint(data[4], 10, 64)
 			if err != nil {
 				acc.AddError(err)
 			}
-			bytes_total, err := strconv.ParseUint(data[6], 10, 64)
+			bytesTotal, err := strconv.ParseUint(data[6], 10, 64)
 			if err != nil {
 				acc.AddError(err)
 			}
 			fields := map[string]interface{}{
-				"packets_total": packets_total,
-				"bytes_total":   bytes_total,
+				"packets_total": packetsTotal,
+				"bytes_total":   bytesTotal,
 			}
 			acc.AddCounter(measurement, fields, tags)
 		}
@@ -40,7 +40,7 @@ func TestIpset(t *testing.T) {
 			value: `create hash:net family inet hashsize 1024 maxelem 65536 counters
 				add myset 4.5.6.7 packets 123 bytes
 				`,
-			err: fmt.Errorf("Error parsing line (expected at least 7 fields): \t\t\t\tadd myset 4.5.6.7 packets 123 bytes"),
+			err: fmt.Errorf("error parsing line (expected at least 7 fields): \t\t\t\tadd myset 4.5.6.7 packets 123 bytes"),
 		},
 		{
 			name: "Non-empty sets, counters, no comment",
@@ -47,11 +47,9 @@ func (c *client) init() error {
 			break
 		}
 	}
 
 	// first api fetch
-	if err := c.doGet(context.Background(), jobPath, new(jobResponse)); err != nil {
-		return err
-	}
-	return nil
+	return c.doGet(context.Background(), jobPath, new(jobResponse))
 }
 
 func (c *client) doGet(ctx context.Context, url string, v interface{}) error {
@@ -97,10 +95,8 @@ func (c *client) doGet(ctx context.Context, url string, v interface{}) error {
 			Title: resp.Status,
 		}
 	}
-	if err = json.NewDecoder(resp.Body).Decode(v); err != nil {
-		return err
-	}
-	return nil
+
+	return json.NewDecoder(resp.Body).Decode(v)
 }
 
 type APIError struct {
@@ -20,10 +20,10 @@ type SysctlFS struct {
 var sysctlFSDescription = `Provides Linux sysctl fs metrics`
 var sysctlFSSampleConfig = ``
 
-func (_ SysctlFS) Description() string {
+func (sfs SysctlFS) Description() string {
 	return sysctlFSDescription
 }
-func (_ SysctlFS) SampleConfig() string {
+func (sfs SysctlFS) SampleConfig() string {
 	return sysctlFSSampleConfig
 }
 
@@ -149,8 +149,8 @@ const processStats = "/_node/stats/process"
 const pipelinesStats = "/_node/stats/pipelines"
 const pipelineStats = "/_node/stats/pipeline"
 
-func (i *Logstash) Init() error {
-	err := choice.CheckSlice(i.Collect, []string{"pipelines", "process", "jvm"})
+func (logstash *Logstash) Init() error {
+	err := choice.CheckSlice(logstash.Collect, []string{"pipelines", "process", "jvm"})
 	if err != nil {
 		return fmt.Errorf(`cannot verify "collect" setting: %v`, err)
 	}
@@ -1,15 +1,9 @@
 // +build !windows
 
-// lustre2 doesn't aim for Windows
-
-/*
-Lustre 2.x Telegraf plugin
-
-Lustre (http://lustre.org/) is an open-source, parallel file system
-for HPC environments. It stores statistics about its activity in
-/proc
-
-*/
+// Package lustre2 (doesn't aim for Windows)
+// Lustre 2.x Telegraf plugin
+// Lustre (http://lustre.org/) is an open-source, parallel file system
+// for HPC environments. It stores statistics about its activity in /proc
 package lustre2
 
 import (
@@ -30,8 +24,8 @@ type tags struct {
 // Lustre proc files can change between versions, so we want to future-proof
 // by letting people choose what to look at.
 type Lustre2 struct {
-	Ost_procfiles []string `toml:"ost_procfiles"`
-	Mds_procfiles []string `toml:"mds_procfiles"`
+	OstProcfiles []string `toml:"ost_procfiles"`
+	MdsProcfiles []string `toml:"mds_procfiles"`
 
 	// allFields maps and OST name to the metric fields associated with that OST
 	allFields map[tags]map[string]interface{}
@@ -63,7 +57,7 @@ type mapping struct {
 	tag    string // Additional tag to add for this metric
 }
 
-var wanted_ost_fields = []*mapping{
+var wantedOstFields = []*mapping{
 	{
 		inProc: "write_bytes",
 		field:  6,
@@ -95,7 +89,7 @@ var wanted_ost_fields = []*mapping{
 	},
 }
 
-var wanted_ost_jobstats_fields = []*mapping{
+var wantedOstJobstatsFields = []*mapping{
 	{ // The read line has several fields, so we need to differentiate what they are
 		inProc: "read",
 		field:  3,
@@ -228,7 +222,7 @@ var wanted_ost_jobstats_fields = []*mapping{
 	},
 }
 
-var wanted_mds_fields = []*mapping{
+var wantedMdsFields = []*mapping{
 	{
 		inProc: "open",
 	},
@@ -279,7 +273,7 @@ var wanted_mds_fields = []*mapping{
 	},
 }
 
-var wanted_mdt_jobstats_fields = []*mapping{
+var wantedMdtJobstatsFields = []*mapping{
 	{
 		inProc: "open",
 		field:  3,
@@ -362,7 +356,7 @@ var wanted_mdt_jobstats_fields = []*mapping{
 	},
 }
 
-func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, acc telegraf.Accumulator) error {
+func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping) error {
 	files, err := filepath.Glob(fileglob)
 	if err != nil {
 		return err
@@ -386,7 +380,7 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, a
 		}
 		jobs := strings.Split(string(wholeFile), "- ")
 		for _, job := range jobs {
-			lines := strings.Split(string(job), "\n")
+			lines := strings.Split(job, "\n")
 			jobid := ""
 
 			// figure out if the data should be tagged with job_id here
@@ -422,7 +416,7 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, a
 				if wantedField == 0 {
 					wantedField = 1
 				}
-				data, err = strconv.ParseUint(strings.TrimSuffix((parts[wantedField]), ","), 10, 64)
+				data, err = strconv.ParseUint(strings.TrimSuffix(parts[wantedField], ","), 10, 64)
 				if err != nil {
 					return err
 				}
@@ -454,66 +448,60 @@ func (l *Lustre2) Gather(acc telegraf.Accumulator) error {
 	//l.allFields = make(map[string]map[string]interface{})
 	l.allFields = make(map[tags]map[string]interface{})
 
-	if len(l.Ost_procfiles) == 0 {
+	if len(l.OstProcfiles) == 0 {
 		// read/write bytes are in obdfilter/<ost_name>/stats
-		err := l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/stats",
-			wanted_ost_fields, acc)
+		err := l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/stats", wantedOstFields)
 		if err != nil {
 			return err
 		}
 		// cache counters are in osd-ldiskfs/<ost_name>/stats
-		err = l.GetLustreProcStats("/proc/fs/lustre/osd-ldiskfs/*/stats",
-			wanted_ost_fields, acc)
+		err = l.GetLustreProcStats("/proc/fs/lustre/osd-ldiskfs/*/stats", wantedOstFields)
 		if err != nil {
 			return err
 		}
 		// per job statistics are in obdfilter/<ost_name>/job_stats
-		err = l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/job_stats",
-			wanted_ost_jobstats_fields, acc)
+		err = l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/job_stats", wantedOstJobstatsFields)
 		if err != nil {
 			return err
 		}
 	}
 
-	if len(l.Mds_procfiles) == 0 {
+	if len(l.MdsProcfiles) == 0 {
 		// Metadata server stats
-		err := l.GetLustreProcStats("/proc/fs/lustre/mdt/*/md_stats",
-			wanted_mds_fields, acc)
+		err := l.GetLustreProcStats("/proc/fs/lustre/mdt/*/md_stats", wantedMdsFields)
 		if err != nil {
 			return err
 		}
 
 		// Metadata target job stats
-		err = l.GetLustreProcStats("/proc/fs/lustre/mdt/*/job_stats",
-			wanted_mdt_jobstats_fields, acc)
+		err = l.GetLustreProcStats("/proc/fs/lustre/mdt/*/job_stats", wantedMdtJobstatsFields)
 		if err != nil {
 			return err
 		}
 	}
 
-	for _, procfile := range l.Ost_procfiles {
-		ost_fields := wanted_ost_fields
+	for _, procfile := range l.OstProcfiles {
+		ostFields := wantedOstFields
 		if strings.HasSuffix(procfile, "job_stats") {
-			ost_fields = wanted_ost_jobstats_fields
+			ostFields = wantedOstJobstatsFields
 		}
-		err := l.GetLustreProcStats(procfile, ost_fields, acc)
+		err := l.GetLustreProcStats(procfile, ostFields)
 		if err != nil {
 			return err
 		}
 	}
-	for _, procfile := range l.Mds_procfiles {
-		mdt_fields := wanted_mds_fields
+	for _, procfile := range l.MdsProcfiles {
+		mdtFields := wantedMdsFields
 		if strings.HasSuffix(procfile, "job_stats") {
-			mdt_fields = wanted_mdt_jobstats_fields
+			mdtFields = wantedMdtJobstatsFields
 		}
-		err := l.GetLustreProcStats(procfile, mdt_fields, acc)
+		err := l.GetLustreProcStats(procfile, mdtFields)
 		if err != nil {
 			return err
 		}
 	}
 
	for tgs, fields := range l.allFields {
 
 		tags := map[string]string{
 			"name": tgs.name,
 		}
@@ -135,33 +135,33 @@ const mdtJobStatsContents = `job_stats:
 func TestLustre2GeneratesMetrics(t *testing.T) {
 
 	tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/"
-	ost_name := "OST0001"
+	ostName := "OST0001"
 
 	mdtdir := tempdir + "/mdt/"
-	err := os.MkdirAll(mdtdir+"/"+ost_name, 0755)
+	err := os.MkdirAll(mdtdir+"/"+ostName, 0755)
 	require.NoError(t, err)
 
 	osddir := tempdir + "/osd-ldiskfs/"
-	err = os.MkdirAll(osddir+"/"+ost_name, 0755)
+	err = os.MkdirAll(osddir+"/"+ostName, 0755)
 	require.NoError(t, err)
 
 	obddir := tempdir + "/obdfilter/"
-	err = os.MkdirAll(obddir+"/"+ost_name, 0755)
+	err = os.MkdirAll(obddir+"/"+ostName, 0755)
 	require.NoError(t, err)
 
-	err = ioutil.WriteFile(mdtdir+"/"+ost_name+"/md_stats", []byte(mdtProcContents), 0644)
+	err = ioutil.WriteFile(mdtdir+"/"+ostName+"/md_stats", []byte(mdtProcContents), 0644)
 	require.NoError(t, err)
 
-	err = ioutil.WriteFile(osddir+"/"+ost_name+"/stats", []byte(osdldiskfsProcContents), 0644)
+	err = ioutil.WriteFile(osddir+"/"+ostName+"/stats", []byte(osdldiskfsProcContents), 0644)
 	require.NoError(t, err)
 
-	err = ioutil.WriteFile(obddir+"/"+ost_name+"/stats", []byte(obdfilterProcContents), 0644)
+	err = ioutil.WriteFile(obddir+"/"+ostName+"/stats", []byte(obdfilterProcContents), 0644)
 	require.NoError(t, err)
 
 	// Begin by testing standard Lustre stats
 	m := &Lustre2{
-		Ost_procfiles: []string{obddir + "/*/stats", osddir + "/*/stats"},
-		Mds_procfiles: []string{mdtdir + "/*/md_stats"},
+		OstProcfiles: []string{obddir + "/*/stats", osddir + "/*/stats"},
+		MdsProcfiles: []string{mdtdir + "/*/md_stats"},
 	}
 
 	var acc testutil.Accumulator
@@ -170,7 +170,7 @@ func TestLustre2GeneratesMetrics(t *testing.T) {
 	require.NoError(t, err)
 
 	tags := map[string]string{
-		"name": ost_name,
+		"name": ostName,
 	}
 
 	fields := map[string]interface{}{
@@ -208,27 +208,27 @@ func TestLustre2GeneratesMetrics(t *testing.T) {
 func TestLustre2GeneratesJobstatsMetrics(t *testing.T) {
 
 	tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/"
-	ost_name := "OST0001"
-	job_names := []string{"cluster-testjob1", "testjob2"}
+	ostName := "OST0001"
+	jobNames := []string{"cluster-testjob1", "testjob2"}
 
 	mdtdir := tempdir + "/mdt/"
-	err := os.MkdirAll(mdtdir+"/"+ost_name, 0755)
+	err := os.MkdirAll(mdtdir+"/"+ostName, 0755)
 	require.NoError(t, err)
 
 	obddir := tempdir + "/obdfilter/"
-	err = os.MkdirAll(obddir+"/"+ost_name, 0755)
+	err = os.MkdirAll(obddir+"/"+ostName, 0755)
 	require.NoError(t, err)
 
-	err = ioutil.WriteFile(mdtdir+"/"+ost_name+"/job_stats", []byte(mdtJobStatsContents), 0644)
+	err = ioutil.WriteFile(mdtdir+"/"+ostName+"/job_stats", []byte(mdtJobStatsContents), 0644)
 	require.NoError(t, err)
 
-	err = ioutil.WriteFile(obddir+"/"+ost_name+"/job_stats", []byte(obdfilterJobStatsContents), 0644)
+	err = ioutil.WriteFile(obddir+"/"+ostName+"/job_stats", []byte(obdfilterJobStatsContents), 0644)
 	require.NoError(t, err)
 
 	// Test Lustre Jobstats
 	m := &Lustre2{
-		Ost_procfiles: []string{obddir + "/*/job_stats"},
-		Mds_procfiles: []string{mdtdir + "/*/job_stats"},
+		OstProcfiles: []string{obddir + "/*/job_stats"},
+		MdsProcfiles: []string{mdtdir + "/*/job_stats"},
 	}
 
 	var acc testutil.Accumulator
@@ -240,12 +240,12 @@ func TestLustre2GeneratesJobstatsMetrics(t *testing.T) {
 	// and even further make this dependent on summing per OST
 	tags := []map[string]string{
 		{
-			"name":  ost_name,
-			"jobid": job_names[0],
+			"name":  ostName,
+			"jobid": jobNames[0],
 		},
 		{
-			"name":  ost_name,
-			"jobid": job_names[1],
+			"name":  ostName,
+			"jobid": jobNames[1],
 		},
 	}
 
@@ -347,7 +347,7 @@ func TestLustre2CanParseConfiguration(t *testing.T) {
 	    "/proc/fs/lustre/mdt/*/md_stats",
 	]`)
 
-	table, err := toml.Parse([]byte(config))
+	table, err := toml.Parse(config)
 	require.NoError(t, err)
 
 	inputs, ok := table.Fields["inputs"]
@ -361,11 +361,11 @@ func TestLustre2CanParseConfiguration(t *testing.T) {
|
|||
require.NoError(t, toml.UnmarshalTable(lustre2.([]*ast.Table)[0], &plugin))
|
||||
|
||||
assert.Equal(t, Lustre2{
|
||||
Ost_procfiles: []string{
|
||||
OstProcfiles: []string{
|
||||
"/proc/fs/lustre/obdfilter/*/stats",
|
||||
"/proc/fs/lustre/osd-ldiskfs/*/stats",
|
||||
},
|
||||
Mds_procfiles: []string{
|
||||
MdsProcfiles: []string{
|
||||
"/proc/fs/lustre/mdt/*/md_stats",
|
||||
},
|
||||
}, plugin)
@@ -220,7 +220,7 @@ func (c *Marklogic) createHTTPClient() (*http.Client, error) {
Transport: &http.Transport{
TLSClientConfig: tlsCfg,
},
Timeout: time.Duration(5 * time.Second),
Timeout: 5 * time.Second,
}

return client, nil

@@ -246,11 +246,7 @@ func (c *Marklogic) gatherJSONData(url string, v interface{}) error {
response.StatusCode, http.StatusOK)
}

if err = json.NewDecoder(response.Body).Decode(v); err != nil {
return err
}

return nil
return json.NewDecoder(response.Body).Decode(v)
}

func init() {
@@ -14,19 +14,19 @@ type MemStats struct {
platform string
}

func (_ *MemStats) Description() string {
func (ms *MemStats) Description() string {
return "Read metrics about memory usage"
}

func (_ *MemStats) SampleConfig() string { return "" }
func (ms *MemStats) SampleConfig() string { return "" }

func (m *MemStats) Init() error {
m.platform = runtime.GOOS
func (ms *MemStats) Init() error {
ms.platform = runtime.GOOS
return nil
}

func (s *MemStats) Gather(acc telegraf.Accumulator) error {
vm, err := s.ps.VMStat()
func (ms *MemStats) Gather(acc telegraf.Accumulator) error {
vm, err := ms.ps.VMStat()
if err != nil {
return fmt.Errorf("error getting virtual memory info: %s", err)
}

@@ -39,7 +39,7 @@ func (s *MemStats) Gather(acc telegraf.Accumulator) error {
"available_percent": 100 * float64(vm.Available) / float64(vm.Total),
}

switch s.platform {
switch ms.platform {
case "darwin":
fields["active"] = vm.Active
fields["free"] = vm.Free
@@ -2,7 +2,6 @@ package mesos

import (
"encoding/json"
"fmt"
"math/rand"
"net/http"
"net/http/httptest"

@@ -19,17 +18,10 @@ var masterMetrics map[string]interface{}
var masterTestServer *httptest.Server
var slaveMetrics map[string]interface{}

// var slaveTaskMetrics map[string]interface{}
var slaveTestServer *httptest.Server

func randUUID() string {
b := make([]byte, 16)
rand.Read(b)
return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
}

// master metrics that will be returned by generateMetrics()
var masterMetricNames []string = []string{
var masterMetricNames = []string{
// resources
"master/cpus_percent",
"master/cpus_used",

@@ -214,7 +206,7 @@ var masterMetricNames []string = []string{
}

// slave metrics that will be returned by generateMetrics()
var slaveMetricNames []string = []string{
var slaveMetricNames = []string{
// resources
"slave/cpus_percent",
"slave/cpus_used",
@@ -25,7 +25,7 @@ type Connector interface {
Connect() (Connection, error)
}

func NewConnector(hostname, port, password string) (*connector, error) {
func newConnector(hostname, port, password string) (*connector, error) {
return &connector{
hostname: hostname,
port: port,

@@ -58,7 +58,7 @@ func (c *connector) Connect() (Connection, error) {
return &connection{rcon: rcon}, nil
}

func NewClient(connector Connector) (*client, error) {
func newClient(connector Connector) (*client, error) {
return &client{connector: connector}, nil
}
@@ -98,7 +98,7 @@ func TestClient_Player(t *testing.T) {
conn: &MockConnection{commands: tt.commands},
}

client, err := NewClient(connector)
client, err := newClient(connector)
require.NoError(t, err)

actual, err := client.Players()

@@ -183,7 +183,7 @@ func TestClient_Scores(t *testing.T) {
conn: &MockConnection{commands: tt.commands},
}

client, err := NewClient(connector)
client, err := newClient(connector)
require.NoError(t, err)

actual, err := client.Scores(tt.player)
@@ -62,7 +62,7 @@ type Packet struct {
// Write method fails to write the header bytes in their little
// endian byte order.
func (p Packet) Compile() (payload []byte, err error) {
var size int32 = p.Header.Size
var size = p.Header.Size
var buffer bytes.Buffer
var padding [PacketPaddingSize]byte
@@ -50,12 +50,12 @@ func (s *Minecraft) SampleConfig() string {

func (s *Minecraft) Gather(acc telegraf.Accumulator) error {
if s.client == nil {
connector, err := NewConnector(s.Server, s.Port, s.Password)
connector, err := newConnector(s.Server, s.Port, s.Password)
if err != nil {
return err
}

client, err := NewClient(connector)
client, err := newClient(connector)
if err != nil {
return err
}
@@ -338,11 +338,11 @@ func validateFieldContainers(t []fieldContainer, n string) error {
}

//search name duplicate
canonical_name := item.Measurement + "." + item.Name
if nameEncountered[canonical_name] {
canonicalName := item.Measurement + "." + item.Name
if nameEncountered[canonicalName] {
return fmt.Errorf("name '%s' is duplicated in measurement '%s' '%s' - '%s'", item.Name, item.Measurement, n, item.Name)
}
nameEncountered[canonical_name] = true
nameEncountered[canonicalName] = true

if n == cInputRegisters || n == cHoldingRegisters {
// search byte order

@@ -405,13 +405,13 @@ func removeDuplicates(elements []uint16) []uint16 {

func readRegisterValues(m *Modbus, rt string, rr registerRange) ([]byte, error) {
if rt == cDiscreteInputs {
return m.client.ReadDiscreteInputs(uint16(rr.address), uint16(rr.length))
return m.client.ReadDiscreteInputs(rr.address, rr.length)
} else if rt == cCoils {
return m.client.ReadCoils(uint16(rr.address), uint16(rr.length))
return m.client.ReadCoils(rr.address, rr.length)
} else if rt == cInputRegisters {
return m.client.ReadInputRegisters(uint16(rr.address), uint16(rr.length))
return m.client.ReadInputRegisters(rr.address, rr.length)
} else if rt == cHoldingRegisters {
return m.client.ReadHoldingRegisters(uint16(rr.address), uint16(rr.length))
return m.client.ReadHoldingRegisters(rr.address, rr.length)
} else {
return []byte{}, fmt.Errorf("not Valid function")
}

@@ -462,16 +462,16 @@ func (m *Modbus) getFields() error {

if register.Type == cInputRegisters || register.Type == cHoldingRegisters {
for i := 0; i < len(register.Fields); i++ {
var values_t []byte
var valuesT []byte

for j := 0; j < len(register.Fields[i].Address); j++ {
tempArray := rawValues[register.Fields[i].Address[j]]
for x := 0; x < len(tempArray); x++ {
values_t = append(values_t, tempArray[x])
valuesT = append(valuesT, tempArray[x])
}
}

register.Fields[i].value = convertDataType(register.Fields[i], values_t)
register.Fields[i].value = convertDataType(register.Fields[i], valuesT)
}

}

@@ -587,30 +587,6 @@ func convertEndianness64(o string, b []byte) uint64 {
}
}

func format16(f string, r uint16) interface{} {
switch f {
case "UINT16":
return r
case "INT16":
return int16(r)
default:
return r
}
}

func format32(f string, r uint32) interface{} {
switch f {
case "UINT32":
return r
case "INT32":
return int32(r)
case "FLOAT32-IEEE":
return math.Float32frombits(r)
default:
return r
}
}

func format64(f string, r uint64) interface{} {
switch f {
case "UINT64":

@@ -689,7 +665,7 @@ func (m *Modbus) Gather(acc telegraf.Accumulator) error {
}

timestamp := time.Now()
for retry := 0; retry <= m.Retries; retry += 1 {
for retry := 0; retry <= m.Retries; retry++ {
timestamp = time.Now()
err := m.getFields()
if err != nil {
@@ -679,7 +679,7 @@ func TestRetrySuccessful(t *testing.T) {
if retries >= maxretries {
except = &mbserver.Success
}
retries += 1
retries++

return data, except
})

@@ -756,7 +756,7 @@ func TestRetryFail(t *testing.T) {
counter := 0
serv.RegisterFunctionHandler(1,
func(s *mbserver.Server, frame mbserver.Framer) ([]byte, *mbserver.Exception) {
counter += 1
counter++
data := make([]byte, 2)
data[0] = byte(1)
data[1] = byte(0)
@@ -102,7 +102,7 @@ func (m *MultiFile) Gather(acc telegraf.Accumulator) error {

var value interface{}

var d int = 0
var d int
if _, errfmt := fmt.Sscanf(file.Conversion, "float(%d)", &d); errfmt == nil || file.Conversion == "float" {
var v float64
v, err = strconv.ParseFloat(vStr, 64)
@@ -143,7 +143,6 @@ const sampleConfig = `
`

const (
defaultTimeout = 5 * time.Second
defaultPerfEventsStatementsDigestTextLimit = 120
defaultPerfEventsStatementsLimit = 250
defaultPerfEventsStatementsTimeLimit = 86400

@@ -712,8 +711,8 @@ func (m *Mysql) gatherBinaryLogs(db *sql.DB, serv string, acc telegraf.Accumulat
servtag := getDSNTag(serv)
tags := map[string]string{"server": servtag}
var (
size uint64 = 0
count uint64 = 0
size uint64
count uint64
fileSize uint64
fileName string
)

@@ -893,16 +892,16 @@ func (m *Mysql) GatherProcessListStatuses(db *sql.DB, serv string, acc telegraf.
}

// get count of connections from each user
conn_rows, err := db.Query("SELECT user, sum(1) AS connections FROM INFORMATION_SCHEMA.PROCESSLIST GROUP BY user")
connRows, err := db.Query("SELECT user, sum(1) AS connections FROM INFORMATION_SCHEMA.PROCESSLIST GROUP BY user")
if err != nil {
return err
}

for conn_rows.Next() {
for connRows.Next() {
var user string
var connections int64

err = conn_rows.Scan(&user, &connections)
err = connRows.Scan(&user, &connections)
if err != nil {
return err
}

@@ -990,141 +989,141 @@ func getColSlice(l int) ([]interface{}, error) {
// list of all possible column names
var (
user string
total_connections int64
concurrent_connections int64
connected_time int64
busy_time int64
cpu_time int64
bytes_received int64
bytes_sent int64
binlog_bytes_written int64
rows_read int64
rows_sent int64
rows_deleted int64
rows_inserted int64
rows_updated int64
select_commands int64
update_commands int64
other_commands int64
commit_transactions int64
rollback_transactions int64
denied_connections int64
lost_connections int64
access_denied int64
empty_queries int64
total_ssl_connections int64
max_statement_time_exceeded int64
totalConnections int64
concurrentConnections int64
connectedTime int64
busyTime int64
cpuTime int64
bytesReceived int64
bytesSent int64
binlogBytesWritten int64
rowsRead int64
rowsSent int64
rowsDeleted int64
rowsInserted int64
rowsUpdated int64
selectCommands int64
updateCommands int64
otherCommands int64
commitTransactions int64
rollbackTransactions int64
deniedConnections int64
lostConnections int64
accessDenied int64
emptyQueries int64
totalSslConnections int64
maxStatementTimeExceeded int64
// maria specific
fbusy_time float64
fcpu_time float64
fbusyTime float64
fcpuTime float64
// percona specific
rows_fetched int64
table_rows_read int64
rowsFetched int64
tableRowsRead int64
)

switch l {
case 23: // maria5
return []interface{}{
&user,
&total_connections,
&concurrent_connections,
&connected_time,
&fbusy_time,
&fcpu_time,
&bytes_received,
&bytes_sent,
&binlog_bytes_written,
&rows_read,
&rows_sent,
&rows_deleted,
&rows_inserted,
&rows_updated,
&select_commands,
&update_commands,
&other_commands,
&commit_transactions,
&rollback_transactions,
&denied_connections,
&lost_connections,
&access_denied,
&empty_queries,
&totalConnections,
&concurrentConnections,
&connectedTime,
&fbusyTime,
&fcpuTime,
&bytesReceived,
&bytesSent,
&binlogBytesWritten,
&rowsRead,
&rowsSent,
&rowsDeleted,
&rowsInserted,
&rowsUpdated,
&selectCommands,
&updateCommands,
&otherCommands,
&commitTransactions,
&rollbackTransactions,
&deniedConnections,
&lostConnections,
&accessDenied,
&emptyQueries,
}, nil
case 25: // maria10
return []interface{}{
&user,
&total_connections,
&concurrent_connections,
&connected_time,
&fbusy_time,
&fcpu_time,
&bytes_received,
&bytes_sent,
&binlog_bytes_written,
&rows_read,
&rows_sent,
&rows_deleted,
&rows_inserted,
&rows_updated,
&select_commands,
&update_commands,
&other_commands,
&commit_transactions,
&rollback_transactions,
&denied_connections,
&lost_connections,
&access_denied,
&empty_queries,
&total_ssl_connections,
&max_statement_time_exceeded,
&totalConnections,
&concurrentConnections,
&connectedTime,
&fbusyTime,
&fcpuTime,
&bytesReceived,
&bytesSent,
&binlogBytesWritten,
&rowsRead,
&rowsSent,
&rowsDeleted,
&rowsInserted,
&rowsUpdated,
&selectCommands,
&updateCommands,
&otherCommands,
&commitTransactions,
&rollbackTransactions,
&deniedConnections,
&lostConnections,
&accessDenied,
&emptyQueries,
&totalSslConnections,
&maxStatementTimeExceeded,
}, nil
case 21: // mysql 5.5
return []interface{}{
&user,
&total_connections,
&concurrent_connections,
&connected_time,
&busy_time,
&cpu_time,
&bytes_received,
&bytes_sent,
&binlog_bytes_written,
&rows_fetched,
&rows_updated,
&table_rows_read,
&select_commands,
&update_commands,
&other_commands,
&commit_transactions,
&rollback_transactions,
&denied_connections,
&lost_connections,
&access_denied,
&empty_queries,
&totalConnections,
&concurrentConnections,
&connectedTime,
&busyTime,
&cpuTime,
&bytesReceived,
&bytesSent,
&binlogBytesWritten,
&rowsFetched,
&rowsUpdated,
&tableRowsRead,
&selectCommands,
&updateCommands,
&otherCommands,
&commitTransactions,
&rollbackTransactions,
&deniedConnections,
&lostConnections,
&accessDenied,
&emptyQueries,
}, nil
case 22: // percona
return []interface{}{
&user,
&total_connections,
&concurrent_connections,
&connected_time,
&busy_time,
&cpu_time,
&bytes_received,
&bytes_sent,
&binlog_bytes_written,
&rows_fetched,
&rows_updated,
&table_rows_read,
&select_commands,
&update_commands,
&other_commands,
&commit_transactions,
&rollback_transactions,
&denied_connections,
&lost_connections,
&access_denied,
&empty_queries,
&total_ssl_connections,
&totalConnections,
&concurrentConnections,
&connectedTime,
&busyTime,
&cpuTime,
&bytesReceived,
&bytesSent,
&binlogBytesWritten,
&rowsFetched,
&rowsUpdated,
&tableRowsRead,
&selectCommands,
&updateCommands,
&otherCommands,
&commitTransactions,
&rollbackTransactions,
&deniedConnections,
&lostConnections,
&accessDenied,
&emptyQueries,
&totalSslConnections,
}, nil
}

@@ -1685,7 +1684,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, serv string, acc telegraf
defer rows.Close()

var (
schemaName, digest, digest_text string
schemaName, digest, digestText string
count, queryTime, errors, warnings float64
rowsAffected, rowsSent, rowsExamined float64
tmpTables, tmpDiskTables float64

@@ -1700,7 +1699,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, serv string, acc telegraf
for rows.Next() {
err = rows.Scan(
&schemaName, &digest, &digest_text,
&schemaName, &digest, &digestText,
&count, &queryTime, &errors, &warnings,
&rowsAffected, &rowsSent, &rowsExamined,
&tmpTables, &tmpDiskTables,

@@ -1713,7 +1712,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, serv string, acc telegraf
}
tags["schema"] = schemaName
tags["digest"] = digest
tags["digest_text"] = digest_text
tags["digest_text"] = digestText

fields := map[string]interface{}{
"events_statements_total": count,
@@ -20,7 +20,7 @@ type NetIOStats struct {
Interfaces []string
}

func (_ *NetIOStats) Description() string {
func (n *NetIOStats) Description() string {
return "Read metrics about network interface usage"
}

@@ -38,18 +38,18 @@ var netSampleConfig = `
##
`

func (_ *NetIOStats) SampleConfig() string {
func (n *NetIOStats) SampleConfig() string {
return netSampleConfig
}

func (s *NetIOStats) Gather(acc telegraf.Accumulator) error {
netio, err := s.ps.NetIO()
func (n *NetIOStats) Gather(acc telegraf.Accumulator) error {
netio, err := n.ps.NetIO()
if err != nil {
return fmt.Errorf("error getting net io info: %s", err)
}

if s.filter == nil {
if s.filter, err = filter.Compile(s.Interfaces); err != nil {
if n.filter == nil {
if n.filter, err = filter.Compile(n.Interfaces); err != nil {
return fmt.Errorf("error compiling filter: %s", err)
}
}

@@ -64,17 +64,17 @@ func (s *NetIOStats) Gather(acc telegraf.Accumulator) error {
}

for _, io := range netio {
if len(s.Interfaces) != 0 {
if len(n.Interfaces) != 0 {
var found bool

if s.filter.Match(io.Name) {
if n.filter.Match(io.Name) {
found = true
}

if !found {
continue
}
} else if !s.skipChecks {
} else if !n.skipChecks {
iface, ok := interfacesByName[io.Name]
if !ok {
continue

@@ -108,8 +108,8 @@ func (s *NetIOStats) Gather(acc telegraf.Accumulator) error {
// Get system wide stats for different network protocols
// (ignore these stats if the call fails)
if !s.IgnoreProtocolStats {
netprotos, _ := s.ps.NetProto()
if !n.IgnoreProtocolStats {
netprotos, _ := n.ps.NetProto()
fields := make(map[string]interface{})
for _, proto := range netprotos {
for stat, value := range proto.Stats {
@@ -13,18 +13,18 @@ type NetStats struct {
ps system.PS
}

func (_ *NetStats) Description() string {
func (ns *NetStats) Description() string {
return "Read TCP metrics such as established, time wait and sockets counts."
}

var tcpstatSampleConfig = ""

func (_ *NetStats) SampleConfig() string {
func (ns *NetStats) SampleConfig() string {
return tcpstatSampleConfig
}

func (s *NetStats) Gather(acc telegraf.Accumulator) error {
netconns, err := s.ps.NetConnections()
func (ns *NetStats) Gather(acc telegraf.Accumulator) error {
netconns, err := ns.ps.NetConnections()
if err != nil {
return fmt.Errorf("error getting net connections info: %s", err)
}

@@ -35,7 +35,7 @@ func (s *NetStats) Gather(acc telegraf.Accumulator) error {
tags := map[string]string{}
for _, netcon := range netconns {
if netcon.Type == syscall.SOCK_DGRAM {
counts["UDP"] += 1
counts["UDP"]++
continue // UDP has no status
}
c, ok := counts[netcon.Status]
@@ -14,7 +14,7 @@ import (
)

// Mapping of ntpq header names to tag keys
var tagHeaders map[string]string = map[string]string{
var tagHeaders = map[string]string{
"remote": "remote",
"refid": "refid",
"st": "stratum",

@@ -128,7 +128,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
case strings.HasSuffix(when, "h"):
m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "h"))
if err != nil {
acc.AddError(fmt.Errorf("E! Error ntpq: parsing int: %s", fields[index]))
acc.AddError(fmt.Errorf("error ntpq: parsing int: %s", fields[index]))
continue
}
// seconds in an hour

@@ -137,7 +137,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
case strings.HasSuffix(when, "d"):
m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "d"))
if err != nil {
acc.AddError(fmt.Errorf("E! Error ntpq: parsing int: %s", fields[index]))
acc.AddError(fmt.Errorf("error ntpq: parsing int: %s", fields[index]))
continue
}
// seconds in a day

@@ -146,7 +146,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
case strings.HasSuffix(when, "m"):
m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "m"))
if err != nil {
acc.AddError(fmt.Errorf("E! Error ntpq: parsing int: %s", fields[index]))
acc.AddError(fmt.Errorf("error ntpq: parsing int: %s", fields[index]))
continue
}
// seconds in a day

@@ -157,7 +157,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
m, err := strconv.Atoi(fields[index])
if err != nil {
acc.AddError(fmt.Errorf("E! Error ntpq: parsing int: %s", fields[index]))
acc.AddError(fmt.Errorf("error ntpq: parsing int: %s", fields[index]))
continue
}
mFields[key] = int64(m)

@@ -174,7 +174,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
m, err := strconv.ParseFloat(fields[index], 64)
if err != nil {
acc.AddError(fmt.Errorf("E! Error ntpq: parsing float: %s", fields[index]))
acc.AddError(fmt.Errorf("error ntpq: parsing float: %s", fields[index]))
continue
}
mFields[key] = m
@@ -32,26 +32,26 @@ func (p *passenger) parseCommand() (string, []string) {
}

type info struct {
Passenger_version string `xml:"passenger_version"`
Process_count int `xml:"process_count"`
Capacity_used int `xml:"capacity_used"`
Get_wait_list_size int `xml:"get_wait_list_size"`
PassengerVersion string `xml:"passenger_version"`
ProcessCount int `xml:"process_count"`
CapacityUsed int `xml:"capacity_used"`
GetWaitListSize int `xml:"get_wait_list_size"`
Max int `xml:"max"`
Supergroups struct {
Supergroup []struct {
Name string `xml:"name"`
Get_wait_list_size int `xml:"get_wait_list_size"`
Capacity_used int `xml:"capacity_used"`
GetWaitListSize int `xml:"get_wait_list_size"`
CapacityUsed int `xml:"capacity_used"`
Group []struct {
Name string `xml:"name"`
AppRoot string `xml:"app_root"`
AppType string `xml:"app_type"`
Enabled_process_count int `xml:"enabled_process_count"`
Disabling_process_count int `xml:"disabling_process_count"`
Disabled_process_count int `xml:"disabled_process_count"`
Capacity_used int `xml:"capacity_used"`
Get_wait_list_size int `xml:"get_wait_list_size"`
Processes_being_spawned int `xml:"processes_being_spawned"`
EnabledProcessCount int `xml:"enabled_process_count"`
DisablingProcessCount int `xml:"disabling_process_count"`
DisabledProcessCount int `xml:"disabled_process_count"`
CapacityUsed int `xml:"capacity_used"`
GetWaitListSize int `xml:"get_wait_list_size"`
ProcessesBeingSpawned int `xml:"processes_being_spawned"`
Processes struct {
Process []*process `xml:"process"`
} `xml:"processes"`

@@ -66,23 +66,23 @@ type process struct {
Sessions int `xml:"sessions"`
Busyness int `xml:"busyness"`
Processed int `xml:"processed"`
Spawner_creation_time int64 `xml:"spawner_creation_time"`
Spawn_start_time int64 `xml:"spawn_start_time"`
Spawn_end_time int64 `xml:"spawn_end_time"`
Last_used int64 `xml:"last_used"`
SpawnerCreationTime int64 `xml:"spawner_creation_time"`
SpawnStartTime int64 `xml:"spawn_start_time"`
SpawnEndTime int64 `xml:"spawn_end_time"`
LastUsed int64 `xml:"last_used"`
Uptime string `xml:"uptime"`
Code_revision string `xml:"code_revision"`
Life_status string `xml:"life_status"`
CodeRevision string `xml:"code_revision"`
LifeStatus string `xml:"life_status"`
Enabled string `xml:"enabled"`
Has_metrics bool `xml:"has_metrics"`
HasMetrics bool `xml:"has_metrics"`
Cpu int64 `xml:"cpu"`
Rss int64 `xml:"rss"`
Pss int64 `xml:"pss"`
Private_dirty int64 `xml:"private_dirty"`
PrivateDirty int64 `xml:"private_dirty"`
Swap int64 `xml:"swap"`
Real_memory int64 `xml:"real_memory"`
RealMemory int64 `xml:"real_memory"`
Vmsize int64 `xml:"vmsize"`
Process_group_id string `xml:"process_group_id"`
ProcessGroupId string `xml:"process_group_id"`
}

func (p *process) getUptime() int64 {

@@ -137,31 +137,27 @@ var sampleConfig = `
command = "passenger-status -v --show=xml"
`

func (r *passenger) SampleConfig() string {
func (p *passenger) SampleConfig() string {
return sampleConfig
}

func (r *passenger) Description() string {
func (p *passenger) Description() string {
return "Read metrics of passenger using passenger-status"
}

func (g *passenger) Gather(acc telegraf.Accumulator) error {
if g.Command == "" {
g.Command = "passenger-status -v --show=xml"
func (p *passenger) Gather(acc telegraf.Accumulator) error {
if p.Command == "" {
p.Command = "passenger-status -v --show=xml"
}

cmd, args := g.parseCommand()
cmd, args := p.parseCommand()
out, err := exec.Command(cmd, args...).Output()

if err != nil {
return err
}

if err = importMetric(out, acc); err != nil {
return err
}

return nil
return importMetric(out, acc)
}

func importMetric(stat []byte, acc telegraf.Accumulator) error {

@@ -174,13 +170,13 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error {
}

tags := map[string]string{
"passenger_version": p.Passenger_version,
"passenger_version": p.PassengerVersion,
}
fields := map[string]interface{}{
"process_count": p.Process_count,
"process_count": p.ProcessCount,
"max": p.Max,
"capacity_used": p.Capacity_used,
"get_wait_list_size": p.Get_wait_list_size,
"capacity_used": p.CapacityUsed,
"get_wait_list_size": p.GetWaitListSize,
}
acc.AddFields("passenger", fields, tags)

@@ -189,8 +185,8 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error {
"name": sg.Name,
}
fields := map[string]interface{}{
"get_wait_list_size": sg.Get_wait_list_size,
"capacity_used": sg.Capacity_used,
"get_wait_list_size": sg.GetWaitListSize,
"capacity_used": sg.CapacityUsed,
}
acc.AddFields("passenger_supergroup", fields, tags)

@@ -201,9 +197,9 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error {
"app_type": group.AppType,
}
fields := map[string]interface{}{
"get_wait_list_size": group.Get_wait_list_size,
"capacity_used": group.Capacity_used,
"processes_being_spawned": group.Processes_being_spawned,
"get_wait_list_size": group.GetWaitListSize,
"capacity_used": group.CapacityUsed,
"processes_being_spawned": group.ProcessesBeingSpawned,
}
acc.AddFields("passenger_group", fields, tags)

@@ -213,26 +209,26 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error {
"app_root": group.AppRoot,
"supergroup_name": sg.Name,
"pid": fmt.Sprintf("%d", process.Pid),
"code_revision": process.Code_revision,
"life_status": process.Life_status,
"process_group_id": process.Process_group_id,
"code_revision": process.CodeRevision,
"life_status": process.LifeStatus,
"process_group_id": process.ProcessGroupId,
}
fields := map[string]interface{}{
"concurrency": process.Concurrency,
"sessions": process.Sessions,
"busyness": process.Busyness,
"processed": process.Processed,
"spawner_creation_time": process.Spawner_creation_time,
"spawn_start_time": process.Spawn_start_time,
"spawn_end_time": process.Spawn_end_time,
"last_used": process.Last_used,
"spawner_creation_time": process.SpawnerCreationTime,
"spawn_start_time": process.SpawnStartTime,
"spawn_end_time": process.SpawnEndTime,
"last_used": process.LastUsed,
"uptime": process.getUptime(),
"cpu": process.Cpu,
"rss": process.Rss,
"pss": process.Pss,
"private_dirty": process.Private_dirty,
"private_dirty": process.PrivateDirty,
"swap": process.Swap,
"real_memory": process.Real_memory,
"real_memory": process.RealMemory,
"vmsize": process.Vmsize,
}
acc.AddFields("passenger_process", fields, tags)
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package fcgi implements the FastCGI protocol.
// Package phpfpm implements the FastCGI protocol.
// Currently only the responder role is supported.
// The protocol is defined at http://www.fastcgi.com/drupal/node/6?q=node/22
package phpfpm

@@ -135,8 +135,8 @@ func (rec *record) read(r io.Reader) (err error) {
return nil
}

func (r *record) content() []byte {
return r.buf[:r.h.ContentLength]
func (rec *record) content() []byte {
return rec.buf[:rec.h.ContentLength]
}

// writeRecord writes and sends a single record.
@@ -33,25 +33,25 @@ func newFcgiClient(h string, args ...interface{}) (*conn, error) {
return fcgi, err
}

func (client *conn) Request(
func (c *conn) Request(
env map[string]string,
requestData string,
) (retout []byte, reterr []byte, err error) {
defer client.rwc.Close()
defer c.rwc.Close()
var reqId uint16 = 1

err = client.writeBeginRequest(reqId, uint16(roleResponder), 0)
err = c.writeBeginRequest(reqId, uint16(roleResponder), 0)
if err != nil {
return
}

err = client.writePairs(typeParams, reqId, env)
err = c.writePairs(typeParams, reqId, env)
if err != nil {
return
}

if len(requestData) > 0 {
if err = client.writeRecord(typeStdin, reqId, []byte(requestData)); err != nil {
if err = c.writeRecord(typeStdin, reqId, []byte(requestData)); err != nil {
return
}
}

@@ -62,7 +62,7 @@ func (client *conn) Request(
// receive until EOF or FCGI_END_REQUEST
READ_LOOP:
for {
err1 = rec.read(client.rwc)
err1 = rec.read(c.rwc)
if err1 != nil && strings.Contains(err1.Error(), "use of closed network connection") {
if err1 != io.EOF {
err = err1
@@ -92,11 +92,11 @@ var sampleConfig = `
# pid_finder = "pgrep"
`

func (_ *Procstat) SampleConfig() string {
func (p *Procstat) SampleConfig() string {
return sampleConfig
}

func (_ *Procstat) Description() string {
func (p *Procstat) Description() string {
return "Monitor process cpu and memory usage"
}

@@ -117,7 +117,7 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error {
p.createProcess = defaultProcess
}

pids, tags, err := p.findPids(acc)
pids, tags, err := p.findPids()
now := time.Now()

if err != nil {

@@ -136,7 +136,7 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error {
procs, err := p.updateProcesses(pids, tags, p.procs)
if err != nil {
acc.AddError(fmt.Errorf("E! Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s",
acc.AddError(fmt.Errorf("procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s",
p.Exe, p.PidFile, p.Pattern, p.User, err.Error()))
}
p.procs = procs

@@ -234,26 +234,26 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator, t time.Time
fields[prefix+"created_at"] = createdAt * 1000000 //Convert ms to ns
}

cpu_time, err := proc.Times()
cpuTime, err := proc.Times()
if err == nil {
fields[prefix+"cpu_time_user"] = cpu_time.User
fields[prefix+"cpu_time_system"] = cpu_time.System
fields[prefix+"cpu_time_idle"] = cpu_time.Idle
fields[prefix+"cpu_time_nice"] = cpu_time.Nice
fields[prefix+"cpu_time_iowait"] = cpu_time.Iowait
fields[prefix+"cpu_time_irq"] = cpu_time.Irq
fields[prefix+"cpu_time_soft_irq"] = cpu_time.Softirq
fields[prefix+"cpu_time_steal"] = cpu_time.Steal
fields[prefix+"cpu_time_guest"] = cpu_time.Guest
fields[prefix+"cpu_time_guest_nice"] = cpu_time.GuestNice
fields[prefix+"cpu_time_user"] = cpuTime.User
fields[prefix+"cpu_time_system"] = cpuTime.System
fields[prefix+"cpu_time_idle"] = cpuTime.Idle
fields[prefix+"cpu_time_nice"] = cpuTime.Nice
fields[prefix+"cpu_time_iowait"] = cpuTime.Iowait
fields[prefix+"cpu_time_irq"] = cpuTime.Irq
fields[prefix+"cpu_time_soft_irq"] = cpuTime.Softirq
fields[prefix+"cpu_time_steal"] = cpuTime.Steal
fields[prefix+"cpu_time_guest"] = cpuTime.Guest
fields[prefix+"cpu_time_guest_nice"] = cpuTime.GuestNice
}

cpu_perc, err := proc.Percent(time.Duration(0))
cpuPerc, err := proc.Percent(time.Duration(0))
if err == nil {
if p.solarisMode {
fields[prefix+"cpu_usage"] = cpu_perc / float64(runtime.NumCPU())
fields[prefix+"cpu_usage"] = cpuPerc / float64(runtime.NumCPU())
} else {
fields[prefix+"cpu_usage"] = cpu_perc
fields[prefix+"cpu_usage"] = cpuPerc
}
}

@@ -267,9 +267,9 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator, t time.Time
fields[prefix+"memory_locked"] = mem.Locked
}

mem_perc, err := proc.MemoryPercent()
memPerc, err := proc.MemoryPercent()
if err == nil {
fields[prefix+"memory_usage"] = mem_perc
fields[prefix+"memory_usage"] = memPerc
}

rlims, err := proc.RlimitUsage(true)

@@ -368,7 +368,7 @@ func (p *Procstat) getPIDFinder() (PIDFinder, error) {
}

// Get matching PIDs and their initial tags
func (p *Procstat) findPids(acc telegraf.Accumulator) ([]PID, map[string]string, error) {
func (p *Procstat) findPids() ([]PID, map[string]string, error) {
var pids []PID
tags := make(map[string]string)
var err error

@@ -400,7 +400,7 @@ func (p *Procstat) findPids(acc telegraf.Accumulator) ([]PID, map[string]string,
pids, err = p.winServicePIDs()
tags = map[string]string{"win_service": p.WinService}
} else {
err = fmt.Errorf("Either exe, pid_file, user, pattern, systemd_unit, cgroup, or win_service must be specified")
err = fmt.Errorf("either exe, pid_file, user, pattern, systemd_unit, cgroup, or win_service must be specified")
}

return pids, tags, err
@@ -30,14 +30,14 @@ func mockExecCommand(arg0 string, args ...string) *exec.Cmd {
func TestMockExecCommand(t *testing.T) {
var cmd []string
for _, arg := range os.Args {
if string(arg) == "--" {
if arg == "--" {
cmd = []string{}
continue
}
if cmd == nil {
continue
}
cmd = append(cmd, string(arg))
cmd = append(cmd, arg)
}
if cmd == nil {
return

@@ -72,7 +72,7 @@ func pidFinder(pids []PID, err error) func() (PIDFinder, error) {
}
}

func (pg *testPgrep) PidFile(path string) ([]PID, error) {
func (pg *testPgrep) PidFile(_ string) ([]PID, error) {
return pg.pids, pg.err
}

@@ -80,15 +80,15 @@ func (p *testProc) Cmdline() (string, error) {
return "test_proc", nil
}

func (pg *testPgrep) Pattern(pattern string) ([]PID, error) {
func (pg *testPgrep) Pattern(_ string) ([]PID, error) {
return pg.pids, pg.err
}

func (pg *testPgrep) Uid(user string) ([]PID, error) {
func (pg *testPgrep) Uid(_ string) ([]PID, error) {
return pg.pids, pg.err
}

func (pg *testPgrep) FullPattern(pattern string) ([]PID, error) {
func (pg *testPgrep) FullPattern(_ string) ([]PID, error) {
return pg.pids, pg.err
}

@@ -97,7 +97,7 @@ type testProc struct {
tags map[string]string
}

func newTestProc(pid PID) (Process, error) {
func newTestProc(_ PID) (Process, error) {
proc := &testProc{
tags: make(map[string]string),
}

@@ -144,7 +144,7 @@ func (p *testProc) NumThreads() (int32, error) {
return 0, nil
}

func (p *testProc) Percent(interval time.Duration) (float64, error) {
func (p *testProc) Percent(_ time.Duration) (float64, error) {
return 0, nil
}

@@ -160,12 +160,12 @@ func (p *testProc) Times() (*cpu.TimesStat, error) {
return &cpu.TimesStat{}, nil
}

func (p *testProc) RlimitUsage(gatherUsage bool) ([]process.RlimitStat, error) {
func (p *testProc) RlimitUsage(_ bool) ([]process.RlimitStat, error) {
return []process.RlimitStat{}, nil
}

var pid PID = PID(42)
var exe string = "foo"
var pid = PID(42)
var exe = "foo"

func TestGather_CreateProcessErrorOk(t *testing.T) {
var acc testutil.Accumulator

@@ -363,8 +363,7 @@ func TestGather_systemdUnitPIDs(t *testing.T) {
createPIDFinder: pidFinder([]PID{}, nil),
SystemdUnit: "TestGather_systemdUnitPIDs",
}
var acc testutil.Accumulator
pids, tags, err := p.findPids(&acc)
pids, tags, err := p.findPids()
require.NoError(t, err)
assert.Equal(t, []PID{11408}, pids)
assert.Equal(t, "TestGather_systemdUnitPIDs", tags["systemd_unit"])

@@ -385,8 +384,7 @@ func TestGather_cgroupPIDs(t *testing.T) {
createPIDFinder: pidFinder([]PID{}, nil),
CGroup: td,
}
var acc testutil.Accumulator
pids, tags, err := p.findPids(&acc)
pids, tags, err := p.findPids()
require.NoError(t, err)
assert.Equal(t, []PID{1234, 5678}, pids)
assert.Equal(t, td, tags["cgroup"])

@@ -415,7 +413,7 @@ func TestGather_SameTimestamps(t *testing.T) {
require.NoError(t, acc.GatherError(p.Gather))

procstat, _ := acc.Get("procstat")
procstat_lookup, _ := acc.Get("procstat_lookup")
procstatLookup, _ := acc.Get("procstat_lookup")

require.Equal(t, procstat.Time, procstat_lookup.Time)
require.Equal(t, procstat.Time, procstatLookup.Time)
}
@@ -230,7 +230,7 @@ var Tracking = map[string]string{
"role": "replication_role",
}

func (r *Redis) init(acc telegraf.Accumulator) error {
func (r *Redis) init() error {
if r.initialized {
return nil
}

@@ -307,7 +307,7 @@ func (r *Redis) init(acc telegraf.Accumulator) error {
// Returns one of the errors encountered while gather stats (if any).
func (r *Redis) Gather(acc telegraf.Accumulator) error {
if !r.initialized {
err := r.init(acc)
err := r.init()
if err != nil {
return err
}

@@ -361,7 +361,7 @@ func gatherInfoOutput(
tags map[string]string,
) error {
var section string
var keyspace_hits, keyspace_misses int64
var keyspaceHits, keyspaceMisses int64

scanner := bufio.NewScanner(rdr)
fields := make(map[string]interface{})

@@ -383,7 +383,7 @@ func gatherInfoOutput(
if len(parts) < 2 {
continue
}
name := string(parts[0])
name := parts[0]

if section == "Server" {
if name != "lru_clock" && name != "uptime_in_seconds" && name != "redis_version" {

@@ -406,7 +406,7 @@ func gatherInfoOutput(
metric, ok := Tracking[name]
if !ok {
if section == "Keyspace" {
kline := strings.TrimSpace(string(parts[1]))
kline := strings.TrimSpace(parts[1])
gatherKeyspaceLine(name, kline, acc, tags)
continue
}

@@ -433,9 +433,9 @@ func gatherInfoOutput(
if ival, err := strconv.ParseInt(val, 10, 64); err == nil {
switch name {
case "keyspace_hits":
keyspace_hits = ival
keyspaceHits = ival
case "keyspace_misses":
keyspace_misses = ival
keyspaceMisses = ival
case "rdb_last_save_time":
// influxdb can't calculate this, so we have to do it
fields["rdb_last_save_time_elapsed"] = time.Now().Unix() - ival

@@ -459,11 +459,11 @@ func gatherInfoOutput(
fields[metric] = val
}
var keyspace_hitrate float64 = 0.0
if keyspace_hits != 0 || keyspace_misses != 0 {
keyspace_hitrate = float64(keyspace_hits) / float64(keyspace_hits+keyspace_misses)
var keyspaceHitrate float64
if keyspaceHits != 0 || keyspaceMisses != 0 {
keyspaceHitrate = float64(keyspaceHits) / float64(keyspaceHits+keyspaceMisses)
}
fields["keyspace_hitrate"] = keyspace_hitrate
fields["keyspace_hitrate"] = keyspaceHitrate

o := RedisFieldTypes{}

@@ -482,12 +482,12 @@ func gatherKeyspaceLine(
name string,
line string,
acc telegraf.Accumulator,
global_tags map[string]string,
globalTags map[string]string,
) {
if strings.Contains(line, "keys=") {
fields := make(map[string]interface{})
tags := make(map[string]string)
for k, v := range global_tags {
for k, v := range globalTags {
tags[k] = v
}
tags["database"] = name

@@ -511,7 +511,7 @@ func gatherCommandstateLine(
name string,
line string,
acc telegraf.Accumulator,
global_tags map[string]string,
globalTags map[string]string,
) {
if !strings.HasPrefix(name, "cmdstat") {
return

@@ -519,7 +519,7 @@ func gatherCommandstateLine(
fields := make(map[string]interface{})
tags := make(map[string]string)
for k, v := range global_tags {
for k, v := range globalTags {
tags[k] = v
}
tags["command"] = strings.TrimPrefix(name, "cmdstat_")

@@ -556,11 +556,11 @@ func gatherReplicationLine(
name string,
line string,
acc telegraf.Accumulator,
global_tags map[string]string,
globalTags map[string]string,
) {
fields := make(map[string]interface{})
tags := make(map[string]string)
for k, v := range global_tags {
for k, v := range globalTags {
tags[k] = v
}
@@ -459,7 +459,7 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) {
i := f.OidIndexLength + 1 // leading separator
idx = strings.Map(func(r rune) rune {
if r == '.' {
i -= 1
i--
}
if i < 1 {
return -1

@@ -641,7 +641,7 @@ func fieldConvert(conv string, v interface{}) (interface{}, error) {
case int32:
v = int64(vt)
case int64:
v = int64(vt)
v = vt
case uint:
v = int64(vt)
case uint8:

@@ -864,28 +864,6 @@ func SnmpTranslate(oid string) (mibName string, oidNum string, oidText string, c
return stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err
}

func SnmpTranslateForce(oid string, mibName string, oidNum string, oidText string, conversion string) {
snmpTranslateCachesLock.Lock()
defer snmpTranslateCachesLock.Unlock()
if snmpTranslateCaches == nil {
snmpTranslateCaches = map[string]snmpTranslateCache{}
}

var stc snmpTranslateCache
stc.mibName = mibName
stc.oidNum = oidNum
stc.oidText = oidText
stc.conversion = conversion
stc.err = nil
snmpTranslateCaches[oid] = stc
}

func SnmpTranslateClear() {
snmpTranslateCachesLock.Lock()
defer snmpTranslateCachesLock.Unlock()
snmpTranslateCaches = map[string]snmpTranslateCache{}
}

func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) {
var out []byte
if strings.ContainsAny(oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") {
@@ -18,10 +18,6 @@ import (
const mbeansPath = "/admin/mbeans?stats=true&wt=json&cat=CORE&cat=QUERYHANDLER&cat=UPDATEHANDLER&cat=CACHE"
const adminCoresPath = "/solr/admin/cores?action=STATUS&wt=json"

type node struct {
Host string `json:"host"`
}

const sampleConfig = `
## specify a list of one or more Solr servers
servers = ["http://localhost:8983"]

@@ -497,10 +493,8 @@ func (s *Solr) gatherData(url string, v interface{}) error {
return fmt.Errorf("solr: API responded with status-code %d, expected %d, url %s",
r.StatusCode, http.StatusOK, url)
}
if err = json.NewDecoder(r.Body).Decode(v); err != nil {
return err
}
return nil

return json.NewDecoder(r.Body).Decode(v)
}

func init() {
@@ -201,24 +201,24 @@ func (g *lockedSeriesGrouper) Add(
}

// ListMetricDescriptors implements metricClient interface
func (c *stackdriverMetricClient) ListMetricDescriptors(
func (smc *stackdriverMetricClient) ListMetricDescriptors(
ctx context.Context,
req *monitoringpb.ListMetricDescriptorsRequest,
) (<-chan *metricpb.MetricDescriptor, error) {
mdChan := make(chan *metricpb.MetricDescriptor, 1000)

go func() {
c.log.Debugf("List metric descriptor request filter: %s", req.Filter)
smc.log.Debugf("List metric descriptor request filter: %s", req.Filter)
defer close(mdChan)

// Iterate over metric descriptors and send them to buffered channel
mdResp := c.conn.ListMetricDescriptors(ctx, req)
c.listMetricDescriptorsCalls.Incr(1)
mdResp := smc.conn.ListMetricDescriptors(ctx, req)
smc.listMetricDescriptorsCalls.Incr(1)
for {
mdDesc, mdErr := mdResp.Next()
if mdErr != nil {
if mdErr != iterator.Done {
c.log.Errorf("Failed iterating metric descriptor responses: %q: %v", req.String(), mdErr)
smc.log.Errorf("Failed iterating metric descriptor responses: %q: %v", req.String(), mdErr)
}
break
}

@@ -230,24 +230,24 @@ func (c *stackdriverMetricClient) ListMetricDescriptors(
}

// ListTimeSeries implements metricClient interface
func (c *stackdriverMetricClient) ListTimeSeries(
func (smc *stackdriverMetricClient) ListTimeSeries(
ctx context.Context,
req *monitoringpb.ListTimeSeriesRequest,
) (<-chan *monitoringpb.TimeSeries, error) {
tsChan := make(chan *monitoringpb.TimeSeries, 1000)

go func() {
c.log.Debugf("List time series request filter: %s", req.Filter)
smc.log.Debugf("List time series request filter: %s", req.Filter)
defer close(tsChan)

// Iterate over timeseries and send them to buffered channel
tsResp := c.conn.ListTimeSeries(ctx, req)
c.listTimeSeriesCalls.Incr(1)
tsResp := smc.conn.ListTimeSeries(ctx, req)
smc.listTimeSeriesCalls.Incr(1)
for {
tsDesc, tsErr := tsResp.Next()
if tsErr != nil {
if tsErr != iterator.Done {
c.log.Errorf("Failed iterating time series responses: %q: %v", req.String(), tsErr)
smc.log.Errorf("Failed iterating time series responses: %q: %v", req.String(), tsErr)
}
break
}

@@ -259,8 +259,8 @@ func (c *stackdriverMetricClient) ListTimeSeries(
}

// Close implements metricClient interface
func (s *stackdriverMetricClient) Close() error {
return s.conn.Close()
func (smc *stackdriverMetricClient) Close() error {
return smc.conn.Close()
}

// Description implements telegraf.Input interface
@@ -21,9 +21,9 @@ import (
)

const (
// UDP_MAX_PACKET_SIZE is the UDP packet limit, see
// UdpMaxPacketSize is the UDP packet limit, see
// https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure
UDP_MAX_PACKET_SIZE int = 64 * 1024
UdpMaxPacketSize int = 64 * 1024

defaultFieldName = "value"

@@ -31,7 +31,6 @@ const (
defaultSeparator = "_"
defaultAllowPendingMessage = 10000
MaxTCPConnections = 250

parserGoRoutines = 5
)

@@ -203,7 +202,7 @@ type cacheddistributions struct {
tags map[string]string
}

func (_ *Statsd) Description() string {
func (s *Statsd) Description() string {
return "Statsd UDP/TCP Server"
}

@@ -273,7 +272,7 @@ const sampleConfig = `
#max_ttl = "1000h"
`

func (_ *Statsd) SampleConfig() string {
func (s *Statsd) SampleConfig() string {
return sampleConfig
}

@@ -499,7 +498,7 @@ func (s *Statsd) udpListen(conn *net.UDPConn) error {
s.UDPlistener.SetReadBuffer(s.ReadBufferSize)
}

buf := make([]byte, UDP_MAX_PACKET_SIZE)
buf := make([]byte, UdpMaxPacketSize)
for {
select {
case <-s.done:
@@ -12,14 +12,14 @@ type SwapStats struct {
ps system.PS
}

func (_ *SwapStats) Description() string {
func (ss *SwapStats) Description() string {
return "Read metrics about swap memory usage"
}

func (_ *SwapStats) SampleConfig() string { return "" }
func (ss *SwapStats) SampleConfig() string { return "" }

func (s *SwapStats) Gather(acc telegraf.Accumulator) error {
swap, err := s.ps.SwapStat()
func (ss *SwapStats) Gather(acc telegraf.Accumulator) error {
swap, err := ss.ps.SwapStat()
if err != nil {
return fmt.Errorf("error getting swap memory info: %s", err)
}
@@ -2,7 +2,6 @@ package webhooks

import (
"fmt"
"log"
"net"
"net/http"
"reflect"

@@ -28,14 +27,16 @@ func init() {
}

type Webhooks struct {
ServiceAddress string
ServiceAddress string `toml:"service_address"`

Github *github.GithubWebhook
Filestack *filestack.FilestackWebhook
Mandrill *mandrill.MandrillWebhook
Rollbar *rollbar.RollbarWebhook
Papertrail *papertrail.PapertrailWebhook
Particle *particle.ParticleWebhook
Github *github.GithubWebhook `toml:"github"`
Filestack *filestack.FilestackWebhook `toml:"filestack"`
Mandrill *mandrill.MandrillWebhook `toml:"mandrill"`
Rollbar *rollbar.RollbarWebhook `toml:"rollbar"`
Papertrail *papertrail.PapertrailWebhook `toml:"papertrail"`
Particle *particle.ParticleWebhook `toml:"particle"`

Log telegraf.Logger `toml:"-"`

srv *http.Server
}

@@ -110,25 +111,24 @@ func (wb *Webhooks) Start(acc telegraf.Accumulator) error {
ln, err := net.Listen("tcp", fmt.Sprintf("%s", wb.ServiceAddress))
if err != nil {
log.Fatalf("E! Error starting server: %v", err)
return err
return fmt.Errorf("error starting server: %v", err)
}

go func() {
if err := wb.srv.Serve(ln); err != nil {
if err != http.ErrServerClosed {
acc.AddError(fmt.Errorf("E! Error listening: %v", err))
acc.AddError(fmt.Errorf("error listening: %v", err))
}
}
}()

log.Printf("I! Started the webhooks service on %s\n", wb.ServiceAddress)
wb.Log.Infof("Started the webhooks service on %s", wb.ServiceAddress)

return nil
}

func (rb *Webhooks) Stop() {
rb.srv.Close()
log.Println("I! Stopping the Webhooks service")
func (wb *Webhooks) Stop() {
wb.srv.Close()
wb.Log.Infof("Stopping the Webhooks service")
}
@@ -1,4 +1,4 @@
// Code generated by mockery v1.0.0
// Code generated by mockery v1.0.0. DO NOT EDIT.
package mocks

import appinsights "github.com/Microsoft/ApplicationInsights-Go/appinsights"
@@ -1,4 +1,4 @@
// Code generated by mockery v1.0.0
// Code generated by mockery v1.0.0. DO NOT EDIT.
package mocks

import appinsights "github.com/Microsoft/ApplicationInsights-Go/appinsights"
@@ -1,7 +1,6 @@
package cloudwatch

import (
"log"
"math"
"sort"
"strings"

@@ -30,6 +29,8 @@ type CloudWatch struct {
svc *cloudwatch.CloudWatch

WriteStatistics bool `toml:"write_statistics"`

Log telegraf.Logger `toml:"-"`
}

type statisticType int

@@ -253,7 +254,7 @@ func (c *CloudWatch) WriteToCloudWatch(datums []*cloudwatch.MetricDatum) error {
_, err := c.svc.PutMetricData(params)

if err != nil {
log.Printf("E! CloudWatch: Unable to write to CloudWatch : %+v \n", err.Error())
c.Log.Errorf("Unable to write to CloudWatch : %+v", err.Error())
}

return err

@@ -265,7 +266,7 @@ func PartitionDatums(size int, datums []*cloudwatch.MetricDatum) [][]*cloudwatch
numberOfPartitions := len(datums) / size
if len(datums)%size != 0 {
numberOfPartitions += 1
numberOfPartitions++
}

partitions := make([][]*cloudwatch.MetricDatum, numberOfPartitions)
|
|
|
|||
|
|
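Note on the `+= 1` → `++` changes here and in the hunks that follow: revive's increment-decrement rule prefers Go's dedicated statements when a value changes by exactly one. A minimal sketch (hypothetical function, not from this commit):

    // countEvens shows the idiomatic forms: ++ for a step of one,
    // += for any other step.
    func countEvens(nums []int) (evens, weighted int) {
        for _, n := range nums {
            if n%2 == 0 {
                evens++       // preferred over evens += 1
                weighted += n // fine: the step is not one
            }
        }
        return evens, weighted
    }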
@ -31,7 +31,7 @@ func TestBuildDimensions(t *testing.T) {
i := 0
for k := range testPoint.Tags() {
tagKeys[i] = k
i += 1
i++
}

sort.Strings(tagKeys)
|
|
@ -151,7 +151,6 @@ func TestBuildMetricDatums_SkipEmptyTags(t *testing.T) {
}

func TestPartitionDatums(t *testing.T) {

assert := assert.New(t)

testDatum := cloudwatch.MetricDatum{
|
|
|||
|
|
@ -47,7 +47,7 @@ type Metric struct {

type Point [2]float64

const datadog_api = "https://app.datadoghq.com/api/v1/series"
const datadogApi = "https://app.datadoghq.com/api/v1/series"

func (d *Datadog) Connect() error {
if d.Apikey == "" {
|
|
@ -166,7 +166,7 @@ func buildTags(tagList []*telegraf.Tag) []string {
index := 0
for _, tag := range tagList {
tags[index] = fmt.Sprintf("%s:%s", tag.Key, tag.Value)
index += 1
index++
}
return tags
}
|
|
@ -208,7 +208,7 @@ func (d *Datadog) Close() error {
func init() {
outputs.Add("datadog", func() telegraf.Output {
return &Datadog{
URL: datadog_api,
URL: datadogApi,
}
})
}
|
|
|||
|
|
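The `datadog_api` → `datadogApi` rename applies Go's MixedCaps convention: multiword names use camel case rather than underscores, and the case of the first letter sets visibility. A sketch with hypothetical names (assumes `import "time"`):

    // Unexported names use lowerCamelCase, exported ones UpperCamelCase;
    // snake_case identifiers are what linters such as revive flag.
    const apiBasePath = "/api/v1/series"
    const DefaultFlushInterval = 10 * time.Second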
@ -196,7 +196,7 @@ func (d *Dynatrace) Write(metrics []telegraf.Metric) error {
// write metric id,tags and value
switch metric.Type() {
case telegraf.Counter:
var delta float64 = 0
var delta float64

// Check if LastValue exists
if lastvalue, ok := d.State[metricID+tagb.String()]; ok {
|
|
@ -236,7 +236,7 @@ func (d *Dynatrace) send(msg []byte) error {
req, err := http.NewRequest("POST", d.URL, bytes.NewBuffer(msg))
if err != nil {
d.Log.Errorf("Dynatrace error: %s", err.Error())
return fmt.Errorf("Dynatrace error while creating HTTP request:, %s", err.Error())
return fmt.Errorf("error while creating HTTP request:, %s", err.Error())
}
req.Header.Add("Content-Type", "text/plain; charset=UTF-8")

|
|
@ -250,7 +250,7 @@ func (d *Dynatrace) send(msg []byte) error {
if err != nil {
d.Log.Errorf("Dynatrace error: %s", err.Error())
fmt.Println(req)
return fmt.Errorf("Dynatrace error while sending HTTP request:, %s", err.Error())
return fmt.Errorf("error while sending HTTP request:, %s", err.Error())
}
defer resp.Body.Close()

|
|
@ -263,7 +263,7 @@ func (d *Dynatrace) send(msg []byte) error {
bodyString := string(bodyBytes)
d.Log.Debugf("Dynatrace returned: %s", bodyString)
} else {
return fmt.Errorf("Dynatrace request failed with response code:, %d", resp.StatusCode)
return fmt.Errorf("request failed with response code:, %d", resp.StatusCode)
}

return nil
|
|
|||
|
|
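Two recurring fixes meet in this file: `var delta float64 = 0` becomes `var delta float64` because a declared variable already starts at its zero value (revive's var-declaration rule), and error strings drop capitalization and the product-name prefix so they compose when a caller adds its own context. A sketch around a hypothetical `post` helper (assumes `import ("errors"; "fmt")`):

    // Error strings: lowercase, unpunctuated, no redundant prefix;
    // the caller supplies context when wrapping.
    func post() error {
        var retries int // implicitly 0; "= 0" is redundant
        _ = retries
        return errors.New("connection refused")
    }

    func flushBatch() error {
        if err := post(); err != nil {
            return fmt.Errorf("posting batch: %w", err)
        }
        return nil
    }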
@ -102,7 +102,7 @@ func (f *File) Description() string {
}

func (f *File) Write(metrics []telegraf.Metric) error {
var writeErr error = nil
var writeErr error

if f.UseBatchFormat {
octets, err := f.serializer.SerializeBatch(metrics)
|
|
@ -123,7 +123,7 @@ func (f *File) Write(metrics []telegraf.Metric) error {

_, err = f.writer.Write(b)
if err != nil {
writeErr = fmt.Errorf("E! [outputs.file] failed to write message: %v", err)
writeErr = fmt.Errorf("failed to write message: %v", err)
}
}
}
|
|
|||
|
|
@ -171,11 +171,7 @@ func (h *HTTP) Write(metrics []telegraf.Metric) error {
return err
}

if err := h.write(reqBody); err != nil {
return err
}

return nil
return h.write(reqBody)
}

func (h *HTTP) write(reqBody []byte) error {
|
|
|||
|
|
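The hunk above is the classic if-return shape: when the only use of an error is to return it, and the function then returns nil, the tail collapses to returning the call directly. A sketch with a hypothetical `flush` argument:

    // Before:
    //     if err := flush(); err != nil {
    //         return err
    //     }
    //     return nil
    // After (identical behavior):
    func writeAll(flush func() error) error {
        return flush()
    }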
@ -38,7 +38,6 @@ func (e APIError) Error() string {
const (
defaultRequestTimeout = time.Second * 5
defaultMaxWait = 60 // seconds
defaultDatabase = "telegraf"
)

type HTTPConfig struct {
|
|
@ -171,7 +170,7 @@ func (g genericRespError) Error() string {

func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error {
if c.retryTime.After(time.Now()) {
return errors.New("Retry time has not elapsed")
return errors.New("retry time has not elapsed")
}

batches := make(map[string][]telegraf.Metric)
|
|
|||
|
|
@ -4,7 +4,6 @@ import (
"context"
"errors"
"fmt"
"log"
"math/rand"
"net/url"
"time"
|
|
@ -96,12 +95,12 @@ type InfluxDB struct {
UintSupport bool `toml:"influx_uint_support"`
tls.ClientConfig

Log telegraf.Logger `toml:"-"`

clients []Client
}

func (i *InfluxDB) Connect() error {
ctx := context.Background()

if len(i.URLs) == 0 {
i.URLs = append(i.URLs, defaultURL)
}
|
|
@ -122,7 +121,7 @@ func (i *InfluxDB) Connect() error {

switch parts.Scheme {
case "http", "https", "unix":
c, err := i.getHTTPClient(ctx, parts, proxy)
c, err := i.getHTTPClient(parts, proxy)
if err != nil {
return err
}
|
|
@ -165,13 +164,13 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
return nil
}

log.Printf("E! [outputs.influxdb_v2] when writing to [%s]: %v", client.URL(), err)
i.Log.Errorf("When writing to [%s]: %v", client.URL(), err)
}

return err
}

func (i *InfluxDB) getHTTPClient(ctx context.Context, url *url.URL, proxy *url.URL) (Client, error) {
func (i *InfluxDB) getHTTPClient(url *url.URL, proxy *url.URL) (Client, error) {
tlsConfig, err := i.ClientConfig.TLSConfig()
if err != nil {
return nil, err
|
|
|||
|
|
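A pattern repeated across these output plugins: package-level `log` calls give way to a plugin-scoped `telegraf.Logger`, injected into a `Log` field whose `toml:"-"` tag keeps it out of user-facing configuration. The shape, sketched for a hypothetical plugin (assumes `import "github.com/influxdata/telegraf"`):

    type ExampleOutput struct {
        URLs []string        `toml:"urls"`
        Log  telegraf.Logger `toml:"-"` // injected at startup, never set in config
    }

    func (e *ExampleOutput) Connect() error {
        e.Log.Infof("connecting to %d url(s)", len(e.URLs))
        return nil
    }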
@ -165,11 +165,7 @@ func (o *OpenTSDB) WriteHttp(metrics []telegraf.Metric, u *url.URL) error {
}
}

if err := http.flush(); err != nil {
return err
}

return nil
return http.flush()
}

func (o *OpenTSDB) WriteTelnet(metrics []telegraf.Metric, u *url.URL) error {
|
|
@ -235,9 +231,9 @@ func buildValue(v interface{}) (string, error) {
var retv string
switch p := v.(type) {
case int64:
retv = IntToString(int64(p))
retv = IntToString(p)
case uint64:
retv = UIntToString(uint64(p))
retv = UIntToString(p)
case float64:
retv = FloatToString(float64(p))
default:
|
|
@ -246,16 +242,16 @@ func buildValue(v interface{}) (string, error) {
return retv, nil
}

func IntToString(input_num int64) string {
return strconv.FormatInt(input_num, 10)
func IntToString(inputNum int64) string {
return strconv.FormatInt(inputNum, 10)
}

func UIntToString(input_num uint64) string {
return strconv.FormatUint(input_num, 10)
func UIntToString(inputNum uint64) string {
return strconv.FormatUint(inputNum, 10)
}

func FloatToString(input_num float64) string {
return strconv.FormatFloat(input_num, 'f', 6, 64)
func FloatToString(inputNum float64) string {
return strconv.FormatFloat(inputNum, 'f', 6, 64)
}

func (o *OpenTSDB) SampleConfig() string {
|
|
|||
|
|
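In a `switch p := v.(type)` block, each case rebinds `p` to that case's concrete type, so conversions such as `int64(p)` inside `case int64:` convert a value to its own type and can simply be dropped, as above. A sketch (assumes `import ("fmt"; "strconv")`):

    func format(v interface{}) string {
        switch p := v.(type) {
        case int64:
            return strconv.FormatInt(p, 10) // p is already int64 here
        case float64:
            return strconv.FormatFloat(p, 'f', 6, 64)
        default:
            return fmt.Sprint(p)
        }
    }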
@ -2,7 +2,6 @@ package riemann_legacy

import (
"fmt"
"log"
"os"
"sort"
"strings"
|
|
@ -12,12 +11,13 @@ import (
"github.com/influxdata/telegraf/plugins/outputs"
)

const deprecationMsg = "E! Error: this Riemann output plugin will be deprecated in a future release, see https://github.com/influxdata/telegraf/issues/1878 for more details & discussion."
const deprecationMsg = "Error: this Riemann output plugin will be deprecated in a future release, see https://github.com/influxdata/telegraf/issues/1878 for more details & discussion."

type Riemann struct {
URL string
Transport string
Separator string
URL string `toml:"url"`
Transport string `toml:"transport"`
Separator string `toml:"separator"`
Log telegraf.Logger `toml:"-"`

client *raidman.Client
}
|
|
@ -32,7 +32,7 @@ var sampleConfig = `
`

func (r *Riemann) Connect() error {
log.Printf(deprecationMsg)
r.Log.Error(deprecationMsg)
c, err := raidman.Dial(r.Transport, r.URL)

if err != nil {
|
|
@ -62,7 +62,7 @@ func (r *Riemann) Description() string {
}

func (r *Riemann) Write(metrics []telegraf.Metric) error {
log.Printf(deprecationMsg)
r.Log.Error(deprecationMsg)
if len(metrics) == 0 {
return nil
}
|
|
@ -140,7 +140,7 @@ func serviceName(s string, n string, t map[string]string, f string) string {
tagStrings = append(tagStrings, t[tagName])
}
}
var tagString string = strings.Join(tagStrings, s)
var tagString = strings.Join(tagStrings, s)
if tagString != "" {
serviceStrings = append(serviceStrings, tagString)
}
|
|
|||
|
|
@ -19,11 +19,7 @@ type Config struct {

// Validate validates the config's templates and tags.
func (c *Config) Validate() error {
if err := c.validateTemplates(); err != nil {
return err
}

return nil
return c.validateTemplates()
}

func (c *Config) validateTemplates() error {
|
|
|||
|
|
@ -82,8 +82,8 @@ func NewSeriesParser(handler *MetricHandler) *Parser {
}
}

func (h *Parser) SetTimeFunc(f TimeFunc) {
h.handler.SetTimeFunc(f)
func (p *Parser) SetTimeFunc(f TimeFunc) {
p.handler.SetTimeFunc(f)
}

func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) {
|
|
@ -178,18 +178,18 @@ func NewStreamParser(r io.Reader) *StreamParser {
// SetTimeFunc changes the function used to determine the time of metrics
// without a timestamp. The default TimeFunc is time.Now. Useful mostly for
// testing, or perhaps if you want all metrics to have the same timestamp.
func (h *StreamParser) SetTimeFunc(f TimeFunc) {
h.handler.SetTimeFunc(f)
func (sp *StreamParser) SetTimeFunc(f TimeFunc) {
sp.handler.SetTimeFunc(f)
}

func (h *StreamParser) SetTimePrecision(u time.Duration) {
h.handler.SetTimePrecision(u)
func (sp *StreamParser) SetTimePrecision(u time.Duration) {
sp.handler.SetTimePrecision(u)
}

// Next parses the next item from the stream. You can repeat calls to this
// function if it returns ParseError to get the next metric or error.
func (p *StreamParser) Next() (telegraf.Metric, error) {
err := p.machine.Next()
func (sp *StreamParser) Next() (telegraf.Metric, error) {
err := sp.machine.Next()
if err == EOF {
return nil, err
}
|
|
@ -200,16 +200,16 @@ func (p *StreamParser) Next() (telegraf.Metric, error) {

if err != nil {
return nil, &ParseError{
Offset: p.machine.Position(),
LineOffset: p.machine.LineOffset(),
LineNumber: p.machine.LineNumber(),
Column: p.machine.Column(),
Offset: sp.machine.Position(),
LineOffset: sp.machine.LineOffset(),
LineNumber: sp.machine.LineNumber(),
Column: sp.machine.Column(),
msg: err.Error(),
buf: p.machine.LineText(),
buf: sp.machine.LineText(),
}
}

metric, err := p.handler.Metric()
metric, err := sp.handler.Metric()
if err != nil {
return nil, err
}
|
|
@ -218,27 +218,27 @@ func (p *StreamParser) Next() (telegraf.Metric, error) {
}

// Position returns the current byte offset into the data.
func (p *StreamParser) Position() int {
return p.machine.Position()
func (sp *StreamParser) Position() int {
return sp.machine.Position()
}

// LineOffset returns the byte offset of the current line.
func (p *StreamParser) LineOffset() int {
return p.machine.LineOffset()
func (sp *StreamParser) LineOffset() int {
return sp.machine.LineOffset()
}

// LineNumber returns the current line number. Lines are counted based on the
// regular expression `\r?\n`.
func (p *StreamParser) LineNumber() int {
return p.machine.LineNumber()
func (sp *StreamParser) LineNumber() int {
return sp.machine.LineNumber()
}

// Column returns the current column.
func (p *StreamParser) Column() int {
return p.machine.Column()
func (sp *StreamParser) Column() int {
return sp.machine.Column()
}

// LineText returns the text of the current line that has been parsed so far.
func (p *StreamParser) LineText() string {
return p.machine.LineText()
func (sp *StreamParser) LineText() string {
return sp.machine.LineText()
}
|
|
|||
|
|
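The `h`-to-`p` and `p`-to-`sp` renames above apply revive's receiver-naming rule: every method of a type should use one consistent, short receiver name (the earlier swap hunk replaced `_` receivers for the same reason). A sketch with a hypothetical type:

    type Ring struct{ data []byte }

    // One receiver name, "r", across all of Ring's methods.
    func (r *Ring) Len() int { return len(r.data) }
    func (r *Ring) Reset()   { r.data = r.data[:0] }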
@ -13,7 +13,7 @@ import (
"github.com/influxdata/telegraf/metric"
)

const MAX_BUFFER_SIZE = 2
const MaxBufferSize = 2

type Point struct {
Name string
|
|
@ -170,9 +170,9 @@ func (p *PointParser) convertPointToTelegrafMetric(points []Point) ([]telegraf.M
func (p *PointParser) scan() (Token, string) {
// If we have a token on the buffer, then return it.
if p.buf.n != 0 {
idx := p.buf.n % MAX_BUFFER_SIZE
idx := p.buf.n % MaxBufferSize
tok, lit := p.buf.tok[idx], p.buf.lit[idx]
p.buf.n -= 1
p.buf.n--
return tok, lit
}

|
|
@ -188,8 +188,8 @@ func (p *PointParser) scan() (Token, string) {
func (p *PointParser) buffer(tok Token, lit string) {
// create the buffer if it is empty
if len(p.buf.tok) == 0 {
p.buf.tok = make([]Token, MAX_BUFFER_SIZE)
p.buf.lit = make([]string, MAX_BUFFER_SIZE)
p.buf.tok = make([]Token, MaxBufferSize)
p.buf.lit = make([]string, MaxBufferSize)
}

// for now assume a simple circular buffer of length two
|
|
@ -203,9 +203,9 @@ func (p *PointParser) unscan() {
}

func (p *PointParser) unscanTokens(n int) {
if n > MAX_BUFFER_SIZE {
if n > MaxBufferSize {
// just log for now
log.Printf("cannot unscan more than %d tokens", MAX_BUFFER_SIZE)
log.Printf("cannot unscan more than %d tokens", MaxBufferSize)
}
p.buf.n += n
}
|
|
|||
|
|
@ -46,11 +46,11 @@ type PortName struct {
Log telegraf.Logger `toml:"-"`
}

func (d *PortName) SampleConfig() string {
func (pn *PortName) SampleConfig() string {
return sampleConfig
}

func (d *PortName) Description() string {
func (pn *PortName) Description() string {
return "Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file"
}

|
|
@ -106,22 +106,22 @@ func readServices(r io.Reader) sMap {
return services
}

func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
func (pn *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
for _, m := range metrics {

var portProto string
var fromField bool

if len(d.SourceTag) > 0 {
if tag, ok := m.GetTag(d.SourceTag); ok {
portProto = string([]byte(tag))
if len(pn.SourceTag) > 0 {
if tag, ok := m.GetTag(pn.SourceTag); ok {
portProto = tag
}
}
if len(d.SourceField) > 0 {
if field, ok := m.GetField(d.SourceField); ok {
if len(pn.SourceField) > 0 {
if field, ok := m.GetField(pn.SourceField); ok {
switch v := field.(type) {
default:
d.Log.Errorf("Unexpected type %t in source field; must be string or int", v)
pn.Log.Errorf("Unexpected type %t in source field; must be string or int", v)
continue
case int64:
portProto = strconv.FormatInt(v, 10)
|
|
@ -143,7 +143,7 @@ func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric {

if l == 0 {
// Empty tag
d.Log.Errorf("empty port tag: %v", d.SourceTag)
pn.Log.Errorf("empty port tag: %v", pn.SourceTag)
continue
}

|
|
@ -154,25 +154,25 @@ func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
port, err = strconv.Atoi(val)
if err != nil {
// Can't convert port to string
d.Log.Errorf("error converting port to integer: %v", val)
pn.Log.Errorf("error converting port to integer: %v", val)
continue
}
}

proto := d.DefaultProtocol
proto := pn.DefaultProtocol
if l > 1 && len(portProtoSlice[1]) > 0 {
proto = portProtoSlice[1]
}
if len(d.ProtocolTag) > 0 {
if tag, ok := m.GetTag(d.ProtocolTag); ok {
if len(pn.ProtocolTag) > 0 {
if tag, ok := m.GetTag(pn.ProtocolTag); ok {
proto = tag
}
}
if len(d.ProtocolField) > 0 {
if field, ok := m.GetField(d.ProtocolField); ok {
if len(pn.ProtocolField) > 0 {
if field, ok := m.GetField(pn.ProtocolField); ok {
switch v := field.(type) {
default:
d.Log.Errorf("Unexpected type %t in protocol field; must be string", v)
pn.Log.Errorf("Unexpected type %t in protocol field; must be string", v)
continue
case string:
proto = v
|
|
@ -190,7 +190,7 @@ func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
// normally has entries for both, so our map does too. If
// not, it's very likely the source tag or the services
// file doesn't make sense.
d.Log.Errorf("protocol not found in services map: %v", proto)
pn.Log.Errorf("protocol not found in services map: %v", proto)
continue
}

|
|
@ -200,21 +200,21 @@ func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
//
// Not all ports are named so this isn't an error, but
// it's helpful to know when debugging.
d.Log.Debugf("port not found in services map: %v", port)
pn.Log.Debugf("port not found in services map: %v", port)
continue
}

if fromField {
m.AddField(d.Dest, service)
m.AddField(pn.Dest, service)
} else {
m.AddTag(d.Dest, service)
m.AddTag(pn.Dest, service)
}
}

return metrics
}

func (h *PortName) Init() error {
func (pn *PortName) Init() error {
services = make(sMap)
readServicesFile()
return nil
|
|
|||
|
|
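One fix above is easy to miss: `string([]byte(tag))` round-trips a string through a byte slice only to produce an equal string, so the direct assignment is equivalent minus an allocation. Sketch:

    func example() {
        tag := "443/tcp"
        portProto := tag // same result as string([]byte(tag)), without the copy
        _ = portProto
    }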
@ -276,8 +276,7 @@ func (t *TopK) push() []telegraf.Metric {
}

// The return value that will hold the returned metrics
var ret []telegraf.Metric = make([]telegraf.Metric, 0, 0)

var ret = make([]telegraf.Metric, 0, 0)
// Get the top K metrics for each field and add them to the return value
addedKeys := make(map[string]bool)
for _, field := range t.Fields {
|
|
@ -317,11 +316,11 @@ func (t *TopK) push() []telegraf.Metric {

result := make([]telegraf.Metric, 0, len(ret))
for _, m := range ret {
copy, err := metric.New(m.Name(), m.Tags(), m.Fields(), m.Time(), m.Type())
newMetric, err := metric.New(m.Name(), m.Tags(), m.Fields(), m.Time(), m.Type())
if err != nil {
continue
}
result = append(result, copy)
result = append(result, newMetric)
}

return result
|
|
@ -412,7 +411,7 @@ func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metr
continue
}
mean[field] += val
meanCounters[field] += 1
meanCounters[field]++
}
}
// Divide by the number of recorded measurements collected for every field
|
|
@ -423,7 +422,7 @@ func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metr
continue
}
mean[k] = mean[k] / meanCounters[k]
noMeasurementsFound = noMeasurementsFound && false
noMeasurementsFound = false
}

if noMeasurementsFound {
|
|
|||
|
|
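Two of the fixes above change more than style: a local variable named `copy` shadows the predeclared `copy` builtin for the rest of its scope, and `noMeasurementsFound = noMeasurementsFound && false` is an obscured way of writing `= false`, since `x && false` is always false. A sketch of the shadowing hazard:

    func clone(vals []int) []int {
        dst := make([]int, len(vals))
        copy(dst, vals) // the builtin stays usable only while no local is named "copy"
        return dst
    }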
@ -237,7 +237,7 @@ func (s *Serializer) writeMetric(w io.Writer, m telegraf.Metric) error {

// Additional length needed for field separator `,`
if !firstField {
bytesNeeded += 1
bytesNeeded++
}

if s.maxLineBytes > 0 && bytesNeeded > s.maxLineBytes {
|
|
|||
|
|
@ -50,7 +50,7 @@ func (r *reader) Read(p []byte) (int, error) {

for _, metric := range r.metrics[r.offset:] {
_, err := r.serializer.Write(r.buf, metric)
r.offset += 1
r.offset++
if err != nil {
r.buf.Reset()
if _, ok := err.(*MetricError); ok {
|
|
|||
|
|
@ -187,7 +187,7 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
metrickey, promts = getPromTS(metricName, labels, value, metric.Time())
}
default:
return nil, fmt.Errorf("Unknown type %v", metric.Type())
return nil, fmt.Errorf("unknown type %v", metric.Type())
}

// A batch of metrics can contain multiple values for a single
|
|
@ -205,7 +205,7 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
}

var promTS = make([]*prompb.TimeSeries, len(entries))
var i int64 = 0
var i int64
for _, promts := range entries {
promTS[i] = promts
i++
|
|
|||
|
|
@ -1,4 +1,4 @@
// selfstat is a package for tracking and collecting internal statistics
// Package selfstat is a package for tracking and collecting internal statistics
// about telegraf. Metrics can be registered using this package, and then
// incremented or set within your code. If the inputs.internal plugin is enabled,
// then all registered stats will be collected as they would by any other input
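The selfstat hunk applies revive's package-comments rule: a package doc comment must begin with `Package <name>` so godoc renders it as the package synopsis. The general form, for a hypothetical package:

    // Package ringbuf implements a fixed-size ring buffer used for examples.
    package ringbuf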