Linter fixes - revive:unused-parameter, unparam, varcheck and unused (#8984)

* Linter fixes - revive:unused-parameter and unparam
* "nolint"s removed
* Fixes for "varcheck" and "unused" added.

Co-authored-by: Pawel Zak <Pawel Zak>
This commit is contained in:
parent 24c8fb20dc
commit 74a1acd814
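The changes below fall into a few mechanical patterns: parameters that are never read are renamed to `_` (revive:unused-parameter), error returns that can only ever be nil are dropped along with their callers' error handling (unparam), and declarations the linters see no use for are deleted or annotated (varcheck, unused). A minimal sketch of the first two patterns, using a hypothetical `shutdown` helper that is not part of this commit:

package main

import "fmt"

// Before the fixes, revive flags `reason` as an unused parameter, and unparam
// flags the error return because every path returns nil:
//
//	func shutdown(reason string) error { fmt.Println("bye"); return nil }
//
// After: the unused parameter becomes `_` and the nil-only error is dropped.
func shutdown(_ string) {
	fmt.Println("bye")
}

func main() {
	shutdown("ignored") // callers lose their `if err != nil` branches too
}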
.golangci.yml

@@ -1,4 +1,5 @@
 linters:
+  disable-all: true
   enable:
     - bodyclose
     - dogsled
@@ -18,55 +19,6 @@ linters:
     - unparam
     - unused
     - varcheck
-  disable:
-    - asciicheck
-    - deadcode
-    - depguard
-    - dupl
-    - exhaustive
-    - funlen
-    - gci
-    - gochecknoglobals
-    - gochecknoinits
-    - gocognit
-    - goconst
-    - gocritic
-    - gocyclo
-    - godot
-    - godox
-    - goerr113
-    - gofmt
-    - gofumpt
-    - goheader
-    - goimports
-    - golint
-    - gomnd
-    - gomodguard
-    - gosec
-    - ifshort
-    - interfacer
-    - lll
-    - makezero
-    - maligned
-    - megacheck
-    - misspell
-    - nestif
-    - nlreturn
-    - noctx
-    - nolintlint
-    - paralleltest
-    - prealloc
-    - rowserrcheck
-    - scopelint
-    - structcheck
-    - stylecheck
-    - testpackage
-    - thelper
-    - tparallel
-    - wastedassign
-    - whitespace
-    - wrapcheck
-    - wsl
 
 linters-settings:
   revive:
@@ -131,7 +83,7 @@ linters-settings:
 
 run:
   # timeout for analysis, e.g. 30s, 5m, default is 1m
-  timeout: 5m
+  timeout: 10m
 
   # which dirs to skip: issues from them won't be reported;
   # can use regexp here: generated.*, regexp is applied on full path;
@@ -169,10 +121,8 @@ issues:
       linters:
         - govet
 
-  # Show only new issues created after git revision `HEAD~`
-  # Great for CI setups
-  # It's not practical to fix all existing issues at the moment of integration: much better to not allow issues in new code.
-  # new-from-rev: "HEAD~"
+    - path: _test\.go
+      text: "parameter.*seems to be a control flag, avoid control coupling"
 
 output:
   format: tab
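The new exclude rule silences revive's flag-parameter warning ("parameter ... seems to be a control flag, avoid control coupling") in *_test.go files, where boolean switches in helpers are idiomatic. A hypothetical example of the pattern that triggers that message:

package main

import "fmt"

// `loud` is a control flag: a boolean that switches the function between two
// behaviors. revive reports this as control coupling; the exclude rule above
// keeps that warning out of test files only.
func greet(name string, loud bool) {
	if loud {
		fmt.Printf("HELLO, %s!\n", name)
		return
	}
	fmt.Printf("hello, %s\n", name)
}

func main() {
	greet("telegraf", true)
	greet("telegraf", false)
}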
agent/agent.go (123 changed lines)

@@ -126,10 +126,7 @@ func (a *Agent) Run(ctx context.Context) error {
 			}
 		}
 
-		next, au, err = a.startAggregators(aggC, next, a.Config.Aggregators)
-		if err != nil {
-			return err
-		}
+		next, au = a.startAggregators(aggC, next, a.Config.Aggregators)
 	}
 
 	var pu []*processorUnit
@@ -149,29 +146,20 @@ func (a *Agent) Run(ctx context.Context) error {
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
-		err := a.runOutputs(ou)
-		if err != nil {
-			log.Printf("E! [agent] Error running outputs: %v", err)
-		}
+		a.runOutputs(ou)
 	}()
 
 	if au != nil {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
-			err := a.runProcessors(apu)
-			if err != nil {
-				log.Printf("E! [agent] Error running processors: %v", err)
-			}
+			a.runProcessors(apu)
 		}()
 
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
-			err := a.runAggregators(startTime, au)
-			if err != nil {
-				log.Printf("E! [agent] Error running aggregators: %v", err)
-			}
+			a.runAggregators(startTime, au)
 		}()
 	}
 
@@ -179,20 +167,14 @@ func (a *Agent) Run(ctx context.Context) error {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
-			err := a.runProcessors(pu)
-			if err != nil {
-				log.Printf("E! [agent] Error running processors: %v", err)
-			}
+			a.runProcessors(pu)
 		}()
 	}
 
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
-		err := a.runInputs(ctx, startTime, iu)
-		if err != nil {
-			log.Printf("E! [agent] Error running inputs: %v", err)
-		}
+		a.runInputs(ctx, startTime, iu)
 	}()
 
 	wg.Wait()
@@ -288,7 +270,7 @@ func (a *Agent) runInputs(
 	ctx context.Context,
 	startTime time.Time,
 	unit *inputUnit,
-) error {
+) {
 	var wg sync.WaitGroup
 	for _, input := range unit.inputs {
 		// Overwrite agent interval if this plugin has its own.
@@ -334,8 +316,6 @@ func (a *Agent) runInputs(
 
 	close(unit.dst)
 	log.Printf("D! [agent] Input channel closed")
-
-	return nil
 }
 
 // testStartInputs is a variation of startInputs for use in --test and --once
@@ -344,7 +324,7 @@ func (a *Agent) runInputs(
 func (a *Agent) testStartInputs(
 	dst chan<- telegraf.Metric,
 	inputs []*models.RunningInput,
-) (*inputUnit, error) {
+) *inputUnit {
 	log.Printf("D! [agent] Starting service inputs")
 
 	unit := &inputUnit{
@@ -369,7 +349,7 @@ func (a *Agent) testStartInputs(
 		unit.inputs = append(unit.inputs, input)
 	}
 
-	return unit, nil
+	return unit
 }
 
 // testRunInputs is a variation of runInputs for use in --test and --once mode.
@@ -378,7 +358,7 @@ func (a *Agent) testRunInputs(
 	ctx context.Context,
 	wait time.Duration,
 	unit *inputUnit,
-) error {
+) {
 	var wg sync.WaitGroup
 
 	nul := make(chan telegraf.Metric)
@@ -434,7 +414,6 @@ func (a *Agent) testRunInputs(
 
 	close(unit.dst)
 	log.Printf("D! [agent] Input channel closed")
-	return nil
 }
 
 // stopServiceInputs stops all service inputs.
@@ -553,7 +532,7 @@ func (a *Agent) startProcessors(
 // closed and all metrics have been written.
 func (a *Agent) runProcessors(
 	units []*processorUnit,
-) error {
+) {
 	var wg sync.WaitGroup
 	for _, unit := range units {
 		wg.Add(1)
@@ -573,8 +552,6 @@ func (a *Agent) runProcessors(
 		}(unit)
 	}
 	wg.Wait()
-
-	return nil
 }
 
 // startAggregators sets up the aggregator unit and returns the source channel.
@@ -582,7 +559,7 @@ func (a *Agent) startAggregators(
 	aggC chan<- telegraf.Metric,
 	outputC chan<- telegraf.Metric,
 	aggregators []*models.RunningAggregator,
-) (chan<- telegraf.Metric, *aggregatorUnit, error) {
+) (chan<- telegraf.Metric, *aggregatorUnit) {
 	src := make(chan telegraf.Metric, 100)
 	unit := &aggregatorUnit{
 		src: src,
@@ -590,7 +567,7 @@ func (a *Agent) startAggregators(
 		outputC:     outputC,
 		aggregators: aggregators,
 	}
-	return src, unit, nil
+	return src, unit
 }
 
 // runAggregators beings aggregating metrics and runs until the source channel
@@ -598,7 +575,7 @@ func (a *Agent) startAggregators(
 func (a *Agent) runAggregators(
 	startTime time.Time,
 	unit *aggregatorUnit,
-) error {
+) {
 	ctx, cancel := context.WithCancel(context.Background())
 
 	// Before calling Add, initialize the aggregation window. This ensures
@@ -650,8 +627,6 @@ func (a *Agent) runAggregators(
 	// processor chain will close the outputC when it finishes processing.
 	close(unit.aggC)
 	log.Printf("D! [agent] Aggregator channel closed")
-
-	return nil
 }
 
 func updateWindow(start time.Time, roundInterval bool, period time.Duration) (time.Time, time.Time) {
@@ -744,7 +719,7 @@ func (a *Agent) connectOutput(ctx context.Context, output *models.RunningOutput)
 // written one last time and dropped if unsuccessful.
 func (a *Agent) runOutputs(
 	unit *outputUnit,
-) error {
+) {
 	var wg sync.WaitGroup
 
 	// Start flush loop
@@ -793,8 +768,6 @@ func (a *Agent) runOutputs(
 
 	log.Println("I! [agent] Stopping running outputs")
 	stopRunningOutputs(unit.outputs)
-
-	return nil
 }
 
 // flushLoop runs an output's flush function periodically until the context is
@@ -924,10 +897,7 @@ func (a *Agent) test(ctx context.Context, wait time.Duration, outputC chan<- tel
 			}
 		}
 
-		next, au, err = a.startAggregators(procC, next, a.Config.Aggregators)
-		if err != nil {
-			return err
-		}
+		next, au = a.startAggregators(procC, next, a.Config.Aggregators)
 	}
 
 	var pu []*processorUnit
@@ -938,30 +908,20 @@ func (a *Agent) test(ctx context.Context, wait time.Duration, outputC chan<- tel
 		}
 	}
 
-	iu, err := a.testStartInputs(next, a.Config.Inputs)
-	if err != nil {
-		return err
-	}
-
+	iu := a.testStartInputs(next, a.Config.Inputs)
 	var wg sync.WaitGroup
 
 	if au != nil {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
-			err := a.runProcessors(apu)
-			if err != nil {
-				log.Printf("E! [agent] Error running processors: %v", err)
-			}
+			a.runProcessors(apu)
 		}()
 
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
-			err := a.runAggregators(startTime, au)
-			if err != nil {
-				log.Printf("E! [agent] Error running aggregators: %v", err)
-			}
+			a.runAggregators(startTime, au)
 		}()
 	}
 
@@ -969,20 +929,14 @@ func (a *Agent) test(ctx context.Context, wait time.Duration, outputC chan<- tel
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
-			err := a.runProcessors(pu)
-			if err != nil {
-				log.Printf("E! [agent] Error running processors: %v", err)
-			}
+			a.runProcessors(pu)
 		}()
 	}
 
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
-		err := a.testRunInputs(ctx, wait, iu)
-		if err != nil {
-			log.Printf("E! [agent] Error running inputs: %v", err)
-		}
+		a.testRunInputs(ctx, wait, iu)
 	}()
 
 	wg.Wait()
@@ -1042,10 +996,7 @@ func (a *Agent) once(ctx context.Context, wait time.Duration) error {
 			}
 		}
 
-		next, au, err = a.startAggregators(procC, next, a.Config.Aggregators)
-		if err != nil {
-			return err
-		}
+		next, au = a.startAggregators(procC, next, a.Config.Aggregators)
 	}
 
 	var pu []*processorUnit
@@ -1056,38 +1007,26 @@ func (a *Agent) once(ctx context.Context, wait time.Duration) error {
 		}
 	}
 
-	iu, err := a.testStartInputs(next, a.Config.Inputs)
-	if err != nil {
-		return err
-	}
+	iu := a.testStartInputs(next, a.Config.Inputs)
 
 	var wg sync.WaitGroup
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
-		err := a.runOutputs(ou)
-		if err != nil {
-			log.Printf("E! [agent] Error running outputs: %v", err)
-		}
+		a.runOutputs(ou)
 	}()
 
 	if au != nil {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
-			err := a.runProcessors(apu)
-			if err != nil {
-				log.Printf("E! [agent] Error running processors: %v", err)
-			}
+			a.runProcessors(apu)
 		}()
 
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
-			err := a.runAggregators(startTime, au)
-			if err != nil {
-				log.Printf("E! [agent] Error running aggregators: %v", err)
-			}
+			a.runAggregators(startTime, au)
 		}()
 	}
 
@@ -1095,20 +1034,14 @@ func (a *Agent) once(ctx context.Context, wait time.Duration) error {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
-			err := a.runProcessors(pu)
-			if err != nil {
-				log.Printf("E! [agent] Error running processors: %v", err)
-			}
+			a.runProcessors(pu)
 		}()
 	}
 
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
-		err := a.testRunInputs(ctx, wait, iu)
-		if err != nil {
-			log.Printf("E! [agent] Error running inputs: %v", err)
-		}
+		a.testRunInputs(ctx, wait, iu)
 	}()
 
 	wg.Wait()
@@ -9,8 +9,6 @@ import (
 	"github.com/influxdata/telegraf/internal"
 )
 
-type empty struct{}
-
 type Ticker interface {
 	Elapsed() <-chan time.Time
 	Stop()
@@ -61,11 +61,22 @@ var fProcessorFilters = flag.String("processor-filter", "",
 	"filter the processors to enable, separator is :")
 var fUsage = flag.String("usage", "",
 	"print usage for a plugin, ie, 'telegraf --usage mysql'")
+
+//nolint:varcheck,unused // False positive - this var is used for non-default build tag: windows
 var fService = flag.String("service", "",
 	"operate on the service (windows only)")
-var fServiceName = flag.String("service-name", "telegraf", "service name (windows only)")
-var fServiceDisplayName = flag.String("service-display-name", "Telegraf Data Collector Service", "service display name (windows only)")
-var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)")
+
+//nolint:varcheck,unused // False positive - this var is used for non-default build tag: windows
+var fServiceName = flag.String("service-name", "telegraf",
+	"service name (windows only)")
+
+//nolint:varcheck,unused // False positive - this var is used for non-default build tag: windows
+var fServiceDisplayName = flag.String("service-display-name", "Telegraf Data Collector Service",
+	"service display name (windows only)")
+
+//nolint:varcheck,unused // False positive - this var is used for non-default build tag: windows
+var fRunAsConsole = flag.Bool("console", false,
+	"run as console application (windows only)")
 var fPlugins = flag.String("plugin-directory", "",
 	"path to directory containing external plugins")
 var fRunOnce = flag.Bool("once", false, "run one gather and exit")
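varcheck and unused analyze one build configuration at a time, so a package-level var referenced only from a file behind a build tag (here: windows) looks dead on every other platform. The commit keeps such declarations and marks them with //nolint, as in this minimal sketch (the `demoService` flag is hypothetical, not from the diff):

package main

import "flag"

// Referenced only from a file guarded by `//go:build windows` in the real
// code; on other platforms the linters see no use, hence the directive.
//
//nolint:varcheck,unused // False positive - used for the windows build tag
var demoService = flag.String("service", "", "operate on the service (windows only)")

func main() {
	flag.Parse() // the flag stays registered even where it is never read
}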
@@ -81,14 +92,11 @@ var stop chan struct{}
 func reloadLoop(
 	inputFilters []string,
 	outputFilters []string,
-	aggregatorFilters []string,
-	processorFilters []string,
 ) {
 	reload := make(chan bool, 1)
 	reload <- true
 	for <-reload {
 		reload <- false
-
 		ctx, cancel := context.WithCancel(context.Background())
 
 		signals := make(chan os.Signal, 1)
@@ -363,7 +371,5 @@ func main() {
 	run(
 		inputFilters,
 		outputFilters,
-		aggregatorFilters,
-		processorFilters,
 	)
 }
@@ -2,12 +2,10 @@
 
 package main
 
-func run(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) {
+func run(inputFilters, outputFilters []string) {
 	stop = make(chan struct{})
 	reloadLoop(
 		inputFilters,
 		outputFilters,
-		aggregatorFilters,
-		processorFilters,
 	)
 }
@@ -11,7 +11,7 @@ import (
 	"github.com/kardianos/service"
 )
 
-func run(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) {
+func run(inputFilters, outputFilters []string) {
 	// Register the eventlog logging target for windows.
 	logger.RegisterEventLogger(*fServiceName)
 
@@ -19,25 +19,19 @@ func run(inputFilters, outputFilters, aggregatorFilters, processorFilters []stri
 		runAsWindowsService(
 			inputFilters,
 			outputFilters,
-			aggregatorFilters,
-			processorFilters,
 		)
 	} else {
 		stop = make(chan struct{})
 		reloadLoop(
 			inputFilters,
 			outputFilters,
-			aggregatorFilters,
-			processorFilters,
 		)
 	}
 }
 
 type program struct {
 	inputFilters  []string
 	outputFilters []string
-	aggregatorFilters []string
-	processorFilters  []string
 }
 
 func (p *program) Start(s service.Service) error {
@@ -49,8 +43,6 @@ func (p *program) run() {
 	reloadLoop(
 		p.inputFilters,
 		p.outputFilters,
-		p.aggregatorFilters,
-		p.processorFilters,
 	)
 }
 func (p *program) Stop(s service.Service) error {
@@ -58,7 +50,7 @@ func (p *program) Stop(s service.Service) error {
 	return nil
 }
 
-func runAsWindowsService(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) {
+func runAsWindowsService(inputFilters, outputFilters []string) {
 	programFiles := os.Getenv("ProgramFiles")
 	if programFiles == "" { // Should never happen
 		programFiles = "C:\\Program Files"
@@ -72,10 +64,8 @@ func runAsWindowsService(inputFilters, outputFilters, aggregatorFilters, process
 	}
 
 	prg := &program{
 		inputFilters:  inputFilters,
 		outputFilters: outputFilters,
-		aggregatorFilters: aggregatorFilters,
-		processorFilters:  processorFilters,
 	}
 	s, err := service.New(prg, svcConfig)
 	if err != nil {
@@ -1006,14 +1006,14 @@ func (c *Config) addProcessor(name string, table *ast.Table) error {
 		return err
 	}
 
-	rf, err := c.newRunningProcessor(creator, processorConfig, name, table)
+	rf, err := c.newRunningProcessor(creator, processorConfig, table)
 	if err != nil {
 		return err
 	}
 	c.Processors = append(c.Processors, rf)
 
 	// save a copy for the aggregator
-	rf, err = c.newRunningProcessor(creator, processorConfig, name, table)
+	rf, err = c.newRunningProcessor(creator, processorConfig, table)
 	if err != nil {
 		return err
 	}
@@ -1025,7 +1025,6 @@ func (c *Config) addProcessor(name string, table *ast.Table) error {
 func (c *Config) newRunningProcessor(
 	creator processors.StreamingCreator,
 	processorConfig *models.ProcessorConfig,
-	name string,
 	table *ast.Table,
 ) (*models.RunningProcessor, error) {
 	processor := creator()
@@ -1058,7 +1057,7 @@ func (c *Config) addOutput(name string, table *ast.Table) error {
 	// arbitrary types of output, so build the serializer and set it.
 	switch t := output.(type) {
 	case serializers.SerializerOutput:
-		serializer, err := c.buildSerializer(name, table)
+		serializer, err := c.buildSerializer(table)
 		if err != nil {
 			return err
 		}
@@ -1074,8 +1073,7 @@ func (c *Config) addOutput(name string, table *ast.Table) error {
 		return err
 	}
 
-	ro := models.NewRunningOutput(name, output, outputConfig,
-		c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit)
+	ro := models.NewRunningOutput(output, outputConfig, c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit)
 	c.Outputs = append(c.Outputs, ro)
 	return nil
 }
@@ -1377,8 +1375,8 @@ func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config,
 // buildSerializer grabs the necessary entries from the ast.Table for creating
 // a serializers.Serializer object, and creates it, which can then be added onto
 // an Output object.
-func (c *Config) buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error) {
-	sc := &serializers.Config{TimestampUnits: time.Duration(1 * time.Second)}
+func (c *Config) buildSerializer(tbl *ast.Table) (serializers.Serializer, error) {
+	sc := &serializers.Config{TimestampUnits: 1 * time.Second}
 
 	c.getFieldString(tbl, "data_format", &sc.DataFormat)
 
@@ -1449,7 +1447,7 @@ func (c *Config) buildOutput(name string, tbl *ast.Table) (*models.OutputConfig,
 	return oc, nil
 }
 
-func (c *Config) missingTomlField(typ reflect.Type, key string) error {
+func (c *Config) missingTomlField(_ reflect.Type, key string) error {
 	switch key {
 	case "alias", "carbon2_format", "collectd_auth_file", "collectd_parse_multivalue",
 		"collectd_security_level", "collectd_typesdb", "collection_jitter", "csv_column_names",
@@ -4,6 +4,6 @@ package goplugin
 
 import "errors"
 
-func LoadExternalPlugins(rootDir string) error {
+func LoadExternalPlugins(_ string) error {
 	return errors.New("go plugin support is not enabled")
 }
@@ -46,7 +46,7 @@ func TestSnakeCase(t *testing.T) {
 }
 
 var (
-	sleepbin, _ = exec.LookPath("sleep")
+	sleepbin, _ = exec.LookPath("sleep") //nolint:unused // Used in skipped tests
 	echobin, _  = exec.LookPath("echo")
 	shell, _    = exec.LookPath("sh")
 )
@@ -251,8 +251,8 @@ func (m *metric) Copy() telegraf.Metric {
 	return m2
 }
 
-func (m *metric) SetAggregate(b bool) {
-	m.aggregate = true
+func (m *metric) SetAggregate(aggregate bool) {
+	m.aggregate = aggregate
 }
 
 func (m *metric) IsAggregate() bool {
@@ -78,12 +78,13 @@ func TestTracking(t *testing.T) {
 		{
 			name: "accept",
 			metric: mustMetric(
-				"cpu",
+				"memory",
 				map[string]string{},
 				map[string]interface{}{
 					"value": 42,
 				},
 				time.Unix(0, 0),
+				telegraf.Gauge,
 			),
 			actions: func(m telegraf.Metric) {
 				m.Accept()
@@ -93,12 +94,13 @@ func TestTracking(t *testing.T) {
 		{
 			name: "reject",
 			metric: mustMetric(
-				"cpu",
+				"memory",
 				map[string]string{},
 				map[string]interface{}{
 					"value": 42,
 				},
 				time.Unix(0, 0),
+				telegraf.Gauge,
 			),
 			actions: func(m telegraf.Metric) {
 				m.Reject()
@@ -108,12 +110,13 @@ func TestTracking(t *testing.T) {
 		{
 			name: "accept copy",
 			metric: mustMetric(
-				"cpu",
+				"memory",
 				map[string]string{},
 				map[string]interface{}{
 					"value": 42,
 				},
 				time.Unix(0, 0),
+				telegraf.Gauge,
 			),
 			actions: func(m telegraf.Metric) {
 				m2 := m.Copy()
@@ -125,12 +128,13 @@ func TestTracking(t *testing.T) {
 		{
 			name: "copy with accept and done",
 			metric: mustMetric(
-				"cpu",
+				"memory",
 				map[string]string{},
 				map[string]interface{}{
 					"value": 42,
 				},
 				time.Unix(0, 0),
+				telegraf.Gauge,
 			),
 			actions: func(m telegraf.Metric) {
 				m2 := m.Copy()
@@ -142,12 +146,13 @@ func TestTracking(t *testing.T) {
 		{
 			name: "copy with mixed delivery",
 			metric: mustMetric(
-				"cpu",
+				"memory",
 				map[string]string{},
 				map[string]interface{}{
 					"value": 42,
 				},
 				time.Unix(0, 0),
+				telegraf.Gauge,
 			),
 			actions: func(m telegraf.Metric) {
 				m2 := m.Copy()
@@ -220,16 +220,6 @@ func (b *Buffer) Reject(batch []telegraf.Metric) {
 	b.BufferSize.Set(int64(b.length()))
 }
 
-// dist returns the distance between two indexes. Because this data structure
-// uses a half open range the arguments must both either left side or right
-// side pairs.
-func (b *Buffer) dist(begin, end int) int {
-	if begin <= end {
-		return end - begin
-	}
-	return b.cap - begin + end
-}
-
 // next returns the next index with wrapping.
 func (b *Buffer) next(index int) int {
 	index++
@@ -246,15 +236,6 @@ func (b *Buffer) nextby(index, count int) int {
 	return index
 }
 
-// next returns the prev index with wrapping.
-func (b *Buffer) prev(index int) int {
-	index--
-	if index < 0 {
-		return b.cap - 1
-	}
-	return index
-}
-
 // prevby returns the index that is count older with wrapping.
 func (b *Buffer) prevby(index, count int) int {
 	index -= count
@@ -289,6 +289,6 @@ func TestMetricErrorCounters(t *testing.T) {
 
 type testInput struct{}
 
 func (t *testInput) Description() string  { return "" }
 func (t *testInput) SampleConfig() string { return "" }
-func (t *testInput) Gather(acc telegraf.Accumulator) error { return nil }
+func (t *testInput) Gather(_ telegraf.Accumulator) error { return nil }
@@ -56,7 +56,6 @@ type RunningOutput struct {
 }
 
 func NewRunningOutput(
-	name string,
 	output telegraf.Output,
 	config *OutputConfig,
 	batchSize int,
@@ -29,14 +29,6 @@ var next5 = []telegraf.Metric{
 	testutil.TestMetric(101, "metric10"),
 }
 
-func reverse(metrics []telegraf.Metric) []telegraf.Metric {
-	result := make([]telegraf.Metric, 0, len(metrics))
-	for i := len(metrics) - 1; i >= 0; i-- {
-		result = append(result, metrics[i])
-	}
-	return result
-}
-
 // Benchmark adding metrics.
 func BenchmarkRunningOutputAddWrite(b *testing.B) {
 	conf := &OutputConfig{
@@ -44,7 +36,7 @@ func BenchmarkRunningOutputAddWrite(b *testing.B) {
 	}
 
 	m := &perfOutput{}
-	ro := NewRunningOutput("test", m, conf, 1000, 10000)
+	ro := NewRunningOutput(m, conf, 1000, 10000)
 
 	for n := 0; n < b.N; n++ {
 		ro.AddMetric(testutil.TestMetric(101, "metric1"))
@@ -59,7 +51,7 @@ func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) {
 	}
 
 	m := &perfOutput{}
-	ro := NewRunningOutput("test", m, conf, 1000, 10000)
+	ro := NewRunningOutput(m, conf, 1000, 10000)
 
 	for n := 0; n < b.N; n++ {
 		ro.AddMetric(testutil.TestMetric(101, "metric1"))
@@ -77,7 +69,7 @@ func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
 
 	m := &perfOutput{}
 	m.failWrite = true
-	ro := NewRunningOutput("test", m, conf, 1000, 10000)
+	ro := NewRunningOutput(m, conf, 1000, 10000)
 
 	for n := 0; n < b.N; n++ {
 		ro.AddMetric(testutil.TestMetric(101, "metric1"))
@@ -94,7 +86,7 @@ func TestRunningOutput_DropFilter(t *testing.T) {
 	assert.NoError(t, conf.Filter.Compile())
 
 	m := &mockOutput{}
-	ro := NewRunningOutput("test", m, conf, 1000, 10000)
+	ro := NewRunningOutput(m, conf, 1000, 10000)
 
 	for _, metric := range first5 {
 		ro.AddMetric(metric)
@@ -119,7 +111,7 @@ func TestRunningOutput_PassFilter(t *testing.T) {
 	assert.NoError(t, conf.Filter.Compile())
 
 	m := &mockOutput{}
-	ro := NewRunningOutput("test", m, conf, 1000, 10000)
+	ro := NewRunningOutput(m, conf, 1000, 10000)
 
 	for _, metric := range first5 {
 		ro.AddMetric(metric)
@@ -144,7 +136,7 @@ func TestRunningOutput_TagIncludeNoMatch(t *testing.T) {
 	assert.NoError(t, conf.Filter.Compile())
 
 	m := &mockOutput{}
-	ro := NewRunningOutput("test", m, conf, 1000, 10000)
+	ro := NewRunningOutput(m, conf, 1000, 10000)
 
 	ro.AddMetric(testutil.TestMetric(101, "metric1"))
 	assert.Len(t, m.Metrics(), 0)
@@ -165,7 +157,7 @@ func TestRunningOutput_TagExcludeMatch(t *testing.T) {
 	assert.NoError(t, conf.Filter.Compile())
 
 	m := &mockOutput{}
-	ro := NewRunningOutput("test", m, conf, 1000, 10000)
+	ro := NewRunningOutput(m, conf, 1000, 10000)
 
 	ro.AddMetric(testutil.TestMetric(101, "metric1"))
 	assert.Len(t, m.Metrics(), 0)
@@ -186,7 +178,7 @@ func TestRunningOutput_TagExcludeNoMatch(t *testing.T) {
 	assert.NoError(t, conf.Filter.Compile())
 
 	m := &mockOutput{}
-	ro := NewRunningOutput("test", m, conf, 1000, 10000)
+	ro := NewRunningOutput(m, conf, 1000, 10000)
 
 	ro.AddMetric(testutil.TestMetric(101, "metric1"))
 	assert.Len(t, m.Metrics(), 0)
@@ -207,7 +199,7 @@ func TestRunningOutput_TagIncludeMatch(t *testing.T) {
 	assert.NoError(t, conf.Filter.Compile())
 
 	m := &mockOutput{}
-	ro := NewRunningOutput("test", m, conf, 1000, 10000)
+	ro := NewRunningOutput(m, conf, 1000, 10000)
 
 	ro.AddMetric(testutil.TestMetric(101, "metric1"))
 	assert.Len(t, m.Metrics(), 0)
@@ -225,7 +217,7 @@ func TestRunningOutput_NameOverride(t *testing.T) {
 	}
 
 	m := &mockOutput{}
-	ro := NewRunningOutput("test", m, conf, 1000, 10000)
+	ro := NewRunningOutput(m, conf, 1000, 10000)
 
 	ro.AddMetric(testutil.TestMetric(101, "metric1"))
 	assert.Len(t, m.Metrics(), 0)
@@ -243,7 +235,7 @@ func TestRunningOutput_NamePrefix(t *testing.T) {
 	}
 
 	m := &mockOutput{}
-	ro := NewRunningOutput("test", m, conf, 1000, 10000)
+	ro := NewRunningOutput(m, conf, 1000, 10000)
 
 	ro.AddMetric(testutil.TestMetric(101, "metric1"))
 	assert.Len(t, m.Metrics(), 0)
@@ -261,7 +253,7 @@ func TestRunningOutput_NameSuffix(t *testing.T) {
 	}
 
 	m := &mockOutput{}
-	ro := NewRunningOutput("test", m, conf, 1000, 10000)
+	ro := NewRunningOutput(m, conf, 1000, 10000)
 
 	ro.AddMetric(testutil.TestMetric(101, "metric1"))
 	assert.Len(t, m.Metrics(), 0)
@@ -279,7 +271,7 @@ func TestRunningOutputDefault(t *testing.T) {
 	}
 
 	m := &mockOutput{}
-	ro := NewRunningOutput("test", m, conf, 1000, 10000)
+	ro := NewRunningOutput(m, conf, 1000, 10000)
 
 	for _, metric := range first5 {
 		ro.AddMetric(metric)
@@ -301,7 +293,7 @@ func TestRunningOutputWriteFail(t *testing.T) {
 
 	m := &mockOutput{}
 	m.failWrite = true
-	ro := NewRunningOutput("test", m, conf, 4, 12)
+	ro := NewRunningOutput(m, conf, 4, 12)
 
 	// Fill buffer to limit twice
 	for _, metric := range first5 {
@@ -334,7 +326,7 @@ func TestRunningOutputWriteFailOrder(t *testing.T) {
 
 	m := &mockOutput{}
 	m.failWrite = true
-	ro := NewRunningOutput("test", m, conf, 100, 1000)
+	ro := NewRunningOutput(m, conf, 100, 1000)
 
 	// add 5 metrics
 	for _, metric := range first5 {
@@ -372,7 +364,7 @@ func TestRunningOutputWriteFailOrder2(t *testing.T) {
 
 	m := &mockOutput{}
 	m.failWrite = true
-	ro := NewRunningOutput("test", m, conf, 5, 100)
+	ro := NewRunningOutput(m, conf, 5, 100)
 
 	// add 5 metrics
 	for _, metric := range first5 {
@@ -436,7 +428,7 @@ func TestRunningOutputWriteFailOrder3(t *testing.T) {
 
 	m := &mockOutput{}
 	m.failWrite = true
-	ro := NewRunningOutput("test", m, conf, 5, 1000)
+	ro := NewRunningOutput(m, conf, 5, 1000)
 
 	// add 5 metrics
 	for _, metric := range first5 {
@@ -470,7 +462,6 @@ func TestRunningOutputWriteFailOrder3(t *testing.T) {
 
 func TestInternalMetrics(t *testing.T) {
 	_ = NewRunningOutput(
-		"test_internal",
 		&mockOutput{},
 		&OutputConfig{
 			Filter: Filter{},
@@ -581,7 +572,7 @@ func (m *perfOutput) SampleConfig() string {
 	return ""
 }
 
-func (m *perfOutput) Write(metrics []telegraf.Metric) error {
+func (m *perfOutput) Write(_ []telegraf.Metric) error {
 	if m.failWrite {
 		return fmt.Errorf("failed write")
 	}
@@ -19,7 +19,6 @@ const (
 
 type Merge struct {
 	grouper *metric.SeriesGrouper
-	log     telegraf.Logger
 }
 
 func (a *Merge) Init() error {
@@ -21,7 +21,7 @@ type exactAlgorithmR7 struct {
 	sorted bool
 }
 
-func newExactR7(compression float64) (algorithm, error) {
+func newExactR7(_ float64) (algorithm, error) {
 	return &exactAlgorithmR7{xs: make([]float64, 0, 100), sorted: false}, nil
 }
 
@@ -68,7 +68,7 @@ type exactAlgorithmR8 struct {
 	sorted bool
 }
 
-func newExactR8(compression float64) (algorithm, error) {
+func newExactR8(_ float64) (algorithm, error) {
 	return &exactAlgorithmR8{xs: make([]float64, 0, 100), sorted: false}, nil
 }
@@ -81,7 +81,7 @@ func (i *testDurationInput) SampleConfig() string {
 func (i *testDurationInput) Description() string {
 	return ""
 }
-func (i *testDurationInput) Gather(acc telegraf.Accumulator) error {
+func (i *testDurationInput) Gather(_ telegraf.Accumulator) error {
 	return nil
 }
 
@@ -71,7 +71,7 @@ func (i *erroringInput) Gather(acc telegraf.Accumulator) error {
 	return nil
 }
 
-func (i *erroringInput) Start(acc telegraf.Accumulator) error {
+func (i *erroringInput) Start(_ telegraf.Accumulator) error {
 	return nil
 }
 
@@ -100,7 +100,7 @@ func (i *testInput) Gather(acc telegraf.Accumulator) error {
 	return nil
 }
 
-func (i *testInput) Start(acc telegraf.Accumulator) error {
+func (i *testInput) Start(_ telegraf.Accumulator) error {
 	return nil
 }
 
@@ -133,7 +133,7 @@ func (i *serviceInput) Gather(acc telegraf.Accumulator) error {
 	return nil
 }
 
-func (i *serviceInput) Start(acc telegraf.Accumulator) error {
+func (i *serviceInput) Start(_ telegraf.Accumulator) error {
 	return nil
 }
 
@@ -486,14 +486,6 @@ func parseAerospikeValue(key string, v string) interface{} {
 	}
 }
 
-func copyTags(m map[string]string) map[string]string {
-	out := make(map[string]string)
-	for k, v := range m {
-		out[k] = v
-	}
-	return out
-}
-
 func init() {
 	inputs.Add("aerospike", func() telegraf.Input {
 		return &Aerospike{}
@@ -71,7 +71,7 @@ type mockAliyunSDKCli struct {
 	resp *responses.CommonResponse
 }
 
-func (m *mockAliyunSDKCli) ProcessCommonRequest(req *requests.CommonRequest) (response *responses.CommonResponse, err error) {
+func (m *mockAliyunSDKCli) ProcessCommonRequest(_ *requests.CommonRequest) (response *responses.CommonResponse, err error) {
 	return m.resp, nil
 }
 
@@ -12,7 +12,7 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-func TestApcupsdDocs(t *testing.T) {
+func TestApcupsdDocs(_ *testing.T) {
 	apc := &ApcUpsd{}
 	apc.Description()
 	apc.SampleConfig()
@@ -77,19 +77,6 @@ const validCassandraNestedMultiValueJSON = `
   }
 }`
 
-const validSingleValueJSON = `
-{
-  "request":{
-    "path":"used",
-    "mbean":"java.lang:type=Memory",
-    "attribute":"HeapMemoryUsage",
-    "type":"read"
-  },
-  "value":209274376,
-  "timestamp":1446129256,
-  "status":200
-}`
-
 const validJavaMultiTypeJSON = `
 {
   "request":{
@@ -104,8 +91,6 @@ const validJavaMultiTypeJSON = `
 
 const invalidJSON = "I don't think this is JSON"
 
-const empty = ""
-
 var Servers = []string{"10.10.10.10:8778"}
 var AuthServers = []string{"user:passwd@10.10.10.10:8778"}
 var MultipleServers = []string{"10.10.10.10:8778", "10.10.10.11:8778"}
@@ -121,7 +106,7 @@ type jolokiaClientStub struct {
 	statusCode int
 }
 
-func (c jolokiaClientStub) MakeRequest(req *http.Request) (*http.Response, error) {
+func (c jolokiaClientStub) MakeRequest(_ *http.Request) (*http.Response, error) {
 	resp := http.Response{}
 	resp.StatusCode = c.statusCode
 	resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody))
@@ -86,7 +86,7 @@ func TestDecodeOSDPoolStats(t *testing.T) {
 	}
 }
 
-func TestGather(t *testing.T) {
+func TestGather(_ *testing.T) {
 	saveFind := findSockets
 	saveDump := perfDump
 	defer func() {
@@ -65,7 +65,7 @@ func fakeExecCommand(command string, args ...string) *exec.Cmd {
 // For example, if you run:
 // GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- chrony tracking
 // it returns below mockData.
-func TestHelperProcess(t *testing.T) {
+func TestHelperProcess(_ *testing.T) {
 	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
 		return
 	}
@@ -88,15 +88,15 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error {
 	// Invert aliases list
 	c.warned = make(map[string]struct{})
 	c.aliases = make(map[string]string, len(c.Aliases))
-	for alias, path := range c.Aliases {
-		c.aliases[path] = alias
+	for alias, encodingPath := range c.Aliases {
+		c.aliases[encodingPath] = alias
 	}
 	c.initDb()
 
 	c.dmesFuncs = make(map[string]string, len(c.Dmes))
-	for dme, path := range c.Dmes {
-		c.dmesFuncs[path] = dme
-		switch path {
+	for dme, dmeKey := range c.Dmes {
+		c.dmesFuncs[dmeKey] = dme
+		switch dmeKey {
 		case "uint64 to int":
 			c.propMap[dme] = nxosValueXformUint64Toint64
 		case "uint64 to string":
@@ -115,7 +115,7 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error {
 		}
 
 		var jsStruct NxPayloadXfromStructure
-		err := json.Unmarshal([]byte(path), &jsStruct)
+		err := json.Unmarshal([]byte(dmeKey), &jsStruct)
 		if err != nil {
 			continue
 		}
@@ -449,9 +449,10 @@ func (c *CiscoTelemetryMDT) parseKeyField(tags map[string]string, field *telemet
 	}
 }
 
-func (c *CiscoTelemetryMDT) parseRib(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, prefix string, path string, tags map[string]string, timestamp time.Time) {
+func (c *CiscoTelemetryMDT) parseRib(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField,
+	encodingPath string, tags map[string]string, timestamp time.Time) {
 	// RIB
-	measurement := path
+	measurement := encodingPath
 	for _, subfield := range field.Fields {
 		//For Every table fill the keys which are vrfName, address and masklen
 		switch subfield.Name {
@@ -481,13 +482,14 @@ func (c *CiscoTelemetryMDT) parseRib(grouper *metric.SeriesGrouper, field *telem
 	}
 }
 
-func (c *CiscoTelemetryMDT) parseClassAttributeField(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, prefix string, path string, tags map[string]string, timestamp time.Time) {
+func (c *CiscoTelemetryMDT) parseClassAttributeField(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField,
+	encodingPath string, tags map[string]string, timestamp time.Time) {
 	// DME structure: https://developer.cisco.com/site/nxapi-dme-model-reference-api/
 	var nxAttributes *telemetry.TelemetryField
-	isDme := strings.Contains(path, "sys/")
-	if path == "rib" {
+	isDme := strings.Contains(encodingPath, "sys/")
+	if encodingPath == "rib" {
 		//handle native data path rib
-		c.parseRib(grouper, field, prefix, path, tags, timestamp)
+		c.parseRib(grouper, field, encodingPath, tags, timestamp)
 		return
 	}
 	if field == nil || !isDme || len(field.Fields) == 0 || len(field.Fields[0].Fields) == 0 || len(field.Fields[0].Fields[0].Fields) == 0 {
@@ -503,13 +505,13 @@ func (c *CiscoTelemetryMDT) parseClassAttributeField(grouper *metric.SeriesGroup
 		if subfield.Name == "dn" {
 			tags["dn"] = decodeTag(subfield)
 		} else {
-			c.parseContentField(grouper, subfield, "", path, tags, timestamp)
+			c.parseContentField(grouper, subfield, "", encodingPath, tags, timestamp)
 		}
 	}
 }
 
 func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, prefix string,
-	path string, tags map[string]string, timestamp time.Time) {
+	encodingPath string, tags map[string]string, timestamp time.Time) {
 	name := strings.Replace(field.Name, "-", "_", -1)
 
 	if (name == "modTs" || name == "createTs") && decodeValue(field) == "never" {
@@ -521,23 +523,23 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie
 		name = prefix + "/" + name
 	}
 
-	extraTags := c.extraTags[strings.Replace(path, "-", "_", -1)+"/"+name]
+	extraTags := c.extraTags[strings.Replace(encodingPath, "-", "_", -1)+"/"+name]
 
 	if value := decodeValue(field); value != nil {
 		// Do alias lookup, to shorten measurement names
-		measurement := path
-		if alias, ok := c.aliases[path]; ok {
+		measurement := encodingPath
+		if alias, ok := c.aliases[encodingPath]; ok {
 			measurement = alias
 		} else {
 			c.mutex.Lock()
-			if _, haveWarned := c.warned[path]; !haveWarned {
-				c.Log.Debugf("No measurement alias for encoding path: %s", path)
-				c.warned[path] = struct{}{}
+			if _, haveWarned := c.warned[encodingPath]; !haveWarned {
+				c.Log.Debugf("No measurement alias for encoding path: %s", encodingPath)
+				c.warned[encodingPath] = struct{}{}
 			}
 			c.mutex.Unlock()
 		}
 
-		if val := c.nxosValueXform(field, value, path); val != nil {
+		if val := c.nxosValueXform(field, value, encodingPath); val != nil {
 			grouper.Add(measurement, tags, timestamp, name, val)
 		} else {
 			grouper.Add(measurement, tags, timestamp, name, value)
@@ -554,8 +556,8 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie
 	}
 
 	var nxAttributes, nxChildren, nxRows *telemetry.TelemetryField
-	isNXOS := !strings.ContainsRune(path, ':') // IOS-XR and IOS-XE have a colon in their encoding path, NX-OS does not
-	isEVENT := isNXOS && strings.Contains(path, "EVENT-LIST")
+	isNXOS := !strings.ContainsRune(encodingPath, ':') // IOS-XR and IOS-XE have a colon in their encoding path, NX-OS does not
+	isEVENT := isNXOS && strings.Contains(encodingPath, "EVENT-LIST")
 	nxChildren = nil
 	nxAttributes = nil
 	for _, subfield := range field.Fields {
@@ -574,13 +576,13 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie
 			if nxAttributes == nil {
 				//call function walking over walking list.
 				for _, sub := range subfield.Fields {
-					c.parseClassAttributeField(grouper, sub, name, path, tags, timestamp)
+					c.parseClassAttributeField(grouper, sub, encodingPath, tags, timestamp)
 				}
 			}
 		} else if isNXOS && strings.HasPrefix(subfield.Name, "ROW_") {
 			nxRows = subfield
 		} else if _, isExtraTag := extraTags[subfield.Name]; !isExtraTag { // Regular telemetry decoding
-			c.parseContentField(grouper, subfield, name, path, tags, timestamp)
+			c.parseContentField(grouper, subfield, name, encodingPath, tags, timestamp)
 		}
 	}
 
@@ -595,10 +597,10 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie
|
||||||
//We can have subfield so recursively handle it.
|
//We can have subfield so recursively handle it.
|
||||||
if len(row.Fields) == 1 {
|
if len(row.Fields) == 1 {
|
||||||
tags["row_number"] = strconv.FormatInt(int64(i), 10)
|
tags["row_number"] = strconv.FormatInt(int64(i), 10)
|
||||||
c.parseContentField(grouper, subfield, "", path, tags, timestamp)
|
c.parseContentField(grouper, subfield, "", encodingPath, tags, timestamp)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
c.parseContentField(grouper, subfield, "", path, tags, timestamp)
|
c.parseContentField(grouper, subfield, "", encodingPath, tags, timestamp)
|
||||||
}
|
}
|
||||||
// Nxapi we can't identify keys always from prefix
|
// Nxapi we can't identify keys always from prefix
|
||||||
tags["row_number"] = strconv.FormatInt(int64(i), 10)
|
tags["row_number"] = strconv.FormatInt(int64(i), 10)
|
||||||
|
|
@ -629,14 +631,14 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie
|
||||||
|
|
||||||
for _, subfield := range nxAttributes.Fields {
|
for _, subfield := range nxAttributes.Fields {
|
||||||
if subfield.Name != "rn" {
|
if subfield.Name != "rn" {
|
||||||
c.parseContentField(grouper, subfield, "", path, tags, timestamp)
|
c.parseContentField(grouper, subfield, "", encodingPath, tags, timestamp)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if nxChildren != nil {
|
if nxChildren != nil {
|
||||||
// This is a nested structure, children will inherit relative name keys of parent
|
// This is a nested structure, children will inherit relative name keys of parent
|
||||||
for _, subfield := range nxChildren.Fields {
|
for _, subfield := range nxChildren.Fields {
|
||||||
c.parseContentField(grouper, subfield, prefix, path, tags, timestamp)
|
c.parseContentField(grouper, subfield, prefix, encodingPath, tags, timestamp)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
delete(tags, prefix)
|
delete(tags, prefix)
|
||||||
|
|
|
||||||
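The hunks above show the two fixes used throughout this commit for revive's unused-parameter rule: remove the parameter outright when all call sites are in-tree (the dead `prefix` of parseRib, renaming `path` to the more descriptive `encodingPath` along the way), or keep the position and blank the name when the signature cannot change. A minimal sketch of both, with hypothetical names:

package main

import "fmt"

// Before: revive flags 'unit' as unused:
//   func format(value float64, unit string) string { ... }

// Fix 1: the parameter is dead weight, so remove it and update callers.
func format(value float64) string {
	return fmt.Sprintf("%.2f", value)
}

// Fix 2: a callback type pins the signature, so blank the unused name instead.
type handler func(key string, value float64) string

var byValue handler = func(_ string, value float64) string {
	return format(value)
}

func main() {
	fmt.Println(byValue("cpu", 1.234)) // prints "1.23"
}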
|
|
@ -39,7 +39,7 @@ func nxosValueXformUint64Toint64(field *telemetry.TelemetryField, value interfac
|
||||||
}
|
}
|
||||||
|
|
||||||
//xform string to float
|
//xform string to float
|
||||||
func nxosValueXformStringTofloat(field *telemetry.TelemetryField, value interface{}) interface{} {
|
func nxosValueXformStringTofloat(field *telemetry.TelemetryField, _ interface{}) interface{} {
|
||||||
//convert property to float from string.
|
//convert property to float from string.
|
||||||
vals := field.GetStringValue()
|
vals := field.GetStringValue()
|
||||||
if vals != "" {
|
if vals != "" {
|
||||||
|
|
@ -51,7 +51,7 @@ func nxosValueXformStringTofloat(field *telemetry.TelemetryField, value interfac
|
||||||
}
|
}
|
||||||
|
|
||||||
//xform string to uint64
|
//xform string to uint64
|
||||||
func nxosValueXformStringToUint64(field *telemetry.TelemetryField, value interface{}) interface{} {
|
func nxosValueXformStringToUint64(field *telemetry.TelemetryField, _ interface{}) interface{} {
|
||||||
//string to uint64
|
//string to uint64
|
||||||
vals := field.GetStringValue()
|
vals := field.GetStringValue()
|
||||||
if vals != "" {
|
if vals != "" {
|
||||||
|
|
@ -63,7 +63,7 @@ func nxosValueXformStringToUint64(field *telemetry.TelemetryField, value interfa
|
||||||
}
|
}
|
||||||
|
|
||||||
//xform string to int64
|
//xform string to int64
|
||||||
func nxosValueXformStringToInt64(field *telemetry.TelemetryField, value interface{}) interface{} {
|
func nxosValueXformStringToInt64(field *telemetry.TelemetryField, _ interface{}) interface{} {
|
||||||
//string to int64
|
//string to int64
|
||||||
vals := field.GetStringValue()
|
vals := field.GetStringValue()
|
||||||
if vals != "" {
|
if vals != "" {
|
||||||
|
|
@ -74,26 +74,8 @@ func nxosValueXformStringToInt64(field *telemetry.TelemetryField, value interfac
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
//auto-xform
|
|
||||||
func nxosValueAutoXform(field *telemetry.TelemetryField, value interface{}) interface{} {
|
|
||||||
//check if we want auto xformation
|
|
||||||
vals := field.GetStringValue()
|
|
||||||
if vals != "" {
|
|
||||||
if val64, err := strconv.ParseUint(vals, 10, 64); err == nil {
|
|
||||||
return val64
|
|
||||||
}
|
|
||||||
if valf, err := strconv.ParseFloat(vals, 64); err == nil {
|
|
||||||
return valf
|
|
||||||
}
|
|
||||||
if val64, err := strconv.ParseInt(vals, 10, 64); err == nil {
|
|
||||||
return val64
|
|
||||||
}
|
|
||||||
} // switch
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
//auto-xform float properties
|
//auto-xform float properties
|
||||||
func nxosValueAutoXformFloatProp(field *telemetry.TelemetryField, value interface{}) interface{} {
|
func nxosValueAutoXformFloatProp(field *telemetry.TelemetryField, _ interface{}) interface{} {
|
||||||
//check if we want auto xformation
|
//check if we want auto xformation
|
||||||
vals := field.GetStringValue()
|
vals := field.GetStringValue()
|
||||||
if vals != "" {
|
if vals != "" {
|
||||||
|
|
@ -105,7 +87,7 @@ func nxosValueAutoXformFloatProp(field *telemetry.TelemetryField, value interfac
|
||||||
}
|
}
|
||||||
|
|
||||||
//xform uint64 to string
|
//xform uint64 to string
|
||||||
func nxosValueXformUint64ToString(field *telemetry.TelemetryField, value interface{}) interface{} {
|
func nxosValueXformUint64ToString(field *telemetry.TelemetryField, _ interface{}) interface{} {
|
||||||
switch val := field.ValueByType.(type) {
|
switch val := field.ValueByType.(type) {
|
||||||
case *telemetry.TelemetryField_StringValue:
|
case *telemetry.TelemetryField_StringValue:
|
||||||
if len(val.StringValue) > 0 {
|
if len(val.StringValue) > 0 {
|
||||||
|
|
|
||||||
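Here the unused `value` parameter becomes `_` rather than being dropped, and the never-called nxosValueAutoXform is deleted outright (the `unused` check). The blank-identifier form appears to be chosen because these transforms share one signature so they can be dispatched uniformly; assuming that motivation, a reduced sketch of the pattern:

package main

import (
	"fmt"
	"strconv"
)

// All transforms share one signature so they can live in a lookup table;
// a transform that ignores its second argument blanks it instead of
// dropping it, which would break the table's type.
type xform func(raw string, _ interface{}) interface{}

var xforms = map[string]xform{
	"uint64": func(raw string, _ interface{}) interface{} {
		if v, err := strconv.ParseUint(raw, 10, 64); err == nil {
			return v
		}
		return nil
	},
	"float": func(raw string, _ interface{}) interface{} {
		if v, err := strconv.ParseFloat(raw, 64); err == nil {
			return v
		}
		return nil
	},
}

func main() {
	fmt.Println(xforms["float"]("3.14", nil)) // 3.14
}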
@@ -548,7 +548,7 @@ func TestOfflineServer(t *testing.T) {
 	assert.GreaterOrEqual(t, len(allMeasurements), len(acc.Errors))
 }

-func TestAutoDiscovery(t *testing.T) {
+func TestAutoDiscovery(_ *testing.T) {
 	var (
 		ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 			type result struct {
@@ -67,7 +67,7 @@ func (ps *PubSub) SampleConfig() string {
 }

 // Gather does nothing for this service input.
-func (ps *PubSub) Gather(acc telegraf.Accumulator) error {
+func (ps *PubSub) Gather(_ telegraf.Accumulator) error {
 	return nil
 }
@@ -207,7 +207,7 @@ func TestRunErrorInSubscriber(t *testing.T) {
 		messages: make(chan *testMsg, 100),
 	}
 	fakeErrStr := "a fake error"
-	sub.receiver = testMessagesError(sub, errors.New("a fake error"))
+	sub.receiver = testMessagesError(errors.New("a fake error"))

 	ps := &PubSub{
 		Log: testutil.Logger{},
@@ -22,7 +22,7 @@ func (s *stubSub) Receive(ctx context.Context, f func(context.Context, message))

 type receiveFunc func(ctx context.Context, f func(context.Context, message)) error

-func testMessagesError(s *stubSub, expectedErr error) receiveFunc {
+func testMessagesError(expectedErr error) receiveFunc {
 	return func(ctx context.Context, f func(context.Context, message)) error {
 		return expectedErr
 	}
@@ -6,7 +6,6 @@ import (
 	"encoding/base64"
 	"encoding/json"
 	"io/ioutil"
-	"net"
 	"net/http"
 	"sync"
 	"time"

@@ -39,13 +38,12 @@ type PubSubPush struct {
 	tlsint.ServerConfig
 	parsers.Parser

-	listener net.Listener
 	server   *http.Server
 	acc      telegraf.TrackingAccumulator
 	ctx      context.Context
 	cancel   context.CancelFunc
 	wg       *sync.WaitGroup
 	mu       *sync.Mutex

 	undelivered map[telegraf.TrackingID]chan bool
 	sem         chan struct{}
@@ -144,7 +144,7 @@ func TestServeHTTP(t *testing.T) {
 			pubPush.SetParser(p)

 			dst := make(chan telegraf.Metric, 1)
-			ro := models.NewRunningOutput("test", &testOutput{failWrite: test.fail}, &models.OutputConfig{}, 1, 1)
+			ro := models.NewRunningOutput(&testOutput{failWrite: test.fail}, &models.OutputConfig{}, 1, 1)
 			pubPush.acc = agent.NewAccumulator(&testMetricMaker{}, dst).WithTracking(1)

 			wg.Add(1)

@@ -154,13 +154,13 @@ func TestServeHTTP(t *testing.T) {
 			}()

 			wg.Add(1)
-			go func(status int, d chan telegraf.Metric) {
+			go func(d chan telegraf.Metric) {
 				defer wg.Done()
 				for m := range d {
 					ro.AddMetric(m)
 					ro.Write()
 				}
-			}(test.status, dst)
+			}(dst)

 			ctx, cancel := context.WithTimeout(req.Context(), pubPush.WriteTimeout.Duration)
 			req = req.WithContext(ctx)

@@ -218,7 +218,7 @@ func (*testOutput) SampleConfig() string {
 	return ""
 }

-func (t *testOutput) Write(metrics []telegraf.Metric) error {
+func (t *testOutput) Write(_ []telegraf.Metric) error {
 	if t.failWrite {
 		return fmt.Errorf("failed write")
 	}
@@ -208,11 +208,7 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
 	c.updateWindow(time.Now())

 	// Get all of the possible queries so we can send groups of 100.
-	queries, err := c.getDataQueries(filteredMetrics)
-	if err != nil {
-		return err
-	}
+	queries := c.getDataQueries(filteredMetrics)

 	if len(queries) == 0 {
 		return nil
 	}

@@ -441,9 +437,9 @@ func (c *CloudWatch) updateWindow(relativeTo time.Time) {
 }

 // getDataQueries gets all of the possible queries so we can maximize the request payload.
-func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) ([]*cloudwatch.MetricDataQuery, error) {
+func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) []*cloudwatch.MetricDataQuery {
 	if c.metricCache != nil && c.metricCache.queries != nil && c.metricCache.isValid() {
-		return c.metricCache.queries, nil
+		return c.metricCache.queries
 	}

 	c.queryDimensions = map[string]*map[string]string{}

@@ -518,7 +514,7 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) ([]*cloudw

 	if len(dataQueries) == 0 {
 		c.Log.Debug("no metrics found to collect")
-		return nil, nil
+		return nil
 	}

 	if c.metricCache == nil {

@@ -531,7 +527,7 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) ([]*cloudw
 		c.metricCache.queries = dataQueries
 	}

-	return dataQueries, nil
+	return dataQueries
 }

 // gatherMetrics gets metric data from Cloudwatch.
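The getDataQueries change is the typical unparam fix seen in several files below: the function could only ever return a nil error, so the error result goes away, together with the dead `if err != nil` branch at every call site. A hedged before/after sketch with hypothetical names:

package main

import "fmt"

// Before: unparam reports that the error result is always nil.
func buildQueriesOld(names []string) ([]string, error) {
	queries := make([]string, 0, len(names))
	for _, n := range names {
		queries = append(queries, "SELECT "+n)
	}
	return queries, nil
}

// After: drop the impossible error; callers lose their dead error checks too.
func buildQueries(names []string) []string {
	queries := make([]string, 0, len(names))
	for _, n := range names {
		queries = append(queries, "SELECT "+n)
	}
	return queries
}

func main() {
	fmt.Println(buildQueries([]string{"usage_idle"}))
}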
@@ -135,7 +135,7 @@ func TestGather(t *testing.T) {

 type mockSelectMetricsCloudWatchClient struct{}

-func (m *mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) {
+func (m *mockSelectMetricsCloudWatchClient) ListMetrics(_ *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) {
 	metrics := []*cloudwatch.Metric{}
 	// 4 metrics are available
 	metricNames := []string{"Latency", "RequestCount", "HealthyHostCount", "UnHealthyHostCount"}

@@ -182,7 +182,7 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListM
 	return result, nil
 }

-func (m *mockSelectMetricsCloudWatchClient) GetMetricData(params *cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error) {
+func (m *mockSelectMetricsCloudWatchClient) GetMetricData(_ *cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error) {
 	return nil, nil
 }

@@ -246,7 +246,7 @@ func TestGenerateStatisticsInputParams(t *testing.T) {
 	c.updateWindow(now)

 	statFilter, _ := filter.NewIncludeExcludeFilter(nil, nil)
-	queries, _ := c.getDataQueries([]filteredMetric{{metrics: []*cloudwatch.Metric{m}, statFilter: statFilter}})
+	queries := c.getDataQueries([]filteredMetric{{metrics: []*cloudwatch.Metric{m}, statFilter: statFilter}})
 	params := c.getDataInputs(queries)

 	assert.EqualValues(t, *params.EndTime, now.Add(-time.Duration(c.Delay)))

@@ -283,7 +283,7 @@ func TestGenerateStatisticsInputParamsFiltered(t *testing.T) {
 	c.updateWindow(now)

 	statFilter, _ := filter.NewIncludeExcludeFilter([]string{"average", "sample_count"}, nil)
-	queries, _ := c.getDataQueries([]filteredMetric{{metrics: []*cloudwatch.Metric{m}, statFilter: statFilter}})
+	queries := c.getDataQueries([]filteredMetric{{metrics: []*cloudwatch.Metric{m}, statFilter: statFilter}})
 	params := c.getDataInputs(queries)

 	assert.EqualValues(t, *params.EndTime, now.Add(-time.Duration(c.Delay)))
@@ -48,26 +48,22 @@ func TestCPUStats(t *testing.T) {

 	cs := NewCPUStats(&mps)

-	cputags := map[string]string{
-		"cpu": "cpu0",
-	}
-
 	err := cs.Gather(&acc)
 	require.NoError(t, err)

 	// Computed values are checked with delta > 0 because of floating point arithmetic
 	// imprecision
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 8.8, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_system", 8.2, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 80.1, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_active", 19.9, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_nice", 1.3, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 0.8389, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_irq", 0.6, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_softirq", 0.11, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_steal", 0.0511, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_guest", 3.1, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_guest_nice", 0.324, 0, cputags)
+	assertContainsTaggedFloat(t, &acc, "time_user", 8.8, 0)
+	assertContainsTaggedFloat(t, &acc, "time_system", 8.2, 0)
+	assertContainsTaggedFloat(t, &acc, "time_idle", 80.1, 0)
+	assertContainsTaggedFloat(t, &acc, "time_active", 19.9, 0.0005)
+	assertContainsTaggedFloat(t, &acc, "time_nice", 1.3, 0)
+	assertContainsTaggedFloat(t, &acc, "time_iowait", 0.8389, 0)
+	assertContainsTaggedFloat(t, &acc, "time_irq", 0.6, 0)
+	assertContainsTaggedFloat(t, &acc, "time_softirq", 0.11, 0)
+	assertContainsTaggedFloat(t, &acc, "time_steal", 0.0511, 0)
+	assertContainsTaggedFloat(t, &acc, "time_guest", 3.1, 0)
+	assertContainsTaggedFloat(t, &acc, "time_guest_nice", 0.324, 0)

 	mps2 := system.MockPS{}
 	mps2.On("CPUTimes").Return([]cpu.TimesStat{cts2}, nil)

@@ -77,29 +73,29 @@ func TestCPUStats(t *testing.T) {
 	err = cs.Gather(&acc)
 	require.NoError(t, err)

-	assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 24.9, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_system", 10.9, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 157.9798, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_active", 42.0202, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_nice", 3.5, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 0.929, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_irq", 1.2, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_softirq", 0.31, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_steal", 0.2812, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_guest", 11.4, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_guest_nice", 2.524, 0, cputags)
+	assertContainsTaggedFloat(t, &acc, "time_user", 24.9, 0)
+	assertContainsTaggedFloat(t, &acc, "time_system", 10.9, 0)
+	assertContainsTaggedFloat(t, &acc, "time_idle", 157.9798, 0)
+	assertContainsTaggedFloat(t, &acc, "time_active", 42.0202, 0.0005)
+	assertContainsTaggedFloat(t, &acc, "time_nice", 3.5, 0)
+	assertContainsTaggedFloat(t, &acc, "time_iowait", 0.929, 0)
+	assertContainsTaggedFloat(t, &acc, "time_irq", 1.2, 0)
+	assertContainsTaggedFloat(t, &acc, "time_softirq", 0.31, 0)
+	assertContainsTaggedFloat(t, &acc, "time_steal", 0.2812, 0)
+	assertContainsTaggedFloat(t, &acc, "time_guest", 11.4, 0)
+	assertContainsTaggedFloat(t, &acc, "time_guest_nice", 2.524, 0)

-	assertContainsTaggedFloat(t, &acc, "cpu", "usage_user", 7.8, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "usage_system", 2.7, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "usage_idle", 77.8798, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "usage_active", 22.1202, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "usage_nice", 0, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "usage_iowait", 0.0901, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "usage_irq", 0.6, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "usage_softirq", 0.2, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "usage_steal", 0.2301, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "usage_guest", 8.3, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "usage_guest_nice", 2.2, 0.0005, cputags)
+	assertContainsTaggedFloat(t, &acc, "usage_user", 7.8, 0.0005)
+	assertContainsTaggedFloat(t, &acc, "usage_system", 2.7, 0.0005)
+	assertContainsTaggedFloat(t, &acc, "usage_idle", 77.8798, 0.0005)
+	assertContainsTaggedFloat(t, &acc, "usage_active", 22.1202, 0.0005)
+	assertContainsTaggedFloat(t, &acc, "usage_nice", 0, 0.0005)
+	assertContainsTaggedFloat(t, &acc, "usage_iowait", 0.0901, 0.0005)
+	assertContainsTaggedFloat(t, &acc, "usage_irq", 0.6, 0.0005)
+	assertContainsTaggedFloat(t, &acc, "usage_softirq", 0.2, 0.0005)
+	assertContainsTaggedFloat(t, &acc, "usage_steal", 0.2301, 0.0005)
+	assertContainsTaggedFloat(t, &acc, "usage_guest", 8.3, 0.0005)
+	assertContainsTaggedFloat(t, &acc, "usage_guest_nice", 2.2, 0.0005)
 }

 // Asserts that a given accumulator contains a measurement of type float64 with

@@ -109,24 +105,21 @@ func TestCPUStats(t *testing.T) {
 // Parameters:
 //     t *testing.T            : Testing object to use
 //     acc testutil.Accumulator: Accumulator to examine
-//     measurement string      : Name of the measurement to examine
+//     field string            : Name of field to examine
 //     expectedValue float64   : Value to search for within the measurement
 //     delta float64           : Maximum acceptable distance of an accumulated value
 //                               from the expectedValue parameter. Useful when
 //                               floating-point arithmetic imprecision makes looking
 //                               for an exact match impractical
-//     tags map[string]string  : Tag set the found measurement must have. Set to nil to
-//                               ignore the tag set.
 func assertContainsTaggedFloat(
 	t *testing.T,
 	acc *testutil.Accumulator,
-	measurement string,
 	field string,
 	expectedValue float64,
 	delta float64,
-	tags map[string]string,
 ) {
 	var actualValue float64
+	measurement := "cpu" // always cpu
 	for _, pt := range acc.Metrics {
 		if pt.Measurement == measurement {
 			for fieldname, value := range pt.Fields {

@@ -218,18 +211,14 @@ func TestCPUTimesDecrease(t *testing.T) {

 	cs := NewCPUStats(&mps)

-	cputags := map[string]string{
-		"cpu": "cpu0",
-	}
-
 	err := cs.Gather(&acc)
 	require.NoError(t, err)

 	// Computed values are checked with delta > 0 because of floating point arithmetic
 	// imprecision
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 18, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 80, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 2, 0, cputags)
+	assertContainsTaggedFloat(t, &acc, "time_user", 18, 0)
+	assertContainsTaggedFloat(t, &acc, "time_idle", 80, 0)
+	assertContainsTaggedFloat(t, &acc, "time_iowait", 2, 0)

 	mps2 := system.MockPS{}
 	mps2.On("CPUTimes").Return([]cpu.TimesStat{cts2}, nil)

@@ -246,11 +235,11 @@ func TestCPUTimesDecrease(t *testing.T) {
 	err = cs.Gather(&acc)
 	require.NoError(t, err)

-	assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 56, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 120, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 3, 0, cputags)
+	assertContainsTaggedFloat(t, &acc, "time_user", 56, 0)
+	assertContainsTaggedFloat(t, &acc, "time_idle", 120, 0)
+	assertContainsTaggedFloat(t, &acc, "time_iowait", 3, 0)

-	assertContainsTaggedFloat(t, &acc, "cpu", "usage_user", 18, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "usage_idle", 80, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "cpu", "usage_iowait", 2, 0.0005, cputags)
+	assertContainsTaggedFloat(t, &acc, "usage_user", 18, 0.0005)
+	assertContainsTaggedFloat(t, &acc, "usage_idle", 80, 0.0005)
+	assertContainsTaggedFloat(t, &acc, "usage_iowait", 2, 0.0005)
 }
@@ -92,11 +92,10 @@ type AuthToken struct {

 // ClusterClient is a Client that uses the cluster URL.
 type ClusterClient struct {
 	clusterURL *url.URL
 	httpClient *http.Client
-	credentials *Credentials
 	token      string
 	semaphore  chan struct{}
 }

 type claims struct {
@@ -47,13 +47,13 @@ func (c *ServiceAccount) IsExpired() bool {
 	return c.auth.Text != "" || c.auth.Expire.Add(relogDuration).After(time.Now())
 }

-func (c *TokenCreds) Token(ctx context.Context, client Client) (string, error) {
+func (c *TokenCreds) Token(_ context.Context, _ Client) (string, error) {
 	octets, err := ioutil.ReadFile(c.Path)
 	if err != nil {
-		return "", fmt.Errorf("Error reading token file %q: %s", c.Path, err)
+		return "", fmt.Errorf("error reading token file %q: %s", c.Path, err)
 	}
 	if !utf8.Valid(octets) {
-		return "", fmt.Errorf("Token file does not contain utf-8 encoded text: %s", c.Path)
+		return "", fmt.Errorf("token file does not contain utf-8 encoded text: %s", c.Path)
 	}
 	token := strings.TrimSpace(string(octets))
 	return token, nil

@@ -63,7 +63,7 @@ func (c *TokenCreds) IsExpired() bool {
 	return true
 }

-func (c *NullCreds) Token(ctx context.Context, client Client) (string, error) {
+func (c *NullCreds) Token(_ context.Context, _ Client) (string, error) {
 	return "", nil
 }
@@ -66,7 +66,6 @@ type DCOS struct {
 	nodeFilter      filter.Filter
 	containerFilter filter.Filter
 	appFilter       filter.Filter
-	taskNameFilter  filter.Filter
 }

 func (d *DCOS) Description() string {

@@ -223,7 +222,7 @@ type point struct {
 	fields map[string]interface{}
 }

-func (d *DCOS) createPoints(acc telegraf.Accumulator, m *Metrics) []*point {
+func (d *DCOS) createPoints(m *Metrics) []*point {
 	points := make(map[string]*point)
 	for _, dp := range m.Datapoints {
 		fieldKey := strings.Replace(dp.Name, ".", "_", -1)

@@ -288,7 +287,7 @@ func (d *DCOS) createPoints(acc telegraf.Accumulator, m *Metrics) []*point {
 func (d *DCOS) addMetrics(acc telegraf.Accumulator, cluster, mname string, m *Metrics, tagDimensions []string) {
 	tm := time.Now()

-	points := d.createPoints(acc, m)
+	points := d.createPoints(m)

 	for _, p := range points {
 		tags := make(map[string]string)
@@ -106,7 +106,7 @@ func (monitor *DirectoryMonitor) Description() string {
 	return "Ingests files in a directory and then moves them to a target directory."
 }

-func (monitor *DirectoryMonitor) Gather(acc telegraf.Accumulator) error {
+func (monitor *DirectoryMonitor) Gather(_ telegraf.Accumulator) error {
 	// Get all files sitting in the directory.
 	files, err := ioutil.ReadDir(monitor.Directory)
 	if err != nil {

@@ -130,7 +130,7 @@ func (monitor *DirectoryMonitor) Gather(acc telegraf.Accumulator) error {

 		// If file is decaying, process it.
 		if timeThresholdExceeded {
-			monitor.processFile(file, acc)
+			monitor.processFile(file)
 		}
 	}

@@ -149,7 +149,7 @@ func (monitor *DirectoryMonitor) Start(acc telegraf.Accumulator) error {
 	// Monitor the files channel and read what they receive.
 	monitor.waitGroup.Add(1)
 	go func() {
-		monitor.Monitor(acc)
+		monitor.Monitor()
 		monitor.waitGroup.Done()
 	}()

@@ -164,7 +164,7 @@ func (monitor *DirectoryMonitor) Stop() {
 	monitor.waitGroup.Wait()
 }

-func (monitor *DirectoryMonitor) Monitor(acc telegraf.Accumulator) {
+func (monitor *DirectoryMonitor) Monitor() {
 	for filePath := range monitor.filesToProcess {
 		if monitor.context.Err() != nil {
 			return

@@ -182,7 +182,7 @@ func (monitor *DirectoryMonitor) Monitor(acc telegraf.Accumulator) {
 	}
 }

-func (monitor *DirectoryMonitor) processFile(file os.FileInfo, acc telegraf.Accumulator) {
+func (monitor *DirectoryMonitor) processFile(file os.FileInfo) {
 	if file.IsDir() {
 		return
 	}
@@ -77,13 +77,6 @@ func TestCSVGZImport(t *testing.T) {
 	require.NoError(t, err)
 }

-// For JSON data.
-type event struct {
-	Name   string
-	Speed  float64
-	Length float64
-}
-
 func TestMultipleJSONFileImports(t *testing.T) {
 	acc := testutil.Accumulator{}
 	testJsonFile := "test.json"
@@ -18,8 +18,7 @@ import (
 type Disque struct {
 	Servers []string

 	c   net.Conn
-	buf []byte
 }

 var sampleConfig = `

@@ -87,10 +86,10 @@ func (d *Disque) Gather(acc telegraf.Accumulator) error {
 			u.Path = ""
 		}
 		wg.Add(1)
-		go func(serv string) {
+		go func() {
 			defer wg.Done()
 			acc.AddError(d.gatherServer(u, acc))
-		}(serv)
+		}()
 	}

 	wg.Wait()
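The disque goroutine presumably took `serv` by value to dodge loop-variable capture, but its body only reads `u`, so the parameter was dead. Capturing `u` directly is safe because it is a fresh variable each iteration; only the loop variable itself was shared across iterations before Go 1.22. A standalone sketch of the distinction:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for _, server := range []string{"host-a", "host-b"} {
		// u is created fresh on every iteration, so capturing it in the
		// closure is safe; a pass-by-value parameter is only needed for
		// variables reused across iterations (like the pre-Go-1.22 loop
		// variable itself).
		u := "tcp://" + server
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("gathering", u)
		}()
	}
	wg.Wait()
}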
@@ -6,7 +6,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"net/http"
 	"regexp"
 	"strconv"
 	"strings"

@@ -57,7 +56,6 @@ type Docker struct {
 	newClient func(string, *tls.Config) (Client, error)

 	client         Client
-	httpClient     *http.Client
 	engineHost     string
 	serverVersion  string
 	filtersCreated bool

@@ -937,15 +935,6 @@ func copyTags(in map[string]string) map[string]string {
 	return out
 }

-func sliceContains(in string, sl []string) bool {
-	for _, str := range sl {
-		if str == in {
-			return true
-		}
-	}
-	return false
-}
-
 // Parses the human-readable size string into the amount it represents.
 func parseSize(sizeStr string) (int64, error) {
 	matches := sizeRegex.FindStringSubmatch(sizeStr)
@@ -99,7 +99,7 @@ var baseClient = MockClient{
 	},
 }

-func newClient(host string, tlsConfig *tls.Config) (Client, error) {
+func newClient(_ string, _ *tls.Config) (Client, error) {
 	return &baseClient, nil
 }

@@ -1127,7 +1127,6 @@ func TestHostnameFromID(t *testing.T) {
 func Test_parseContainerStatsPerDeviceAndTotal(t *testing.T) {
 	type args struct {
 		stat             *types.StatsJSON
-		acc              telegraf.Accumulator
 		tags             map[string]string
 		id               string
 		perDeviceInclude []string
@@ -64,11 +64,6 @@ var sampleConfig = `

 const (
 	defaultEndpoint = "unix:///var/run/docker.sock"
-
-	// Maximum bytes of a log line before it will be split, size is mirroring
-	// docker code:
-	// https://github.com/moby/moby/blob/master/daemon/logger/copier.go#L21
-	maxLineBytes = 16 * 1024
 )

 var (

@@ -160,18 +155,16 @@ func (d *DockerLogs) Init() error {
 	return nil
 }

-func (d *DockerLogs) addToContainerList(containerID string, cancel context.CancelFunc) error {
+func (d *DockerLogs) addToContainerList(containerID string, cancel context.CancelFunc) {
 	d.mu.Lock()
 	defer d.mu.Unlock()
 	d.containerList[containerID] = cancel
-	return nil
 }

-func (d *DockerLogs) removeFromContainerList(containerID string) error {
+func (d *DockerLogs) removeFromContainerList(containerID string) {
 	d.mu.Lock()
 	defer d.mu.Unlock()
 	delete(d.containerList, containerID)
-	return nil
 }

 func (d *DockerLogs) containerInContainerList(containerID string) bool {

@@ -181,13 +174,12 @@ func (d *DockerLogs) containerInContainerList(containerID string) bool {
 	return ok
 }

-func (d *DockerLogs) cancelTails() error {
+func (d *DockerLogs) cancelTails() {
 	d.mu.Lock()
 	defer d.mu.Unlock()
 	for _, cancel := range d.containerList {
 		cancel()
 	}
-	return nil
 }

 func (d *DockerLogs) matchedContainerName(names []string) string {
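The docker_log helpers show the same unparam finding applied to methods: each one returned `error` yet could only ever return nil, so the result, and the dead error checks at every call site, disappear. A reduced sketch under a hypothetical `registry` type, mirroring the mutex-guarded addToContainerList pattern:

package main

import (
	"fmt"
	"sync"
)

type registry struct {
	mu    sync.Mutex
	items map[string]bool
}

// Before (flagged by unparam):
//   func (r *registry) add(id string) error { ...; return nil }
// and every caller had to write a pointless error check.

// After: the helper cannot fail, so it returns nothing.
func (r *registry) add(id string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.items[id] = true
}

func main() {
	r := &registry{items: map[string]bool{}}
	r.add("container-1") // no dead `if err != nil` at the call site
	fmt.Println(len(r.items))
}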
@@ -48,8 +48,6 @@ var validQuery = map[string]bool{

 func (d *Dovecot) SampleConfig() string { return sampleConfig }

-const defaultPort = "24242"
-
 // Reads stats from all configured servers.
 func (d *Dovecot) Gather(acc telegraf.Accumulator) error {
 	if !validQuery[d.Type] {
@@ -558,11 +558,7 @@ func (e *Elasticsearch) gatherIndicesStats(url string, acc telegraf.Accumulator)
 // gatherSortedIndicesStats gathers stats for all indices in no particular order.
 func (e *Elasticsearch) gatherIndividualIndicesStats(indices map[string]indexStat, now time.Time, acc telegraf.Accumulator) error {
 	// Sort indices into buckets based on their configured prefix, if any matches.
-	categorizedIndexNames, err := e.categorizeIndices(indices)
-	if err != nil {
-		return err
-	}
+	categorizedIndexNames := e.categorizeIndices(indices)

 	for _, matchingIndices := range categorizedIndexNames {
 		// Establish the number of each category of indices to use. User can configure to use only the latest 'X' amount.
 		indicesCount := len(matchingIndices)

@@ -590,7 +586,7 @@ func (e *Elasticsearch) gatherIndividualIndicesStats(indices map[string]indexSta
 	return nil
 }

-func (e *Elasticsearch) categorizeIndices(indices map[string]indexStat) (map[string][]string, error) {
+func (e *Elasticsearch) categorizeIndices(indices map[string]indexStat) map[string][]string {
 	categorizedIndexNames := map[string][]string{}

 	// If all indices are configured to be gathered, bucket them all together.

@@ -599,7 +595,7 @@ func (e *Elasticsearch) categorizeIndices(indices map[string]indexStat) (map[str
 			categorizedIndexNames["_all"] = append(categorizedIndexNames["_all"], indexName)
 		}

-		return categorizedIndexNames, nil
+		return categorizedIndexNames
 	}

 	// Bucket each returned index with its associated configured index (if any match).

@@ -617,7 +613,7 @@ func (e *Elasticsearch) categorizeIndices(indices map[string]indexStat) (map[str
 		categorizedIndexNames[match] = append(categorizedIndexNames[match], indexName)
 	}

-	return categorizedIndexNames, nil
+	return categorizedIndexNames
 }

 func (e *Elasticsearch) gatherSingleIndexStats(name string, index indexStat, now time.Time, acc telegraf.Accumulator) error {
@@ -33,9 +33,9 @@ type transportMock struct {
 	body string
 }

-func newTransportMock(statusCode int, body string) http.RoundTripper {
+func newTransportMock(body string) http.RoundTripper {
 	return &transportMock{
-		statusCode: statusCode,
+		statusCode: http.StatusOK,
 		body:       body,
 	}
 }

@@ -77,7 +77,7 @@ func checkNodeStatsResult(t *testing.T, acc *testutil.Accumulator) {
 func TestGather(t *testing.T) {
 	es := newElasticsearchWithClient()
 	es.Servers = []string{"http://example.com:9200"}
-	es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse)
+	es.client.Transport = newTransportMock(nodeStatsResponse)
 	es.serverInfo = make(map[string]serverInfo)
 	es.serverInfo["http://example.com:9200"] = defaultServerInfo()

@@ -94,7 +94,7 @@ func TestGatherIndividualStats(t *testing.T) {
 	es := newElasticsearchWithClient()
 	es.Servers = []string{"http://example.com:9200"}
 	es.NodeStats = []string{"jvm", "process"}
-	es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponseJVMProcess)
+	es.client.Transport = newTransportMock(nodeStatsResponseJVMProcess)
 	es.serverInfo = make(map[string]serverInfo)
 	es.serverInfo["http://example.com:9200"] = defaultServerInfo()

@@ -120,7 +120,7 @@ func TestGatherIndividualStats(t *testing.T) {
 func TestGatherNodeStats(t *testing.T) {
 	es := newElasticsearchWithClient()
 	es.Servers = []string{"http://example.com:9200"}
-	es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse)
+	es.client.Transport = newTransportMock(nodeStatsResponse)
 	es.serverInfo = make(map[string]serverInfo)
 	es.serverInfo["http://example.com:9200"] = defaultServerInfo()

@@ -138,7 +138,7 @@ func TestGatherClusterHealthEmptyClusterHealth(t *testing.T) {
 	es.Servers = []string{"http://example.com:9200"}
 	es.ClusterHealth = true
 	es.ClusterHealthLevel = ""
-	es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse)
+	es.client.Transport = newTransportMock(clusterHealthResponse)
 	es.serverInfo = make(map[string]serverInfo)
 	es.serverInfo["http://example.com:9200"] = defaultServerInfo()

@@ -165,7 +165,7 @@ func TestGatherClusterHealthSpecificClusterHealth(t *testing.T) {
 	es.Servers = []string{"http://example.com:9200"}
 	es.ClusterHealth = true
 	es.ClusterHealthLevel = "cluster"
-	es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse)
+	es.client.Transport = newTransportMock(clusterHealthResponse)
 	es.serverInfo = make(map[string]serverInfo)
 	es.serverInfo["http://example.com:9200"] = defaultServerInfo()

@@ -192,7 +192,7 @@ func TestGatherClusterHealthAlsoIndicesHealth(t *testing.T) {
 	es.Servers = []string{"http://example.com:9200"}
 	es.ClusterHealth = true
 	es.ClusterHealthLevel = "indices"
-	es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponseWithIndices)
+	es.client.Transport = newTransportMock(clusterHealthResponseWithIndices)
 	es.serverInfo = make(map[string]serverInfo)
 	es.serverInfo["http://example.com:9200"] = defaultServerInfo()

@@ -223,7 +223,7 @@ func TestGatherClusterStatsMaster(t *testing.T) {
 	info := serverInfo{nodeID: "SDFsfSDFsdfFSDSDfSFDSDF", masterID: ""}

 	// first get catMaster
-	es.client.Transport = newTransportMock(http.StatusOK, IsMasterResult)
+	es.client.Transport = newTransportMock(IsMasterResult)
 	masterID, err := es.getCatMaster("junk")
 	require.NoError(t, err)
 	info.masterID = masterID

@@ -238,7 +238,7 @@ func TestGatherClusterStatsMaster(t *testing.T) {
 	// now get node status, which determines whether we're master
 	var acc testutil.Accumulator
 	es.Local = true
-	es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse)
+	es.client.Transport = newTransportMock(nodeStatsResponse)
 	if err := es.gatherNodeStats("junk", &acc); err != nil {
 		t.Fatal(err)
 	}

@@ -247,7 +247,7 @@ func TestGatherClusterStatsMaster(t *testing.T) {
 	checkNodeStatsResult(t, &acc)

 	// now test the clusterstats method
-	es.client.Transport = newTransportMock(http.StatusOK, clusterStatsResponse)
+	es.client.Transport = newTransportMock(clusterStatsResponse)
 	require.NoError(t, es.gatherClusterStats("junk", &acc))

 	tags := map[string]string{

@@ -269,7 +269,7 @@ func TestGatherClusterStatsNonMaster(t *testing.T) {
 	es.serverInfo["http://example.com:9200"] = serverInfo{nodeID: "SDFsfSDFsdfFSDSDfSFDSDF", masterID: ""}

 	// first get catMaster
-	es.client.Transport = newTransportMock(http.StatusOK, IsNotMasterResult)
+	es.client.Transport = newTransportMock(IsNotMasterResult)
 	masterID, err := es.getCatMaster("junk")
 	require.NoError(t, err)

@@ -282,7 +282,7 @@ func TestGatherClusterStatsNonMaster(t *testing.T) {
 	// now get node status, which determines whether we're master
 	var acc testutil.Accumulator
 	es.Local = true
-	es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse)
+	es.client.Transport = newTransportMock(nodeStatsResponse)
 	if err := es.gatherNodeStats("junk", &acc); err != nil {
 		t.Fatal(err)
 	}

@@ -296,7 +296,7 @@ func TestGatherClusterIndicesStats(t *testing.T) {
 	es := newElasticsearchWithClient()
 	es.IndicesInclude = []string{"_all"}
 	es.Servers = []string{"http://example.com:9200"}
-	es.client.Transport = newTransportMock(http.StatusOK, clusterIndicesResponse)
+	es.client.Transport = newTransportMock(clusterIndicesResponse)
 	es.serverInfo = make(map[string]serverInfo)
 	es.serverInfo["http://example.com:9200"] = defaultServerInfo()

@@ -315,7 +315,7 @@ func TestGatherDateStampedIndicesStats(t *testing.T) {
 	es.IndicesInclude = []string{"twitter*", "influx*", "penguins"}
 	es.NumMostRecentIndices = 2
 	es.Servers = []string{"http://example.com:9200"}
-	es.client.Transport = newTransportMock(http.StatusOK, dateStampedIndicesResponse)
+	es.client.Transport = newTransportMock(dateStampedIndicesResponse)
 	es.serverInfo = make(map[string]serverInfo)
 	es.serverInfo["http://example.com:9200"] = defaultServerInfo()
 	es.Init()

@@ -357,7 +357,7 @@ func TestGatherClusterIndiceShardsStats(t *testing.T) {
 	es := newElasticsearchWithClient()
 	es.IndicesLevel = "shards"
 	es.Servers = []string{"http://example.com:9200"}
-	es.client.Transport = newTransportMock(http.StatusOK, clusterIndicesShardsResponse)
+	es.client.Transport = newTransportMock(clusterIndicesShardsResponse)
 	es.serverInfo = make(map[string]serverInfo)
 	es.serverInfo["http://example.com:9200"] = defaultServerInfo()
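newTransportMock is the third unparam pattern in this commit: every call site passed http.StatusOK, so the parameter always received the same value and the constant is folded into the helper, shortening every caller. A reduced, standalone variant (the real helper returns an http.RoundTripper):

package main

import (
	"fmt"
	"net/http"
)

type transportMock struct {
	statusCode int
	body       string
}

// Every test passed http.StatusOK, so unparam flags the parameter as
// always receiving the same value; baking the constant into the helper
// removes the noise from each call site.
func newTransportMock(body string) *transportMock {
	return &transportMock{
		statusCode: http.StatusOK, // was a parameter, but always 200
		body:       body,
	}
}

func main() {
	m := newTransportMock(`{"status":"green"}`)
	fmt.Println(m.statusCode, m.body)
}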
@@ -207,11 +207,7 @@ func (e *EventHub) Start(acc telegraf.Accumulator) error {
 	}()

 	// Configure receiver options
-	receiveOpts, err := e.configureReceiver()
-	if err != nil {
-		return err
-	}
+	receiveOpts := e.configureReceiver()

 	partitions := e.PartitionIDs

 	if len(e.PartitionIDs) == 0 {

@@ -224,7 +220,7 @@ func (e *EventHub) Start(acc telegraf.Accumulator) error {
 	}

 	for _, partitionID := range partitions {
-		_, err = e.hub.Receive(ctx, partitionID, e.onMessage, receiveOpts...)
+		_, err := e.hub.Receive(ctx, partitionID, e.onMessage, receiveOpts...)
 		if err != nil {
 			return fmt.Errorf("creating receiver for partition %q: %v", partitionID, err)
 		}

@@ -233,7 +229,7 @@ func (e *EventHub) Start(acc telegraf.Accumulator) error {
 	return nil
 }

-func (e *EventHub) configureReceiver() ([]eventhub.ReceiveOption, error) {
+func (e *EventHub) configureReceiver() []eventhub.ReceiveOption {
 	receiveOpts := []eventhub.ReceiveOption{}

 	if e.ConsumerGroup != "" {

@@ -254,7 +250,7 @@ func (e *EventHub) configureReceiver() ([]eventhub.ReceiveOption, error) {
 		receiveOpts = append(receiveOpts, eventhub.ReceiveWithEpoch(e.Epoch))
 	}

-	return receiveOpts, nil
+	return receiveOpts
 }

 // OnMessage handles an Event. When this function returns without error the
|
|
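Review note: this is the unparam pattern applied throughout the commit. Once configureReceiver can no longer fail, its error return and the dead err check at the call site both disappear. A minimal self-contained sketch of the shape, with a hypothetical Option type standing in for eventhub.ReceiveOption:

package main

import "fmt"

// Option is a hypothetical stand-in for eventhub.ReceiveOption.
type Option string

type hub struct{ consumerGroup string }

// An options builder that cannot fail should return only the slice.
func (h *hub) configureReceiver() []Option {
	opts := []Option{}
	if h.consumerGroup != "" {
		opts = append(opts, Option("group="+h.consumerGroup))
	}
	return opts
}

func main() {
	h := &hub{consumerGroup: "telegraf"}
	// No error to check anymore; the caller collapses to one line.
	fmt.Println(h.configureReceiver())
}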
@@ -18,9 +18,6 @@ import (
 	"github.com/stretchr/testify/require"
 )

-// Midnight 9/22/2015
-const baseTimeSeconds = 1442905200
-
 const validJSON = `
 {
     "status": "green",

@@ -40,20 +37,6 @@ const malformedJSON = `
     "status": "green",
 `

-const lineProtocol = "cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1\n"
-const lineProtocolEmpty = ""
-const lineProtocolShort = "ab"
-
-const lineProtocolMulti = `
-cpu,cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
-cpu,cpu=cpu1,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
-cpu,cpu=cpu2,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
-cpu,cpu=cpu3,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
-cpu,cpu=cpu4,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
-cpu,cpu=cpu5,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
-cpu,cpu=cpu6,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
-`
-
 type CarriageReturnTest struct {
 	input  []byte
 	output []byte

@@ -91,7 +74,7 @@ func newRunnerMock(out []byte, errout []byte, err error) Runner {
 	}
 }

-func (r runnerMock) Run(command string, _ time.Duration) ([]byte, []byte, error) {
+func (r runnerMock) Run(_ string, _ time.Duration) ([]byte, []byte, error) {
 	return r.out, r.errout, r.err
 }
@@ -12,7 +12,7 @@ import (
 	"github.com/influxdata/telegraf"
 )

-func (e *Execd) Gather(acc telegraf.Accumulator) error {
+func (e *Execd) Gather(_ telegraf.Accumulator) error {
 	if e.process == nil || e.process.Cmd == nil {
 		return nil
 	}
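Review note: most of the revive:unused-parameter fixes in this commit have exactly the shape of the Gather change above: a parameter that an interface demands but the body never reads is renamed to the blank identifier. A small sketch under trimmed-down stand-ins for the telegraf interfaces:

package main

import "fmt"

// Accumulator and Input are stand-ins for the real telegraf interfaces.
type Accumulator interface{ AddError(err error) }

type Input interface{ Gather(acc Accumulator) error }

type noopInput struct{}

// The parameter must exist to satisfy Input, but it is unused, so it becomes _.
func (n *noopInput) Gather(_ Accumulator) error { return nil }

func main() {
	var in Input = &noopInput{}
	fmt.Println(in.Gather(nil))
}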
@@ -26,7 +26,6 @@ import (
 type empty struct{}

 var (
-	forever       = 100 * 365 * 24 * time.Hour
 	envVarEscaper = strings.NewReplacer(
 		`"`, `\"`,
 		`\`, `\\`,
@@ -104,7 +104,7 @@ func (i *testInput) Gather(acc telegraf.Accumulator) error {
 	return nil
 }

-func (i *testInput) Start(acc telegraf.Accumulator) error {
+func (i *testInput) Start(_ telegraf.Accumulator) error {
 	return nil
 }

@@ -156,7 +156,7 @@ func (i *serviceInput) Gather(acc telegraf.Accumulator) error {
 	return nil
 }

-func (i *serviceInput) Start(acc telegraf.Accumulator) error {
+func (i *serviceInput) Start(_ telegraf.Accumulator) error {
 	return nil
 }
@@ -92,7 +92,7 @@ func fakeExecCommand(command string, args ...string) *exec.Cmd {
 	return cmd
 }

-func TestHelperProcess(t *testing.T) {
+func TestHelperProcess(_ *testing.T) {
 	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
 		return
 	}
@@ -96,8 +96,6 @@ const sampleJSON = `

 var (
 	zero           float64
-	err            error
-	pluginOutput   []pluginData
 	expectedOutput = []pluginData{
 		// {"object:f48698", "dummy", "input", nil, nil, nil},
 		// {"object:e27138", "dummy", "input", nil, nil, nil},
@@ -19,7 +19,6 @@ import (
 )

 type ResponseMetrics struct {
-	total   int
 	Metrics []Metric `json:"metrics"`
 }
@@ -12,7 +12,7 @@ import (
 type mockFetcher struct {
 }

-func (h *mockFetcher) Fetch(address string) ([]hddtemp.Disk, error) {
+func (h *mockFetcher) Fetch(_ string) ([]hddtemp.Disk, error) {
 	return []hddtemp.Disk{
 		{
 			DeviceName: "Disk1",
@@ -319,11 +319,6 @@ func methodNotAllowed(res http.ResponseWriter) {
 	res.Write([]byte(`{"error":"http: method not allowed"}`))
 }

-func internalServerError(res http.ResponseWriter) {
-	res.Header().Set("Content-Type", "application/json")
-	res.WriteHeader(http.StatusInternalServerError)
-}
-
 func badRequest(res http.ResponseWriter) {
 	res.Header().Set("Content-Type", "application/json")
 	res.WriteHeader(http.StatusBadRequest)
@@ -912,10 +912,8 @@ type fakeClient struct {
 	err        error
 }

-func (f *fakeClient) Do(req *http.Request) (*http.Response, error) {
-	return &http.Response{
-		StatusCode: f.statusCode,
-	}, f.err
+func (f *fakeClient) Do(_ *http.Request) (*http.Response, error) {
+	return &http.Response{StatusCode: f.statusCode}, f.err
 }

 func TestNetworkErrors(t *testing.T) {
@@ -196,9 +196,9 @@ func TestAddMetricsForSingleCoreNegative(t *testing.T) {

 func TestAddCPUFrequencyMetric(t *testing.T) {
 	var acc testutil.Accumulator
-	cpuID := "0"
-	coreID := "2"
-	packageID := "1"
+	cpuID := "1"
+	coreID := "3"
+	packageID := "0"
 	frequency := 1200000.2
 	power, _, _, msrMock := getPowerWithMockedServices()
 	prepareCPUInfoForSingleCPU(power, cpuID, coreID, packageID)
@@ -18,7 +18,6 @@ type Publisher struct {
 	BufferChanProcess chan processMeasurement
 	BufferChanCores   chan string
 	errChan           chan error
-	stopChan          chan bool
 }

 func NewPublisher(acc telegraf.Accumulator, log telegraf.Logger, shortenedMetrics bool) Publisher {
@@ -6,11 +6,6 @@ import (
 	"github.com/stretchr/testify/require"
 )

-type conTest struct {
-	Got  string
-	Want *Connection
-}
-
 func TestNewConnection(t *testing.T) {
 	testData := []struct {
 		addr string
@@ -227,7 +227,7 @@ func fakeExecCommand(command string, args ...string) *exec.Cmd {
 // For example, if you run:
 // GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- chrony tracking
 // it returns below mockData.
-func TestHelperProcess(t *testing.T) {
+func TestHelperProcess(_ *testing.T) {
 	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
 		return
 	}

@@ -546,7 +546,7 @@ func fakeExecCommandV2(command string, args ...string) *exec.Cmd {
 // For example, if you run:
 // GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcessV2 -- chrony tracking
 // it returns below mockData.
-func TestHelperProcessV2(t *testing.T) {
+func TestHelperProcessV2(_ *testing.T) {
 	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
 		return
 	}
@@ -295,18 +295,6 @@ func (j *Jenkins) gatherJobs(acc telegraf.Accumulator) {
 	wg.Wait()
 }

-// wrap the tcp request with doGet
-// block tcp request if buffered channel is full
-func (j *Jenkins) doGet(tcp func() error) error {
-	j.semaphore <- struct{}{}
-	if err := tcp(); err != nil {
-		<-j.semaphore
-		return err
-	}
-	<-j.semaphore
-	return nil
-}
-
 func (j *Jenkins) getJobDetail(jr jobRequest, acc telegraf.Accumulator) error {
 	if j.MaxSubJobDepth > 0 && jr.layer == j.MaxSubJobDepth {
 		return nil

@@ -451,7 +439,6 @@ type jobRequest struct {
 	name    string
 	parents []string
 	layer   int
-	number  int64
 }

 func (jr jobRequest) combined() []string {
@@ -98,25 +98,8 @@ const validMultiValueJSON = `
   }
 ]`

-const validSingleValueJSON = `
-[
-  {
-    "request":{
-      "path":"used",
-      "mbean":"java.lang:type=Memory",
-      "attribute":"HeapMemoryUsage",
-      "type":"read"
-    },
-    "value":209274376,
-    "timestamp":1446129256,
-    "status":200
-  }
-]`
-
 const invalidJSON = "I don't think this is JSON"

-const empty = ""
-
 var Servers = []Server{{Name: "as1", Host: "127.0.0.1", Port: "8080"}}
 var HeapMetric = Metric{Name: "heap_memory_usage",
 	Mbean: "java.lang:type=Memory", Attribute: "HeapMemoryUsage"}

@@ -130,7 +113,7 @@ type jolokiaClientStub struct {
 	statusCode   int
 }

-func (c jolokiaClientStub) MakeRequest(req *http.Request) (*http.Response, error) {
+func (c jolokiaClientStub) MakeRequest(_ *http.Request) (*http.Response, error) {
 	resp := http.Response{}
 	resp.StatusCode = c.statusCode
 	resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody))
@@ -74,7 +74,7 @@ func TestJolokia2_ScalarValues(t *testing.T) {
 		"status": 200
 	}]`

-	server := setupServer(http.StatusOK, response)
+	server := setupServer(response)
 	defer server.Close()

 	plugin := setupPlugin(t, fmt.Sprintf(config, server.URL))

@@ -234,7 +234,7 @@ func TestJolokia2_ObjectValues(t *testing.T) {
 		"status": 200
 	}]`

-	server := setupServer(http.StatusOK, response)
+	server := setupServer(response)
 	defer server.Close()

 	plugin := setupPlugin(t, fmt.Sprintf(config, server.URL))

@@ -322,7 +322,7 @@ func TestJolokia2_StatusCodes(t *testing.T) {
 		"status": 500
 	}]`

-	server := setupServer(http.StatusOK, response)
+	server := setupServer(response)
 	defer server.Close()

 	plugin := setupPlugin(t, fmt.Sprintf(config, server.URL))

@@ -372,7 +372,7 @@ func TestJolokia2_TagRenaming(t *testing.T) {
 		"status": 200
 	}]`

-	server := setupServer(http.StatusOK, response)
+	server := setupServer(response)
 	defer server.Close()

 	plugin := setupPlugin(t, fmt.Sprintf(config, server.URL))

@@ -465,7 +465,7 @@ func TestJolokia2_FieldRenaming(t *testing.T) {
 		"status": 200
 	}]`

-	server := setupServer(http.StatusOK, response)
+	server := setupServer(response)
 	defer server.Close()

 	plugin := setupPlugin(t, fmt.Sprintf(config, server.URL))

@@ -573,7 +573,7 @@ func TestJolokia2_MetricMbeanMatching(t *testing.T) {
 		"status": 200
 	}]`

-	server := setupServer(http.StatusOK, response)
+	server := setupServer(response)
 	defer server.Close()

 	plugin := setupPlugin(t, fmt.Sprintf(config, server.URL))

@@ -666,7 +666,7 @@ func TestJolokia2_MetricCompaction(t *testing.T) {
 		"status": 200
 	}]`

-	server := setupServer(http.StatusOK, response)
+	server := setupServer(response)
 	defer server.Close()

 	plugin := setupPlugin(t, fmt.Sprintf(config, server.URL))

@@ -727,7 +727,7 @@ func TestJolokia2_ProxyTargets(t *testing.T) {
 		"status": 200
 	}]`

-	server := setupServer(http.StatusOK, response)
+	server := setupServer(response)
 	defer server.Close()

 	plugin := setupPlugin(t, fmt.Sprintf(config, server.URL))

@@ -762,7 +762,7 @@ func TestFillFields(t *testing.T) {
 	assert.Equal(t, map[string]interface{}{}, results)
 }

-func setupServer(status int, resp string) *httptest.Server {
+func setupServer(resp string) *httptest.Server {
 	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		w.WriteHeader(http.StatusOK)
 		//body, err := ioutil.ReadAll(r.Body)
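Review note: every jolokia2 test passed http.StatusOK, so the status argument was dead and unparam flags it. A runnable sketch of the slimmed-down helper, assuming (as the hunks above suggest) that the handler always answers 200 with a fixed body:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
)

// setupServer keeps only the response body; the status code is always 200 now.
func setupServer(resp string) *httptest.Server {
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
		fmt.Fprintln(w, resp)
	}))
}

func main() {
	server := setupServer(`[{"status": 200}]`)
	defer server.Close()

	res, err := http.Get(server.URL)
	if err != nil {
		panic(err)
	}
	body, _ := ioutil.ReadAll(res.Body)
	res.Body.Close()
	fmt.Print(string(body))
}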
@@ -102,7 +102,7 @@ func (m *OpenConfigTelemetry) Description() string {
 	return "Read JTI OpenConfig Telemetry from listed sensors"
 }

-func (m *OpenConfigTelemetry) Gather(acc telegraf.Accumulator) error {
+func (m *OpenConfigTelemetry) Gather(_ telegraf.Accumulator) error {
 	return nil
 }

@@ -278,9 +278,12 @@ func (m *OpenConfigTelemetry) splitSensorConfig() int {
 }

 // Subscribes and collects OpenConfig telemetry data from given server
-func (m *OpenConfigTelemetry) collectData(ctx context.Context,
-	grpcServer string, grpcClientConn *grpc.ClientConn,
-	acc telegraf.Accumulator) error {
+func (m *OpenConfigTelemetry) collectData(
+	ctx context.Context,
+	grpcServer string,
+	grpcClientConn *grpc.ClientConn,
+	acc telegraf.Accumulator,
+) {
 	c := telemetry.NewOpenConfigTelemetryClient(grpcClientConn)
 	for _, sensor := range m.sensorsConfig {
 		m.wg.Add(1)

@@ -342,8 +345,6 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context,
 		}
 		}(ctx, sensor)
 	}
-
-	return nil
 }

 func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error {

@@ -63,19 +63,19 @@ func (s *openConfigTelemetryServer) TelemetrySubscribe(req *telemetry.Subscripti
 	return nil
 }

-func (s *openConfigTelemetryServer) CancelTelemetrySubscription(ctx context.Context, req *telemetry.CancelSubscriptionRequest) (*telemetry.CancelSubscriptionReply, error) {
+func (s *openConfigTelemetryServer) CancelTelemetrySubscription(_ context.Context, _ *telemetry.CancelSubscriptionRequest) (*telemetry.CancelSubscriptionReply, error) {
 	return nil, nil
 }

-func (s *openConfigTelemetryServer) GetTelemetrySubscriptions(ctx context.Context, req *telemetry.GetSubscriptionsRequest) (*telemetry.GetSubscriptionsReply, error) {
+func (s *openConfigTelemetryServer) GetTelemetrySubscriptions(_ context.Context, _ *telemetry.GetSubscriptionsRequest) (*telemetry.GetSubscriptionsReply, error) {
 	return nil, nil
 }

-func (s *openConfigTelemetryServer) GetTelemetryOperationalState(ctx context.Context, req *telemetry.GetOperationalStateRequest) (*telemetry.GetOperationalStateReply, error) {
+func (s *openConfigTelemetryServer) GetTelemetryOperationalState(_ context.Context, _ *telemetry.GetOperationalStateRequest) (*telemetry.GetOperationalStateReply, error) {
 	return nil, nil
 }

-func (s *openConfigTelemetryServer) GetDataEncodings(ctx context.Context, req *telemetry.DataEncodingRequest) (*telemetry.DataEncodingReply, error) {
+func (s *openConfigTelemetryServer) GetDataEncodings(_ context.Context, _ *telemetry.DataEncodingRequest) (*telemetry.DataEncodingReply, error) {
 	return nil, nil
 }
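Review note: besides dropping the error, the collectData rewrite reflows the signature to one parameter per line with a trailing comma, which keeps later signature changes to one-line diffs. A sketch of the layout with placeholder names:

package main

import (
	"context"
	"fmt"
)

// One parameter per line, trailing comma, closing paren on its own line.
func collect(
	ctx context.Context,
	server string,
	sensors []string,
) {
	fmt.Println(ctx.Err(), server, len(sensors))
}

func main() {
	// The server address and sensor path are placeholders for illustration.
	collect(context.Background(), "router.example:50051", []string{"/interfaces/"})
}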
@@ -107,7 +107,6 @@ const sampleConfig = `

 const (
 	defaultMaxUndeliveredMessages = 1000
-	defaultMaxMessageLen          = 1000000
 	defaultConsumerGroup          = "telegraf_metrics_consumers"
 	reconnectDelay                = 5 * time.Second
 )

@@ -256,7 +255,7 @@ func (k *KafkaConsumer) Start(acc telegraf.Accumulator) error {
 	return nil
 }

-func (k *KafkaConsumer) Gather(acc telegraf.Accumulator) error {
+func (k *KafkaConsumer) Gather(_ telegraf.Accumulator) error {
 	return nil
 }

@@ -314,11 +313,11 @@ func (h *ConsumerGroupHandler) Setup(sarama.ConsumerGroupSession) error {
 }

 // Run processes any delivered metrics during the lifetime of the session.
-func (h *ConsumerGroupHandler) run(ctx context.Context) error {
+func (h *ConsumerGroupHandler) run(ctx context.Context) {
 	for {
 		select {
 		case <-ctx.Done():
-			return nil
+			return
 		case track := <-h.acc.Delivered():
 			h.onDelivery(track)
 		}
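Review note: ConsumerGroupHandler.run only ever returned nil, so the error result goes away and `return nil` becomes a bare return. A self-contained sketch of the same select loop, with a plain channel standing in for the tracking accumulator's Delivered stream:

package main

import (
	"context"
	"fmt"
	"time"
)

// run exits when ctx is done and otherwise drains deliveries; it has nothing to report.
func run(ctx context.Context, delivered <-chan string) {
	for {
		select {
		case <-ctx.Done():
			return
		case track := <-delivered:
			fmt.Println("delivered:", track)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	ch := make(chan string, 1)
	ch <- "metric-1"
	run(ctx, ch)
}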
@@ -23,7 +23,7 @@ type FakeConsumerGroup struct {
 	errors  chan error
 }

-func (g *FakeConsumerGroup) Consume(ctx context.Context, topics []string, handler sarama.ConsumerGroupHandler) error {
+func (g *FakeConsumerGroup) Consume(_ context.Context, _ []string, handler sarama.ConsumerGroupHandler) error {
 	g.handler = handler
 	g.handler.Setup(nil)
 	return nil

@@ -213,15 +213,15 @@ func (s *FakeConsumerGroupSession) GenerationID() int32 {
 	panic("not implemented")
 }

-func (s *FakeConsumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) {
+func (s *FakeConsumerGroupSession) MarkOffset(_ string, _ int32, _ int64, _ string) {
 	panic("not implemented")
 }

-func (s *FakeConsumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) {
+func (s *FakeConsumerGroupSession) ResetOffset(_ string, _ int32, _ int64, _ string) {
 	panic("not implemented")
 }

-func (s *FakeConsumerGroupSession) MarkMessage(msg *sarama.ConsumerMessage, metadata string) {
+func (s *FakeConsumerGroupSession) MarkMessage(_ *sarama.ConsumerMessage, _ string) {
 }

 func (s *FakeConsumerGroupSession) Context() context.Context {
@@ -177,7 +177,7 @@ func (k *Kafka) Stop() {
 	}
 }

-func (k *Kafka) Gather(acc telegraf.Accumulator) error {
+func (k *Kafka) Gather(_ telegraf.Accumulator) error {
 	return nil
 }
@@ -78,8 +78,8 @@ func TestReadsMetricsFromKafka(t *testing.T) {
 	}
 }

-// Waits for the metric that was sent to the kafka broker to arrive at the kafka
-// consumer
+//nolint:unused // Used in skipped tests
+// Waits for the metric that was sent to the kafka broker to arrive at the kafka consumer
 func waitForPoint(acc *testutil.Accumulator, t *testing.T) {
 	// Give the kafka container up to 2 seconds to get the point to the consumer
 	ticker := time.NewTicker(5 * time.Millisecond)
|
||||||
cons *consumer.Consumer
|
cons *consumer.Consumer
|
||||||
parser parsers.Parser
|
parser parsers.Parser
|
||||||
cancel context.CancelFunc
|
cancel context.CancelFunc
|
||||||
ctx context.Context
|
|
||||||
acc telegraf.TrackingAccumulator
|
acc telegraf.TrackingAccumulator
|
||||||
sem chan struct{}
|
sem chan struct{}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@@ -5,7 +5,6 @@ import (
 	"time"

 	"github.com/influxdata/telegraf/plugins/common/tls"
-	"k8s.io/apimachinery/pkg/util/intstr"
 )

 type mockHandler struct {

@@ -20,21 +19,10 @@ func toInt32Ptr(i int32) *int32 {
 	return &i
 }

-func toInt64Ptr(i int64) *int64 {
-	return &i
-}
-
 func toBoolPtr(b bool) *bool {
 	return &b
 }

-func toIntStrPtrS(s string) *intstr.IntOrString {
-	return &intstr.IntOrString{StrVal: s}
-}
-
-func toIntStrPtrI(i int32) *intstr.IntOrString {
-	return &intstr.IntOrString{IntVal: i}
-}
-
 func TestNewClient(t *testing.T) {
 	_, err := newClient("https://127.0.0.1:443/", "default", "abc123", time.Second, tls.ClientConfig{})
 	if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
for _, d := range list.Items {
|
for _, d := range list.Items {
|
||||||
if err = ki.gatherDaemonSet(d, acc); err != nil {
|
ki.gatherDaemonSet(d, acc)
|
||||||
acc.AddError(err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ki *KubernetesInventory) gatherDaemonSet(d v1.DaemonSet, acc telegraf.Accumulator) error {
|
func (ki *KubernetesInventory) gatherDaemonSet(d v1.DaemonSet, acc telegraf.Accumulator) {
|
||||||
fields := map[string]interface{}{
|
fields := map[string]interface{}{
|
||||||
"generation": d.Generation,
|
"generation": d.Generation,
|
||||||
"current_number_scheduled": d.Status.CurrentNumberScheduled,
|
"current_number_scheduled": d.Status.CurrentNumberScheduled,
|
||||||
|
|
@ -48,6 +45,4 @@ func (ki *KubernetesInventory) gatherDaemonSet(d v1.DaemonSet, acc telegraf.Accu
|
||||||
}
|
}
|
||||||
|
|
||||||
acc.AddFields(daemonSetMeasurement, fields, tags)
|
acc.AddFields(daemonSetMeasurement, fields, tags)
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -108,10 +108,7 @@ func TestDaemonSet(t *testing.T) {
|
||||||
ks.createSelectorFilters()
|
ks.createSelectorFilters()
|
||||||
acc := new(testutil.Accumulator)
|
acc := new(testutil.Accumulator)
|
||||||
for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items {
|
for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items {
|
||||||
err := ks.gatherDaemonSet(dset, acc)
|
ks.gatherDaemonSet(dset, acc)
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Failed to gather daemonset - %s", err.Error())
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
err := acc.FirstError()
|
err := acc.FirstError()
|
||||||
|
|
@ -284,10 +281,7 @@ func TestDaemonSetSelectorFilter(t *testing.T) {
|
||||||
ks.createSelectorFilters()
|
ks.createSelectorFilters()
|
||||||
acc := new(testutil.Accumulator)
|
acc := new(testutil.Accumulator)
|
||||||
for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items {
|
for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items {
|
||||||
err := ks.gatherDaemonSet(dset, acc)
|
ks.gatherDaemonSet(dset, acc)
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Failed to gather daemonset - %s", err.Error())
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Grab selector tags
|
// Grab selector tags
|
||||||
|
|
|
||||||
|
|
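Review note: the kube_inventory gatherers all follow one recipe: the gather function always returned nil, so the signature drops the error, the collect loop collapses to a plain call, and the tests lose their unreachable error branches. A minimal sketch with stand-in types:

package main

import "fmt"

// Accumulator is a stand-in for telegraf.Accumulator.
type Accumulator interface {
	AddFields(measurement string, fields map[string]interface{})
}

type printAcc struct{}

func (printAcc) AddFields(measurement string, fields map[string]interface{}) {
	fmt.Println(measurement, fields)
}

type daemonSet struct{ generation int64 }

// gatherDaemonSet cannot fail, so it returns nothing and callers simply call it.
func gatherDaemonSet(d daemonSet, acc Accumulator) {
	acc.AddFields("kubernetes_daemonset", map[string]interface{}{"generation": d.generation})
}

func main() {
	for _, d := range []daemonSet{{generation: 1}, {generation: 2}} {
		gatherDaemonSet(d, printAcc{})
	}
}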
@@ -14,14 +14,11 @@ func collectDeployments(ctx context.Context, acc telegraf.Accumulator, ki *Kuber
 		return
 	}
 	for _, d := range list.Items {
-		if err = ki.gatherDeployment(d, acc); err != nil {
-			acc.AddError(err)
-			return
-		}
+		ki.gatherDeployment(d, acc)
 	}
 }

-func (ki *KubernetesInventory) gatherDeployment(d v1.Deployment, acc telegraf.Accumulator) error {
+func (ki *KubernetesInventory) gatherDeployment(d v1.Deployment, acc telegraf.Accumulator) {
 	fields := map[string]interface{}{
 		"replicas_available":   d.Status.AvailableReplicas,
 		"replicas_unavailable": d.Status.UnavailableReplicas,

@@ -38,6 +35,4 @@ func (ki *KubernetesInventory) gatherDeployment(d v1.Deployment, acc telegraf.Ac
 	}

 	acc.AddFields(deploymentMeasurement, fields, tags)
-
-	return nil
 }

@@ -114,10 +114,7 @@ func TestDeployment(t *testing.T) {
 			ks.createSelectorFilters()
 			acc := new(testutil.Accumulator)
 			for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items {
-				err := ks.gatherDeployment(deployment, acc)
-				if err != nil {
-					t.Errorf("Failed to gather deployment - %s", err.Error())
-				}
+				ks.gatherDeployment(deployment, acc)
 			}

 			err := acc.FirstError()

@@ -299,10 +296,7 @@ func TestDeploymentSelectorFilter(t *testing.T) {
 			ks.createSelectorFilters()
 			acc := new(testutil.Accumulator)
 			for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items {
-				err := ks.gatherDeployment(deployment, acc)
-				if err != nil {
-					t.Errorf("Failed to gather deployment - %s", err.Error())
-				}
+				ks.gatherDeployment(deployment, acc)
 			}

 			// Grab selector tags
@@ -15,14 +15,11 @@ func collectStatefulSets(ctx context.Context, acc telegraf.Accumulator, ki *Kube
 		return
 	}
 	for _, s := range list.Items {
-		if err = ki.gatherStatefulSet(s, acc); err != nil {
-			acc.AddError(err)
-			return
-		}
+		ki.gatherStatefulSet(s, acc)
 	}
 }

-func (ki *KubernetesInventory) gatherStatefulSet(s v1.StatefulSet, acc telegraf.Accumulator) error {
+func (ki *KubernetesInventory) gatherStatefulSet(s v1.StatefulSet, acc telegraf.Accumulator) {
 	status := s.Status
 	fields := map[string]interface{}{
 		"created": s.GetCreationTimestamp().UnixNano(),

@@ -45,6 +42,4 @@ func (ki *KubernetesInventory) gatherStatefulSet(s v1.StatefulSet, acc telegraf.
 	}

 	acc.AddFields(statefulSetMeasurement, fields, tags)
-
-	return nil
 }

@@ -102,10 +102,7 @@ func TestStatefulSet(t *testing.T) {
 			ks.createSelectorFilters()
 			acc := new(testutil.Accumulator)
 			for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items {
-				err := ks.gatherStatefulSet(ss, acc)
-				if err != nil {
-					t.Errorf("Failed to gather ss - %s", err.Error())
-				}
+				ks.gatherStatefulSet(ss, acc)
 			}

 			err := acc.FirstError()

@@ -273,10 +270,7 @@ func TestStatefulSetSelectorFilter(t *testing.T) {
 			ks.createSelectorFilters()
 			acc := new(testutil.Accumulator)
 			for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items {
-				err := ks.gatherStatefulSet(ss, acc)
-				if err != nil {
-					t.Errorf("Failed to gather ss - %s", err.Error())
-				}
+				ks.gatherStatefulSet(ss, acc)
 			}

 			// Grab selector tags
@@ -5,7 +5,6 @@ import (
 	"fmt"
 	"io/ioutil"
 	"net/http"
-	"net/url"
 	"strings"
 	"time"

@@ -65,7 +64,6 @@ var sampleConfig = `
 `

 const (
-	summaryEndpoint           = `%s/stats/summary`
 	defaultServiceAccountPath = "/run/secrets/kubernetes.io/serviceaccount/token"
 )

@@ -117,15 +115,6 @@ func (k *Kubernetes) Gather(acc telegraf.Accumulator) error {
 	return nil
 }

-func buildURL(endpoint string, base string) (*url.URL, error) {
-	u := fmt.Sprintf(endpoint, base)
-	addr, err := url.Parse(u)
-	if err != nil {
-		return nil, fmt.Errorf("Unable to parse address '%s': %s", u, err)
-	}
-	return addr, nil
-}
-
 func (k *Kubernetes) gatherSummary(baseURL string, acc telegraf.Accumulator) error {
 	summaryMetrics := &SummaryMetrics{}
 	err := k.LoadJSON(fmt.Sprintf("%s/stats/summary", baseURL), summaryMetrics)

@@ -139,7 +128,7 @@ func (k *Kubernetes) gatherSummary(baseURL string, acc telegraf.Accumulator) err
 	}
 	buildSystemContainerMetrics(summaryMetrics, acc)
 	buildNodeMetrics(summaryMetrics, acc)
-	buildPodMetrics(baseURL, summaryMetrics, podInfos, k.labelFilter, acc)
+	buildPodMetrics(summaryMetrics, podInfos, k.labelFilter, acc)
 	return nil
 }

@@ -243,7 +232,7 @@ func (k *Kubernetes) LoadJSON(url string, v interface{}) error {
 	return nil
 }

-func buildPodMetrics(baseURL string, summaryMetrics *SummaryMetrics, podInfo []Metadata, labelFilter filter.Filter, acc telegraf.Accumulator) {
+func buildPodMetrics(summaryMetrics *SummaryMetrics, podInfo []Metadata, labelFilter filter.Filter, acc telegraf.Accumulator) {
 	for _, pod := range summaryMetrics.Pods {
 		for _, container := range pod.Containers {
 			tags := map[string]string{
@@ -43,7 +43,7 @@ func (l *Lanz) Description() string {
 	return "Read metrics off Arista LANZ, via socket"
 }

-func (l *Lanz) Gather(acc telegraf.Accumulator) error {
+func (l *Lanz) Gather(_ telegraf.Accumulator) error {
 	return nil
 }
@@ -143,7 +143,7 @@ func (l *LogParserPlugin) Init() error {
 }

 // Gather is the primary function to collect the metrics for the plugin
-func (l *LogParserPlugin) Gather(acc telegraf.Accumulator) error {
+func (l *LogParserPlugin) Gather(_ telegraf.Accumulator) error {
 	l.Lock()
 	defer l.Unlock()
@@ -54,7 +54,6 @@ type mapping struct {
 	inProc   string // What to look for at the start of a line in /proc/fs/lustre/*
 	field    uint32 // which field to extract from that line
 	reportAs string // What measurement name to use
-	tag      string // Additional tag to add for this metric
 }

 var wantedOstFields = []*mapping{
@@ -532,49 +532,6 @@ type TaskStats struct {
 	Statistics  map[string]interface{} `json:"statistics"`
 }

-func (m *Mesos) gatherSlaveTaskMetrics(u *url.URL, acc telegraf.Accumulator) error {
-	var metrics []TaskStats
-
-	tags := map[string]string{
-		"server": u.Hostname(),
-		"url":    urlTag(u),
-	}
-
-	resp, err := m.client.Get(withPath(u, "/monitor/statistics").String())
-
-	if err != nil {
-		return err
-	}
-
-	data, err := ioutil.ReadAll(resp.Body)
-	resp.Body.Close()
-	if err != nil {
-		return err
-	}
-
-	if err = json.Unmarshal([]byte(data), &metrics); err != nil {
-		return errors.New("Error decoding JSON response")
-	}
-
-	for _, task := range metrics {
-		tags["framework_id"] = task.FrameworkID
-
-		jf := jsonparser.JSONFlattener{}
-		err = jf.FlattenJSON("", task.Statistics)
-
-		if err != nil {
-			return err
-		}
-
-		timestamp := time.Unix(int64(jf.Fields["timestamp"].(float64)), 0)
-		jf.Fields["executor_id"] = task.ExecutorID
-
-		acc.AddFields("mesos_tasks", jf.Fields, tags, timestamp)
-	}
-
-	return nil
-}
-
 func withPath(u *url.URL, path string) *url.URL {
 	c := *u
 	c.Path = path
|
||||||
Connect() (Connection, error)
|
Connect() (Connection, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
func newConnector(hostname, port, password string) (*connector, error) {
|
func newConnector(hostname, port, password string) *connector {
|
||||||
return &connector{
|
return &connector{
|
||||||
hostname: hostname,
|
hostname: hostname,
|
||||||
port: port,
|
port: port,
|
||||||
password: password,
|
password: password,
|
||||||
}, nil
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
type connector struct {
|
type connector struct {
|
||||||
|
|
@ -58,8 +58,8 @@ func (c *connector) Connect() (Connection, error) {
|
||||||
return &connection{rcon: rcon}, nil
|
return &connection{rcon: rcon}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func newClient(connector Connector) (*client, error) {
|
func newClient(connector Connector) *client {
|
||||||
return &client{connector: connector}, nil
|
return &client{connector: connector}
|
||||||
}
|
}
|
||||||
|
|
||||||
type client struct {
|
type client struct {
|
||||||
|
|
@ -90,13 +90,7 @@ func (c *client) Players() ([]string, error) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
players, err := parsePlayers(resp)
|
return parsePlayers(resp), nil
|
||||||
if err != nil {
|
|
||||||
c.conn = nil
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return players, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *client) Scores(player string) ([]Score, error) {
|
func (c *client) Scores(player string) ([]Score, error) {
|
||||||
|
|
@ -113,13 +107,7 @@ func (c *client) Scores(player string) ([]Score, error) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
scores, err := parseScores(resp)
|
return parseScores(resp), nil
|
||||||
if err != nil {
|
|
||||||
c.conn = nil
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return scores, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type connection struct {
|
type connection struct {
|
||||||
|
|
@ -134,10 +122,10 @@ func (c *connection) Execute(command string) (string, error) {
|
||||||
return packet.Body, nil
|
return packet.Body, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parsePlayers(input string) ([]string, error) {
|
func parsePlayers(input string) []string {
|
||||||
parts := strings.SplitAfterN(input, ":", 2)
|
parts := strings.SplitAfterN(input, ":", 2)
|
||||||
if len(parts) != 2 {
|
if len(parts) != 2 {
|
||||||
return []string{}, nil
|
return []string{}
|
||||||
}
|
}
|
||||||
|
|
||||||
names := strings.Split(parts[1], ",")
|
names := strings.Split(parts[1], ",")
|
||||||
|
|
@ -158,7 +146,7 @@ func parsePlayers(input string) ([]string, error) {
|
||||||
}
|
}
|
||||||
players = append(players, name)
|
players = append(players, name)
|
||||||
}
|
}
|
||||||
return players, nil
|
return players
|
||||||
}
|
}
|
||||||
|
|
||||||
// Score is an individual tracked scoreboard stat.
|
// Score is an individual tracked scoreboard stat.
|
||||||
|
|
@ -167,9 +155,9 @@ type Score struct {
|
||||||
Value int64
|
Value int64
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseScores(input string) ([]Score, error) {
|
func parseScores(input string) []Score {
|
||||||
if strings.Contains(input, "has no scores") {
|
if strings.Contains(input, "has no scores") {
|
||||||
return []Score{}, nil
|
return []Score{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Detect Minecraft <= 1.12
|
// Detect Minecraft <= 1.12
|
||||||
|
|
@ -200,5 +188,6 @@ func parseScores(input string) ([]Score, error) {
|
||||||
}
|
}
|
||||||
scores = append(scores, score)
|
scores = append(scores, score)
|
||||||
}
|
}
|
||||||
return scores, nil
|
|
||||||
|
return scores
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
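Review note: parsePlayers and parseScores are now total functions: any input, however malformed, yields a (possibly empty) slice, so the error plumbing and the connection reset on parse failure above are gone. A runnable sketch of the parse shape, assuming the same "prefix: a, b, c" wire format shown in the hunks:

package main

import (
	"fmt"
	"strings"
)

// parsePlayers never fails; malformed input just yields an empty slice.
func parsePlayers(input string) []string {
	parts := strings.SplitAfterN(input, ":", 2)
	if len(parts) != 2 {
		return []string{}
	}
	var players []string
	for _, name := range strings.Split(parts[1], ",") {
		if name = strings.TrimSpace(name); name != "" {
			players = append(players, name)
		}
	}
	return players
}

func main() {
	fmt.Println(parsePlayers("There are 2 players online: alice, bob"))
	fmt.Println(parsePlayers("garbage without a colon"))
}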
@@ -98,9 +98,7 @@ func TestClient_Player(t *testing.T) {
 				conn: &MockConnection{commands: tt.commands},
 			}

-			client, err := newClient(connector)
-			require.NoError(t, err)
+			client := newClient(connector)

 			actual, err := client.Players()
 			require.NoError(t, err)

@@ -183,9 +181,7 @@ func TestClient_Scores(t *testing.T) {
 				conn: &MockConnection{commands: tt.commands},
 			}

-			client, err := newClient(connector)
-			require.NoError(t, err)
+			client := newClient(connector)

 			actual, err := client.Scores(tt.player)
 			require.NoError(t, err)
@@ -50,17 +50,8 @@ func (s *Minecraft) SampleConfig() string {

 func (s *Minecraft) Gather(acc telegraf.Accumulator) error {
 	if s.client == nil {
-		connector, err := newConnector(s.Server, s.Port, s.Password)
-		if err != nil {
-			return err
-		}
-
-		client, err := newClient(connector)
-		if err != nil {
-			return err
-		}
-
-		s.client = client
+		connector := newConnector(s.Server, s.Port, s.Password)
+		s.client = newClient(connector)
 	}

 	players, err := s.client.Players()
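Review note: with both constructors infallible, the lazy client setup in Gather shrinks from three error checks to two lines. A compact sketch of the same shape with stand-in types (the address is a placeholder):

package main

import "fmt"

type connector struct{ addr string }

type client struct{ c *connector }

func newConnector(addr string) *connector { return &connector{addr: addr} }

func newClient(c *connector) *client { return &client{c: c} }

type minecraft struct{ client *client }

// Gather builds the client inline on first use; nothing here can fail anymore.
func (s *minecraft) Gather() error {
	if s.client == nil {
		s.client = newClient(newConnector("127.0.0.1:25575")) // placeholder address
	}
	fmt.Println("gathering via", s.client.c.addr)
	return nil
}

func main() {
	s := &minecraft{}
	_ = s.Gather()
}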
@@ -17,7 +17,7 @@ import (
 type transportMock struct {
 }

-func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) {
+func (t *transportMock) RoundTrip(_ *http.Request) (*http.Response, error) {
 	errorString := "Get http://127.0.0.1:2812/_status?format=xml: " +
 		"read tcp 192.168.10.2:55610->127.0.0.1:2812: " +
 		"read: connection reset by peer"
@@ -248,14 +248,14 @@ func (m *MQTTConsumer) connect() error {
 	return nil
 }

-func (m *MQTTConsumer) onConnectionLost(c mqtt.Client, err error) {
+func (m *MQTTConsumer) onConnectionLost(_ mqtt.Client, err error) {
 	m.acc.AddError(fmt.Errorf("connection lost: %v", err))
 	m.Log.Debugf("Disconnected %v", m.Servers)
 	m.state = Disconnected
 	return
 }

-func (m *MQTTConsumer) recvMessage(c mqtt.Client, msg mqtt.Message) {
+func (m *MQTTConsumer) recvMessage(_ mqtt.Client, msg mqtt.Message) {
 	for {
 		select {
 		case track := <-m.acc.Delivered():

@@ -310,7 +310,7 @@ func (m *MQTTConsumer) Stop() {
 	m.cancel()
 }

-func (m *MQTTConsumer) Gather(acc telegraf.Accumulator) error {
+func (m *MQTTConsumer) Gather(_ telegraf.Accumulator) error {
 	if m.state == Disconnected {
 		m.state = Connecting
 		m.Log.Debugf("Connecting %v", m.Servers)
|
||||||
// FakeParser satisfies parsers.Parser
|
// FakeParser satisfies parsers.Parser
|
||||||
var _ parsers.Parser = &FakeParser{}
|
var _ parsers.Parser = &FakeParser{}
|
||||||
|
|
||||||
func (p *FakeParser) Parse(buf []byte) ([]telegraf.Metric, error) {
|
func (p *FakeParser) Parse(_ []byte) ([]telegraf.Metric, error) {
|
||||||
panic("not implemented")
|
panic("not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *FakeParser) ParseLine(line string) (telegraf.Metric, error) {
|
func (p *FakeParser) ParseLine(_ string) (telegraf.Metric, error) {
|
||||||
panic("not implemented")
|
panic("not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *FakeParser) SetDefaultTags(tags map[string]string) {
|
func (p *FakeParser) SetDefaultTags(_ map[string]string) {
|
||||||
panic("not implemented")
|
panic("not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@@ -264,7 +264,7 @@ func (n *natsConsumer) Stop() {
 	n.clean()
 }

-func (n *natsConsumer) Gather(acc telegraf.Accumulator) error {
+func (n *natsConsumer) Gather(_ telegraf.Accumulator) error {
 	return nil
 }
@@ -125,7 +125,7 @@ func TestTCPOK1(t *testing.T) {
 	}
 	// Start TCP server
 	wg.Add(1)
-	go TCPServer(t, &wg)
+	go TCPServer(&wg)
 	wg.Wait()
 	// Connect
 	wg.Add(1)

@@ -169,7 +169,7 @@ func TestTCPOK2(t *testing.T) {
 	}
 	// Start TCP server
 	wg.Add(1)
-	go TCPServer(t, &wg)
+	go TCPServer(&wg)
 	wg.Wait()
 	// Connect
 	wg.Add(1)

@@ -247,7 +247,7 @@ func TestUDPOK1(t *testing.T) {
 	}
 	// Start UDP server
 	wg.Add(1)
-	go UDPServer(t, &wg)
+	go UDPServer(&wg)
 	wg.Wait()
 	// Connect
 	wg.Add(1)

@@ -277,7 +277,7 @@ func TestUDPOK1(t *testing.T) {
 	wg.Wait()
 }

-func UDPServer(t *testing.T, wg *sync.WaitGroup) {
+func UDPServer(wg *sync.WaitGroup) {
 	udpAddr, _ := net.ResolveUDPAddr("udp", "127.0.0.1:2004")
 	conn, _ := net.ListenUDP("udp", udpAddr)
 	wg.Done()

@@ -288,7 +288,7 @@ func UDPServer(t *testing.T, wg *sync.WaitGroup) {
 	wg.Done()
 }

-func TCPServer(t *testing.T, wg *sync.WaitGroup) {
+func TCPServer(wg *sync.WaitGroup) {
 	tcpAddr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2004")
 	tcpServer, _ := net.ListenTCP("tcp", tcpAddr)
 	wg.Done()
@@ -93,13 +93,13 @@ func convertToInt64(line []string) []int64 {
 	return nline
 }

-func (n *NFSClient) parseStat(mountpoint string, export string, version string, line []string, fullstat bool, acc telegraf.Accumulator) error {
+func (n *NFSClient) parseStat(mountpoint string, export string, version string, line []string, acc telegraf.Accumulator) {
 	tags := map[string]string{"mountpoint": mountpoint, "serverexport": export}
 	nline := convertToInt64(line)

 	if len(nline) == 0 {
 		n.Log.Warnf("Parsing Stat line with one field: %s\n", line)
-		return nil
+		return
 	}

 	first := strings.Replace(line[0], ":", "", 1)

@@ -191,7 +191,7 @@ func (n *NFSClient) parseStat(mountpoint string, export string, version string,
 		acc.AddFields("nfsstat", fields, tags)
 	}

-	if fullstat {
+	if n.Fullstat {
 		switch first {
 		case "events":
 			if len(nline) >= len(eventsFields) {

@@ -240,11 +240,9 @@ func (n *NFSClient) parseStat(mountpoint string, export string, version string,
 			}
 		}
 	}
-
-	return nil
 }

-func (n *NFSClient) processText(scanner *bufio.Scanner, acc telegraf.Accumulator) error {
+func (n *NFSClient) processText(scanner *bufio.Scanner, acc telegraf.Accumulator) {
 	var mount string
 	var version string
 	var export string

@@ -252,10 +250,9 @@ func (n *NFSClient) processText(scanner *bufio.Scanner, acc telegraf.Accumulator

 	for scanner.Scan() {
 		line := strings.Fields(scanner.Text())
+		lineLength := len(line)

-		line_len := len(line)
-
-		if line_len == 0 {
+		if lineLength == 0 {
 			continue
 		}

@@ -263,10 +260,10 @@ func (n *NFSClient) processText(scanner *bufio.Scanner, acc telegraf.Accumulator

 		// This denotes a new mount has been found, so set
 		// mount and export, and stop skipping (for now)
-		if line_len > 4 && choice.Contains("fstype", line) && (choice.Contains("nfs", line) || choice.Contains("nfs4", line)) {
+		if lineLength > 4 && choice.Contains("fstype", line) && (choice.Contains("nfs", line) || choice.Contains("nfs4", line)) {
 			mount = line[4]
 			export = line[1]
-		} else if line_len > 5 && (choice.Contains("(nfs)", line) || choice.Contains("(nfs4)", line)) {
+		} else if lineLength > 5 && (choice.Contains("(nfs)", line) || choice.Contains("(nfs4)", line)) {
 			version = strings.Split(line[5], "/")[1]
 		}

@@ -296,10 +293,9 @@ func (n *NFSClient) processText(scanner *bufio.Scanner, acc telegraf.Accumulator
 		}

 		if !skip {
-			n.parseStat(mount, export, version, line, n.Fullstat, acc)
+			n.parseStat(mount, export, version, line, acc)
 		}
 	}
-	return nil
 }

 func (n *NFSClient) getMountStatsPath() string {
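Review note: two different linters meet in this hunk. unparam removes the fullstat parameter in favor of the plugin's own Fullstat field, and the rename from line_len to lineLength satisfies Go naming checks (revive rejects underscores in local identifiers). A tiny runnable example of the preferred naming:

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, text := range []string{"a b c", ""} {
		line := strings.Fields(text)
		lineLength := len(line) // mixedCaps, not line_len

		if lineLength == 0 {
			continue
		}
		fmt.Println(lineLength, "fields:", line)
	}
}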
@ -20,11 +20,11 @@ func getMountStatsPath() string {
|
||||||
func TestNFSClientParsev3(t *testing.T) {
|
func TestNFSClientParsev3(t *testing.T) {
|
||||||
var acc testutil.Accumulator
|
var acc testutil.Accumulator
|
||||||
|
|
||||||
nfsclient := NFSClient{}
|
nfsclient := NFSClient{Fullstat: true}
|
||||||
nfsclient.nfs3Ops = map[string]bool{"READLINK": true, "GETATTR": false}
|
nfsclient.nfs3Ops = map[string]bool{"READLINK": true, "GETATTR": false}
|
||||||
nfsclient.nfs4Ops = map[string]bool{"READLINK": true, "GETATTR": false}
|
nfsclient.nfs4Ops = map[string]bool{"READLINK": true, "GETATTR": false}
|
||||||
data := strings.Fields(" READLINK: 500 501 502 503 504 505 506 507")
|
data := strings.Fields(" READLINK: 500 501 502 503 504 505 506 507")
|
||||||
nfsclient.parseStat("1.2.3.4:/storage/NFS", "/A", "3", data, true, &acc)
|
nfsclient.parseStat("1.2.3.4:/storage/NFS", "/A", "3", data, &acc)
|
||||||
|
|
||||||
fields_ops := map[string]interface{}{
|
fields_ops := map[string]interface{}{
|
||||||
"ops": int64(500),
|
"ops": int64(500),
|
||||||
|
|
@@ -42,11 +42,11 @@ func TestNFSClientParsev3(t *testing.T) {
 func TestNFSClientParsev4(t *testing.T) {
 	var acc testutil.Accumulator
 
-	nfsclient := NFSClient{}
+	nfsclient := NFSClient{Fullstat: true}
 	nfsclient.nfs3Ops = map[string]bool{"DESTROY_SESSION": true, "GETATTR": false}
 	nfsclient.nfs4Ops = map[string]bool{"DESTROY_SESSION": true, "GETATTR": false}
 	data := strings.Fields(" DESTROY_SESSION: 500 501 502 503 504 505 506 507")
-	nfsclient.parseStat("2.2.2.2:/nfsdata/", "/B", "4", data, true, &acc)
+	nfsclient.parseStat("2.2.2.2:/nfsdata/", "/B", "4", data, &acc)
 
 	fields_ops := map[string]interface{}{
 		"ops": int64(500),
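Because the flag now lives on the struct, the two tests above opt into full statistics once, via the `NFSClient{Fullstat: true}` literal, instead of threading `true` through every `parseStat` call.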
@@ -520,7 +520,7 @@ const streamServerZonesPayload = `
 `
 
 func TestGatherProcessesMetrics(t *testing.T) {
-	ts, n := prepareEndpoint(t, processesPath, defaultAPIVersion, processesPayload)
+	ts, n := prepareEndpoint(t, processesPath, processesPayload)
 	defer ts.Close()
 
 	var acc testutil.Accumulator
@@ -541,7 +541,7 @@ func TestGatherProcessesMetrics(t *testing.T) {
 }
 
 func TestGatherConnectionsMetrics(t *testing.T) {
-	ts, n := prepareEndpoint(t, connectionsPath, defaultAPIVersion, connectionsPayload)
+	ts, n := prepareEndpoint(t, connectionsPath, connectionsPayload)
 	defer ts.Close()
 
 	var acc testutil.Accumulator
@@ -565,7 +565,7 @@ func TestGatherConnectionsMetrics(t *testing.T) {
 }
 
 func TestGatherSslMetrics(t *testing.T) {
-	ts, n := prepareEndpoint(t, sslPath, defaultAPIVersion, sslPayload)
+	ts, n := prepareEndpoint(t, sslPath, sslPayload)
 	defer ts.Close()
 
 	var acc testutil.Accumulator
@@ -588,7 +588,7 @@ func TestGatherSslMetrics(t *testing.T) {
 }
 
 func TestGatherHttpRequestsMetrics(t *testing.T) {
-	ts, n := prepareEndpoint(t, httpRequestsPath, defaultAPIVersion, httpRequestsPayload)
+	ts, n := prepareEndpoint(t, httpRequestsPath, httpRequestsPayload)
 	defer ts.Close()
 
 	var acc testutil.Accumulator
@@ -610,7 +610,7 @@ func TestGatherHttpRequestsMetrics(t *testing.T) {
 }
 
 func TestGatherHttpServerZonesMetrics(t *testing.T) {
-	ts, n := prepareEndpoint(t, httpServerZonesPath, defaultAPIVersion, httpServerZonesPayload)
+	ts, n := prepareEndpoint(t, httpServerZonesPath, httpServerZonesPayload)
 	defer ts.Close()
 
 	var acc testutil.Accumulator
@@ -664,7 +664,7 @@ func TestGatherHttpServerZonesMetrics(t *testing.T) {
 }
 
 func TestGatherHttpLocationZonesMetrics(t *testing.T) {
-	ts, n := prepareEndpoint(t, httpLocationZonesPath, defaultAPIVersion, httpLocationZonesPayload)
+	ts, n := prepareEndpoint(t, httpLocationZonesPath, httpLocationZonesPayload)
 	defer ts.Close()
 
 	var acc testutil.Accumulator
@@ -716,7 +716,7 @@ func TestGatherHttpLocationZonesMetrics(t *testing.T) {
 }
 
 func TestGatherHttpUpstreamsMetrics(t *testing.T) {
-	ts, n := prepareEndpoint(t, httpUpstreamsPath, defaultAPIVersion, httpUpstreamsPayload)
+	ts, n := prepareEndpoint(t, httpUpstreamsPath, httpUpstreamsPayload)
 	defer ts.Close()
 
 	var acc testutil.Accumulator
@@ -888,7 +888,7 @@ func TestGatherHttpUpstreamsMetrics(t *testing.T) {
 }
 
 func TestGatherHttpCachesMetrics(t *testing.T) {
-	ts, n := prepareEndpoint(t, httpCachesPath, defaultAPIVersion, httpCachesPayload)
+	ts, n := prepareEndpoint(t, httpCachesPath, httpCachesPayload)
 	defer ts.Close()
 
 	var acc testutil.Accumulator
@@ -966,7 +966,7 @@ func TestGatherHttpCachesMetrics(t *testing.T) {
 }
 
 func TestGatherResolverZonesMetrics(t *testing.T) {
-	ts, n := prepareEndpoint(t, resolverZonesPath, defaultAPIVersion, resolverZonesPayload)
+	ts, n := prepareEndpoint(t, resolverZonesPath, resolverZonesPayload)
 	defer ts.Close()
 
 	var acc testutil.Accumulator
@@ -1020,7 +1020,7 @@ func TestGatherResolverZonesMetrics(t *testing.T) {
 }
 
 func TestGatherStreamUpstreams(t *testing.T) {
-	ts, n := prepareEndpoint(t, streamUpstreamsPath, defaultAPIVersion, streamUpstreamsPayload)
+	ts, n := prepareEndpoint(t, streamUpstreamsPath, streamUpstreamsPayload)
 	defer ts.Close()
 
 	var acc testutil.Accumulator
@@ -1162,7 +1162,7 @@ func TestGatherStreamUpstreams(t *testing.T) {
 }
 
 func TestGatherStreamServerZonesMetrics(t *testing.T) {
-	ts, n := prepareEndpoint(t, streamServerZonesPath, defaultAPIVersion, streamServerZonesPayload)
+	ts, n := prepareEndpoint(t, streamServerZonesPath, streamServerZonesPayload)
 	defer ts.Close()
 
 	var acc testutil.Accumulator
@@ -1305,11 +1305,11 @@ func prepareAddr(t *testing.T, ts *httptest.Server) (*url.URL, string, string) {
 	return addr, host, port
 }
 
-func prepareEndpoint(t *testing.T, path string, apiVersion int64, payload string) (*httptest.Server, *NginxPlusAPI) {
+func prepareEndpoint(t *testing.T, path string, payload string) (*httptest.Server, *NginxPlusAPI) {
 	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		var rsp string
 
-		if r.URL.Path == fmt.Sprintf("/api/%d/%s", apiVersion, path) {
+		if r.URL.Path == fmt.Sprintf("/api/%d/%s", defaultAPIVersion, path) {
 			rsp = payload
 			w.Header()["Content-Type"] = []string{"application/json"}
 		} else {
@@ -1321,7 +1321,7 @@ func prepareEndpoint(t *testing.T, path string, apiVersion int64, payload string
 
 	n := &NginxPlusAPI{
 		Urls:       []string{fmt.Sprintf("%s/api", ts.URL)},
-		APIVersion: apiVersion,
+		APIVersion: defaultAPIVersion,
 	}
 
 	client, err := n.createHTTPClient()
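The eleven test hunks above are all the same mechanical `unparam` fix: every caller passed `defaultAPIVersion`, so the parameter is removed and `prepareEndpoint` references the package constant itself. A self-contained sketch of the slimmed-down helper pattern — the constant's value and the simplified return type are assumptions for illustration, not the plugin's actual code:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

const defaultAPIVersion = 6 // assumed value, for illustration only

// prepareEndpoint no longer takes apiVersion: the constant is baked in,
// since no test ever passed anything else.
func prepareEndpoint(path, payload string) *httptest.Server {
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == fmt.Sprintf("/api/%d/%s", defaultAPIVersion, path) {
			w.Header().Set("Content-Type", "application/json")
			fmt.Fprint(w, payload)
			return
		}
		http.NotFound(w, r)
	}))
}

func main() {
	ts := prepareEndpoint("processes", `{"respawned": 0}`)
	defer ts.Close()

	rsp, err := http.Get(fmt.Sprintf("%s/api/%d/processes", ts.URL, defaultAPIVersion))
	if err != nil {
		panic(err)
	}
	defer rsp.Body.Close()
	fmt.Println("status:", rsp.StatusCode)
}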
@@ -11,7 +11,6 @@ import (
 	"time"
 
 	"github.com/influxdata/telegraf"
-	"github.com/influxdata/telegraf/filter"
 	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/plugins/inputs"
 )
@@ -26,8 +25,7 @@ type NSD struct {
 	Server     string
 	ConfigFile string
 
-	filter filter.Filter
-	run    runner
+	run run
ner
 }
 
 var defaultBinary = "/usr/sbin/nsd-control"
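The two nsd hunks above go together: the `unused` family of checks reports that the `filter` field is never read or written, and once the field goes, the `telegraf/filter` import has no remaining user (an unused import is a compile error in Go, so both must go in one change). A tiny illustration of the class of dead code caught here, with made-up names:

package main

import "fmt"

type server struct {
	binary string
	// filter filterType // flagged: declared but never read or written,
	// and its import would become dead weight along with it
}

func main() {
	s := server{binary: "/usr/sbin/nsd-control"}
	fmt.Println(s.binary)
}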
@@ -3,16 +3,14 @@ package nsd
 import (
 	"bytes"
 	"testing"
-	"time"
+
+	"github.com/stretchr/testify/assert"
 
 	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/testutil"
-	"github.com/stretchr/testify/assert"
 )
 
-var TestTimeout = internal.Duration{Duration: time.Second}
-
-func NSDControl(output string, Timeout internal.Duration, useSudo bool, Server string, ConfigFile string) func(string, internal.Duration, bool, string, string) (*bytes.Buffer, error) {
+func NSDControl(output string) func(string, internal.Duration, bool, string, string) (*bytes.Buffer, error) {
 	return func(string, internal.Duration, bool, string, string) (*bytes.Buffer, error) {
 		return bytes.NewBuffer([]byte(output)), nil
 	}
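NSDControl is a test-stub factory whose returned closure ignores all of its arguments, so the extra constructor parameters (`Timeout`, `useSudo`, `Server`, `ConfigFile`) were dead weight and only the canned `output` survives; the companion removals, `TestTimeout` and the `time` import, are classic `varcheck` finds, a package-level variable with no remaining references. A self-contained sketch of the same stub pattern, with a simplified signature and illustrative names:

package main

import (
	"bytes"
	"fmt"
)

// nsdControl returns a runner that always yields the canned output; the
// closure's parameters exist only to satisfy the runner signature, so
// the factory itself needs nothing but the output string.
func nsdControl(output string) func(binary string, useSudo bool) (*bytes.Buffer, error) {
	return func(string, bool) (*bytes.Buffer, error) {
		return bytes.NewBuffer([]byte(output)), nil
	}
}

func main() {
	run := nsdControl("num.queries=42\n")
	buf, err := run("/usr/sbin/nsd-control", false)
	if err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
}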
@@ -21,7 +19,7 @@ func NSDControl(output string, Timeout internal.Duration, useSudo bool, Server s
 func TestParseFullOutput(t *testing.T) {
 	acc := &testutil.Accumulator{}
 	v := &NSD{
-		run: NSDControl(fullOutput, TestTimeout, true, "", ""),
+		run: NSDControl(fullOutput),
 	}
 	err := v.Gather(acc)
@@ -21,7 +21,7 @@ type logger struct {
 	log telegraf.Logger
 }
 
-func (l *logger) Output(calldepth int, s string) error {
+func (l *logger) Output(_ int, s string) error {
 	l.log.Debug(s)
 	return nil
 }
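When a signature is pinned by an interface or an external contract, as with this `Output` method, the unused parameter cannot simply be deleted; renaming it to the blank identifier `_` silences revive's unused-parameter rule while keeping the method set intact. A sketch with an illustrative interface (not the actual one this logger satisfies):

package main

import "fmt"

// outputter mimics the log-style contract that pins Output's signature.
type outputter interface {
	Output(calldepth int, s string) error
}

type debugLogger struct{}

// calldepth is required by the interface but unused here, so it becomes _.
func (debugLogger) Output(_ int, s string) error {
	fmt.Println("D!", s)
	return nil
}

func main() {
	var o outputter = debugLogger{}
	_ = o.Output(2, "connected to nsqd")
}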
@@ -179,7 +179,7 @@ func (n *NSQConsumer) Stop() {
 }
 
 // Gather is a noop
-func (n *NSQConsumer) Gather(acc telegraf.Accumulator) error {
+func (n *NSQConsumer) Gather(_ telegraf.Accumulator) error {
 	return nil
 }
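The `Gather` hunk is the same blank-identifier technique: the method exists only to satisfy the `telegraf.Input` interface, its accumulator is never touched, and `_` documents that fact explicitly.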