fix: Linter fixes for plugins/inputs/p* (#10066)

Paweł Żak 2021-11-15 16:14:09 +01:00 committed by GitHub
parent c1263fb03b
commit b9e4978b17
32 changed files with 556 additions and 490 deletions
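Most of the changes below swap testify's assert calls for their require equivalents: a failed require aborts the test immediately, while assert only records the failure and keeps running. A minimal sketch of the pattern (hypothetical test, not taken from this diff):

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestRequireStopsOnFailure shows why require is preferred here: once a
// require check fails the test stops, so the checks below it never run
// against bad state.
func TestRequireStopsOnFailure(t *testing.T) {
	values := map[string]int{"uptime": 42}

	v, ok := values["uptime"]
	require.True(t, ok)     // assert.True would mark the failure and continue
	require.Equal(t, 42, v) // only reached when the key exists
}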

go.mod (4 changed lines)
View File

@ -72,7 +72,6 @@ require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/bitly/go-hostpool v0.1.0 // indirect
github.com/bmatcuk/doublestar/v3 v3.0.0
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869
github.com/caio/go-tdigest v3.1.0+incompatible
github.com/cenkalti/backoff v2.2.1+incompatible // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
@ -171,8 +170,6 @@ require (
github.com/karrick/godirwalk v1.16.1
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
github.com/klauspost/compress v1.13.6 // indirect
github.com/kr/pretty v0.3.0 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
@ -345,7 +342,6 @@ require (
github.com/jcmturner/rpc/v2 v2.0.3 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/pierrec/lz4/v4 v4.1.8 // indirect
github.com/rogpeppe/go-internal v1.6.2 // indirect
go.opentelemetry.io/otel v1.0.1 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.24.0 // indirect
go.opentelemetry.io/otel/internal/metric v0.24.0 // indirect

View File

@ -8,7 +8,6 @@ import (
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/testutil"
@ -49,7 +48,7 @@ func Test_Invalid_Passenger_Status_Cli(t *testing.T) {
err := r.Gather(&acc)
require.Error(t, err)
assert.Contains(t, err.Error(), `exec: "an-invalid-command": executable file not found in `)
require.Contains(t, err.Error(), `exec: "an-invalid-command": executable file not found in `)
}
func Test_Invalid_Xml(t *testing.T) {
@ -65,7 +64,7 @@ func Test_Invalid_Xml(t *testing.T) {
err = r.Gather(&acc)
require.Error(t, err)
assert.Equal(t, "cannot parse input with error: EOF", err.Error())
require.Equal(t, "cannot parse input with error: EOF", err.Error())
}
// We test this by ensuring that the error message matches the path of the default cli
@ -80,7 +79,7 @@ func Test_Default_Config_Load_Default_Command(t *testing.T) {
err = r.Gather(&acc)
require.Error(t, err)
assert.Contains(t, err.Error(), "exec: \"passenger-status\": executable file not found in ")
require.Contains(t, err.Error(), "exec: \"passenger-status\": executable file not found in ")
}
func TestPassengerGenerateMetric(t *testing.T) {

View File

@ -4,10 +4,10 @@ import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/plugins/inputs/postgresql"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
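Alongside the assertion changes, many of these test files regroup their imports; the layout appears to be standard library first, then third-party modules, then influxdata packages, each in its own blank-line-separated block. A small sketch of that grouping (hypothetical file):

package example

import (
	"testing" // standard library

	"github.com/stretchr/testify/require" // third-party modules

	"github.com/influxdata/telegraf/testutil" // project packages
)

func TestImportGrouping(t *testing.T) {
	var acc testutil.Accumulator
	require.NotNil(t, &acc)
}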
func TestPgBouncerGeneratesMetricsIntegration(t *testing.T) {
@ -55,20 +55,20 @@ func TestPgBouncerGeneratesMetricsIntegration(t *testing.T) {
metricsCounted := 0
for _, metric := range intMetricsPgBouncer {
assert.True(t, acc.HasInt64Field("pgbouncer", metric))
require.True(t, acc.HasInt64Field("pgbouncer", metric))
metricsCounted++
}
for _, metric := range intMetricsPgBouncerPools {
assert.True(t, acc.HasInt64Field("pgbouncer_pools", metric))
require.True(t, acc.HasInt64Field("pgbouncer_pools", metric))
metricsCounted++
}
for _, metric := range int32Metrics {
assert.True(t, acc.HasInt32Field("pgbouncer", metric))
require.True(t, acc.HasInt32Field("pgbouncer", metric))
metricsCounted++
}
assert.True(t, metricsCounted > 0)
assert.Equal(t, len(intMetricsPgBouncer)+len(intMetricsPgBouncerPools)+len(int32Metrics), metricsCounted)
require.True(t, metricsCounted > 0)
require.Equal(t, len(intMetricsPgBouncer)+len(intMetricsPgBouncerPools)+len(int32Metrics), metricsCounted)
}

View File

@ -33,26 +33,23 @@ func newFcgiClient(h string, args ...interface{}) (*conn, error) {
return fcgi, err
}
func (c *conn) Request(
env map[string]string,
requestData string,
) (retout []byte, reterr []byte, err error) {
func (c *conn) Request(env map[string]string, requestData string) (retout []byte, reterr []byte, err error) {
defer c.rwc.Close()
var reqID uint16 = 1
err = c.writeBeginRequest(reqID, uint16(roleResponder), 0)
if err != nil {
return
return nil, nil, err
}
err = c.writePairs(typeParams, reqID, env)
if err != nil {
return
return nil, nil, err
}
if len(requestData) > 0 {
if err = c.writeRecord(typeStdin, reqID, []byte(requestData)); err != nil {
return
return nil, nil, err
}
}
@ -82,5 +79,5 @@ READ_LOOP:
}
}
return
return retout, reterr, err
}
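The Request rewrite above also replaces bare return statements with explicit return nil, nil, err values. A short sketch of why linters such as nakedret dislike bare returns with named results (hypothetical function, not from the plugin):

package main

import (
	"errors"
	"fmt"
)

// divide has named results; spelling the return values out on every path
// makes it obvious what the caller receives, instead of relying on whatever
// the named variables happen to hold at a bare "return".
func divide(a, b int) (quotient int, err error) {
	if b == 0 {
		return 0, errors.New("division by zero")
	}
	return a / b, nil
}

func main() {
	q, err := divide(10, 2)
	fmt.Println(q, err) // 5 <nil>
}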

View File

@ -276,12 +276,12 @@ func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) {
func expandUrls(urls []string) ([]string, error) {
addrs := make([]string, 0, len(urls))
for _, url := range urls {
if isNetworkURL(url) {
addrs = append(addrs, url)
for _, address := range urls {
if isNetworkURL(address) {
addrs = append(addrs, address)
continue
}
paths, err := globUnixSocket(url)
paths, err := globUnixSocket(address)
if err != nil {
return nil, err
}
@ -290,8 +290,8 @@ func expandUrls(urls []string) ([]string, error) {
return addrs, nil
}
func globUnixSocket(url string) ([]string, error) {
pattern, status := unixSocketPaths(url)
func globUnixSocket(address string) ([]string, error) {
pattern, status := unixSocketPaths(address)
glob, err := globpath.Compile(pattern)
if err != nil {
return nil, fmt.Errorf("could not compile glob %q: %v", pattern, err)
@ -312,9 +312,7 @@ func globUnixSocket(url string) ([]string, error) {
return addresses, nil
}
func unixSocketPaths(addr string) (string, string) {
var socketPath, statusPath string
func unixSocketPaths(addr string) (socketPath string, statusPath string) {
socketAddr := strings.Split(addr, ":")
if len(socketAddr) >= 2 {
socketPath = socketAddr[0]

View File

@ -16,9 +16,9 @@ import (
"net/http/httptest"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/testutil"
)
type statServer struct{}
@ -283,7 +283,7 @@ func TestPhpFpmDefaultGetFromLocalhost(t *testing.T) {
err := acc.GatherError(r.Gather)
require.Error(t, err)
assert.Contains(t, err.Error(), "/status")
require.Contains(t, err.Error(), "/status")
}
func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t *testing.T) {
@ -297,8 +297,8 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t
err := acc.GatherError(r.Gather)
require.Error(t, err)
assert.Contains(t, err.Error(), `unable to connect to phpfpm status page 'http://aninvalidone'`)
assert.Contains(t, err.Error(), `lookup aninvalidone`)
require.Contains(t, err.Error(), `unable to connect to phpfpm status page 'http://aninvalidone'`)
require.Contains(t, err.Error(), `lookup aninvalidone`)
}
func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testing.T) {
@ -312,7 +312,7 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testi
err := acc.GatherError(r.Gather)
require.Error(t, err)
assert.Equal(t, `socket doesn't exist "/tmp/invalid.sock"`, err.Error())
require.Equal(t, `socket doesn't exist "/tmp/invalid.sock"`, err.Error())
}
const outputSample = `

View File

@ -13,6 +13,7 @@ import (
"time"
"github.com/go-ping/ping"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
@ -82,6 +83,20 @@ type Ping struct {
Size *int
}
type roundTripTimeStats struct {
min float64
avg float64
max float64
stddev float64
}
type stats struct {
trans int
recv int
ttl int
roundTripTimeStats
}
func (*Ping) Description() string {
return "Ping given url(s) and return statistics"
}
@ -262,7 +277,7 @@ func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) {
sort.Sort(durationSlice(stats.Rtts))
for _, perc := range p.Percentiles {
var value = percentile(durationSlice(stats.Rtts), perc)
var value = percentile(stats.Rtts, perc)
var field = fmt.Sprintf("percentile%v_ms", perc)
fields[field] = float64(value.Nanoseconds()) / float64(time.Millisecond)
}
@ -273,6 +288,7 @@ func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) {
fields["ttl"] = stats.ttl
}
//nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40
fields["percent_packet_loss"] = float64(stats.PacketLoss)
fields["minimum_response_ms"] = float64(stats.MinRtt) / float64(time.Millisecond)
fields["average_response_ms"] = float64(stats.AvgRtt) / float64(time.Millisecond)

View File

@ -57,7 +57,7 @@ func (p *Ping) pingToURL(u string, acc telegraf.Accumulator) {
return
}
}
trans, rec, ttl, min, avg, max, stddev, err := processPingOutput(out)
stats, err := processPingOutput(out)
if err != nil {
// fatal error
acc.AddError(fmt.Errorf("%s: %s", err, u))
@ -67,25 +67,25 @@ func (p *Ping) pingToURL(u string, acc telegraf.Accumulator) {
}
// Calculate packet loss percentage
loss := float64(trans-rec) / float64(trans) * 100.0
loss := float64(stats.trans-stats.recv) / float64(stats.trans) * 100.0
fields["packets_transmitted"] = trans
fields["packets_received"] = rec
fields["packets_transmitted"] = stats.trans
fields["packets_received"] = stats.recv
fields["percent_packet_loss"] = loss
if ttl >= 0 {
fields["ttl"] = ttl
if stats.ttl >= 0 {
fields["ttl"] = stats.ttl
}
if min >= 0 {
fields["minimum_response_ms"] = min
if stats.min >= 0 {
fields["minimum_response_ms"] = stats.min
}
if avg >= 0 {
fields["average_response_ms"] = avg
if stats.avg >= 0 {
fields["average_response_ms"] = stats.avg
}
if max >= 0 {
fields["maximum_response_ms"] = max
if stats.max >= 0 {
fields["maximum_response_ms"] = stats.max
}
if stddev >= 0 {
fields["standard_deviation_ms"] = stddev
if stats.stddev >= 0 {
fields["standard_deviation_ms"] = stats.stddev
}
acc.AddFields("ping", fields, tags)
}
@ -165,36 +165,47 @@ func (p *Ping) args(url string, system string) []string {
// round-trip min/avg/max/stddev = 34.843/43.508/52.172/8.664 ms
//
// It returns a stats struct with the packet counts, TTL and round-trip times
func processPingOutput(out string) (int, int, int, float64, float64, float64, float64, error) {
var trans, recv, ttl int = 0, 0, -1
var min, avg, max, stddev float64 = -1.0, -1.0, -1.0, -1.0
func processPingOutput(out string) (stats, error) {
stats := stats{
trans: 0,
recv: 0,
ttl: -1,
roundTripTimeStats: roundTripTimeStats{
min: -1.0,
avg: -1.0,
max: -1.0,
stddev: -1.0,
},
}
// Set this error to nil if we find a 'transmitted' line
err := errors.New("Fatal error processing ping output")
err := errors.New("fatal error processing ping output")
lines := strings.Split(out, "\n")
for _, line := range lines {
// Reading only first TTL, ignoring other TTL messages
if ttl == -1 && (strings.Contains(line, "ttl=") || strings.Contains(line, "hlim=")) {
ttl, err = getTTL(line)
} else if strings.Contains(line, "transmitted") &&
strings.Contains(line, "received") {
trans, recv, err = getPacketStats(line, trans, recv)
if stats.ttl == -1 && (strings.Contains(line, "ttl=") || strings.Contains(line, "hlim=")) {
stats.ttl, err = getTTL(line)
} else if strings.Contains(line, "transmitted") && strings.Contains(line, "received") {
stats.trans, stats.recv, err = getPacketStats(line)
if err != nil {
return trans, recv, ttl, min, avg, max, stddev, err
return stats, err
}
} else if strings.Contains(line, "min/avg/max") {
min, avg, max, stddev, err = checkRoundTripTimeStats(line, min, avg, max, stddev)
stats.roundTripTimeStats, err = checkRoundTripTimeStats(line)
if err != nil {
return trans, recv, ttl, min, avg, max, stddev, err
return stats, err
}
}
}
return trans, recv, ttl, min, avg, max, stddev, err
return stats, err
}
func getPacketStats(line string, trans, recv int) (int, int, error) {
func getPacketStats(line string) (trans int, recv int, err error) {
trans, recv = 0, 0
stats := strings.Split(line, ", ")
// Transmitted packets
trans, err := strconv.Atoi(strings.Split(stats[0], " ")[0])
trans, err = strconv.Atoi(strings.Split(stats[0], " ")[0])
if err != nil {
return trans, recv, err
}
@ -209,28 +220,35 @@ func getTTL(line string) (int, error) {
return strconv.Atoi(ttlMatch[2])
}
func checkRoundTripTimeStats(line string, min, avg, max,
stddev float64) (float64, float64, float64, float64, error) {
func checkRoundTripTimeStats(line string) (roundTripTimeStats, error) {
roundTripTimeStats := roundTripTimeStats{
min: -1.0,
avg: -1.0,
max: -1.0,
stddev: -1.0,
}
stats := strings.Split(line, " ")[3]
data := strings.Split(stats, "/")
min, err := strconv.ParseFloat(data[0], 64)
var err error
roundTripTimeStats.min, err = strconv.ParseFloat(data[0], 64)
if err != nil {
return min, avg, max, stddev, err
return roundTripTimeStats, err
}
avg, err = strconv.ParseFloat(data[1], 64)
roundTripTimeStats.avg, err = strconv.ParseFloat(data[1], 64)
if err != nil {
return min, avg, max, stddev, err
return roundTripTimeStats, err
}
max, err = strconv.ParseFloat(data[2], 64)
roundTripTimeStats.max, err = strconv.ParseFloat(data[2], 64)
if err != nil {
return min, avg, max, stddev, err
return roundTripTimeStats, err
}
if len(data) == 4 {
stddev, err = strconv.ParseFloat(data[3], 64)
roundTripTimeStats.stddev, err = strconv.ParseFloat(data[3], 64)
if err != nil {
return min, avg, max, stddev, err
return roundTripTimeStats, err
}
}
return min, avg, max, stddev, err
return roundTripTimeStats, err
}
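This file also lowercases its error strings ("Fatal error processing ping output" becomes "fatal error processing ping output"). Go style checks such as staticcheck's ST1005 want error strings uncapitalized and free of trailing punctuation because they are usually wrapped into longer messages; a tiny sketch (hypothetical host name):

package main

import (
	"errors"
	"fmt"
)

func process(out string) error {
	if out == "" {
		// lowercase, no trailing period: the message is meant to be wrapped
		return errors.New("fatal error processing ping output")
	}
	return nil
}

func main() {
	if err := process(""); err != nil {
		// composes naturally into a longer message
		fmt.Printf("host www.google.com: %v\n", err)
	}
}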

View File

@ -12,10 +12,10 @@ import (
"time"
"github.com/go-ping/ping"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// BSD/Darwin ping output
@ -80,45 +80,45 @@ ping: -i interval too short: Operation not permitted
// Test that ping command output is processed properly
func TestProcessPingOutput(t *testing.T) {
trans, rec, ttl, min, avg, max, stddev, err := processPingOutput(bsdPingOutput)
assert.NoError(t, err)
assert.Equal(t, 55, ttl, "ttl value is 55")
assert.Equal(t, 5, trans, "5 packets were transmitted")
assert.Equal(t, 5, rec, "5 packets were received")
assert.InDelta(t, 15.087, min, 0.001)
assert.InDelta(t, 20.224, avg, 0.001)
assert.InDelta(t, 27.263, max, 0.001)
assert.InDelta(t, 4.076, stddev, 0.001)
stats, err := processPingOutput(bsdPingOutput)
require.NoError(t, err)
require.Equal(t, 55, stats.ttl, "ttl value is 55")
require.Equal(t, 5, stats.trans, "5 packets were transmitted")
require.Equal(t, 5, stats.recv, "5 packets were received")
require.InDelta(t, 15.087, stats.min, 0.001)
require.InDelta(t, 20.224, stats.avg, 0.001)
require.InDelta(t, 27.263, stats.max, 0.001)
require.InDelta(t, 4.076, stats.stddev, 0.001)
trans, rec, ttl, min, avg, max, stddev, err = processPingOutput(freebsdPing6Output)
assert.NoError(t, err)
assert.Equal(t, 117, ttl, "ttl value is 117")
assert.Equal(t, 5, trans, "5 packets were transmitted")
assert.Equal(t, 5, rec, "5 packets were received")
assert.InDelta(t, 35.727, min, 0.001)
assert.InDelta(t, 53.211, avg, 0.001)
assert.InDelta(t, 93.870, max, 0.001)
assert.InDelta(t, 22.000, stddev, 0.001)
stats, err = processPingOutput(freebsdPing6Output)
require.NoError(t, err)
require.Equal(t, 117, stats.ttl, "ttl value is 117")
require.Equal(t, 5, stats.trans, "5 packets were transmitted")
require.Equal(t, 5, stats.recv, "5 packets were received")
require.InDelta(t, 35.727, stats.min, 0.001)
require.InDelta(t, 53.211, stats.avg, 0.001)
require.InDelta(t, 93.870, stats.max, 0.001)
require.InDelta(t, 22.000, stats.stddev, 0.001)
trans, rec, ttl, min, avg, max, stddev, err = processPingOutput(linuxPingOutput)
assert.NoError(t, err)
assert.Equal(t, 63, ttl, "ttl value is 63")
assert.Equal(t, 5, trans, "5 packets were transmitted")
assert.Equal(t, 5, rec, "5 packets were received")
assert.InDelta(t, 35.225, min, 0.001)
assert.InDelta(t, 43.628, avg, 0.001)
assert.InDelta(t, 51.806, max, 0.001)
assert.InDelta(t, 5.325, stddev, 0.001)
stats, err = processPingOutput(linuxPingOutput)
require.NoError(t, err)
require.Equal(t, 63, stats.ttl, "ttl value is 63")
require.Equal(t, 5, stats.trans, "5 packets were transmitted")
require.Equal(t, 5, stats.recv, "5 packets were received")
require.InDelta(t, 35.225, stats.min, 0.001)
require.InDelta(t, 43.628, stats.avg, 0.001)
require.InDelta(t, 51.806, stats.max, 0.001)
require.InDelta(t, 5.325, stats.stddev, 0.001)
trans, rec, ttl, min, avg, max, stddev, err = processPingOutput(busyBoxPingOutput)
assert.NoError(t, err)
assert.Equal(t, 56, ttl, "ttl value is 56")
assert.Equal(t, 4, trans, "4 packets were transmitted")
assert.Equal(t, 4, rec, "4 packets were received")
assert.InDelta(t, 15.810, min, 0.001)
assert.InDelta(t, 17.611, avg, 0.001)
assert.InDelta(t, 22.559, max, 0.001)
assert.InDelta(t, -1.0, stddev, 0.001)
stats, err = processPingOutput(busyBoxPingOutput)
require.NoError(t, err)
require.Equal(t, 56, stats.ttl, "ttl value is 56")
require.Equal(t, 4, stats.trans, "4 packets were transmitted")
require.Equal(t, 4, stats.recv, "4 packets were received")
require.InDelta(t, 15.810, stats.min, 0.001)
require.InDelta(t, 17.611, stats.avg, 0.001)
require.InDelta(t, 22.559, stats.max, 0.001)
require.InDelta(t, -1.0, stats.stddev, 0.001)
}
// Linux ping output with varying TTL
@ -137,22 +137,22 @@ rtt min/avg/max/mdev = 35.225/43.628/51.806/5.325 ms
// Test that ping command output is processed properly
func TestProcessPingOutputWithVaryingTTL(t *testing.T) {
trans, rec, ttl, min, avg, max, stddev, err := processPingOutput(linuxPingOutputWithVaryingTTL)
assert.NoError(t, err)
assert.Equal(t, 63, ttl, "ttl value is 63")
assert.Equal(t, 5, trans, "5 packets were transmitted")
assert.Equal(t, 5, rec, "5 packets were transmitted")
assert.InDelta(t, 35.225, min, 0.001)
assert.InDelta(t, 43.628, avg, 0.001)
assert.InDelta(t, 51.806, max, 0.001)
assert.InDelta(t, 5.325, stddev, 0.001)
stats, err := processPingOutput(linuxPingOutputWithVaryingTTL)
require.NoError(t, err)
require.Equal(t, 63, stats.ttl, "ttl value is 63")
require.Equal(t, 5, stats.trans, "5 packets were transmitted")
require.Equal(t, 5, stats.recv, "5 packets were received")
require.InDelta(t, 35.225, stats.min, 0.001)
require.InDelta(t, 43.628, stats.avg, 0.001)
require.InDelta(t, 51.806, stats.max, 0.001)
require.InDelta(t, 5.325, stats.stddev, 0.001)
}
// Test that processPingOutput returns an error when 'ping' fails to run, such
// as when an invalid argument is provided
func TestErrorProcessPingOutput(t *testing.T) {
_, _, _, _, _, _, _, err := processPingOutput(fatalPingOutput)
assert.Error(t, err, "Error was expected from processPingOutput")
_, err := processPingOutput(fatalPingOutput)
require.Error(t, err, "Error was expected from processPingOutput")
}
// Test that default arg lists are created correctly
@ -350,7 +350,7 @@ func TestBadPingGather(t *testing.T) {
}
func mockFatalHostPinger(_ string, _ float64, _ ...string) (string, error) {
return fatalPingOutput, errors.New("So very bad")
return fatalPingOutput, errors.New("so very bad")
}
// Test that a fatal ping command does not gather any statistics.
@ -363,20 +363,20 @@ func TestFatalPingGather(t *testing.T) {
err := acc.GatherError(p.Gather)
require.Error(t, err)
require.EqualValues(t, err.Error(), "host www.amazon.com: ping: -i interval too short: Operation not permitted, So very bad")
assert.False(t, acc.HasMeasurement("packets_transmitted"),
require.EqualValues(t, err.Error(), "host www.amazon.com: ping: -i interval too short: Operation not permitted, so very bad")
require.False(t, acc.HasMeasurement("packets_transmitted"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasMeasurement("packets_received"),
require.False(t, acc.HasMeasurement("packets_received"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasMeasurement("percent_packet_loss"),
require.False(t, acc.HasMeasurement("percent_packet_loss"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasMeasurement("ttl"),
require.False(t, acc.HasMeasurement("ttl"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasMeasurement("minimum_response_ms"),
require.False(t, acc.HasMeasurement("minimum_response_ms"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasMeasurement("average_response_ms"),
require.False(t, acc.HasMeasurement("average_response_ms"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasMeasurement("maximum_response_ms"),
require.False(t, acc.HasMeasurement("maximum_response_ms"),
"Fatal ping should not have packet measurements")
}
@ -385,8 +385,8 @@ func TestErrorWithHostNamePingGather(t *testing.T) {
out string
error error
}{
{"", errors.New("host www.amazon.com: So very bad")},
{"so bad", errors.New("host www.amazon.com: so bad, So very bad")},
{"", errors.New("host www.amazon.com: so very bad")},
{"so bad", errors.New("host www.amazon.com: so bad, so very bad")},
}
for _, param := range params {
@ -394,12 +394,12 @@ func TestErrorWithHostNamePingGather(t *testing.T) {
p := Ping{
Urls: []string{"www.amazon.com"},
pingHost: func(binary string, timeout float64, args ...string) (string, error) {
return param.out, errors.New("So very bad")
return param.out, errors.New("so very bad")
},
}
require.Error(t, acc.GatherError(p.Gather))
assert.True(t, len(acc.Errors) > 0)
assert.Contains(t, acc.Errors, param.error)
require.True(t, len(acc.Errors) > 0)
require.Contains(t, acc.Errors, param.error)
}
}
@ -409,13 +409,13 @@ func TestPingBinary(t *testing.T) {
Urls: []string{"www.google.com"},
Binary: "ping6",
pingHost: func(binary string, timeout float64, args ...string) (string, error) {
assert.True(t, binary == "ping6")
require.True(t, binary == "ping6")
return "", nil
},
}
err := acc.GatherError(p.Gather)
require.Error(t, err)
require.EqualValues(t, err.Error(), "Fatal error processing ping output: www.google.com")
require.EqualValues(t, err.Error(), "fatal error processing ping output: www.google.com")
}
// Test that Gather function works using native ping
@ -469,19 +469,19 @@ func TestPingGatherNative(t *testing.T) {
var acc testutil.Accumulator
require.NoError(t, tc.P.Init())
require.NoError(t, acc.GatherError(tc.P.Gather))
assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_transmitted", 5))
assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_received", 5))
assert.True(t, acc.HasField("ping", "percentile50_ms"))
assert.Equal(t, float64(3), acc.Metrics[0].Fields["percentile50_ms"])
assert.True(t, acc.HasField("ping", "percentile95_ms"))
assert.Equal(t, float64(4.799999), acc.Metrics[0].Fields["percentile95_ms"])
assert.True(t, acc.HasField("ping", "percentile99_ms"))
assert.Equal(t, float64(4.96), acc.Metrics[0].Fields["percentile99_ms"])
assert.True(t, acc.HasField("ping", "percent_packet_loss"))
assert.True(t, acc.HasField("ping", "minimum_response_ms"))
assert.True(t, acc.HasField("ping", "average_response_ms"))
assert.True(t, acc.HasField("ping", "maximum_response_ms"))
assert.True(t, acc.HasField("ping", "standard_deviation_ms"))
require.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_transmitted", 5))
require.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_received", 5))
require.True(t, acc.HasField("ping", "percentile50_ms"))
require.Equal(t, float64(3), acc.Metrics[0].Fields["percentile50_ms"])
require.True(t, acc.HasField("ping", "percentile95_ms"))
require.Equal(t, float64(4.799999), acc.Metrics[0].Fields["percentile95_ms"])
require.True(t, acc.HasField("ping", "percentile99_ms"))
require.Equal(t, float64(4.96), acc.Metrics[0].Fields["percentile99_ms"])
require.True(t, acc.HasField("ping", "percent_packet_loss"))
require.True(t, acc.HasField("ping", "minimum_response_ms"))
require.True(t, acc.HasField("ping", "average_response_ms"))
require.True(t, acc.HasField("ping", "maximum_response_ms"))
require.True(t, acc.HasField("ping", "standard_deviation_ms"))
}
}

View File

@ -8,9 +8,9 @@ import (
"reflect"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/testutil"
)
// Windows ping format (should support multiple languages?)
@ -44,22 +44,22 @@ Approximate round trip times in milli-seconds:
func TestHost(t *testing.T) {
trans, recReply, recPacket, avg, min, max, err := processPingOutput(winPLPingOutput)
assert.NoError(t, err)
assert.Equal(t, 4, trans, "4 packets were transmitted")
assert.Equal(t, 4, recReply, "4 packets were reply")
assert.Equal(t, 4, recPacket, "4 packets were received")
assert.Equal(t, 50, avg, "Average 50")
assert.Equal(t, 46, min, "Min 46")
assert.Equal(t, 57, max, "max 57")
require.NoError(t, err)
require.Equal(t, 4, trans, "4 packets were transmitted")
require.Equal(t, 4, recReply, "4 packets were reply")
require.Equal(t, 4, recPacket, "4 packets were received")
require.Equal(t, 50, avg, "Average 50")
require.Equal(t, 46, min, "Min 46")
require.Equal(t, 57, max, "max 57")
trans, recReply, recPacket, avg, min, max, err = processPingOutput(winENPingOutput)
assert.NoError(t, err)
assert.Equal(t, 4, trans, "4 packets were transmitted")
assert.Equal(t, 4, recReply, "4 packets were reply")
assert.Equal(t, 4, recPacket, "4 packets were received")
assert.Equal(t, 50, avg, "Average 50")
assert.Equal(t, 50, min, "Min 50")
assert.Equal(t, 52, max, "Max 52")
require.NoError(t, err)
require.Equal(t, 4, trans, "4 packets were transmitted")
require.Equal(t, 4, recReply, "4 packets were reply")
require.Equal(t, 4, recPacket, "4 packets were received")
require.Equal(t, 50, avg, "Average 50")
require.Equal(t, 50, min, "Min 50")
require.Equal(t, 52, max, "Max 52")
}
func mockHostPinger(binary string, timeout float64, args ...string) (string, error) {
@ -239,21 +239,21 @@ func TestFatalPingGather(t *testing.T) {
}
acc.GatherError(p.Gather)
assert.True(t, acc.HasFloatField("ping", "errors"),
require.True(t, acc.HasFloatField("ping", "errors"),
"Fatal ping should have packet measurements")
assert.False(t, acc.HasInt64Field("ping", "packets_transmitted"),
require.False(t, acc.HasInt64Field("ping", "packets_transmitted"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasInt64Field("ping", "packets_received"),
require.False(t, acc.HasInt64Field("ping", "packets_received"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasFloatField("ping", "percent_packet_loss"),
require.False(t, acc.HasFloatField("ping", "percent_packet_loss"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasFloatField("ping", "percent_reply_loss"),
require.False(t, acc.HasFloatField("ping", "percent_reply_loss"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasInt64Field("ping", "average_response_ms"),
require.False(t, acc.HasInt64Field("ping", "average_response_ms"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasInt64Field("ping", "maximum_response_ms"),
require.False(t, acc.HasInt64Field("ping", "maximum_response_ms"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasInt64Field("ping", "minimum_response_ms"),
require.False(t, acc.HasInt64Field("ping", "minimum_response_ms"),
"Fatal ping should not have packet measurements")
}
@ -297,13 +297,13 @@ func TestUnreachablePingGather(t *testing.T) {
}
acc.AssertContainsTaggedFields(t, "ping", fields, tags)
assert.False(t, acc.HasFloatField("ping", "errors"),
require.False(t, acc.HasFloatField("ping", "errors"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasInt64Field("ping", "average_response_ms"),
require.False(t, acc.HasInt64Field("ping", "average_response_ms"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasInt64Field("ping", "maximum_response_ms"),
require.False(t, acc.HasInt64Field("ping", "maximum_response_ms"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasInt64Field("ping", "minimum_response_ms"),
require.False(t, acc.HasInt64Field("ping", "minimum_response_ms"),
"Fatal ping should not have packet measurements")
}
@ -345,13 +345,13 @@ func TestTTLExpiredPingGather(t *testing.T) {
}
acc.AssertContainsTaggedFields(t, "ping", fields, tags)
assert.False(t, acc.HasFloatField("ping", "errors"),
require.False(t, acc.HasFloatField("ping", "errors"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasInt64Field("ping", "average_response_ms"),
require.False(t, acc.HasInt64Field("ping", "average_response_ms"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasInt64Field("ping", "maximum_response_ms"),
require.False(t, acc.HasInt64Field("ping", "maximum_response_ms"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasInt64Field("ping", "minimum_response_ms"),
require.False(t, acc.HasInt64Field("ping", "minimum_response_ms"),
"Fatal ping should not have packet measurements")
}
@ -362,7 +362,7 @@ func TestPingBinary(t *testing.T) {
Urls: []string{"www.google.com"},
Binary: "ping6",
pingHost: func(binary string, timeout float64, args ...string) (string, error) {
assert.True(t, binary == "ping6")
require.True(t, binary == "ping6")
return "", nil
},
}

View File

@ -33,9 +33,10 @@ func getQueueDirectory() (string, error) {
return strings.TrimSpace(string(qd)), nil
}
func qScan(path string, acc telegraf.Accumulator) (int64, int64, int64, error) {
func qScan(path string, acc telegraf.Accumulator) (map[string]interface{}, error) {
var length, size int64
var oldest time.Time
err := filepath.Walk(path, func(_ string, finfo os.FileInfo, err error) error {
if err != nil {
acc.AddError(fmt.Errorf("error scanning %s: %s", path, err))
@ -57,9 +58,11 @@ func qScan(path string, acc telegraf.Accumulator) (int64, int64, int64, error) {
}
return nil
})
if err != nil {
return 0, 0, 0, err
return nil, err
}
var age int64
if !oldest.IsZero() {
age = int64(time.Since(oldest) / time.Second)
@ -67,7 +70,13 @@ func qScan(path string, acc telegraf.Accumulator) (int64, int64, int64, error) {
// system doesn't support ctime
age = -1
}
return length, size, age, nil
fields := map[string]interface{}{"length": length, "size": size}
if age != -1 {
fields["age"] = age
}
return fields, nil
}
type Postfix struct {
@ -84,15 +93,12 @@ func (p *Postfix) Gather(acc telegraf.Accumulator) error {
}
for _, q := range []string{"active", "hold", "incoming", "maildrop", "deferred"} {
length, size, age, err := qScan(filepath.Join(p.QueueDirectory, q), acc)
fields, err := qScan(filepath.Join(p.QueueDirectory, q), acc)
if err != nil {
acc.AddError(fmt.Errorf("error scanning queue %s: %s", q, err))
continue
}
fields := map[string]interface{}{"length": length, "size": size}
if age != -1 {
fields["age"] = age
}
acc.AddFields("postfix_queue", fields, map[string]string{"queue": q})
}

View File

@ -8,9 +8,9 @@ import (
"path/filepath"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/testutil"
)
func TestGather(t *testing.T) {
@ -41,20 +41,20 @@ func TestGather(t *testing.T) {
metrics[m.Tags["queue"]] = m
}
assert.Equal(t, int64(2), metrics["active"].Fields["length"])
assert.Equal(t, int64(7), metrics["active"].Fields["size"])
assert.InDelta(t, 0, metrics["active"].Fields["age"], 10)
require.Equal(t, int64(2), metrics["active"].Fields["length"])
require.Equal(t, int64(7), metrics["active"].Fields["size"])
require.InDelta(t, 0, metrics["active"].Fields["age"], 10)
assert.Equal(t, int64(1), metrics["hold"].Fields["length"])
assert.Equal(t, int64(3), metrics["hold"].Fields["size"])
require.Equal(t, int64(1), metrics["hold"].Fields["length"])
require.Equal(t, int64(3), metrics["hold"].Fields["size"])
assert.Equal(t, int64(1), metrics["incoming"].Fields["length"])
assert.Equal(t, int64(4), metrics["incoming"].Fields["size"])
require.Equal(t, int64(1), metrics["incoming"].Fields["length"])
require.Equal(t, int64(4), metrics["incoming"].Fields["size"])
assert.Equal(t, int64(0), metrics["maildrop"].Fields["length"])
assert.Equal(t, int64(0), metrics["maildrop"].Fields["size"])
assert.Equal(t, int64(0), metrics["maildrop"].Fields["age"])
require.Equal(t, int64(0), metrics["maildrop"].Fields["length"])
require.Equal(t, int64(0), metrics["maildrop"].Fields["size"])
require.Equal(t, int64(0), metrics["maildrop"].Fields["age"])
assert.Equal(t, int64(2), metrics["deferred"].Fields["length"])
assert.Equal(t, int64(6), metrics["deferred"].Fields["size"])
require.Equal(t, int64(2), metrics["deferred"].Fields["length"])
require.Equal(t, int64(6), metrics["deferred"].Fields["size"])
}

View File

@ -4,9 +4,9 @@ import (
"fmt"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/testutil"
)
func TestPostgresqlGeneratesMetricsIntegration(t *testing.T) {
@ -71,27 +71,27 @@ func TestPostgresqlGeneratesMetricsIntegration(t *testing.T) {
metricsCounted := 0
for _, metric := range intMetrics {
assert.True(t, acc.HasInt64Field("postgresql", metric))
require.True(t, acc.HasInt64Field("postgresql", metric))
metricsCounted++
}
for _, metric := range int32Metrics {
assert.True(t, acc.HasInt32Field("postgresql", metric))
require.True(t, acc.HasInt32Field("postgresql", metric))
metricsCounted++
}
for _, metric := range floatMetrics {
assert.True(t, acc.HasFloatField("postgresql", metric))
require.True(t, acc.HasFloatField("postgresql", metric))
metricsCounted++
}
for _, metric := range stringMetrics {
assert.True(t, acc.HasStringField("postgresql", metric))
require.True(t, acc.HasStringField("postgresql", metric))
metricsCounted++
}
assert.True(t, metricsCounted > 0)
assert.Equal(t, len(floatMetrics)+len(intMetrics)+len(int32Metrics)+len(stringMetrics), metricsCounted)
require.True(t, metricsCounted > 0)
require.Equal(t, len(floatMetrics)+len(intMetrics)+len(int32Metrics)+len(stringMetrics), metricsCounted)
}
func TestPostgresqlTagsMetricsWithDatabaseNameIntegration(t *testing.T) {
@ -117,7 +117,7 @@ func TestPostgresqlTagsMetricsWithDatabaseNameIntegration(t *testing.T) {
point, ok := acc.Get("postgresql")
require.True(t, ok)
assert.Equal(t, "postgres", point.Tags["db"])
require.Equal(t, "postgres", point.Tags["db"])
}
func TestPostgresqlDefaultsToAllDatabasesIntegration(t *testing.T) {
@ -150,7 +150,7 @@ func TestPostgresqlDefaultsToAllDatabasesIntegration(t *testing.T) {
}
}
assert.True(t, found)
require.True(t, found)
}
func TestPostgresqlIgnoresUnwantedColumnsIntegration(t *testing.T) {
@ -172,7 +172,7 @@ func TestPostgresqlIgnoresUnwantedColumnsIntegration(t *testing.T) {
require.NoError(t, p.Gather(&acc))
for col := range p.IgnoredColumns() {
assert.False(t, acc.HasMeasurement(col))
require.False(t, acc.HasMeasurement(col))
}
}
@ -212,8 +212,8 @@ func TestPostgresqlDatabaseWhitelistTestIntegration(t *testing.T) {
}
}
assert.True(t, foundTemplate0)
assert.False(t, foundTemplate1)
require.True(t, foundTemplate0)
require.False(t, foundTemplate1)
}
func TestPostgresqlDatabaseBlacklistTestIntegration(t *testing.T) {
@ -251,6 +251,6 @@ func TestPostgresqlDatabaseBlacklistTestIntegration(t *testing.T) {
}
}
assert.False(t, foundTemplate0)
assert.True(t, foundTemplate1)
require.False(t, foundTemplate0)
require.True(t, foundTemplate1)
}

View File

@ -142,7 +142,7 @@ func (p *Service) Stop() {
p.DB.Close()
}
var kvMatcher, _ = regexp.Compile("(password|sslcert|sslkey|sslmode|sslrootcert)=\\S+ ?")
var kvMatcher, _ = regexp.Compile(`(password|sslcert|sslkey|sslmode|sslrootcert)=\S+ ?`)
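The kvMatcher pattern above is rewritten as a raw string literal, so \S no longer needs a doubled backslash; this is the kind of simplification gosimple (S1007) suggests for regexp input. A small comparison (hypothetical connection string):

package main

import (
	"fmt"
	"regexp"
)

// Both patterns are identical; the raw (backtick) form just avoids escaping
// the backslash in \S.
var kvEscaped = regexp.MustCompile("(password|sslcert|sslkey|sslmode|sslrootcert)=\\S+ ?")
var kvRaw = regexp.MustCompile(`(password|sslcert|sslkey|sslmode|sslrootcert)=\S+ ?`)

func main() {
	in := "host=localhost user=telegraf password=secret sslmode=disable dbname=metrics"
	fmt.Println(kvEscaped.ReplaceAllString(in, ""))
	fmt.Println(kvRaw.ReplaceAllString(in, ""))
}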
// SanitizedAddress utility function to strip sensitive information from the connection string.
func (p *Service) SanitizedAddress() (sanitizedAddress string, err error) {

View File

@ -161,10 +161,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
queryAddon string
dbVersion int
query string
tagValue string
measName string
timestamp string
columns []string
)
// Retrieving the database version
@ -177,8 +174,6 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
// Query is not run if Database version does not match the query version.
for i := range p.Query {
sqlQuery = p.Query[i].Sqlquery
tagValue = p.Query[i].Tagvalue
timestamp = p.Query[i].Timestamp
if p.Query[i].Measurement != "" {
measName = p.Query[i].Measurement
@ -198,42 +193,48 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
sqlQuery += queryAddon
if p.Query[i].Version <= dbVersion {
rows, err := p.DB.Query(sqlQuery)
if err != nil {
p.Log.Error(err.Error())
continue
}
defer rows.Close()
// grab the column information from the result
if columns, err = rows.Columns(); err != nil {
p.Log.Error(err.Error())
continue
}
p.AdditionalTags = nil
if tagValue != "" {
tagList := strings.Split(tagValue, ",")
for t := range tagList {
p.AdditionalTags = append(p.AdditionalTags, tagList[t])
}
}
p.Timestamp = timestamp
for rows.Next() {
err = p.accRow(measName, rows, acc, columns)
if err != nil {
p.Log.Error(err.Error())
break
}
}
p.gatherMetricsFromQuery(acc, sqlQuery, p.Query[i].Tagvalue, p.Query[i].Timestamp, measName)
}
}
return nil
}
func (p *Postgresql) gatherMetricsFromQuery(acc telegraf.Accumulator, sqlQuery string, tagValue string, timestamp string, measName string) {
var columns []string
rows, err := p.DB.Query(sqlQuery)
if err != nil {
acc.AddError(err)
return
}
defer rows.Close()
// grab the column information from the result
if columns, err = rows.Columns(); err != nil {
acc.AddError(err)
return
}
p.AdditionalTags = nil
if tagValue != "" {
tagList := strings.Split(tagValue, ",")
for t := range tagList {
p.AdditionalTags = append(p.AdditionalTags, tagList[t])
}
}
p.Timestamp = timestamp
for rows.Next() {
err = p.accRow(measName, rows, acc, columns)
if err != nil {
acc.AddError(err)
break
}
}
}
type scanner interface {
Scan(dest ...interface{}) error
}
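Extracting gatherMetricsFromQuery also changes when defer rows.Close() fires: inside the old loop in Gather the deferred calls would not run until Gather returned, so result sets stayed open across queries, whereas in the helper each one is closed as soon as that query is processed. A minimal sketch of the defer-in-loop behaviour (hypothetical resource type):

package main

import "fmt"

type resource struct{ name string }

func (r *resource) Close() { fmt.Println("closed", r.name) }

// leaky defers inside the loop: nothing is closed until the function returns.
func leaky(names []string) {
	for _, n := range names {
		r := &resource{name: n}
		defer r.Close()
		fmt.Println("using", n)
	}
}

// perIteration wraps the body in its own function, so each resource is
// closed at the end of its iteration, mirroring the extracted helper above.
func perIteration(names []string) {
	for _, n := range names {
		func() {
			r := &resource{name: n}
			defer r.Close()
			fmt.Println("using", n)
		}()
	}
}

func main() {
	leaky([]string{"q1", "q2"})
	perIteration([]string{"q1", "q2"})
}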

View File

@ -6,10 +6,10 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/plugins/inputs/postgresql"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func queryRunner(t *testing.T, q query) *testutil.Accumulator {
@ -76,27 +76,27 @@ func TestPostgresqlGeneratesMetricsIntegration(t *testing.T) {
metricsCounted := 0
for _, metric := range intMetrics {
assert.True(t, acc.HasInt64Field("postgresql", metric))
require.True(t, acc.HasInt64Field("postgresql", metric))
metricsCounted++
}
for _, metric := range int32Metrics {
assert.True(t, acc.HasInt32Field("postgresql", metric))
require.True(t, acc.HasInt32Field("postgresql", metric))
metricsCounted++
}
for _, metric := range floatMetrics {
assert.True(t, acc.HasFloatField("postgresql", metric))
require.True(t, acc.HasFloatField("postgresql", metric))
metricsCounted++
}
for _, metric := range stringMetrics {
assert.True(t, acc.HasStringField("postgresql", metric))
require.True(t, acc.HasStringField("postgresql", metric))
metricsCounted++
}
assert.True(t, metricsCounted > 0)
assert.Equal(t, len(floatMetrics)+len(intMetrics)+len(int32Metrics)+len(stringMetrics), metricsCounted)
require.True(t, metricsCounted > 0)
require.Equal(t, len(floatMetrics)+len(intMetrics)+len(int32Metrics)+len(stringMetrics), metricsCounted)
}
func TestPostgresqlQueryOutputTestsIntegration(t *testing.T) {
@ -109,30 +109,30 @@ func TestPostgresqlQueryOutputTestsIntegration(t *testing.T) {
examples := map[string]func(*testutil.Accumulator){
"SELECT 10.0::float AS myvalue": func(acc *testutil.Accumulator) {
v, found := acc.FloatField(measurement, "myvalue")
assert.True(t, found)
assert.Equal(t, 10.0, v)
require.True(t, found)
require.Equal(t, 10.0, v)
},
"SELECT 10.0 AS myvalue": func(acc *testutil.Accumulator) {
v, found := acc.StringField(measurement, "myvalue")
assert.True(t, found)
assert.Equal(t, "10.0", v)
require.True(t, found)
require.Equal(t, "10.0", v)
},
"SELECT 'hello world' AS myvalue": func(acc *testutil.Accumulator) {
v, found := acc.StringField(measurement, "myvalue")
assert.True(t, found)
assert.Equal(t, "hello world", v)
require.True(t, found)
require.Equal(t, "hello world", v)
},
"SELECT true AS myvalue": func(acc *testutil.Accumulator) {
v, found := acc.BoolField(measurement, "myvalue")
assert.True(t, found)
assert.Equal(t, true, v)
require.True(t, found)
require.Equal(t, true, v)
},
"SELECT timestamp'1980-07-23' as ts, true AS myvalue": func(acc *testutil.Accumulator) {
expectedTime := time.Date(1980, 7, 23, 0, 0, 0, 0, time.UTC)
v, found := acc.BoolField(measurement, "myvalue")
assert.True(t, found)
assert.Equal(t, true, v)
assert.True(t, acc.HasTimestamp(measurement, expectedTime))
require.True(t, found)
require.Equal(t, true, v)
require.True(t, acc.HasTimestamp(measurement, expectedTime))
},
}
@ -192,22 +192,22 @@ func TestPostgresqlFieldOutputIntegration(t *testing.T) {
for _, field := range intMetrics {
_, found := acc.Int64Field(measurement, field)
assert.True(t, found, fmt.Sprintf("expected %s to be an integer", field))
require.True(t, found, fmt.Sprintf("expected %s to be an integer", field))
}
for _, field := range int32Metrics {
_, found := acc.Int32Field(measurement, field)
assert.True(t, found, fmt.Sprintf("expected %s to be an int32", field))
require.True(t, found, fmt.Sprintf("expected %s to be an int32", field))
}
for _, field := range floatMetrics {
_, found := acc.FloatField(measurement, field)
assert.True(t, found, fmt.Sprintf("expected %s to be a float64", field))
require.True(t, found, fmt.Sprintf("expected %s to be a float64", field))
}
for _, field := range stringMetrics {
_, found := acc.StringField(measurement, field)
assert.True(t, found, fmt.Sprintf("expected %s to be a str", field))
require.True(t, found, fmt.Sprintf("expected %s to be a str", field))
}
}
@ -256,9 +256,9 @@ func TestPostgresqlIgnoresUnwantedColumnsIntegration(t *testing.T) {
require.NoError(t, p.Start(&acc))
require.NoError(t, acc.GatherError(p.Gather))
assert.NotEmpty(t, p.IgnoredColumns())
require.NotEmpty(t, p.IgnoredColumns())
for col := range p.IgnoredColumns() {
assert.False(t, acc.HasMeasurement(col))
require.False(t, acc.HasMeasurement(col))
}
}

View File

@ -4,7 +4,6 @@ import (
"bufio"
"fmt"
"io"
"log"
"net"
"strconv"
"strings"
@ -16,6 +15,8 @@ import (
type Powerdns struct {
UnixSockets []string
Log telegraf.Logger `toml:"-"`
}
var sampleConfig = `
@ -89,7 +90,7 @@ func (p *Powerdns) gatherServer(address string, acc telegraf.Accumulator) error
metrics := string(buf)
// Process data
fields := parseResponse(metrics)
fields := p.parseResponse(metrics)
// Add server socket as a tag
tags := map[string]string{"server": address}
@ -99,7 +100,7 @@ func (p *Powerdns) gatherServer(address string, acc telegraf.Accumulator) error
return nil
}
func parseResponse(metrics string) map[string]interface{} {
func (p *Powerdns) parseResponse(metrics string) map[string]interface{} {
values := make(map[string]interface{})
s := strings.Split(metrics, ",")
@ -112,8 +113,7 @@ func parseResponse(metrics string) map[string]interface{} {
i, err := strconv.ParseInt(m[1], 10, 64)
if err != nil {
log.Printf("E! [inputs.powerdns] error parsing integer for metric %q: %s",
metric, err.Error())
p.Log.Errorf("error parsing integer for metric %q: %s", metric, err.Error())
continue
}
values[m[0]] = i
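The powerdns changes above drop the global log package in favour of a telegraf.Logger stored in the plugin's Log field (tagged toml:"-" so it is never read from configuration) and turn parseResponse into a method so it can log through that field. A trimmed sketch of the pattern (the key/value splitting here is illustrative, not copied verbatim from the plugin):

package powerdns

import (
	"strconv"
	"strings"

	"github.com/influxdata/telegraf"
)

type Powerdns struct {
	UnixSockets []string
	Log         telegraf.Logger `toml:"-"` // injected by Telegraf, not set by users
}

// parseResponse is a method so parse errors go through the plugin logger
// rather than the global log package.
func (p *Powerdns) parseResponse(metrics string) map[string]interface{} {
	values := make(map[string]interface{})
	for _, metric := range strings.Split(metrics, ",") {
		m := strings.SplitN(metric, "=", 2)
		if len(m) < 2 {
			continue
		}
		i, err := strconv.ParseInt(m[1], 10, 64)
		if err != nil {
			p.Log.Errorf("error parsing integer for metric %q: %s", metric, err.Error())
			continue
		}
		values[m[0]] = i
	}
	return values
}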

View File

@ -7,7 +7,6 @@ import (
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/testutil"
@ -108,12 +107,16 @@ func TestPowerdnsGeneratesMetrics(t *testing.T) {
"meta-cache-size", "qsize-q", "signature-cache-size", "sys-msec", "uptime", "user-msec"}
for _, metric := range intMetrics {
assert.True(t, acc.HasInt64Field("powerdns", metric), metric)
require.True(t, acc.HasInt64Field("powerdns", metric), metric)
}
}
func TestPowerdnsParseMetrics(t *testing.T) {
values := parseResponse(metrics)
p := &Powerdns{
Log: testutil.Logger{},
}
values := p.parseResponse(metrics)
tests := []struct {
key string
@ -173,7 +176,11 @@ func TestPowerdnsParseMetrics(t *testing.T) {
}
func TestPowerdnsParseCorruptMetrics(t *testing.T) {
values := parseResponse(corruptMetrics)
p := &Powerdns{
Log: testutil.Logger{},
}
values := p.parseResponse(corruptMetrics)
tests := []struct {
key string
@ -232,7 +239,11 @@ func TestPowerdnsParseCorruptMetrics(t *testing.T) {
}
func TestPowerdnsParseIntOverflowMetrics(t *testing.T) {
values := parseResponse(intOverflowMetrics)
p := &Powerdns{
Log: testutil.Logger{},
}
values := p.parseResponse(intOverflowMetrics)
tests := []struct {
key string

View File

@ -4,7 +4,6 @@ import (
"bufio"
"errors"
"fmt"
"log"
"math/rand"
"net"
"os"
@ -22,6 +21,8 @@ type PowerdnsRecursor struct {
SocketDir string `toml:"socket_dir"`
SocketMode string `toml:"socket_mode"`
Log telegraf.Logger `toml:"-"`
mode uint32
}
@ -125,7 +126,7 @@ func (p *PowerdnsRecursor) gatherServer(address string, acc telegraf.Accumulator
metrics := string(buf)
// Process data
fields := parseResponse(metrics)
fields := p.parseResponse(metrics)
// Add server socket as a tag
tags := map[string]string{"server": address}
@ -135,7 +136,7 @@ func (p *PowerdnsRecursor) gatherServer(address string, acc telegraf.Accumulator
return conn.Close()
}
func parseResponse(metrics string) map[string]interface{} {
func (p *PowerdnsRecursor) parseResponse(metrics string) map[string]interface{} {
values := make(map[string]interface{})
s := strings.Split(metrics, "\n")
@ -148,8 +149,7 @@ func parseResponse(metrics string) map[string]interface{} {
i, err := strconv.ParseInt(m[1], 10, 64)
if err != nil {
log.Printf("E! [inputs.powerdns_recursor] error parsing integer for metric %q: %s",
metric, err.Error())
p.Log.Errorf("error parsing integer for metric %q: %s", metric, err.Error())
continue
}
values[m[0]] = i

View File

@ -8,9 +8,9 @@ import (
"testing"
"time"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/testutil"
)
var metrics = "all-outqueries\t3591637\nanswers-slow\t36451\nanswers0-1\t177297\nanswers1-10\t1209328\n" +
@ -183,12 +183,16 @@ func TestPowerdnsRecursorGeneratesMetrics(t *testing.T) {
"x-ourtime2-4", "x-ourtime4-8", "x-ourtime8-16"}
for _, metric := range intMetrics {
assert.True(t, acc.HasInt64Field("powerdns_recursor", metric), metric)
require.True(t, acc.HasInt64Field("powerdns_recursor", metric), metric)
}
}
func TestPowerdnsRecursorParseMetrics(t *testing.T) {
values := parseResponse(metrics)
p := &PowerdnsRecursor{
Log: testutil.Logger{},
}
values := p.parseResponse(metrics)
tests := []struct {
key string
@ -302,15 +306,17 @@ func TestPowerdnsRecursorParseMetrics(t *testing.T) {
for _, test := range tests {
value, ok := values[test.key]
if !assert.Truef(t, ok, "Did not find key for metric %s in values", test.key) {
continue
}
require.Truef(t, ok, "Did not find key for metric %s in values", test.key)
require.EqualValuesf(t, value, test.value, "Metric: %s, Expected: %d, actual: %d", test.key, test.value, value)
}
}
func TestPowerdnsRecursorParseCorruptMetrics(t *testing.T) {
values := parseResponse(corruptMetrics)
p := &PowerdnsRecursor{
Log: testutil.Logger{},
}
values := p.parseResponse(corruptMetrics)
tests := []struct {
key string
@ -423,15 +429,17 @@ func TestPowerdnsRecursorParseCorruptMetrics(t *testing.T) {
for _, test := range tests {
value, ok := values[test.key]
if !assert.Truef(t, ok, "Did not find key for metric %s in values", test.key) {
continue
}
require.Truef(t, ok, "Did not find key for metric %s in values", test.key)
require.EqualValuesf(t, value, test.value, "Metric: %s, Expected: %d, actual: %d", test.key, test.value, value)
}
}
func TestPowerdnsRecursorParseIntOverflowMetrics(t *testing.T) {
values := parseResponse(intOverflowMetrics)
p := &PowerdnsRecursor{
Log: testutil.Logger{},
}
values := p.parseResponse(intOverflowMetrics)
tests := []struct {
key string
@ -544,9 +552,7 @@ func TestPowerdnsRecursorParseIntOverflowMetrics(t *testing.T) {
for _, test := range tests {
value, ok := values[test.key]
if !assert.Truef(t, ok, "Did not find key for metric %s in values", test.key) {
continue
}
require.Truef(t, ok, "Did not find key for metric %s in values", test.key)
require.EqualValuesf(t, value, test.value, "Metric: %s, Expected: %d, actual: %d", test.key, test.value, value)
}
}

View File

@ -9,10 +9,10 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestProcesses(t *testing.T) {
@ -27,13 +27,13 @@ func TestProcesses(t *testing.T) {
err := processes.Gather(&acc)
require.NoError(t, err)
assert.True(t, acc.HasInt64Field("processes", "running"))
assert.True(t, acc.HasInt64Field("processes", "sleeping"))
assert.True(t, acc.HasInt64Field("processes", "stopped"))
assert.True(t, acc.HasInt64Field("processes", "total"))
require.True(t, acc.HasInt64Field("processes", "running"))
require.True(t, acc.HasInt64Field("processes", "sleeping"))
require.True(t, acc.HasInt64Field("processes", "stopped"))
require.True(t, acc.HasInt64Field("processes", "total"))
total, ok := acc.Get("processes")
require.True(t, ok)
assert.True(t, total.Fields["total"].(int64) > 0)
require.True(t, total.Fields["total"].(int64) > 0)
}
func TestFromPS(t *testing.T) {

View File

@ -2,11 +2,9 @@ package procstat
import (
"fmt"
"os/user"
"testing"
"os/user"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -19,7 +17,7 @@ func TestGather_RealPatternIntegration(t *testing.T) {
pids, err := pg.Pattern(`procstat`)
require.NoError(t, err)
fmt.Println(pids)
assert.Equal(t, len(pids) > 0, true)
require.Equal(t, len(pids) > 0, true)
}
func TestGather_RealFullPatternIntegration(t *testing.T) {
@ -31,7 +29,7 @@ func TestGather_RealFullPatternIntegration(t *testing.T) {
pids, err := pg.FullPattern(`%procstat%`)
require.NoError(t, err)
fmt.Println(pids)
assert.Equal(t, len(pids) > 0, true)
require.Equal(t, len(pids) > 0, true)
}
func TestGather_RealUserIntegration(t *testing.T) {
@ -45,5 +43,5 @@ func TestGather_RealUserIntegration(t *testing.T) {
pids, err := pg.UID(user.Username)
require.NoError(t, err)
fmt.Println(pids)
assert.Equal(t, len(pids) > 0, true)
require.Equal(t, len(pids) > 0, true)
}

View File

@ -43,13 +43,13 @@ type Proc struct {
}
func NewProc(pid PID) (Process, error) {
process, err := process.NewProcess(int32(pid))
p, err := process.NewProcess(int32(pid))
if err != nil {
return nil, err
}
proc := &Proc{
Process: process,
Process: p,
hasCPUTimes: false,
tags: make(map[string]string),
}

View File

@ -10,11 +10,11 @@ import (
"testing"
"time"
"github.com/influxdata/telegraf/testutil"
"github.com/shirou/gopsutil/cpu"
"github.com/shirou/gopsutil/process"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/testutil"
)
func init() {
@ -51,11 +51,13 @@ MainPID=11408
ControlPID=0
ExecMainPID=11408
`)
//nolint:revive // error code is important for this "test"
os.Exit(0)
}
//nolint:errcheck,revive
fmt.Printf("command not found\n")
//nolint:revive // error code is important for this "test"
os.Exit(1)
}
@ -208,7 +210,7 @@ func TestGather_ProcessName(t *testing.T) {
}
require.NoError(t, acc.GatherError(p.Gather))
assert.Equal(t, "custom_name", acc.TagValue("procstat", "process_name"))
require.Equal(t, "custom_name", acc.TagValue("procstat", "process_name"))
}
func TestGather_NoProcessNameUsesReal(t *testing.T) {
@ -222,7 +224,7 @@ func TestGather_NoProcessNameUsesReal(t *testing.T) {
}
require.NoError(t, acc.GatherError(p.Gather))
assert.True(t, acc.HasTag("procstat", "process_name"))
require.True(t, acc.HasTag("procstat", "process_name"))
}
func TestGather_NoPidTag(t *testing.T) {
@ -234,8 +236,8 @@ func TestGather_NoPidTag(t *testing.T) {
createProcess: newTestProc,
}
require.NoError(t, acc.GatherError(p.Gather))
assert.True(t, acc.HasInt32Field("procstat", "pid"))
assert.False(t, acc.HasTag("procstat", "pid"))
require.True(t, acc.HasInt32Field("procstat", "pid"))
require.False(t, acc.HasTag("procstat", "pid"))
}
func TestGather_PidTag(t *testing.T) {
@ -248,8 +250,8 @@ func TestGather_PidTag(t *testing.T) {
createProcess: newTestProc,
}
require.NoError(t, acc.GatherError(p.Gather))
assert.Equal(t, "42", acc.TagValue("procstat", "pid"))
assert.False(t, acc.HasInt32Field("procstat", "pid"))
require.Equal(t, "42", acc.TagValue("procstat", "pid"))
require.False(t, acc.HasInt32Field("procstat", "pid"))
}
func TestGather_Prefix(t *testing.T) {
@ -262,7 +264,7 @@ func TestGather_Prefix(t *testing.T) {
createProcess: newTestProc,
}
require.NoError(t, acc.GatherError(p.Gather))
assert.True(t, acc.HasInt32Field("procstat", "custom_prefix_num_fds"))
require.True(t, acc.HasInt32Field("procstat", "custom_prefix_num_fds"))
}
func TestGather_Exe(t *testing.T) {
@ -275,7 +277,7 @@ func TestGather_Exe(t *testing.T) {
}
require.NoError(t, acc.GatherError(p.Gather))
assert.Equal(t, exe, acc.TagValue("procstat", "exe"))
require.Equal(t, exe, acc.TagValue("procstat", "exe"))
}
func TestGather_User(t *testing.T) {
@ -289,7 +291,7 @@ func TestGather_User(t *testing.T) {
}
require.NoError(t, acc.GatherError(p.Gather))
assert.Equal(t, user, acc.TagValue("procstat", "user"))
require.Equal(t, user, acc.TagValue("procstat", "user"))
}
func TestGather_Pattern(t *testing.T) {
@ -303,7 +305,7 @@ func TestGather_Pattern(t *testing.T) {
}
require.NoError(t, acc.GatherError(p.Gather))
assert.Equal(t, pattern, acc.TagValue("procstat", "pattern"))
require.Equal(t, pattern, acc.TagValue("procstat", "pattern"))
}
func TestGather_MissingPidMethod(t *testing.T) {
@ -327,7 +329,7 @@ func TestGather_PidFile(t *testing.T) {
}
require.NoError(t, acc.GatherError(p.Gather))
assert.Equal(t, pidfile, acc.TagValue("procstat", "pidfile"))
require.Equal(t, pidfile, acc.TagValue("procstat", "pidfile"))
}
func TestGather_PercentFirstPass(t *testing.T) {
@ -342,8 +344,8 @@ func TestGather_PercentFirstPass(t *testing.T) {
}
require.NoError(t, acc.GatherError(p.Gather))
assert.True(t, acc.HasFloatField("procstat", "cpu_time_user"))
assert.False(t, acc.HasFloatField("procstat", "cpu_usage"))
require.True(t, acc.HasFloatField("procstat", "cpu_time_user"))
require.False(t, acc.HasFloatField("procstat", "cpu_usage"))
}
func TestGather_PercentSecondPass(t *testing.T) {
@ -359,8 +361,8 @@ func TestGather_PercentSecondPass(t *testing.T) {
require.NoError(t, acc.GatherError(p.Gather))
require.NoError(t, acc.GatherError(p.Gather))
assert.True(t, acc.HasFloatField("procstat", "cpu_time_user"))
assert.True(t, acc.HasFloatField("procstat", "cpu_usage"))
require.True(t, acc.HasFloatField("procstat", "cpu_time_user"))
require.True(t, acc.HasFloatField("procstat", "cpu_usage"))
}
func TestGather_systemdUnitPIDs(t *testing.T) {
@ -374,8 +376,8 @@ func TestGather_systemdUnitPIDs(t *testing.T) {
tags := pidsTag.Tags
err := pidsTag.Err
require.NoError(t, err)
assert.Equal(t, []PID{11408}, pids)
assert.Equal(t, "TestGather_systemdUnitPIDs", tags["systemd_unit"])
require.Equal(t, []PID{11408}, pids)
require.Equal(t, "TestGather_systemdUnitPIDs", tags["systemd_unit"])
}
}
@ -400,8 +402,8 @@ func TestGather_cgroupPIDs(t *testing.T) {
tags := pidsTag.Tags
err := pidsTag.Err
require.NoError(t, err)
assert.Equal(t, []PID{1234, 5678}, pids)
assert.Equal(t, td, tags["cgroup"])
require.Equal(t, []PID{1234, 5678}, pids)
require.Equal(t, td, tags["cgroup"])
}
}

View File

@ -3,21 +3,21 @@ package prometheus
import (
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/influxdata/telegraf/testutil"
)
func TestScrapeURLNoAnnotations(t *testing.T) {
p := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{}}
p.Annotations = map[string]string{}
url, err := getScrapeURL(p)
assert.NoError(t, err)
assert.Nil(t, url)
require.NoError(t, err)
require.Nil(t, url)
}
func TestScrapeURLAnnotationsNoScrape(t *testing.T) {
@ -25,56 +25,56 @@ func TestScrapeURLAnnotationsNoScrape(t *testing.T) {
p.Name = "myPod"
p.Annotations = map[string]string{"prometheus.io/scrape": "false"}
url, err := getScrapeURL(p)
assert.NoError(t, err)
assert.Nil(t, url)
require.NoError(t, err)
require.Nil(t, url)
}
func TestScrapeURLAnnotations(t *testing.T) {
p := pod()
p.Annotations = map[string]string{"prometheus.io/scrape": "true"}
url, err := getScrapeURL(p)
assert.NoError(t, err)
assert.Equal(t, "http://127.0.0.1:9102/metrics", url.String())
require.NoError(t, err)
require.Equal(t, "http://127.0.0.1:9102/metrics", url.String())
}
func TestScrapeURLAnnotationsCustomPort(t *testing.T) {
p := pod()
p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/port": "9000"}
url, err := getScrapeURL(p)
assert.NoError(t, err)
assert.Equal(t, "http://127.0.0.1:9000/metrics", url.String())
require.NoError(t, err)
require.Equal(t, "http://127.0.0.1:9000/metrics", url.String())
}
func TestScrapeURLAnnotationsCustomPath(t *testing.T) {
p := pod()
p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "mymetrics"}
url, err := getScrapeURL(p)
assert.NoError(t, err)
assert.Equal(t, "http://127.0.0.1:9102/mymetrics", url.String())
require.NoError(t, err)
require.Equal(t, "http://127.0.0.1:9102/mymetrics", url.String())
}
func TestScrapeURLAnnotationsCustomPathWithSep(t *testing.T) {
p := pod()
p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/mymetrics"}
url, err := getScrapeURL(p)
assert.NoError(t, err)
assert.Equal(t, "http://127.0.0.1:9102/mymetrics", url.String())
require.NoError(t, err)
require.Equal(t, "http://127.0.0.1:9102/mymetrics", url.String())
}
func TestScrapeURLAnnotationsCustomPathWithQueryParameters(t *testing.T) {
p := pod()
p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/v1/agent/metrics?format=prometheus"}
url, err := getScrapeURL(p)
assert.NoError(t, err)
assert.Equal(t, "http://127.0.0.1:9102/v1/agent/metrics?format=prometheus", url.String())
require.NoError(t, err)
require.Equal(t, "http://127.0.0.1:9102/v1/agent/metrics?format=prometheus", url.String())
}
func TestScrapeURLAnnotationsCustomPathWithFragment(t *testing.T) {
p := pod()
p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/v1/agent/metrics#prometheus"}
url, err := getScrapeURL(p)
assert.NoError(t, err)
assert.Equal(t, "http://127.0.0.1:9102/v1/agent/metrics#prometheus", url.String())
require.NoError(t, err)
require.Equal(t, "http://127.0.0.1:9102/v1/agent/metrics#prometheus", url.String())
}
func TestAddPod(t *testing.T) {
@ -83,7 +83,7 @@ func TestAddPod(t *testing.T) {
p := pod()
p.Annotations = map[string]string{"prometheus.io/scrape": "true"}
registerPod(p, prom)
assert.Equal(t, 1, len(prom.kubernetesPods))
require.Equal(t, 1, len(prom.kubernetesPods))
}
func TestAddMultipleDuplicatePods(t *testing.T) {
@ -94,7 +94,7 @@ func TestAddMultipleDuplicatePods(t *testing.T) {
registerPod(p, prom)
p.Name = "Pod2"
registerPod(p, prom)
assert.Equal(t, 1, len(prom.kubernetesPods))
require.Equal(t, 1, len(prom.kubernetesPods))
}
func TestAddMultiplePods(t *testing.T) {
@ -106,7 +106,7 @@ func TestAddMultiplePods(t *testing.T) {
p.Name = "Pod2"
p.Status.PodIP = "127.0.0.2"
registerPod(p, prom)
assert.Equal(t, 2, len(prom.kubernetesPods))
require.Equal(t, 2, len(prom.kubernetesPods))
}
func TestDeletePods(t *testing.T) {
@ -116,7 +116,7 @@ func TestDeletePods(t *testing.T) {
p.Annotations = map[string]string{"prometheus.io/scrape": "true"}
registerPod(p, prom)
unregisterPod(p, prom)
assert.Equal(t, 0, len(prom.kubernetesPods))
require.Equal(t, 0, len(prom.kubernetesPods))
}
func TestPodHasMatchingNamespace(t *testing.T) {
@ -126,12 +126,12 @@ func TestPodHasMatchingNamespace(t *testing.T) {
pod.Name = "Pod1"
pod.Namespace = "default"
shouldMatch := podHasMatchingNamespace(pod, prom)
assert.Equal(t, true, shouldMatch)
require.Equal(t, true, shouldMatch)
pod.Name = "Pod2"
pod.Namespace = "namespace"
shouldNotMatch := podHasMatchingNamespace(pod, prom)
assert.Equal(t, false, shouldNotMatch)
require.Equal(t, false, shouldNotMatch)
}
func TestPodHasMatchingLabelSelector(t *testing.T) {
@ -148,8 +148,8 @@ func TestPodHasMatchingLabelSelector(t *testing.T) {
pod.Labels["label5"] = "label5"
labelSelector, err := labels.Parse(prom.KubernetesLabelSelector)
assert.Equal(t, err, nil)
assert.Equal(t, true, podHasMatchingLabelSelector(pod, labelSelector))
require.Equal(t, err, nil)
require.Equal(t, true, podHasMatchingLabelSelector(pod, labelSelector))
}
func TestPodHasMatchingFieldSelector(t *testing.T) {
@ -160,8 +160,8 @@ func TestPodHasMatchingFieldSelector(t *testing.T) {
pod.Spec.NodeName = "node1000"
fieldSelector, err := fields.ParseSelector(prom.KubernetesFieldSelector)
assert.Equal(t, err, nil)
assert.Equal(t, true, podHasMatchingFieldSelector(pod, fieldSelector))
require.Equal(t, err, nil)
require.Equal(t, true, podHasMatchingFieldSelector(pod, fieldSelector))
}
func TestInvalidFieldSelector(t *testing.T) {
@ -172,7 +172,7 @@ func TestInvalidFieldSelector(t *testing.T) {
pod.Spec.NodeName = "node1000"
_, err := fields.ParseSelector(prom.KubernetesFieldSelector)
assert.NotEqual(t, err, nil)
require.NotEqual(t, err, nil)
}
func pod() *corev1.Pod {

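The selector tests above exercise the two apimachinery parsers directly; a standalone sketch of how labels.Parse and fields.ParseSelector behave (the selector strings are made up for illustration):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Label selectors accept equality and set-based expressions; Parse
	// returns an error only for malformed input.
	ls, err := labels.Parse("app=etcd,component in (control-plane)")
	if err != nil {
		panic(err)
	}
	fmt.Println(ls.Matches(labels.Set{"app": "etcd", "component": "control-plane"})) // true

	// Field selectors allow only =, ==, and != comparisons; anything else
	// fails to parse, which is the failure mode TestInvalidFieldSelector checks.
	fs, err := fields.ParseSelector("spec.nodeName=node1000")
	if err != nil {
		panic(err)
	}
	fmt.Println(fs.Matches(fields.Set{"spec.nodeName": "node1000"})) // true
}
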

@ -10,13 +10,13 @@ import (
"net/http"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/parsers/prometheus/common"
"github.com/matttproud/golang_protobuf_extensions/pbutil"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/parsers/prometheus/common"
)
func Parse(buf []byte, header http.Header, ignoreTimestamp bool) ([]telegraf.Metric, error) {
@ -63,11 +63,13 @@ func Parse(buf []byte, header http.Header, ignoreTimestamp bool) ([]telegraf.Met
// summary metric
fields = makeQuantiles(m)
fields["count"] = float64(m.GetSummary().GetSampleCount())
//nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40
fields["sum"] = float64(m.GetSummary().GetSampleSum())
} else if mf.GetType() == dto.MetricType_HISTOGRAM {
// histogram metric
fields = makeBuckets(m)
fields["count"] = float64(m.GetHistogram().GetSampleCount())
//nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40
fields["sum"] = float64(m.GetHistogram().GetSampleSum())
} else {
// standard metric
@ -106,6 +108,7 @@ func makeQuantiles(m *dto.Metric) map[string]interface{} {
fields := make(map[string]interface{})
for _, q := range m.GetSummary().Quantile {
if !math.IsNaN(q.GetValue()) {
//nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40
fields[fmt.Sprint(q.GetQuantile())] = float64(q.GetValue())
}
}
@ -126,14 +129,17 @@ func getNameAndValue(m *dto.Metric) map[string]interface{} {
fields := make(map[string]interface{})
if m.Gauge != nil {
if !math.IsNaN(m.GetGauge().GetValue()) {
//nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40
fields["gauge"] = float64(m.GetGauge().GetValue())
}
} else if m.Counter != nil {
if !math.IsNaN(m.GetCounter().GetValue()) {
//nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40
fields["counter"] = float64(m.GetCounter().GetValue())
}
} else if m.Untyped != nil {
if !math.IsNaN(m.GetUntyped().GetValue()) {
//nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40
fields["value"] = float64(m.GetUntyped().GetValue())
}
}

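The unconvert suppressions above exist because the generated protobuf getters already return float64, so the explicit conversion looks redundant to the linter even though it is kept deliberately (see the linked unconvert issue). A stripped-down sketch of the same situation, with an illustrative stand-in for the generated type:

package main

import "fmt"

// summary stands in for a generated protobuf message whose getter already
// returns float64, which is why unconvert flags the conversion below.
type summary struct{ sampleSum float64 }

func (s *summary) GetSampleSum() float64 { return s.sampleSum }

func main() {
	s := &summary{sampleSum: 1.8909097205e+07}
	fields := make(map[string]interface{})

	// The conversion is a no-op today; it is kept, and the linter silenced,
	// so the intended field type stays explicit at the assignment site.
	//nolint:unconvert
	fields["sum"] = float64(s.GetSampleSum())

	fmt.Println(fields["sum"])
}
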

@ -6,7 +6,7 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const validUniqueGauge = `# HELP cadvisor_version_info A metric with a constant '1' value labeled by kernel version, OS version, docker version, cadvisor version & cadvisor revision.
@ -45,13 +45,13 @@ apiserver_request_latencies_count{resource="bindings",verb="POST"} 2025
func TestParseValidPrometheus(t *testing.T) {
// Gauge value
metrics, err := Parse([]byte(validUniqueGauge), http.Header{}, false)
assert.NoError(t, err)
assert.Len(t, metrics, 1)
assert.Equal(t, "cadvisor_version_info", metrics[0].Name())
assert.Equal(t, map[string]interface{}{
require.NoError(t, err)
require.Len(t, metrics, 1)
require.Equal(t, "cadvisor_version_info", metrics[0].Name())
require.Equal(t, map[string]interface{}{
"gauge": float64(1),
}, metrics[0].Fields())
assert.Equal(t, map[string]string{
require.Equal(t, map[string]string{
"osVersion": "CentOS Linux 7 (Core)",
"cadvisorRevision": "",
"cadvisorVersion": "",
@ -61,35 +61,35 @@ func TestParseValidPrometheus(t *testing.T) {
// Counter value
metrics, err = Parse([]byte(validUniqueCounter), http.Header{}, false)
assert.NoError(t, err)
assert.Len(t, metrics, 1)
assert.Equal(t, "get_token_fail_count", metrics[0].Name())
assert.Equal(t, map[string]interface{}{
require.NoError(t, err)
require.Len(t, metrics, 1)
require.Equal(t, "get_token_fail_count", metrics[0].Name())
require.Equal(t, map[string]interface{}{
"counter": float64(0),
}, metrics[0].Fields())
assert.Equal(t, map[string]string{}, metrics[0].Tags())
require.Equal(t, map[string]string{}, metrics[0].Tags())
// Summary data
//SetDefaultTags(map[string]string{})
metrics, err = Parse([]byte(validUniqueSummary), http.Header{}, false)
assert.NoError(t, err)
assert.Len(t, metrics, 1)
assert.Equal(t, "http_request_duration_microseconds", metrics[0].Name())
assert.Equal(t, map[string]interface{}{
require.NoError(t, err)
require.Len(t, metrics, 1)
require.Equal(t, "http_request_duration_microseconds", metrics[0].Name())
require.Equal(t, map[string]interface{}{
"0.5": 552048.506,
"0.9": 5.876804288e+06,
"0.99": 5.876804288e+06,
"count": 9.0,
"sum": 1.8909097205e+07,
}, metrics[0].Fields())
assert.Equal(t, map[string]string{"handler": "prometheus"}, metrics[0].Tags())
require.Equal(t, map[string]string{"handler": "prometheus"}, metrics[0].Tags())
// histogram data
metrics, err = Parse([]byte(validUniqueHistogram), http.Header{}, false)
assert.NoError(t, err)
assert.Len(t, metrics, 1)
assert.Equal(t, "apiserver_request_latencies", metrics[0].Name())
assert.Equal(t, map[string]interface{}{
require.NoError(t, err)
require.Len(t, metrics, 1)
require.Equal(t, "apiserver_request_latencies", metrics[0].Name())
require.Equal(t, map[string]interface{}{
"500000": 2000.0,
"count": 2025.0,
"sum": 1.02726334e+08,
@ -101,7 +101,7 @@ func TestParseValidPrometheus(t *testing.T) {
"125000": 1994.0,
"1e+06": 2005.0,
}, metrics[0].Fields())
assert.Equal(t,
require.Equal(t,
map[string]string{"verb": "POST", "resource": "bindings"},
metrics[0].Tags())
}
@ -116,27 +116,27 @@ test_counter{label="test"} 1 %d
// IgnoreTimestamp is false
metrics, err := Parse([]byte(metricsWithTimestamps), http.Header{}, false)
assert.NoError(t, err)
assert.Len(t, metrics, 1)
assert.Equal(t, "test_counter", metrics[0].Name())
assert.Equal(t, map[string]interface{}{
require.NoError(t, err)
require.Len(t, metrics, 1)
require.Equal(t, "test_counter", metrics[0].Name())
require.Equal(t, map[string]interface{}{
"counter": float64(1),
}, metrics[0].Fields())
assert.Equal(t, map[string]string{
require.Equal(t, map[string]string{
"label": "test",
}, metrics[0].Tags())
assert.Equal(t, testTime, metrics[0].Time().UTC())
require.Equal(t, testTime, metrics[0].Time().UTC())
// IgnoreTimestamp is true
metrics, err = Parse([]byte(metricsWithTimestamps), http.Header{}, true)
assert.NoError(t, err)
assert.Len(t, metrics, 1)
assert.Equal(t, "test_counter", metrics[0].Name())
assert.Equal(t, map[string]interface{}{
require.NoError(t, err)
require.Len(t, metrics, 1)
require.Equal(t, "test_counter", metrics[0].Name())
require.Equal(t, map[string]interface{}{
"counter": float64(1),
}, metrics[0].Fields())
assert.Equal(t, map[string]string{
require.Equal(t, map[string]string{
"label": "test",
}, metrics[0].Tags())
assert.WithinDuration(t, time.Now(), metrics[0].Time().UTC(), 5*time.Second)
require.WithinDuration(t, time.Now(), metrics[0].Time().UTC(), 5*time.Second)
}


@ -13,14 +13,15 @@ import (
"sync"
"time"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
parser_v2 "github.com/influxdata/telegraf/plugins/parsers/prometheus"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
parserV2 "github.com/influxdata/telegraf/plugins/parsers/prometheus"
)
const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,*/*;q=0.1`
@ -182,8 +183,7 @@ func (p *Prometheus) Description() string {
}
func (p *Prometheus) Init() error {
// Config proccessing for node scrape scope for monitor_kubernetes_pods
// Config processing for node scrape scope for monitor_kubernetes_pods
p.isNodeScrapeScope = strings.EqualFold(p.PodScrapeScope, "node")
if p.isNodeScrapeScope {
// Need node IP to make cAdvisor call for pod list. Check if set in config and valid IP address
@ -222,8 +222,6 @@ func (p *Prometheus) Init() error {
return nil
}
var ErrProtocolError = errors.New("prometheus protocol error")
func (p *Prometheus) AddressToURL(u *url.URL, address string) *url.URL {
host := address
if u.Port() != "" {
@ -253,12 +251,12 @@ type URLAndAddress struct {
func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) {
allURLs := make(map[string]URLAndAddress)
for _, u := range p.URLs {
URL, err := url.Parse(u)
address, err := url.Parse(u)
if err != nil {
p.Log.Errorf("Could not parse %q, skipping it. Error: %s", u, err.Error())
continue
}
allURLs[URL.String()] = URLAndAddress{URL: URL, OriginalURL: URL}
allURLs[address.String()] = URLAndAddress{URL: address, OriginalURL: address}
}
p.lock.Lock()
@ -273,22 +271,22 @@ func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) {
}
for _, service := range p.KubernetesServices {
URL, err := url.Parse(service)
address, err := url.Parse(service)
if err != nil {
return nil, err
}
resolvedAddresses, err := net.LookupHost(URL.Hostname())
resolvedAddresses, err := net.LookupHost(address.Hostname())
if err != nil {
p.Log.Errorf("Could not resolve %q, skipping it. Error: %s", URL.Host, err.Error())
p.Log.Errorf("Could not resolve %q, skipping it. Error: %s", address.Host, err.Error())
continue
}
for _, resolved := range resolvedAddresses {
serviceURL := p.AddressToURL(URL, resolved)
serviceURL := p.AddressToURL(address, resolved)
allURLs[serviceURL.String()] = URLAndAddress{
URL: serviceURL,
Address: resolved,
OriginalURL: URL,
OriginalURL: address,
}
}
}
@ -401,8 +399,10 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error
var resp *http.Response
if u.URL.Scheme != "unix" {
//nolint:bodyclose // False positive (because of if-else) - body will be closed in `defer`
resp, err = p.client.Do(req)
} else {
//nolint:bodyclose // False positive (because of if-else) - body will be closed in `defer`
resp, err = uClient.Do(req)
}
if err != nil {
@ -420,7 +420,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error
}
if p.MetricVersion == 2 {
parser := parser_v2.Parser{
parser := parserV2.Parser{
Header: resp.Header,
IgnoreTimestamp: p.IgnoreTimestamp,
}

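The bodyclose suppressions above cover a common false positive: the response is assigned in one branch or the other and closed once in a deferred call, a flow the linter cannot always trace. A minimal sketch of that shape, using a placeholder endpoint rather than the plugin's real client setup:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func scrape(overUnixSocket bool) error {
	var resp *http.Response
	var err error

	// Both branches yield the same *http.Response; bodyclose sometimes
	// reports each assignment separately even though the single defer
	// below closes whichever response was produced.
	if !overUnixSocket {
		//nolint:bodyclose // closed in the defer below
		resp, err = http.Get("http://127.0.0.1:9102/metrics") // placeholder URL
	} else {
		//nolint:bodyclose // closed in the defer below
		resp, err = http.Get("http://localhost/metrics") // stand-in for the unix-socket client
	}
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	_, err = io.Copy(io.Discard, resp.Body)
	fmt.Println("scrape finished")
	return err
}

func main() {
	_ = scrape(false)
}
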

@ -10,11 +10,11 @@ import (
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/fields"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
)
const sampleTextFormat = `# HELP go_gc_duration_seconds A summary of the GC invocation durations.
@ -67,12 +67,12 @@ func TestPrometheusGeneratesMetrics(t *testing.T) {
err := acc.GatherError(p.Gather)
require.NoError(t, err)
assert.True(t, acc.HasFloatField("go_gc_duration_seconds", "count"))
assert.True(t, acc.HasFloatField("go_goroutines", "gauge"))
assert.True(t, acc.HasFloatField("test_metric", "value"))
assert.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0)))
assert.False(t, acc.HasTag("test_metric", "address"))
assert.True(t, acc.TagValue("test_metric", "url") == ts.URL+"/metrics")
require.True(t, acc.HasFloatField("go_gc_duration_seconds", "count"))
require.True(t, acc.HasFloatField("go_goroutines", "gauge"))
require.True(t, acc.HasFloatField("test_metric", "value"))
require.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0)))
require.False(t, acc.HasTag("test_metric", "address"))
require.True(t, acc.TagValue("test_metric", "url") == ts.URL+"/metrics")
}
func TestPrometheusGeneratesMetricsWithHostNameTag(t *testing.T) {
@ -95,12 +95,12 @@ func TestPrometheusGeneratesMetricsWithHostNameTag(t *testing.T) {
err := acc.GatherError(p.Gather)
require.NoError(t, err)
assert.True(t, acc.HasFloatField("go_gc_duration_seconds", "count"))
assert.True(t, acc.HasFloatField("go_goroutines", "gauge"))
assert.True(t, acc.HasFloatField("test_metric", "value"))
assert.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0)))
assert.True(t, acc.TagValue("test_metric", "address") == tsAddress)
assert.True(t, acc.TagValue("test_metric", "url") == ts.URL)
require.True(t, acc.HasFloatField("go_gc_duration_seconds", "count"))
require.True(t, acc.HasFloatField("go_goroutines", "gauge"))
require.True(t, acc.HasFloatField("test_metric", "value"))
require.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0)))
require.True(t, acc.TagValue("test_metric", "address") == tsAddress)
require.True(t, acc.TagValue("test_metric", "url") == ts.URL)
}
func TestPrometheusGeneratesMetricsAlthoughFirstDNSFailsIntegration(t *testing.T) {
@ -125,10 +125,10 @@ func TestPrometheusGeneratesMetricsAlthoughFirstDNSFailsIntegration(t *testing.T
err := acc.GatherError(p.Gather)
require.NoError(t, err)
assert.True(t, acc.HasFloatField("go_gc_duration_seconds", "count"))
assert.True(t, acc.HasFloatField("go_goroutines", "gauge"))
assert.True(t, acc.HasFloatField("test_metric", "value"))
assert.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0)))
require.True(t, acc.HasFloatField("go_gc_duration_seconds", "count"))
require.True(t, acc.HasFloatField("go_goroutines", "gauge"))
require.True(t, acc.HasFloatField("test_metric", "value"))
require.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0)))
}
func TestPrometheusGeneratesSummaryMetricsV2(t *testing.T) {
@ -149,10 +149,10 @@ func TestPrometheusGeneratesSummaryMetricsV2(t *testing.T) {
err := acc.GatherError(p.Gather)
require.NoError(t, err)
assert.True(t, acc.TagSetValue("prometheus", "quantile") == "0")
assert.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_sum"))
assert.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_count"))
assert.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics")
require.True(t, acc.TagSetValue("prometheus", "quantile") == "0")
require.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_sum"))
require.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_count"))
require.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics")
}
func TestSummaryMayContainNaN(t *testing.T) {
@ -237,9 +237,9 @@ func TestPrometheusGeneratesGaugeMetricsV2(t *testing.T) {
err := acc.GatherError(p.Gather)
require.NoError(t, err)
assert.True(t, acc.HasFloatField("prometheus", "go_goroutines"))
assert.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics")
assert.True(t, acc.HasTimestamp("prometheus", time.Unix(1490802350, 0)))
require.True(t, acc.HasFloatField("prometheus", "go_goroutines"))
require.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics")
require.True(t, acc.HasTimestamp("prometheus", time.Unix(1490802350, 0)))
}
func TestPrometheusGeneratesMetricsWithIgnoreTimestamp(t *testing.T) {
@ -262,7 +262,7 @@ func TestPrometheusGeneratesMetricsWithIgnoreTimestamp(t *testing.T) {
require.NoError(t, err)
m, _ := acc.Get("test_metric")
assert.WithinDuration(t, time.Now(), m.Time, 5*time.Second)
require.WithinDuration(t, time.Now(), m.Time, 5*time.Second)
}
func TestUnsupportedFieldSelector(t *testing.T) {
@ -271,8 +271,8 @@ func TestUnsupportedFieldSelector(t *testing.T) {
fieldSelector, _ := fields.ParseSelector(prom.KubernetesFieldSelector)
isValid, invalidSelector := fieldSelectorIsSupported(fieldSelector)
assert.Equal(t, false, isValid)
assert.Equal(t, "spec.containerName", invalidSelector)
require.Equal(t, false, isValid)
require.Equal(t, "spec.containerName", invalidSelector)
}
func TestInitConfigErrors(t *testing.T) {


@ -213,30 +213,30 @@ func getVMConfig(px *Proxmox, vmID json.Number, rt ResourceType) (VMConfig, erro
}
func getFields(vmStat VMStat) map[string]interface{} {
memTotal, memUsed, memFree, memUsedPercentage := getByteMetrics(vmStat.TotalMem, vmStat.UsedMem)
swapTotal, swapUsed, swapFree, swapUsedPercentage := getByteMetrics(vmStat.TotalSwap, vmStat.UsedSwap)
diskTotal, diskUsed, diskFree, diskUsedPercentage := getByteMetrics(vmStat.TotalDisk, vmStat.UsedDisk)
memMetrics := getByteMetrics(vmStat.TotalMem, vmStat.UsedMem)
swapMetrics := getByteMetrics(vmStat.TotalSwap, vmStat.UsedSwap)
diskMetrics := getByteMetrics(vmStat.TotalDisk, vmStat.UsedDisk)
return map[string]interface{}{
"status": vmStat.Status,
"uptime": jsonNumberToInt64(vmStat.Uptime),
"cpuload": jsonNumberToFloat64(vmStat.CPULoad),
"mem_used": memUsed,
"mem_total": memTotal,
"mem_free": memFree,
"mem_used_percentage": memUsedPercentage,
"swap_used": swapUsed,
"swap_total": swapTotal,
"swap_free": swapFree,
"swap_used_percentage": swapUsedPercentage,
"disk_used": diskUsed,
"disk_total": diskTotal,
"disk_free": diskFree,
"disk_used_percentage": diskUsedPercentage,
"mem_used": memMetrics.used,
"mem_total": memMetrics.total,
"mem_free": memMetrics.free,
"mem_used_percentage": memMetrics.usedPercentage,
"swap_used": swapMetrics.used,
"swap_total": swapMetrics.total,
"swap_free": swapMetrics.free,
"swap_used_percentage": swapMetrics.usedPercentage,
"disk_used": diskMetrics.used,
"disk_total": diskMetrics.total,
"disk_free": diskMetrics.free,
"disk_used_percentage": diskMetrics.usedPercentage,
}
}
func getByteMetrics(total json.Number, used json.Number) (int64, int64, int64, float64) {
func getByteMetrics(total json.Number, used json.Number) metrics {
int64Total := jsonNumberToInt64(total)
int64Used := jsonNumberToInt64(used)
int64Free := int64Total - int64Used
@ -245,7 +245,12 @@ func getByteMetrics(total json.Number, used json.Number) (int64, int64, int64, f
usedPercentage = float64(int64Used) * 100 / float64(int64Total)
}
return int64Total, int64Used, int64Free, usedPercentage
return metrics{
total: int64Total,
used: int64Used,
free: int64Free,
usedPercentage: usedPercentage,
}
}
func jsonNumberToInt64(value json.Number) int64 {


@ -5,9 +5,9 @@ import (
"strings"
"testing"
"github.com/bmizerany/assert"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/testutil"
)
var nodeSearchDomainTestData = `{"data":{"search":"test.example.com","dns1":"1.0.0.1"}}`
@ -59,7 +59,7 @@ func TestGetNodeSearchDomain(t *testing.T) {
err := getNodeSearchDomain(px)
require.NoError(t, err)
assert.Equal(t, px.nodeSearchDomain, "test.example.com")
require.Equal(t, px.nodeSearchDomain, "test.example.com")
}
func TestGatherLxcData(t *testing.T) {
@ -69,7 +69,7 @@ func TestGatherLxcData(t *testing.T) {
acc := &testutil.Accumulator{}
gatherLxcData(px, acc)
assert.Equal(t, acc.NFields(), 15)
require.Equal(t, acc.NFields(), 15)
testFields := map[string]interface{}{
"status": "running",
"uptime": int64(2078164),
@ -103,7 +103,7 @@ func TestGatherQemuData(t *testing.T) {
acc := &testutil.Accumulator{}
gatherQemuData(px, acc)
assert.Equal(t, acc.NFields(), 15)
require.Equal(t, acc.NFields(), 15)
testFields := map[string]interface{}{
"status": "running",
"uptime": int64(2159739),
@ -139,5 +139,5 @@ func TestGather(t *testing.T) {
require.NoError(t, err)
// Results from both tests above
assert.Equal(t, acc.NFields(), 30)
require.Equal(t, acc.NFields(), 30)
}


@ -67,3 +67,10 @@ type NodeDNS struct {
Searchdomain string `json:"search"`
} `json:"data"`
}
type metrics struct {
total int64
used int64
free int64
usedPercentage float64
}
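
Returning this struct instead of four positional values keeps the call sites in getFields self-describing and removes the risk of swapping results of the same type. A self-contained sketch of the same shape, with a simplified helper mirroring getByteMetrics and illustrative numbers:

package main

import (
	"encoding/json"
	"fmt"
)

type metrics struct {
	total          int64
	used           int64
	free           int64
	usedPercentage float64
}

// byteMetrics mirrors the refactored getByteMetrics: one named result
// instead of four positional return values.
func byteMetrics(total, used json.Number) metrics {
	t, _ := total.Int64() // errors ignored here for brevity
	u, _ := used.Int64()
	m := metrics{total: t, used: u, free: t - u}
	if t != 0 {
		m.usedPercentage = float64(u) * 100 / float64(t)
	}
	return m
}

func main() {
	mem := byteMetrics(json.Number("1024"), json.Number("256"))
	fmt.Printf("mem_used=%d mem_free=%d mem_used_percentage=%.1f\n",
		mem.used, mem.free, mem.usedPercentage)
}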