chore: Fix linter findings for `revive:unused-receiver` in `plugins/inputs/[s-z]` (#16327)
This commit is contained in:
parent d829a5b29c
commit b92700f98a
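Every hunk below applies the same mechanical fix: revive's unused-receiver rule reports a method whose receiver is never referenced in the method body. The sketch below is illustrative only (the decoder type and answer functions are invented for this note, not taken from the commit); it shows the two shapes the fix takes throughout this changeset.

    package example

    type decoder struct{}

    // Before: revive flags the receiver 'd' because the body never uses it.
    //
    //	func (d *decoder) answer() int { return 42 }

    // Fix 1: the method must stay on the type (for example, to satisfy an
    // interface), so only the receiver name is dropped.
    func (*decoder) answer() int { return 42 }

    // Fix 2: nothing needs the type at all, so the method becomes a
    // package-level function and every call site drops the receiver (the
    // route taken below for decodeTCPHeader, parseTres, gatherHealth,
    // Flush, and others).
    func answer() int { return 42 }

Fix 1 leaves interface satisfaction untouched; Fix 2 also simplifies the call sites.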
@@ -372,9 +372,9 @@ func (d *packetDecoder) decodeIPv4Header(r io.Reader) (h ipV4Header, err error)
 	}
 	switch h.Protocol {
 	case ipProtocolTCP:
-		h.ProtocolHeader, err = d.decodeTCPHeader(r)
+		h.ProtocolHeader, err = decodeTCPHeader(r)
 	case ipProtocolUDP:
-		h.ProtocolHeader, err = d.decodeUDPHeader(r)
+		h.ProtocolHeader, err = decodeUDPHeader(r)
 	default:
 		d.debug("Unknown IP protocol: ", h.Protocol)
 	}
@@ -412,9 +412,9 @@ func (d *packetDecoder) decodeIPv6Header(r io.Reader) (h ipV6Header, err error)
 	}
 	switch h.NextHeaderProto {
 	case ipProtocolTCP:
-		h.ProtocolHeader, err = d.decodeTCPHeader(r)
+		h.ProtocolHeader, err = decodeTCPHeader(r)
 	case ipProtocolUDP:
-		h.ProtocolHeader, err = d.decodeUDPHeader(r)
+		h.ProtocolHeader, err = decodeUDPHeader(r)
 	default:
 		// not handled
 		d.debug("Unknown IP protocol: ", h.NextHeaderProto)
@@ -423,7 +423,7 @@ func (d *packetDecoder) decodeIPv6Header(r io.Reader) (h ipV6Header, err error)
 	}

 // https://en.wikipedia.org/wiki/Transmission_Control_Protocol#TCP_segment_structure
-func (d *packetDecoder) decodeTCPHeader(r io.Reader) (h tcpHeader, err error) {
+func decodeTCPHeader(r io.Reader) (h tcpHeader, err error) {
 	if err := read(r, &h.SourcePort, "SourcePort"); err != nil {
 		return h, err
 	}
@@ -461,7 +461,7 @@ func (d *packetDecoder) decodeTCPHeader(r io.Reader) (h tcpHeader, err error) {
 		return h, err
 	}

-func (d *packetDecoder) decodeUDPHeader(r io.Reader) (h udpHeader, err error) {
+func decodeUDPHeader(r io.Reader) (h udpHeader, err error) {
 	if err := read(r, &h.SourcePort, "SourcePort"); err != nil {
 		return h, err
 	}

@@ -15,8 +15,7 @@ func TestUDPHeader(t *testing.T) {
 		0x00, 0x00, // checksum
 	})

-	dc := newDecoder()
-	actual, err := dc.decodeUDPHeader(octets)
+	actual, err := decodeUDPHeader(octets)
 	require.NoError(t, err)

 	expected := udpHeader{
@@ -36,11 +35,9 @@ func BenchmarkUDPHeader(b *testing.B) {
 		0x00, 0x00, // checksum
 	})

-	dc := newDecoder()
-
 	b.ResetTimer()
 	for n := 0; n < b.N; n++ {
-		_, err := dc.decodeUDPHeader(octets)
+		_, err := decodeUDPHeader(octets)
 		require.NoError(b, err)
 	}
 }

@@ -84,7 +84,7 @@ func (s *SFlow) Start(acc telegraf.Accumulator) error {
 }

 // Gather is a NOOP for sFlow as it receives, asynchronously, sFlow network packets
-func (s *SFlow) Gather(_ telegraf.Accumulator) error {
+func (*SFlow) Gather(telegraf.Accumulator) error {
 	return nil
 }
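A side note on the hunk above: Go allows parameters as well as receivers to be unnamed, which is why `Gather(_ telegraf.Accumulator)` can shed even the `_` placeholder. A minimal sketch, using a stand-in Input interface rather than Telegraf's real definition:

    package example

    type Accumulator interface{}

    type Input interface {
        Gather(Accumulator) error
    }

    type noopPlugin struct{}

    // Both the receiver and the parameter are unnamed: this is legal Go,
    // and it leaves nothing for revive's unused-receiver rule to report.
    func (*noopPlugin) Gather(Accumulator) error { return nil }

    // Compile-time check that noopPlugin still satisfies Input.
    var _ Input = (*noopPlugin)(nil)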
@@ -35,10 +35,6 @@ func (*SlabStats) SampleConfig() string {
 	return sampleConfig
 }

-func (ss *SlabStats) Init() error {
-	return nil
-}
-
 func (ss *SlabStats) Gather(acc telegraf.Accumulator) error {
 	fields, err := ss.getSlabStats()
 	if err != nil {

@@ -103,7 +103,7 @@ func (s *Slurm) Init() error {
 	return nil
 }

-func (s *Slurm) parseTres(tres string) map[string]interface{} {
+func parseTres(tres string) map[string]interface{} {
 	tresKVs := strings.Split(tres, ",")
 	parsedValues := make(map[string]interface{}, len(tresKVs))

@@ -258,7 +258,7 @@ func (s *Slurm) gatherJobsMetrics(acc telegraf.Accumulator, jobs []goslurm.V0038
 			records["time_limit"] = *int64Ptr
 		}
 		if strPtr, ok := jobs[i].GetTresReqStrOk(); ok {
-			for k, v := range s.parseTres(*strPtr) {
+			for k, v := range parseTres(*strPtr) {
 				records["tres_"+k] = v
 			}
 		}
@@ -302,12 +302,12 @@ func (s *Slurm) gatherNodesMetrics(acc telegraf.Accumulator, nodes []goslurm.V00
 			records["alloc_memory"] = *int64Ptr
 		}
 		if strPtr, ok := node.GetTresOk(); ok {
-			for k, v := range s.parseTres(*strPtr) {
+			for k, v := range parseTres(*strPtr) {
 				records["tres_"+k] = v
 			}
 		}
 		if strPtr, ok := node.GetTresUsedOk(); ok {
-			for k, v := range s.parseTres(*strPtr) {
+			for k, v := range parseTres(*strPtr) {
 				records["tres_used_"+k] = v
 			}
 		}
@@ -348,7 +348,7 @@ func (s *Slurm) gatherPartitionsMetrics(acc telegraf.Accumulator, partitions []g
 			records["nodes"] = *strPtr
 		}
 		if strPtr, ok := partition.GetTresOk(); ok {
-			for k, v := range s.parseTres(*strPtr) {
+			for k, v := range parseTres(*strPtr) {
 				records["tres_"+k] = v
 			}
 		}

@@ -56,7 +56,7 @@ func (tsc *testSNMPConnection) Walk(oid string, wf gosnmp.WalkFunc) error {
 	}
 	return nil
 }
-func (tsc *testSNMPConnection) Reconnect() error {
+func (*testSNMPConnection) Reconnect() error {
 	return nil
 }

@@ -8,7 +8,7 @@ import (
 type gosmiTranslator struct {
 }

-func (t *gosmiTranslator) lookup(oid string) (snmp.MibEntry, error) {
+func (*gosmiTranslator) lookup(oid string) (snmp.MibEntry, error) {
 	return snmp.TrapLookup(oid)
 }

@@ -75,7 +75,7 @@ func (*SnmpTrap) SampleConfig() string {
 	return sampleConfig
 }

-func (s *SnmpTrap) Gather(_ telegraf.Accumulator) error {
+func (*SnmpTrap) Gather(telegraf.Accumulator) error {
 	return nil
 }

@@ -44,7 +44,7 @@ func (sl *SocketListener) Init() error {
 	return nil
 }

-func (sl *SocketListener) Gather(_ telegraf.Accumulator) error {
+func (*SocketListener) Gather(telegraf.Accumulator) error {
 	return nil
 }

@@ -68,7 +68,7 @@ func (s *Solr) Start(_ telegraf.Accumulator) error {
 	return nil
 }

-func (s *Solr) Stop() {}
+func (*Solr) Stop() {}

 func (s *Solr) Gather(acc telegraf.Accumulator) error {
 	var wg sync.WaitGroup

@@ -232,7 +232,7 @@ func (s *SQLServer) Gather(acc telegraf.Accumulator) error {

 			if s.HealthMetric {
 				mutex.Lock()
-				s.gatherHealth(healthMetrics, dsn, queryError)
+				gatherHealth(healthMetrics, dsn, queryError)
 				mutex.Unlock()
 			}

@@ -425,7 +425,7 @@ func (s *SQLServer) accRow(query Query, acc telegraf.Accumulator, row scanner) e
 }

 // gatherHealth stores info about any query errors in the healthMetrics map
-func (s *SQLServer) gatherHealth(healthMetrics map[string]*HealthMetric, serv string, queryError error) {
+func gatherHealth(healthMetrics map[string]*HealthMetric, serv string, queryError error) {
 	if healthMetrics[serv] == nil {
 		healthMetrics[serv] = &HealthMetric{}
 	}

@@ -572,7 +572,7 @@ func (s *stackdriver) gatherTimeSeries(

 		if tsDesc.ValueType == metricpb.MetricDescriptor_DISTRIBUTION {
 			dist := p.Value.GetDistributionValue()
-			if err := s.addDistribution(dist, tags, ts, grouper, tsConf); err != nil {
+			if err := addDistribution(dist, tags, ts, grouper, tsConf); err != nil {
 				return err
 			}
 		} else {
@@ -666,10 +666,8 @@ func NewBucket(dist *distributionpb.Distribution) (buckets, error) {
 		return nil, errors.New("no buckets available")
 	}

-// AddDistribution adds metrics from a distribution value type.
-func (s *stackdriver) addDistribution(dist *distributionpb.Distribution, tags map[string]string, ts time.Time,
-	grouper *lockedSeriesGrouper, tsConf *timeSeriesConf,
-) error {
+// addDistribution adds metrics from a distribution value type.
+func addDistribution(dist *distributionpb.Distribution, tags map[string]string, ts time.Time, grouper *lockedSeriesGrouper, tsConf *timeSeriesConf) error {
 	field := tsConf.fieldKey
 	name := tsConf.measurement

@@ -49,11 +49,7 @@ type supervisorInfo struct {
 //go:embed sample.conf
 var sampleConfig string

-func (s *Supervisor) Description() string {
-	return "Gather info about processes state, that running under supervisor using its XML-RPC API"
-}
-
-func (s *Supervisor) SampleConfig() string {
+func (*Supervisor) SampleConfig() string {
 	return sampleConfig
 }

@@ -344,7 +344,7 @@ func (s *Suricata) parse(acc telegraf.Accumulator, sjson []byte) error {

 // Gather measures and submits one full set of telemetry to Telegraf.
 // Not used here, submission is completely input-driven.
-func (s *Suricata) Gather(_ telegraf.Accumulator) error {
+func (*Suricata) Gather(telegraf.Accumulator) error {
 	return nil
 }

@@ -199,7 +199,7 @@ func withCLocale(cmd *exec.Cmd) *exec.Cmd {
 //
 // and parses the output to add it to the telegraf.Accumulator acc.
 func (s *Sysstat) parse(acc telegraf.Accumulator, option, tmpfile string, ts time.Time) error {
-	cmd := execCommand(s.Sadf, s.sadfOptions(option, tmpfile)...)
+	cmd := execCommand(s.Sadf, sadfOptions(option, tmpfile)...)
 	cmd = withCLocale(cmd)
 	stdout, err := cmd.StdoutPipe()
 	if err != nil {
@@ -282,7 +282,7 @@ func (s *Sysstat) parse(acc telegraf.Accumulator, option, tmpfile string, ts tim
 }

 // sadfOptions creates the correct options for the sadf utility.
-func (s *Sysstat) sadfOptions(activityOption, tmpfile string) []string {
+func sadfOptions(activityOption, tmpfile string) []string {
 	options := []string{
 		"-p",
 		"--",

@@ -45,7 +45,7 @@ type SystemPS struct {

 type SystemPSDisk struct{}

-func (s *SystemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.TimesStat, error) {
+func (*SystemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.TimesStat, error) {
 	var cpuTimes []cpu.TimesStat
 	if perCPU {
 		perCPUTimes, err := cpu.Times(true)
@@ -175,23 +175,23 @@ partitionRange:
 	return usage, partitions, nil
 }

-func (s *SystemPS) NetProto() ([]net.ProtoCountersStat, error) {
+func (*SystemPS) NetProto() ([]net.ProtoCountersStat, error) {
 	return net.ProtoCounters(nil)
 }

-func (s *SystemPS) NetIO() ([]net.IOCountersStat, error) {
+func (*SystemPS) NetIO() ([]net.IOCountersStat, error) {
 	return net.IOCounters(true)
 }

-func (s *SystemPS) NetConnections() ([]net.ConnectionStat, error) {
+func (*SystemPS) NetConnections() ([]net.ConnectionStat, error) {
 	return net.Connections("all")
 }

-func (s *SystemPS) NetConntrack(perCPU bool) ([]net.ConntrackStat, error) {
+func (*SystemPS) NetConntrack(perCPU bool) ([]net.ConntrackStat, error) {
 	return net.ConntrackStats(perCPU)
 }

-func (s *SystemPS) DiskIO(names []string) (map[string]disk.IOCountersStat, error) {
+func (*SystemPS) DiskIO(names []string) (map[string]disk.IOCountersStat, error) {
 	m, err := disk.IOCounters(names...)
 	if errors.Is(err, internal.ErrNotImplemented) {
 		return nil, nil
@@ -200,26 +200,26 @@ func (s *SystemPS) DiskIO(names []string) (map[string]disk.IOCountersStat, error
 	return m, err
 }

-func (s *SystemPS) VMStat() (*mem.VirtualMemoryStat, error) {
+func (*SystemPS) VMStat() (*mem.VirtualMemoryStat, error) {
 	return mem.VirtualMemory()
 }

-func (s *SystemPS) SwapStat() (*mem.SwapMemoryStat, error) {
+func (*SystemPS) SwapStat() (*mem.SwapMemoryStat, error) {
 	return mem.SwapMemory()
 }

-func (s *SystemPSDisk) Partitions(all bool) ([]disk.PartitionStat, error) {
+func (*SystemPSDisk) Partitions(all bool) ([]disk.PartitionStat, error) {
 	return disk.Partitions(all)
 }

-func (s *SystemPSDisk) OSGetenv(key string) string {
+func (*SystemPSDisk) OSGetenv(key string) string {
 	return os.Getenv(key)
 }

-func (s *SystemPSDisk) OSStat(name string) (os.FileInfo, error) {
+func (*SystemPSDisk) OSStat(name string) (os.FileInfo, error) {
 	return os.Stat(name)
 }

-func (s *SystemPSDisk) PSDiskUsage(path string) (*disk.UsageStat, error) {
+func (*SystemPSDisk) PSDiskUsage(path string) (*disk.UsageStat, error) {
 	return disk.Usage(path)
 }

@@ -34,7 +34,7 @@ type Tacacs struct {
 //go:embed sample.conf
 var sampleConfig string

-func (t *Tacacs) SampleConfig() string {
+func (*Tacacs) SampleConfig() string {
 	return sampleConfig
 }

@@ -74,7 +74,7 @@ func (t *Tacacs) Init() error {
 	return nil
 }

-func (t *Tacacs) AuthenReplyToString(code uint8) string {
+func AuthenReplyToString(code uint8) string {
 	switch code {
 	case tacplus.AuthenStatusPass:
 		return `AuthenStatusPass`
@@ -157,7 +157,7 @@ func (t *Tacacs) pollServer(acc telegraf.Accumulator, client *tacplus.Client) er
 	defer session.Close()
 	if reply.Status != tacplus.AuthenStatusGetUser {
 		fields["responsetime_ms"] = time.Since(startTime).Milliseconds()
-		fields["response_status"] = t.AuthenReplyToString(reply.Status)
+		fields["response_status"] = AuthenReplyToString(reply.Status)
 		acc.AddFields("tacacs", fields, tags)
 		return nil
 	}
@@ -174,7 +174,7 @@ func (t *Tacacs) pollServer(acc telegraf.Accumulator, client *tacplus.Client) er
 	}
 	if reply.Status != tacplus.AuthenStatusGetPass {
 		fields["responsetime_ms"] = time.Since(startTime).Milliseconds()
-		fields["response_status"] = t.AuthenReplyToString(reply.Status)
+		fields["response_status"] = AuthenReplyToString(reply.Status)
 		acc.AddFields("tacacs", fields, tags)
 		return nil
 	}
@@ -191,13 +191,13 @@ func (t *Tacacs) pollServer(acc telegraf.Accumulator, client *tacplus.Client) er
 	}
 	if reply.Status != tacplus.AuthenStatusPass {
 		fields["responsetime_ms"] = time.Since(startTime).Milliseconds()
-		fields["response_status"] = t.AuthenReplyToString(reply.Status)
+		fields["response_status"] = AuthenReplyToString(reply.Status)
 		acc.AddFields("tacacs", fields, tags)
 		return nil
 	}

 	fields["responsetime_ms"] = time.Since(startTime).Milliseconds()
-	fields["response_status"] = t.AuthenReplyToString(reply.Status)
+	fields["response_status"] = AuthenReplyToString(reply.Status)
 	acc.AddFields("tacacs", fields, tags)
 	return nil
 }

@@ -109,7 +109,7 @@ func (m *Multiline) ProcessLine(text string, buffer *bytes.Buffer) string {
 	return text
 }

-func (m *Multiline) Flush(buffer *bytes.Buffer) string {
+func Flush(buffer *bytes.Buffer) string {
 	if buffer.Len() == 0 {
 		return ""
 	}

@@ -87,30 +87,17 @@ func TestMultilineIsDisabled(t *testing.T) {
 }

 func TestMultilineFlushEmpty(t *testing.T) {
-	c := &MultilineConfig{
-		Pattern:        "^=>",
-		MatchWhichLine: Previous,
-	}
-	m, err := c.NewMultiline()
-	require.NoError(t, err, "Configuration was OK.")
 	var buffer bytes.Buffer
-	text := m.Flush(&buffer)
+	text := Flush(&buffer)

 	require.Empty(t, text)
 }

 func TestMultilineFlush(t *testing.T) {
-	c := &MultilineConfig{
-		Pattern:        "^=>",
-		MatchWhichLine: Previous,
-	}
-	m, err := c.NewMultiline()
-	require.NoError(t, err, "Configuration was OK.")
 	var buffer bytes.Buffer
 	buffer.WriteString("foo")

-	text := m.Flush(&buffer)
+	text := Flush(&buffer)
 	require.Equal(t, "foo", text)
 	require.Zero(t, buffer.Len())
 }
@@ -302,7 +289,7 @@ func TestMultilineQuoted(t *testing.T) {
 		}
 		result = append(result, text)
 	}
-	if text := m.Flush(&buffer); text != "" {
+	if text := Flush(&buffer); text != "" {
 		result = append(result, text)
 	}

@@ -364,7 +351,7 @@ func TestMultilineQuotedError(t *testing.T) {
 		}
 		result = append(result, text)
 	}
-	if text := m.Flush(&buffer); text != "" {
+	if text := Flush(&buffer); text != "" {
 		result = append(result, text)
 	}

@@ -438,7 +425,7 @@ java.lang.ArithmeticException: / by zero
 		}
 		result = append(result, text)
 	}
-	if text := m.Flush(&buffer); text != "" {
+	if text := Flush(&buffer); text != "" {
 		result = append(result, text)
 	}

@@ -311,7 +311,7 @@ func (t *Tail) receiver(parser telegraf.Parser, tailer *tail.Tail) {
 			}
 		}
 		if line == nil || !channelOpen || !tailerOpen {
-			if text += t.multiline.Flush(&buffer); text == "" {
+			if text += Flush(&buffer); text == "" {
 				if !channelOpen {
 					return
 				}

@@ -76,18 +76,14 @@ func (t *Twemproxy) processStat(
 			if data, ok := poolStat.(map[string]interface{}); ok {
 				poolTags := copyTags(tags)
 				poolTags["pool"] = pool
-				t.processPool(acc, poolTags, data)
+				processPool(acc, poolTags, data)
 			}
 		}
 	}
 }

 // Process pool data in Twemproxy stats
-func (t *Twemproxy) processPool(
-	acc telegraf.Accumulator,
-	tags map[string]string,
-	data map[string]interface{},
-) {
+func processPool(acc telegraf.Accumulator, tags map[string]string, data map[string]interface{}) {
 	serverTags := make(map[string]map[string]string)

 	fields := make(map[string]interface{})
@@ -103,7 +99,7 @@ func (t *Twemproxy) processPool(
 				serverTags[key] = copyTags(tags)
 				serverTags[key]["server"] = key
 			}
-			t.processServer(acc, serverTags[key], data)
+			processServer(acc, serverTags[key], data)
 		}
 	}
 }
@@ -111,11 +107,7 @@ func (t *Twemproxy) processPool(
 }

 // Process backend server(redis/memcached) stats
-func (t *Twemproxy) processServer(
-	acc telegraf.Accumulator,
-	tags map[string]string,
-	data map[string]interface{},
-) {
+func processServer(acc telegraf.Accumulator, tags map[string]string, data map[string]interface{}) {
 	fields := make(map[string]interface{})
 	for key, value := range data {
 		if val, ok := value.(float64); ok {

@@ -126,7 +126,7 @@ func (u *Upsd) gatherUps(acc telegraf.Accumulator, upsname string, variables []n
 	}

 	// For compatibility with the apcupsd plugin's output we map the status string status into a bit-format
-	status := u.mapStatus(metrics, tags)
+	status := mapStatus(metrics, tags)

 	timeLeftS, err := internal.ToFloat64(metrics["battery.runtime"])
 	if err != nil {
@@ -190,7 +190,7 @@ func (u *Upsd) gatherUps(acc telegraf.Accumulator, upsname string, variables []n
 	acc.AddFields("upsd", fields, tags)
 }

-func (u *Upsd) mapStatus(metrics map[string]interface{}, tags map[string]string) uint64 {
+func mapStatus(metrics map[string]interface{}, tags map[string]string) uint64 {
 	status := uint64(0)
 	statusString := fmt.Sprintf("%v", metrics["ups.status"])
 	statuses := strings.Fields(statusString)

@@ -106,12 +106,12 @@ func (u *Uwsgi) gatherServer(acc telegraf.Accumulator, address *url.URL) error {
 		return fmt.Errorf("failed to decode json payload from %q: %w", address.String(), err)
 	}

-	u.gatherStatServer(acc, &s)
+	gatherStatServer(acc, &s)

 	return err
 }

-func (u *Uwsgi) gatherStatServer(acc telegraf.Accumulator, s *StatsServer) {
+func gatherStatServer(acc telegraf.Accumulator, s *StatsServer) {
 	fields := map[string]interface{}{
 		"listen_queue":        s.ListenQueue,
 		"listen_queue_errors": s.ListenQueueErrors,
@@ -128,12 +128,12 @@ func (u *Uwsgi) gatherStatServer(acc telegraf.Accumulator, s *StatsServer) {
 	}
 	acc.AddFields("uwsgi_overview", fields, tags)

-	u.gatherWorkers(acc, s)
-	u.gatherApps(acc, s)
-	u.gatherCores(acc, s)
+	gatherWorkers(acc, s)
+	gatherApps(acc, s)
+	gatherCores(acc, s)
 }

-func (u *Uwsgi) gatherWorkers(acc telegraf.Accumulator, s *StatsServer) {
+func gatherWorkers(acc telegraf.Accumulator, s *StatsServer) {
 	for _, w := range s.Workers {
 		fields := map[string]interface{}{
 			"requests": w.Requests,
@@ -162,7 +162,7 @@ func (u *Uwsgi) gatherWorkers(acc telegraf.Accumulator, s *StatsServer) {
 	}
 }

-func (u *Uwsgi) gatherApps(acc telegraf.Accumulator, s *StatsServer) {
+func gatherApps(acc telegraf.Accumulator, s *StatsServer) {
 	for _, w := range s.Workers {
 		for _, a := range w.Apps {
 			fields := map[string]interface{}{
@@ -181,7 +181,7 @@ func (u *Uwsgi) gatherApps(acc telegraf.Accumulator, s *StatsServer) {
 		}
 	}

-func (u *Uwsgi) gatherCores(acc telegraf.Accumulator, s *StatsServer) {
+func gatherCores(acc telegraf.Accumulator, s *StatsServer) {
 	for _, w := range s.Workers {
 		for _, c := range w.Cores {
 			fields := map[string]interface{}{

@@ -70,7 +70,7 @@ func (n *Vault) Init() error {
 	return nil
 }

-func (n *Vault) Start(_ telegraf.Accumulator) error {
+func (*Vault) Start(telegraf.Accumulator) error {
 	return nil
 }

@@ -72,7 +72,7 @@ func (e *Endpoint) collectVsanPerCluster(ctx context.Context, clusterRef *object
 	metrics map[string]string, acc telegraf.Accumulator) {
 	// Construct a map for cmmds
 	cluster := object.NewClusterComputeResource(vimClient, clusterRef.ref)
-	if !e.vsanEnabled(ctx, cluster) {
+	if !vsanEnabled(ctx, cluster) {
 		acc.AddError(fmt.Errorf("[vSAN] Fail to identify vSAN for cluster %s. Skipping", clusterRef.name))
 		return
 	}
@@ -103,7 +103,7 @@ func (e *Endpoint) collectVsanPerCluster(ctx context.Context, clusterRef *object
 }

 // vsanEnabled returns True if vSAN is enabled, otherwise False
-func (e *Endpoint) vsanEnabled(ctx context.Context, clusterObj *object.ClusterComputeResource) bool {
+func vsanEnabled(ctx context.Context, clusterObj *object.ClusterComputeResource) bool {
 	config, err := clusterObj.Configuration(ctx)
 	if err != nil {
 		return false

@@ -21,7 +21,7 @@ type MandrillWebhook struct {
 }

 func (md *MandrillWebhook) Register(router *mux.Router, acc telegraf.Accumulator, log telegraf.Logger) {
-	router.HandleFunc(md.Path, md.returnOK).Methods("HEAD")
+	router.HandleFunc(md.Path, returnOK).Methods("HEAD")
 	router.HandleFunc(md.Path, md.eventHandler).Methods("POST")

 	md.log = log
@@ -29,7 +29,7 @@ func (md *MandrillWebhook) Register(router *mux.Router, acc telegraf.Accumulator
 	md.acc = acc
 }

-func (md *MandrillWebhook) returnOK(w http.ResponseWriter, _ *http.Request) {
+func returnOK(w http.ResponseWriter, _ *http.Request) {
 	w.WriteHeader(http.StatusOK)
 }

@@ -25,19 +25,18 @@ func postWebhooks(t *testing.T, md *MandrillWebhook, eventBody string) *httptest
 	return w
 }

-func headRequest(md *MandrillWebhook, t *testing.T) *httptest.ResponseRecorder {
+func headRequest(t *testing.T) *httptest.ResponseRecorder {
 	req, err := http.NewRequest("HEAD", "/mandrill", strings.NewReader(""))
 	require.NoError(t, err)
 	w := httptest.NewRecorder()

-	md.returnOK(w, req)
+	returnOK(w, req)

 	return w
 }

 func TestHead(t *testing.T) {
-	md := &MandrillWebhook{Path: "/mandrill"}
-	resp := headRequest(md, t)
+	resp := headRequest(t)
 	if resp.Code != http.StatusOK {
 		t.Errorf("HEAD returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK)
 	}

@@ -65,7 +65,7 @@ func (*Webhooks) SampleConfig() string {
 	return sampleConfig
 }

-func (wb *Webhooks) Gather(_ telegraf.Accumulator) error {
+func (*Webhooks) Gather(telegraf.Accumulator) error {
 	return nil
 }

@@ -17,12 +17,14 @@ type WinPerfCounters struct {
 	Log telegraf.Logger `toml:"-"`
 }

+func (*WinPerfCounters) SampleConfig() string { return sampleConfig }
+
 func (w *WinPerfCounters) Init() error {
 	w.Log.Warn("current platform is not supported")
 	return nil
 }
-func (w *WinPerfCounters) SampleConfig() string { return sampleConfig }

-func (w *WinPerfCounters) Gather(_ telegraf.Accumulator) error { return nil }
+func (*WinPerfCounters) Gather(telegraf.Accumulator) error { return nil }

 func init() {
 	inputs.Add("win_perf_counters", func() telegraf.Input {

@@ -17,12 +17,13 @@ type WinServices struct {
 	Log telegraf.Logger `toml:"-"`
 }

+func (*WinServices) SampleConfig() string { return sampleConfig }
+
 func (w *WinServices) Init() error {
-	w.Log.Warn("current platform is not supported")
+	w.Log.Warn("Current platform is not supported")
 	return nil
 }
-func (w *WinServices) SampleConfig() string { return sampleConfig }
-
-func (w *WinServices) Gather(_ telegraf.Accumulator) error { return nil }
+func (*WinServices) Gather(telegraf.Accumulator) error { return nil }

 func init() {
 	inputs.Add("win_services", func() telegraf.Input {

@@ -57,10 +57,10 @@ func (wg *Wireguard) Gather(acc telegraf.Accumulator) error {
 	}

 	for _, device := range devices {
-		wg.gatherDeviceMetrics(acc, device)
+		gatherDeviceMetrics(acc, device)

 		for _, peer := range device.Peers {
-			wg.gatherDevicePeerMetrics(acc, device, peer)
+			gatherDevicePeerMetrics(acc, device, peer)
 		}
 	}

@@ -89,7 +89,7 @@ func (wg *Wireguard) enumerateDevices() ([]*wgtypes.Device, error) {
 	return devices, nil
 }

-func (wg *Wireguard) gatherDeviceMetrics(acc telegraf.Accumulator, device *wgtypes.Device) {
+func gatherDeviceMetrics(acc telegraf.Accumulator, device *wgtypes.Device) {
 	fields := map[string]interface{}{
 		"listen_port":   device.ListenPort,
 		"firewall_mark": device.FirewallMark,
@@ -108,7 +108,7 @@ func (wg *Wireguard) gatherDeviceMetrics(acc telegraf.Accumulator, device *wgtyp
 	acc.AddGauge(measurementDevice, gauges, tags)
 }

-func (wg *Wireguard) gatherDevicePeerMetrics(acc telegraf.Accumulator, device *wgtypes.Device, peer wgtypes.Peer) {
+func gatherDevicePeerMetrics(acc telegraf.Accumulator, device *wgtypes.Device, peer wgtypes.Peer) {
 	fields := map[string]interface{}{
 		"persistent_keepalive_interval_ns": peer.PersistentKeepaliveInterval.Nanoseconds(),
 		"protocol_version":                 peer.ProtocolVersion,

@@ -12,9 +12,6 @@ import (
 )

 func TestWireguard_gatherDeviceMetrics(t *testing.T) {
-	var acc testutil.Accumulator
-
-	wg := &Wireguard{}
 	device := &wgtypes.Device{
 		Name: "wg0",
 		Type: wgtypes.LinuxKernel,
@@ -22,7 +19,6 @@ func TestWireguard_gatherDeviceMetrics(t *testing.T) {
 		FirewallMark: 2,
 		Peers:        []wgtypes.Peer{{}, {}},
 	}
-
 	expectFields := map[string]interface{}{
 		"listen_port":   1,
 		"firewall_mark": 2,
@@ -35,7 +31,8 @@ func TestWireguard_gatherDeviceMetrics(t *testing.T) {
 		"type": "linux_kernel",
 	}

-	wg.gatherDeviceMetrics(&acc, device)
+	var acc testutil.Accumulator
+	gatherDeviceMetrics(&acc, device)

 	require.Equal(t, 3, acc.NFields())
 	acc.AssertDoesNotContainMeasurement(t, measurementPeer)
@@ -44,11 +41,9 @@ func TestWireguard_gatherDeviceMetrics(t *testing.T) {
 }

 func TestWireguard_gatherDevicePeerMetrics(t *testing.T) {
-	var acc testutil.Accumulator
-
 	pubkey, err := wgtypes.ParseKey("NZTRIrv/ClTcQoNAnChEot+WL7OH7uEGQmx8oAN9rWE=")
 	require.NoError(t, err)

-	wg := &Wireguard{}
 	device := &wgtypes.Device{
 		Name: "wg0",
 	}
@@ -61,7 +56,6 @@ func TestWireguard_gatherDevicePeerMetrics(t *testing.T) {
 		AllowedIPs:      []net.IPNet{{}, {}},
 		ProtocolVersion: 0,
 	}
-
 	expectFields := map[string]interface{}{
 		"persistent_keepalive_interval_ns": int64(60000000000),
 		"protocol_version":                 0,
@@ -78,7 +72,8 @@ func TestWireguard_gatherDevicePeerMetrics(t *testing.T) {
 		"public_key": pubkey.String(),
 	}

-	wg.gatherDevicePeerMetrics(&acc, device, peer)
+	var acc testutil.Accumulator
+	gatherDevicePeerMetrics(&acc, device, peer)

 	require.Equal(t, 7, acc.NFields())
 	acc.AssertDoesNotContainMeasurement(t, measurementDevice)
@@ -117,15 +112,12 @@ func TestWireguard_allowedPeerCIDR(t *testing.T) {
 	}
 	for _, tc := range testcases {
 		t.Run(tc.name, func(t *testing.T) {
-			var acc testutil.Accumulator
-
 			pubkey, err := wgtypes.ParseKey("NZTRIrv/ClTcQoNAnChEot+WL7OH7uEGQmx8oAN9rWE=")
 			require.NoError(t, err)

-			wg := &Wireguard{}
 			device := &wgtypes.Device{
 				Name: "wg0",
 			}

 			peer := wgtypes.Peer{
 				PublicKey:                   pubkey,
 				PersistentKeepaliveInterval: 1 * time.Minute,
@@ -146,7 +138,8 @@ func TestWireguard_allowedPeerCIDR(t *testing.T) {
 				"public_key": pubkey.String(),
 			}

-			wg.gatherDevicePeerMetrics(&acc, device, peer)
+			var acc testutil.Accumulator
+			gatherDevicePeerMetrics(&acc, device, peer)
 			acc.AssertDoesNotContainMeasurement(t, measurementDevice)
 			acc.AssertContainsFields(t, measurementPeer, expectFields)
 		})

@@ -54,10 +54,10 @@ type Endpoint interface {
 type defaultEndpoint struct{}

 // Host returns 0.0.0.0; used when the host is unknown
-func (d *defaultEndpoint) Host() string { return "0.0.0.0" }
+func (*defaultEndpoint) Host() string { return "0.0.0.0" }

 // Name returns "unknown" when an endpoint doesn't exist
-func (d *defaultEndpoint) Name() string { return DefaultServiceName }
+func (*defaultEndpoint) Name() string { return DefaultServiceName }

 // MicroToTime converts zipkin's native time of microseconds into time.Time
 func MicroToTime(micro int64) time.Time {

@@ -15,7 +15,7 @@ import (
 type JSON struct{}

 // Decode unmarshals and validates the JSON body
-func (j *JSON) Decode(octets []byte) ([]codec.Span, error) {
+func (*JSON) Decode(octets []byte) ([]codec.Span, error) {
 	var spans []span
 	err := json.Unmarshal(octets, &spans)
 	if err != nil {

@@ -45,7 +45,7 @@ func UnmarshalThrift(body []byte) ([]*zipkincore.Span, error) {
 type Thrift struct{}

 // Decode unmarshals and validates bytes in thrift format
-func (t *Thrift) Decode(octets []byte) ([]codec.Span, error) {
+func (*Thrift) Decode(octets []byte) ([]codec.Span, error) {
 	spans, err := UnmarshalThrift(octets)
 	if err != nil {
 		return nil, err

@@ -77,7 +77,7 @@ func (*Zipkin) SampleConfig() string {

 // Gather is empty for the zipkin plugin; all gathering is done through
 // the separate goroutine launched in (*Zipkin).Start()
-func (z *Zipkin) Gather(_ telegraf.Accumulator) error { return nil }
+func (*Zipkin) Gather(telegraf.Accumulator) error { return nil }

 // Start launches a separate goroutine for collecting zipkin client http requests,
 // passing in a telegraf.Accumulator such that data can be collected.