chore: Fix linter findings for `revive:exported` in `plugins/inputs/s*` (#16363)

Paweł Żak 2025-01-16 16:47:14 +01:00 committed by GitHub
parent 02159df7ec
commit 5af100d96b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
54 changed files with 2529 additions and 2548 deletions
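
For context: revive's `exported` rule requires every exported Go identifier to carry a doc comment that starts with the identifier's name, and it also flags symbols that are exported without need. The fix applied throughout this commit is mostly of the second kind: unexport package-internal types, fields, and helpers (dropping their now-redundant comments) rather than documenting them. A minimal sketch of the before/after shape, with hypothetical names:

package example

// Before: flagged by revive:exported — exported but only used inside the
// package, and its comment does not start with the symbol name:
//
//	// returns a new decoder instance
//	func NewDecoder() *Decoder { ... }
//
// After: unexported, so the rule no longer applies and no doc comment is
// required.
type decoder struct{}

func newDecoder() *decoder { return &decoder{} }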

View File

@@ -25,8 +25,6 @@ import (
 //go:embed sample.conf
 var sampleConfig string
 
-const addressRegexp = `^(?P<area>[A-Z]+)(?P<no>[0-9]+)\.(?P<type>[A-Z]+)(?P<start>[0-9]+)(?:\.(?P<extra>.*))?$`
-
 var (
 	regexAddr = regexp.MustCompile(addressRegexp)
 	// Area mapping taken from https://github.com/robinson/gos7/blob/master/client.go
@@ -60,32 +58,8 @@ var (
 	}
 )
 
-type metricFieldDefinition struct {
-	Name    string `toml:"name"`
-	Address string `toml:"address"`
-}
-
-type metricDefinition struct {
-	Name   string                  `toml:"name"`
-	Fields []metricFieldDefinition `toml:"fields"`
-	Tags   map[string]string       `toml:"tags"`
-}
-
-type converterFunc func([]byte) interface{}
-
-type batch struct {
-	items    []gos7.S7DataItem
-	mappings []fieldMapping
-}
-
-type fieldMapping struct {
-	measurement string
-	field       string
-	tags        map[string]string
-	convert     converterFunc
-}
-
-// S7comm represents the plugin
+const addressRegexp = `^(?P<area>[A-Z]+)(?P<no>[0-9]+)\.(?P<type>[A-Z]+)(?P<start>[0-9]+)(?:\.(?P<extra>.*))?$`
+
 type S7comm struct {
 	Server string `toml:"server"`
 	Rack   int    `toml:"rack"`
@@ -102,13 +76,35 @@ type S7comm struct {
 	batches []batch
 }
 
-// SampleConfig returns a basic configuration for the plugin
+type metricDefinition struct {
+	Name   string                  `toml:"name"`
+	Fields []metricFieldDefinition `toml:"fields"`
+	Tags   map[string]string       `toml:"tags"`
+}
+
+type metricFieldDefinition struct {
+	Name    string `toml:"name"`
+	Address string `toml:"address"`
+}
+
+type batch struct {
+	items    []gos7.S7DataItem
+	mappings []fieldMapping
+}
+
+type fieldMapping struct {
+	measurement string
+	field       string
+	tags        map[string]string
+	convert     converterFunc
+}
+
+type converterFunc func([]byte) interface{}
+
 func (*S7comm) SampleConfig() string {
 	return sampleConfig
 }
 
-// Init checks the config settings and prepares the plugin. It's called
-// once by the Telegraf agent after parsing the config settings.
 func (s *S7comm) Init() error {
 	// Check settings
 	if s.Server == "" {
@@ -150,8 +146,7 @@ func (s *S7comm) Init() error {
 	return s.createRequests()
 }
 
-// Start initializes the connection to the remote endpoint
-func (s *S7comm) Start(_ telegraf.Accumulator) error {
+func (s *S7comm) Start(telegraf.Accumulator) error {
 	s.Log.Debugf("Connecting to %q...", s.Server)
 	if err := s.handler.Connect(); err != nil {
 		return &internal.StartupError{
@@ -164,15 +159,6 @@ func (s *S7comm) Start(_ telegraf.Accumulator) error {
 	return nil
 }
 
-// Stop disconnects from the remote endpoint and cleans up
-func (s *S7comm) Stop() {
-	if s.handler != nil {
-		s.Log.Debugf("Disconnecting from %q...", s.handler.Address)
-		s.handler.Close()
-	}
-}
-
-// Gather collects the data from the device
 func (s *S7comm) Gather(acc telegraf.Accumulator) error {
 	timestamp := time.Now()
 	grouper := metric.NewSeriesGrouper()
@@ -208,7 +194,13 @@ func (s *S7comm) Gather(acc telegraf.Accumulator) error {
 	return nil
 }
 
-// Internal functions
+func (s *S7comm) Stop() {
+	if s.handler != nil {
+		s.Log.Debugf("Disconnecting from %q...", s.handler.Address)
+		s.handler.Close()
+	}
+}
+
 func (s *S7comm) createRequests() error {
 	seed := maphash.MakeSeed()
 	seenFields := make(map[uint64]bool)

View File

@@ -711,14 +711,14 @@ func TestMetricCollisions(t *testing.T) {
 func TestConnectionLoss(t *testing.T) {
 	// Create fake S7 comm server that can accept connects
-	server, err := NewMockServer("127.0.0.1:0")
+	server, err := newMockServer()
 	require.NoError(t, err)
-	defer server.Close()
-	require.NoError(t, server.Start())
+	defer server.close()
+	server.start()
 
 	// Create the plugin and attempt a connection
 	plugin := &S7comm{
-		Server:          server.Addr(),
+		Server:          server.addr(),
 		Rack:            0,
 		Slot:            2,
 		DebugConnection: true,
@@ -742,20 +742,20 @@ func TestConnectionLoss(t *testing.T) {
 	require.NoError(t, plugin.Gather(&acc))
 	require.NoError(t, plugin.Gather(&acc))
 	plugin.Stop()
-	server.Close()
+	server.close()
 
-	require.Equal(t, uint32(3), server.ConnectionAttempts.Load())
+	require.Equal(t, uint32(3), server.connectionAttempts.Load())
 }
 
 func TestStartupErrorBehaviorError(t *testing.T) {
 	// Create fake S7 comm server that can accept connects
-	server, err := NewMockServer("127.0.0.1:0")
+	server, err := newMockServer()
 	require.NoError(t, err)
-	defer server.Close()
+	defer server.close()
 
 	// Setup the plugin and the model to be able to use the startup retry strategy
 	plugin := &S7comm{
-		Server:          server.Addr(),
+		Server:          server.addr(),
 		Rack:            0,
 		Slot:            2,
 		DebugConnection: true,
@@ -784,18 +784,18 @@ func TestStartupErrorBehaviorError(t *testing.T) {
 	// Starting the plugin will fail with an error because the server does not listen
 	var acc testutil.Accumulator
-	require.ErrorContains(t, model.Start(&acc), "connecting to \""+server.Addr()+"\" failed")
+	require.ErrorContains(t, model.Start(&acc), "connecting to \""+server.addr()+"\" failed")
 }
 
 func TestStartupErrorBehaviorIgnore(t *testing.T) {
 	// Create fake S7 comm server that can accept connects
-	server, err := NewMockServer("127.0.0.1:0")
+	server, err := newMockServer()
 	require.NoError(t, err)
-	defer server.Close()
+	defer server.close()
 
 	// Setup the plugin and the model to be able to use the startup retry strategy
 	plugin := &S7comm{
-		Server:          server.Addr(),
+		Server:          server.addr(),
 		Rack:            0,
 		Slot:            2,
 		DebugConnection: true,
@@ -828,20 +828,20 @@ func TestStartupErrorBehaviorIgnore(t *testing.T) {
 	// the plugin.
 	var acc testutil.Accumulator
 	err = model.Start(&acc)
-	require.ErrorContains(t, err, "connecting to \""+server.Addr()+"\" failed")
+	require.ErrorContains(t, err, "connecting to \""+server.addr()+"\" failed")
 	var fatalErr *internal.FatalError
 	require.ErrorAs(t, err, &fatalErr)
 }
 
 func TestStartupErrorBehaviorRetry(t *testing.T) {
 	// Create fake S7 comm server that can accept connects
-	server, err := NewMockServer("127.0.0.1:0")
+	server, err := newMockServer()
 	require.NoError(t, err)
-	defer server.Close()
+	defer server.close()
 
 	// Setup the plugin and the model to be able to use the startup retry strategy
 	plugin := &S7comm{
-		Server:          server.Addr(),
+		Server:          server.addr(),
 		Rack:            0,
 		Slot:            2,
 		DebugConnection: true,
@@ -880,37 +880,36 @@ func TestStartupErrorBehaviorRetry(t *testing.T) {
 	require.Equal(t, int64(2), model.StartupErrors.Get())
 
 	// Allow connection in the server, now the connection should succeed
-	require.NoError(t, server.Start())
+	server.start()
 	defer model.Stop()
 	require.NoError(t, model.Gather(&acc))
 }
 
-type MockServer struct {
-	ConnectionAttempts atomic.Uint32
-
-	listener net.Listener
+type mockServer struct {
+	connectionAttempts atomic.Uint32
+	listener           net.Listener
 }
 
-func NewMockServer(addr string) (*MockServer, error) {
-	l, err := net.Listen("tcp", addr)
+func newMockServer() (*mockServer, error) {
+	l, err := net.Listen("tcp", "127.0.0.1:0")
 	if err != nil {
 		return nil, err
 	}
-	return &MockServer{listener: l}, nil
+	return &mockServer{listener: l}, nil
 }
 
-func (s *MockServer) Addr() string {
+func (s *mockServer) addr() string {
 	return s.listener.Addr().String()
 }
 
-func (s *MockServer) Close() error {
+func (s *mockServer) close() error {
 	if s.listener != nil {
 		return s.listener.Close()
 	}
 	return nil
 }
 
-func (s *MockServer) Start() error {
+func (s *mockServer) start() {
 	go func() {
 		defer s.listener.Close()
 		for {
@@ -924,7 +923,7 @@ func (s *mockServer) start() {
 			}
 
 			// Count the number of connection attempts
-			s.ConnectionAttempts.Add(1)
+			s.connectionAttempts.Add(1)
 
 			buf := make([]byte, 4096)
@@ -961,6 +960,4 @@ func (s *mockServer) start() {
 			conn.Close()
 		}
 	}()
-
-	return nil
 }
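
One detail of the mock-server rewrite above deserves a note: newMockServer() no longer takes an address because it always listens on 127.0.0.1:0, where port 0 tells the OS to pick any free port; the tests then recover the real address via listener.Addr(), which is what addr() returns. A standalone sketch of the pattern (illustrative, not part of the commit):

package main

import (
	"fmt"
	"net"
)

func main() {
	// Port 0 makes the OS choose a free ephemeral port, so parallel test
	// runs never fight over a hard-coded port number.
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer l.Close()
	fmt.Println("listening on", l.Addr().String()) // e.g. 127.0.0.1:49321
}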

View File

@@ -21,6 +21,25 @@ import (
 //go:embed sample.conf
 var sampleConfig string
 
+const (
+	defaultVersion     = "39.0"
+	defaultEnvironment = "production"
+)
+
+type Salesforce struct {
+	Username      string `toml:"username"`
+	Password      string `toml:"password"`
+	SecurityToken string `toml:"security_token"`
+	Environment   string `toml:"environment"`
+	Version       string `toml:"version"`
+
+	sessionID      string
+	serverURL      *url.URL
+	organizationID string
+	client         *http.Client
+}
+
 type limit struct {
 	Max       int
 	Remaining int
@@ -28,42 +47,10 @@ type limit struct {
 
 type limits map[string]limit
 
-type Salesforce struct {
-	Username       string
-	Password       string
-	SecurityToken  string
-	Environment    string
-	SessionID      string
-	ServerURL      *url.URL
-	OrganizationID string
-	Version        string
-
-	client *http.Client
-}
-
-const defaultVersion = "39.0"
-const defaultEnvironment = "production"
-
-// returns a new Salesforce plugin instance
-func NewSalesforce() *Salesforce {
-	tr := &http.Transport{
-		ResponseHeaderTimeout: 5 * time.Second,
-	}
-	client := &http.Client{
-		Transport: tr,
-		Timeout:   10 * time.Second,
-	}
-	return &Salesforce{
-		client:      client,
-		Version:     defaultVersion,
-		Environment: defaultEnvironment}
-}
-
 func (*Salesforce) SampleConfig() string {
 	return sampleConfig
 }
 
-// Reads limits values from Salesforce API
 func (s *Salesforce) Gather(acc telegraf.Accumulator) error {
 	limits, err := s.fetchLimits()
 	if err != nil {
@@ -71,8 +58,8 @@ func (s *Salesforce) Gather(acc telegraf.Accumulator) error {
 	}
 
 	tags := map[string]string{
-		"organization_id": s.OrganizationID,
-		"host":            s.ServerURL.Host,
+		"organization_id": s.organizationID,
+		"host":            s.serverURL.Host,
 	}
 
 	fields := make(map[string]interface{})
@@ -88,18 +75,18 @@ func (s *Salesforce) Gather(acc telegraf.Accumulator) error {
 // query the limits endpoint
 func (s *Salesforce) queryLimits() (*http.Response, error) {
-	endpoint := fmt.Sprintf("%s://%s/services/data/v%s/limits", s.ServerURL.Scheme, s.ServerURL.Host, s.Version)
+	endpoint := fmt.Sprintf("%s://%s/services/data/v%s/limits", s.serverURL.Scheme, s.serverURL.Host, s.Version)
 	req, err := http.NewRequest(http.MethodGet, endpoint, nil)
 	if err != nil {
 		return nil, err
 	}
 	req.Header.Add("Accept", "encoding/json")
-	req.Header.Add("Authorization", "Bearer "+s.SessionID)
+	req.Header.Add("Authorization", "Bearer "+s.sessionID)
 	return s.client.Do(req)
 }
 
 func (s *Salesforce) isAuthenticated() bool {
-	return s.SessionID != ""
+	return s.sessionID != ""
 }
 
 func (s *Salesforce) fetchLimits() (limits, error) {
@@ -218,15 +205,29 @@ func (s *Salesforce) login() error {
 		return err
 	}
 
-	s.SessionID = loginResult.SessionID
-	s.OrganizationID = loginResult.OrganizationID
-	s.ServerURL, err = url.Parse(loginResult.ServerURL)
+	s.sessionID = loginResult.SessionID
+	s.organizationID = loginResult.OrganizationID
+	s.serverURL, err = url.Parse(loginResult.ServerURL)
 	return err
 }
 
+func newSalesforce() *Salesforce {
+	tr := &http.Transport{
+		ResponseHeaderTimeout: 5 * time.Second,
+	}
+	client := &http.Client{
+		Transport: tr,
+		Timeout:   10 * time.Second,
+	}
+	return &Salesforce{
+		client:      client,
+		Version:     defaultVersion,
+		Environment: defaultEnvironment}
+}
+
 func init() {
 	inputs.Add("salesforce", func() telegraf.Input {
-		return NewSalesforce()
+		return newSalesforce()
 	})
 }

View File

@@ -1,4 +1,4 @@
-package salesforce_test
+package salesforce
 
 import (
 	"net/http"
@@ -8,7 +8,6 @@ import (
 
 	"github.com/stretchr/testify/require"
 
-	"github.com/influxdata/telegraf/plugins/inputs/salesforce"
 	"github.com/influxdata/telegraf/testutil"
 )
 
@@ -23,13 +22,13 @@ func Test_Gather(t *testing.T) {
 	}))
 	defer fakeServer.Close()
 
-	plugin := salesforce.NewSalesforce()
-	plugin.SessionID = "test_session"
+	plugin := newSalesforce()
+	plugin.sessionID = "test_session"
 	u, err := url.Parse(fakeServer.URL)
 	if err != nil {
 		t.Error(err)
 	}
-	plugin.ServerURL = u
+	plugin.serverURL = u
 
 	var acc testutil.Accumulator
 	require.NoError(t, acc.GatherError(plugin.Gather))
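
The package clause change above is what makes the rest of this diff possible: as an internal test (package salesforce rather than package salesforce_test), the file can call the unexported newSalesforce() constructor and set unexported fields directly. A hypothetical test in the same style, using only identifiers visible in the diff:

package salesforce

import "testing"

// Illustrative only — an in-package test can reach unexported identifiers
// such as newSalesforce, sessionID, and isAuthenticated.
func TestInternalAccess(t *testing.T) {
	plugin := newSalesforce()
	plugin.sessionID = "test_session"
	if !plugin.isAuthenticated() {
		t.Fatal("expected a non-empty session ID to count as authenticated")
	}
}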

View File

@@ -28,14 +28,14 @@ var (
 	defaultTimeout = config.Duration(5 * time.Second)
 )
 
+const cmd = "sensors"
+
 type Sensors struct {
 	RemoveNumbers bool            `toml:"remove_numbers"`
 	Timeout       config.Duration `toml:"timeout"`
 	path          string
 }
 
-const cmd = "sensors"
-
 func (*Sensors) SampleConfig() string {
 	return sampleConfig
 }

View File

@@ -17,12 +17,14 @@ type Sensors struct {
 	Log telegraf.Logger `toml:"-"`
 }
 
+func (*Sensors) SampleConfig() string { return sampleConfig }
+
 func (s *Sensors) Init() error {
-	s.Log.Warn("current platform is not supported")
+	s.Log.Warn("Current platform is not supported")
 	return nil
 }
 
-func (*Sensors) SampleConfig() string { return sampleConfig }
-
-func (*Sensors) Gather(_ telegraf.Accumulator) error { return nil }
+func (*Sensors) Gather(telegraf.Accumulator) error { return nil }
 
 func init() {
 	inputs.Add("sensors", func() telegraf.Input {

View File

@@ -304,7 +304,7 @@ func fakeExecCommand(command string, args ...string) *exec.Cmd {
 // For example, if you run:
 // GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- chrony tracking
 // it returns below mockData.
-func TestHelperProcess(_ *testing.T) {
+func TestHelperProcess(*testing.T) {
 	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
 		return
 	}

View File

@@ -4,33 +4,33 @@ import "io"
 
 // MinimumReader is the implementation for MinReader.
 type MinimumReader struct {
-	R                      io.Reader
-	MinNumberOfBytesToRead int64 // Min number of bytes we need to read from the reader
+	reader                 io.Reader
+	minNumberOfBytesToRead int64 // Min number of bytes we need to read from the reader
 }
 
-// MinReader reads from R but ensures there is at least N bytes read from the reader.
+// MinReader reads from the reader but ensures there is at least N bytes read from the reader.
 // The reader should call Close() when they are done reading.
-// Closing the MinReader will read and discard any unread bytes up to MinNumberOfBytesToRead.
+// Closing the MinReader will read and discard any unread bytes up to minNumberOfBytesToRead.
 // CLosing the MinReader does NOT close the underlying reader.
 // The underlying implementation is a MinimumReader, which implements ReaderCloser.
 func MinReader(r io.Reader, minNumberOfBytesToRead int64) *MinimumReader {
 	return &MinimumReader{
-		R:                      r,
-		MinNumberOfBytesToRead: minNumberOfBytesToRead,
+		reader:                 r,
+		minNumberOfBytesToRead: minNumberOfBytesToRead,
 	}
 }
 
 func (r *MinimumReader) Read(p []byte) (n int, err error) {
-	n, err = r.R.Read(p)
-	r.MinNumberOfBytesToRead -= int64(n)
+	n, err = r.reader.Read(p)
+	r.minNumberOfBytesToRead -= int64(n)
 	return n, err
 }
 
 // Close does not close the underlying reader, only the MinimumReader
 func (r *MinimumReader) Close() error {
-	if r.MinNumberOfBytesToRead > 0 {
-		b := make([]byte, r.MinNumberOfBytesToRead)
-		_, err := r.R.Read(b)
+	if r.minNumberOfBytesToRead > 0 {
+		b := make([]byte, r.minNumberOfBytesToRead)
+		_, err := r.reader.Read(b)
 		return err
 	}
 	return nil
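
The renames above are field-level only; the exported MinReader constructor and the Read/Close semantics are unchanged: the reader tracks how many of the promised bytes have been consumed, and Close drains the remainder so the underlying stream stays aligned on the next record. A small usage sketch (assuming this package's import path inside the Telegraf tree):

package main

import (
	"bytes"
	"fmt"

	"github.com/influxdata/telegraf/plugins/inputs/sflow/binaryio"
)

func main() {
	payload := bytes.NewReader([]byte{1, 2, 3, 4, 5, 6, 7, 8})
	mr := binaryio.MinReader(payload, 8) // the record occupies 8 bytes on the wire
	buf := make([]byte, 3)
	if _, err := mr.Read(buf); err != nil { // decode only part of the record
		panic(err)
	}
	mr.Close()                 // drains the 5 unread bytes
	fmt.Println(payload.Len()) // 0: the stream is aligned past the record
}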

View File

@@ -66,12 +66,12 @@ func TestIPv4SW(t *testing.T) {
 	actual := make([]telegraf.Metric, 0)
 	dc := newDecoder()
-	dc.OnPacket(func(p *v5Format) {
+	dc.onPacket(func(p *v5Format) {
 		metrics := makeMetrics(p)
 		actual = append(actual, metrics...)
 	})
 
 	buf := bytes.NewReader(packet)
-	err = dc.Decode(buf)
+	err = dc.decode(buf)
 	require.NoError(t, err)
 
 	expected := []telegraf.Metric{
@@ -165,7 +165,7 @@ func BenchmarkDecodeIPv4SW(b *testing.B) {
 	b.ResetTimer()
 	for n := 0; n < b.N; n++ {
-		_, err = dc.DecodeOnePacket(bytes.NewBuffer(packet))
+		_, err = dc.decodeOnePacket(bytes.NewBuffer(packet))
 		if err != nil {
 			panic(err)
 		}
@@ -189,7 +189,7 @@ func TestExpandFlow(t *testing.T) {
 	require.NoError(t, err)
 
 	dc := newDecoder()
-	p, err := dc.DecodeOnePacket(bytes.NewBuffer(packet))
+	p, err := dc.decodeOnePacket(bytes.NewBuffer(packet))
 	require.NoError(t, err)
 	actual := makeMetrics(p)
@@ -330,7 +330,7 @@ func TestIPv4SWRT(t *testing.T) {
 	require.NoError(t, err)
 
 	dc := newDecoder()
-	p, err := dc.DecodeOnePacket(bytes.NewBuffer(packet))
+	p, err := dc.decodeOnePacket(bytes.NewBuffer(packet))
 	require.NoError(t, err)
 	actual := makeMetrics(p)
@@ -557,7 +557,7 @@ func TestIPv6SW(t *testing.T) {
 	require.NoError(t, err)
 
 	dc := newDecoder()
-	p, err := dc.DecodeOnePacket(bytes.NewBuffer(packet))
+	p, err := dc.decodeOnePacket(bytes.NewBuffer(packet))
 	require.NoError(t, err)
 	actual := makeMetrics(p)
@@ -628,7 +628,7 @@ func TestExpandFlowCounter(t *testing.T) {
 	require.NoError(t, err)
 
 	dc := newDecoder()
-	p, err := dc.DecodeOnePacket(bytes.NewBuffer(packet))
+	p, err := dc.decodeOnePacket(bytes.NewBuffer(packet))
 	require.NoError(t, err)
 	actual := makeMetrics(p)
@@ -830,7 +830,7 @@ func TestFlowExpandCounter(t *testing.T) {
 	require.NoError(t, err)
 
 	dc := newDecoder()
-	p, err := dc.DecodeOnePacket(bytes.NewBuffer(packet))
+	p, err := dc.decodeOnePacket(bytes.NewBuffer(packet))
 	require.NoError(t, err)
 	actual := makeMetrics(p)

View File

@@ -12,22 +12,22 @@ func makeMetrics(p *v5Format) []telegraf.Metric {
 	now := time.Now()
 	metrics := make([]telegraf.Metric, 0)
 	tags := map[string]string{
-		"agent_address": p.AgentAddress.String(),
+		"agent_address": p.agentAddress.String(),
 	}
 	fields := make(map[string]interface{}, 2)
-	for _, sample := range p.Samples {
-		tags["input_ifindex"] = strconv.FormatUint(uint64(sample.SampleData.InputIfIndex), 10)
-		tags["output_ifindex"] = strconv.FormatUint(uint64(sample.SampleData.OutputIfIndex), 10)
-		tags["sample_direction"] = sample.SampleData.SampleDirection
-		tags["source_id_index"] = strconv.FormatUint(uint64(sample.SampleData.SourceIDIndex), 10)
-		tags["source_id_type"] = strconv.FormatUint(uint64(sample.SampleData.SourceIDType), 10)
-		fields["drops"] = sample.SampleData.Drops
-		fields["sampling_rate"] = sample.SampleData.SamplingRate
+	for _, sample := range p.samples {
+		tags["input_ifindex"] = strconv.FormatUint(uint64(sample.smplData.inputIfIndex), 10)
+		tags["output_ifindex"] = strconv.FormatUint(uint64(sample.smplData.outputIfIndex), 10)
+		tags["sample_direction"] = sample.smplData.sampleDirection
+		tags["source_id_index"] = strconv.FormatUint(uint64(sample.smplData.sourceIDIndex), 10)
+		tags["source_id_type"] = strconv.FormatUint(uint64(sample.smplData.sourceIDType), 10)
+		fields["drops"] = sample.smplData.drops
+		fields["sampling_rate"] = sample.smplData.samplingRate
 
-		for _, flowRecord := range sample.SampleData.FlowRecords {
-			if flowRecord.FlowData != nil {
-				tags2 := flowRecord.FlowData.getTags()
-				fields2 := flowRecord.FlowData.getFields()
+		for _, flowRecord := range sample.smplData.flowRecords {
+			if flowRecord.flowData != nil {
+				tags2 := flowRecord.flowData.getTags()
+				fields2 := flowRecord.flowData.getFields()
 				for k, v := range tags {
 					tags2[k] = v
 				}

View File

@@ -11,8 +11,8 @@ import (
 )
 
 type packetDecoder struct {
-	onPacket func(p *v5Format)
-	Log      telegraf.Logger
+	onPacketF func(p *v5Format)
+	Log       telegraf.Logger
 }
 
 func newDecoder() *packetDecoder {
@@ -25,19 +25,19 @@ func (d *packetDecoder) debug(args ...interface{}) {
 	}
 }
 
-func (d *packetDecoder) OnPacket(f func(p *v5Format)) {
-	d.onPacket = f
+func (d *packetDecoder) onPacket(f func(p *v5Format)) {
+	d.onPacketF = f
 }
 
-func (d *packetDecoder) Decode(r io.Reader) error {
+func (d *packetDecoder) decode(r io.Reader) error {
 	var err error
 	var packet *v5Format
 	for err == nil {
-		packet, err = d.DecodeOnePacket(r)
+		packet, err = d.decodeOnePacket(r)
 		if err != nil {
 			break
 		}
-		d.onPacket(packet)
+		d.onPacketF(packet)
 	}
 	if err != nil && errors.Is(err, io.EOF) {
 		return nil
@@ -45,51 +45,51 @@ func (d *packetDecoder) decode(r io.Reader) error {
 	return err
 }
 
-type AddressType uint32 // must be uint32
+type addressType uint32 // must be uint32
 
 const (
-	AddressTypeUnknown AddressType = 0
-	AddressTypeIPV4    AddressType = 1
-	AddressTypeIPV6    AddressType = 2
+	addressTypeUnknown addressType = 0
+	addressTypeIPV4    addressType = 1
+	addressTypeIPV6    addressType = 2
 )
 
-func (d *packetDecoder) DecodeOnePacket(r io.Reader) (*v5Format, error) {
+func (d *packetDecoder) decodeOnePacket(r io.Reader) (*v5Format, error) {
 	p := &v5Format{}
-	err := read(r, &p.Version, "version")
+	err := read(r, &p.version, "version")
 	if err != nil {
 		return nil, err
 	}
-	if p.Version != 5 {
-		return nil, fmt.Errorf("version %d not supported, only version 5", p.Version)
+	if p.version != 5 {
+		return nil, fmt.Errorf("version %d not supported, only version 5", p.version)
 	}
-	var addressIPType AddressType
+	var addressIPType addressType
 	if err := read(r, &addressIPType, "address ip type"); err != nil {
 		return nil, err
 	}
 	switch addressIPType {
-	case AddressTypeUnknown:
-		p.AgentAddress.IP = make([]byte, 0)
-	case AddressTypeIPV4:
-		p.AgentAddress.IP = make([]byte, 4)
-	case AddressTypeIPV6:
-		p.AgentAddress.IP = make([]byte, 16)
+	case addressTypeUnknown:
+		p.agentAddress.IP = make([]byte, 0)
+	case addressTypeIPV4:
+		p.agentAddress.IP = make([]byte, 4)
+	case addressTypeIPV6:
+		p.agentAddress.IP = make([]byte, 16)
 	default:
 		return nil, fmt.Errorf("unknown address IP type %d", addressIPType)
 	}
-	if err := read(r, &p.AgentAddress.IP, "Agent Address IP"); err != nil {
+	if err := read(r, &p.agentAddress.IP, "Agent Address IP"); err != nil {
 		return nil, err
 	}
-	if err := read(r, &p.SubAgentID, "SubAgentID"); err != nil {
+	if err := read(r, &p.subAgentID, "SubAgentID"); err != nil {
 		return nil, err
 	}
-	if err := read(r, &p.SequenceNumber, "SequenceNumber"); err != nil {
+	if err := read(r, &p.sequenceNumber, "SequenceNumber"); err != nil {
 		return nil, err
 	}
-	if err := read(r, &p.Uptime, "Uptime"); err != nil {
+	if err := read(r, &p.uptime, "Uptime"); err != nil {
 		return nil, err
 	}
 
-	p.Samples, err = d.decodeSamples(r)
+	p.samples, err = d.decodeSamples(r)
 	return p, err
 }
@@ -115,7 +115,7 @@ func (d *packetDecoder) decodeSamples(r io.Reader) ([]sample, error) {
 func (d *packetDecoder) decodeSample(r io.Reader) (sample, error) {
 	var err error
 	sam := sample{}
-	if err := read(r, &sam.SampleType, "sampleType"); err != nil {
+	if err := read(r, &sam.smplType, "sampleType"); err != nil {
 		return sam, err
 	}
 	sampleDataLen := uint32(0)
@@ -125,25 +125,19 @@ func (d *packetDecoder) decodeSample(r io.Reader) (sample, error) {
 	mr := binaryio.MinReader(r, int64(sampleDataLen))
 	defer mr.Close()
 
-	switch sam.SampleType {
+	switch sam.smplType {
 	case sampleTypeFlowSample:
-		sam.SampleData, err = d.decodeFlowSample(mr)
+		sam.smplData, err = d.decodeFlowSample(mr)
 	case sampleTypeFlowSampleExpanded:
-		sam.SampleData, err = d.decodeFlowSampleExpanded(mr)
+		sam.smplData, err = d.decodeFlowSampleExpanded(mr)
 	default:
-		d.debug("Unknown sample type: ", sam.SampleType)
+		d.debug("Unknown sample type: ", sam.smplType)
 	}
 	return sam, err
 }
 
-type InterfaceFormatType uint8 // sflow_version_5.txt line 1497
-
-const (
-	InterfaceFormatTypeSingleInterface InterfaceFormatType = 0
-	InterfaceFormatTypePacketDiscarded InterfaceFormatType = 1
-)
 
 func (d *packetDecoder) decodeFlowSample(r io.Reader) (t sampleDataFlowSampleExpanded, err error) {
-	if err := read(r, &t.SequenceNumber, "SequenceNumber"); err != nil {
+	if err := read(r, &t.sequenceNumber, "SequenceNumber"); err != nil {
 		return t, err
 	}
 	var sourceID uint32
@@ -151,80 +145,80 @@ func (d *packetDecoder) decodeFlowSample(r io.Reader) (t sampleDataFlowSampleExpanded, err error) {
 		return t, err
 	}
 	// split source id to source id type and source id index
-	t.SourceIDIndex = sourceID & 0x00ffffff // sflow_version_5.txt line: 1468
-	t.SourceIDType = sourceID >> 24         // source_id_type sflow_version_5.txt Line 1465
+	t.sourceIDIndex = sourceID & 0x00ffffff // sflow_version_5.txt line: 1468
+	t.sourceIDType = sourceID >> 24         // source_id_type sflow_version_5.txt Line 1465
 
-	if err := read(r, &t.SamplingRate, "SamplingRate"); err != nil {
+	if err := read(r, &t.samplingRate, "SamplingRate"); err != nil {
 		return t, err
 	}
-	if err := read(r, &t.SamplePool, "SamplePool"); err != nil {
+	if err := read(r, &t.samplePool, "SamplePool"); err != nil {
 		return t, err
 	}
-	if err := read(r, &t.Drops, "Drops"); err != nil { // sflow_version_5.txt line 1636
+	if err := read(r, &t.drops, "Drops"); err != nil { // sflow_version_5.txt line 1636
 		return t, err
 	}
 
-	if err := read(r, &t.InputIfIndex, "InputIfIndex"); err != nil {
+	if err := read(r, &t.inputIfIndex, "InputIfIndex"); err != nil {
 		return t, err
 	}
-	t.InputIfFormat = t.InputIfIndex >> 30
-	t.InputIfIndex = t.InputIfIndex & 0x3FFFFFFF
+	t.inputIfFormat = t.inputIfIndex >> 30
+	t.inputIfIndex = t.inputIfIndex & 0x3FFFFFFF
 
-	if err := read(r, &t.OutputIfIndex, "OutputIfIndex"); err != nil {
+	if err := read(r, &t.outputIfIndex, "OutputIfIndex"); err != nil {
 		return t, err
 	}
-	t.OutputIfFormat = t.OutputIfIndex >> 30
-	t.OutputIfIndex = t.OutputIfIndex & 0x3FFFFFFF
+	t.outputIfFormat = t.outputIfIndex >> 30
+	t.outputIfIndex = t.outputIfIndex & 0x3FFFFFFF
 
-	switch t.SourceIDIndex {
-	case t.OutputIfIndex:
-		t.SampleDirection = "egress"
-	case t.InputIfIndex:
-		t.SampleDirection = "ingress"
+	switch t.sourceIDIndex {
+	case t.outputIfIndex:
+		t.sampleDirection = "egress"
+	case t.inputIfIndex:
+		t.sampleDirection = "ingress"
 	}
 
-	t.FlowRecords, err = d.decodeFlowRecords(r, t.SamplingRate)
+	t.flowRecords, err = d.decodeFlowRecords(r, t.samplingRate)
 	return t, err
 }
 
 func (d *packetDecoder) decodeFlowSampleExpanded(r io.Reader) (t sampleDataFlowSampleExpanded, err error) {
-	if err := read(r, &t.SequenceNumber, "SequenceNumber"); err != nil { // sflow_version_5.txt line 1701
+	if err := read(r, &t.sequenceNumber, "SequenceNumber"); err != nil { // sflow_version_5.txt line 1701
 		return t, err
 	}
-	if err := read(r, &t.SourceIDType, "SourceIDType"); err != nil { // sflow_version_5.txt line: 1706 + 16878
+	if err := read(r, &t.sourceIDType, "SourceIDType"); err != nil { // sflow_version_5.txt line: 1706 + 16878
 		return t, err
 	}
-	if err := read(r, &t.SourceIDIndex, "SourceIDIndex"); err != nil { // sflow_version_5.txt line: 1689
+	if err := read(r, &t.sourceIDIndex, "SourceIDIndex"); err != nil { // sflow_version_5.txt line: 1689
 		return t, err
 	}
-	if err := read(r, &t.SamplingRate, "SamplingRate"); err != nil { // sflow_version_5.txt line: 1707
+	if err := read(r, &t.samplingRate, "SamplingRate"); err != nil { // sflow_version_5.txt line: 1707
 		return t, err
 	}
-	if err := read(r, &t.SamplePool, "SamplePool"); err != nil { // sflow_version_5.txt line: 1708
+	if err := read(r, &t.samplePool, "SamplePool"); err != nil { // sflow_version_5.txt line: 1708
 		return t, err
 	}
-	if err := read(r, &t.Drops, "Drops"); err != nil { // sflow_version_5.txt line: 1712
+	if err := read(r, &t.drops, "Drops"); err != nil { // sflow_version_5.txt line: 1712
 		return t, err
 	}
-	if err := read(r, &t.InputIfFormat, "InputIfFormat"); err != nil { // sflow_version_5.txt line: 1727
+	if err := read(r, &t.inputIfFormat, "InputIfFormat"); err != nil { // sflow_version_5.txt line: 1727
 		return t, err
 	}
-	if err := read(r, &t.InputIfIndex, "InputIfIndex"); err != nil {
+	if err := read(r, &t.inputIfIndex, "InputIfIndex"); err != nil {
 		return t, err
 	}
-	if err := read(r, &t.OutputIfFormat, "OutputIfFormat"); err != nil { // sflow_version_5.txt line: 1728
+	if err := read(r, &t.outputIfFormat, "OutputIfFormat"); err != nil { // sflow_version_5.txt line: 1728
 		return t, err
 	}
-	if err := read(r, &t.OutputIfIndex, "OutputIfIndex"); err != nil {
+	if err := read(r, &t.outputIfIndex, "OutputIfIndex"); err != nil {
 		return t, err
 	}
 
-	switch t.SourceIDIndex {
-	case t.OutputIfIndex:
-		t.SampleDirection = "egress"
-	case t.InputIfIndex:
-		t.SampleDirection = "ingress"
+	switch t.sourceIDIndex {
+	case t.outputIfIndex:
+		t.sampleDirection = "egress"
+	case t.inputIfIndex:
+		t.sampleDirection = "ingress"
 	}
 
-	t.FlowRecords, err = d.decodeFlowRecords(r, t.SamplingRate)
+	t.flowRecords, err = d.decodeFlowRecords(r, t.samplingRate)
 	return t, err
 }
@@ -236,7 +230,7 @@ func (d *packetDecoder) decodeFlowRecords(r io.Reader, samplingRate uint32) (recs []flowRecord, err error) {
 	}
 	for i := uint32(0); i < count; i++ {
 		fr := flowRecord{}
-		if err := read(r, &fr.FlowFormat, "FlowFormat"); err != nil { // sflow_version_5.txt line 1597
+		if err := read(r, &fr.flowFormat, "FlowFormat"); err != nil { // sflow_version_5.txt line 1597
			return recs, err
 		}
 		if err := read(r, &flowDataLen, "Flow data length"); err != nil {
@@ -245,11 +239,11 @@ func (d *packetDecoder) decodeFlowRecords(r io.Reader, samplingRate uint32) (recs []flowRecord, err error) {
 		mr := binaryio.MinReader(r, int64(flowDataLen))
 
-		switch fr.FlowFormat {
+		switch fr.flowFormat {
 		case flowFormatTypeRawPacketHeader: // sflow_version_5.txt line 1938
-			fr.FlowData, err = d.decodeRawPacketHeaderFlowData(mr, samplingRate)
+			fr.flowData, err = d.decodeRawPacketHeaderFlowData(mr, samplingRate)
 		default:
-			d.debug("Unknown flow format: ", fr.FlowFormat)
+			d.debug("Unknown flow format: ", fr.flowFormat)
 		}
 		if err != nil {
 			mr.Close()
@@ -264,29 +258,29 @@ func (d *packetDecoder) decodeFlowRecords(r io.Reader, samplingRate uint32) (recs []flowRecord, err error) {
 }
 
 func (d *packetDecoder) decodeRawPacketHeaderFlowData(r io.Reader, samplingRate uint32) (h rawPacketHeaderFlowData, err error) {
-	if err := read(r, &h.HeaderProtocol, "HeaderProtocol"); err != nil { // sflow_version_5.txt line 1940
+	if err := read(r, &h.headerProtocol, "HeaderProtocol"); err != nil { // sflow_version_5.txt line 1940
 		return h, err
 	}
-	if err := read(r, &h.FrameLength, "FrameLength"); err != nil { // sflow_version_5.txt line 1942
+	if err := read(r, &h.frameLength, "FrameLength"); err != nil { // sflow_version_5.txt line 1942
 		return h, err
 	}
-	h.Bytes = h.FrameLength * samplingRate
+	h.bytes = h.frameLength * samplingRate
 
-	if err := read(r, &h.StrippedOctets, "StrippedOctets"); err != nil { // sflow_version_5.txt line 1967
+	if err := read(r, &h.strippedOctets, "StrippedOctets"); err != nil { // sflow_version_5.txt line 1967
 		return h, err
 	}
-	if err := read(r, &h.HeaderLength, "HeaderLength"); err != nil {
+	if err := read(r, &h.headerLength, "HeaderLength"); err != nil {
 		return h, err
 	}
 
-	mr := binaryio.MinReader(r, int64(h.HeaderLength))
+	mr := binaryio.MinReader(r, int64(h.headerLength))
 	defer mr.Close()
 
-	switch h.HeaderProtocol {
+	switch h.headerProtocol {
 	case headerProtocolTypeEthernetISO88023:
-		h.Header, err = d.decodeEthHeader(mr)
+		h.header, err = d.decodeEthHeader(mr)
 	default:
-		d.debug("Unknown header protocol type: ", h.HeaderProtocol)
+		d.debug("Unknown header protocol type: ", h.headerProtocol)
 	}
 	return h, err
@@ -296,10 +290,10 @@ func (d *packetDecoder) decodeRawPacketHeaderFlowData(r io.Reader, samplingRate uint32) (h rawPacketHeaderFlowData, err error) {
 // according to https://en.wikipedia.org/wiki/Ethernet_frame
 func (d *packetDecoder) decodeEthHeader(r io.Reader) (h ethHeader, err error) {
 	// we may have to read out StrippedOctets bytes and throw them away first?
-	if err := read(r, &h.DestinationMAC, "DestinationMAC"); err != nil {
+	if err := read(r, &h.destinationMAC, "DestinationMAC"); err != nil {
 		return h, err
 	}
-	if err := read(r, &h.SourceMAC, "SourceMAC"); err != nil {
+	if err := read(r, &h.sourceMAC, "SourceMAC"); err != nil {
 		return h, err
 	}
 	var tagOrEType uint16
@@ -312,18 +306,18 @@ func (d *packetDecoder) decodeEthHeader(r io.Reader) (h ethHeader, err error) {
 		if err := read(r, &discard, "unknown"); err != nil {
 			return h, err
 		}
-		if err := read(r, &h.EtherTypeCode, "EtherTypeCode"); err != nil {
+		if err := read(r, &h.etherTypeCode, "EtherTypeCode"); err != nil {
 			return h, err
 		}
 	default:
-		h.EtherTypeCode = tagOrEType
+		h.etherTypeCode = tagOrEType
 	}
-	h.EtherType = eTypeMap[h.EtherTypeCode]
-	switch h.EtherType {
+	h.etherType = eTypeMap[h.etherTypeCode]
+	switch h.etherType {
 	case "IPv4":
-		h.IPHeader, err = d.decodeIPv4Header(r)
+		h.ipHeader, err = d.decodeIPv4Header(r)
 	case "IPv6":
-		h.IPHeader, err = d.decodeIPv6Header(r)
+		h.ipHeader, err = d.decodeIPv6Header(r)
 	default:
 	}
 	if err != nil {
@@ -334,49 +328,49 @@ func (d *packetDecoder) decodeEthHeader(r io.Reader) (h ethHeader, err error) {
 
 // https://en.wikipedia.org/wiki/IPv4#Header
 func (d *packetDecoder) decodeIPv4Header(r io.Reader) (h ipV4Header, err error) {
-	if err := read(r, &h.Version, "Version"); err != nil {
+	if err := read(r, &h.version, "Version"); err != nil {
 		return h, err
 	}
-	h.InternetHeaderLength = h.Version & 0x0F
-	h.Version = h.Version & 0xF0
-	if err := read(r, &h.DSCP, "DSCP"); err != nil {
+	h.internetHeaderLength = h.version & 0x0F
+	h.version = h.version & 0xF0
+	if err := read(r, &h.dscp, "DSCP"); err != nil {
 		return h, err
 	}
-	h.ECN = h.DSCP & 0x03
-	h.DSCP = h.DSCP >> 2
-	if err := read(r, &h.TotalLength, "TotalLength"); err != nil {
+	h.ecn = h.dscp & 0x03
+	h.dscp = h.dscp >> 2
+	if err := read(r, &h.totalLength, "TotalLength"); err != nil {
 		return h, err
 	}
-	if err := read(r, &h.Identification, "Identification"); err != nil {
+	if err := read(r, &h.identification, "Identification"); err != nil {
 		return h, err
 	}
-	if err := read(r, &h.FragmentOffset, "FragmentOffset"); err != nil {
+	if err := read(r, &h.fragmentOffset, "FragmentOffset"); err != nil {
 		return h, err
 	}
-	h.Flags = uint8(h.FragmentOffset >> 13)
-	h.FragmentOffset = h.FragmentOffset & 0x1FFF
-	if err := read(r, &h.TTL, "TTL"); err != nil {
+	h.flags = uint8(h.fragmentOffset >> 13)
+	h.fragmentOffset = h.fragmentOffset & 0x1FFF
+	if err := read(r, &h.ttl, "TTL"); err != nil {
 		return h, err
 	}
-	if err := read(r, &h.Protocol, "Protocol"); err != nil {
+	if err := read(r, &h.protocol, "Protocol"); err != nil {
 		return h, err
 	}
-	if err := read(r, &h.HeaderChecksum, "HeaderChecksum"); err != nil {
+	if err := read(r, &h.headerChecksum, "HeaderChecksum"); err != nil {
 		return h, err
 	}
-	if err := read(r, &h.SourceIP, "SourceIP"); err != nil {
+	if err := read(r, &h.sourceIP, "SourceIP"); err != nil {
 		return h, err
 	}
-	if err := read(r, &h.DestIP, "DestIP"); err != nil {
+	if err := read(r, &h.destIP, "DestIP"); err != nil {
 		return h, err
 	}
-	switch h.Protocol {
+	switch h.protocol {
 	case ipProtocolTCP:
-		h.ProtocolHeader, err = decodeTCPHeader(r)
+		h.protocolHeader, err = decodeTCPHeader(r)
 	case ipProtocolUDP:
-		h.ProtocolHeader, err = decodeUDPHeader(r)
+		h.protocolHeader, err = decodeUDPHeader(r)
 	default:
-		d.debug("Unknown IP protocol: ", h.Protocol)
+		d.debug("Unknown IP protocol: ", h.protocol)
 	}
 	return h, err
 }
@@ -391,49 +385,49 @@ func (d *packetDecoder) decodeIPv6Header(r io.Reader) (h ipV6Header, err error) {
 	if version != 0x6 {
 		return h, fmt.Errorf("unexpected IPv6 header version 0x%x", version)
 	}
-	h.DSCP = uint8((fourByteBlock & 0xFC00000) >> 22)
-	h.ECN = uint8((fourByteBlock & 0x300000) >> 20)
+	h.dscp = uint8((fourByteBlock & 0xFC00000) >> 22)
+	h.ecn = uint8((fourByteBlock & 0x300000) >> 20)
 
 	// The flowLabel is available via fourByteBlock & 0xFFFFF
-	if err := read(r, &h.PayloadLength, "PayloadLength"); err != nil {
+	if err := read(r, &h.payloadLength, "PayloadLength"); err != nil {
 		return h, err
 	}
-	if err := read(r, &h.NextHeaderProto, "NextHeaderProto"); err != nil {
+	if err := read(r, &h.nextHeaderProto, "NextHeaderProto"); err != nil {
 		return h, err
 	}
-	if err := read(r, &h.HopLimit, "HopLimit"); err != nil {
+	if err := read(r, &h.hopLimit, "HopLimit"); err != nil {
 		return h, err
 	}
-	if err := read(r, &h.SourceIP, "SourceIP"); err != nil {
+	if err := read(r, &h.sourceIP, "SourceIP"); err != nil {
 		return h, err
 	}
-	if err := read(r, &h.DestIP, "DestIP"); err != nil {
+	if err := read(r, &h.destIP, "DestIP"); err != nil {
 		return h, err
 	}
-	switch h.NextHeaderProto {
+	switch h.nextHeaderProto {
 	case ipProtocolTCP:
-		h.ProtocolHeader, err = decodeTCPHeader(r)
+		h.protocolHeader, err = decodeTCPHeader(r)
 	case ipProtocolUDP:
-		h.ProtocolHeader, err = decodeUDPHeader(r)
+		h.protocolHeader, err = decodeUDPHeader(r)
 	default:
 		// not handled
-		d.debug("Unknown IP protocol: ", h.NextHeaderProto)
+		d.debug("Unknown IP protocol: ", h.nextHeaderProto)
 	}
 	return h, err
 }
 
 // https://en.wikipedia.org/wiki/Transmission_Control_Protocol#TCP_segment_structure
 func decodeTCPHeader(r io.Reader) (h tcpHeader, err error) {
-	if err := read(r, &h.SourcePort, "SourcePort"); err != nil {
+	if err := read(r, &h.sourcePort, "SourcePort"); err != nil {
 		return h, err
 	}
-	if err := read(r, &h.DestinationPort, "DestinationPort"); err != nil {
+	if err := read(r, &h.destinationPort, "DestinationPort"); err != nil {
 		return h, err
 	}
-	if err := read(r, &h.Sequence, "Sequence"); err != nil {
+	if err := read(r, &h.sequence, "Sequence"); err != nil {
 		return h, err
 	}
-	if err := read(r, &h.AckNumber, "AckNumber"); err != nil {
+	if err := read(r, &h.ackNumber, "AckNumber"); err != nil {
 		return h, err
 	}
 	// Next up: bit reading!
@@ -444,17 +438,17 @@ func decodeTCPHeader(r io.Reader) (h tcpHeader, err error) {
 	if err := read(r, &dataOffsetAndReservedAndFlags, "TCP Header Octet offset 12"); err != nil {
 		return h, err
 	}
-	h.TCPHeaderLength = uint8((dataOffsetAndReservedAndFlags >> 12) * 4)
-	h.Flags = dataOffsetAndReservedAndFlags & 0x1FF
+	h.tcpHeaderLength = uint8((dataOffsetAndReservedAndFlags >> 12) * 4)
+	h.flags = dataOffsetAndReservedAndFlags & 0x1FF
 	// done bit reading
 
-	if err := read(r, &h.TCPWindowSize, "TCPWindowSize"); err != nil {
+	if err := read(r, &h.tcpWindowSize, "TCPWindowSize"); err != nil {
 		return h, err
 	}
-	if err := read(r, &h.Checksum, "Checksum"); err != nil {
+	if err := read(r, &h.checksum, "Checksum"); err != nil {
 		return h, err
 	}
-	if err := read(r, &h.TCPUrgentPointer, "TCPUrgentPointer"); err != nil {
+	if err := read(r, &h.tcpUrgentPointer, "TCPUrgentPointer"); err != nil {
 		return h, err
 	}
@@ -462,16 +456,16 @@ func decodeTCPHeader(r io.Reader) (h tcpHeader, err error) {
 }
 
 func decodeUDPHeader(r io.Reader) (h udpHeader, err error) {
-	if err := read(r, &h.SourcePort, "SourcePort"); err != nil {
+	if err := read(r, &h.sourcePort, "SourcePort"); err != nil {
 		return h, err
 	}
-	if err := read(r, &h.DestinationPort, "DestinationPort"); err != nil {
+	if err := read(r, &h.destinationPort, "DestinationPort"); err != nil {
 		return h, err
 	}
-	if err := read(r, &h.UDPLength, "UDPLength"); err != nil {
+	if err := read(r, &h.udpLength, "UDPLength"); err != nil {
 		return h, err
 	}
-	if err := read(r, &h.Checksum, "Checksum"); err != nil {
+	if err := read(r, &h.checksum, "Checksum"); err != nil {
 		return h, err
 	}
 	return h, err

View File

@@ -19,9 +19,9 @@ func TestUDPHeader(t *testing.T) {
 	require.NoError(t, err)
 
 	expected := udpHeader{
-		SourcePort:      1,
-		DestinationPort: 2,
-		UDPLength:       3,
+		sourcePort:      1,
+		destinationPort: 2,
+		udpLength:       3,
 	}
 	require.Equal(t, expected, actual)
@@ -66,24 +66,24 @@ func TestIPv4Header(t *testing.T) {
 	require.NoError(t, err)
 
 	expected := ipV4Header{
-		Version:              0x40,
-		InternetHeaderLength: 0x05,
-		DSCP:                 0,
-		ECN:                  0,
-		TotalLength:          0,
-		Identification:       0,
-		Flags:                0,
-		FragmentOffset:       0,
-		TTL:                  0,
-		Protocol:             0x11,
-		HeaderChecksum:       0,
-		SourceIP:             [4]byte{127, 0, 0, 1},
-		DestIP:               [4]byte{127, 0, 0, 2},
-		ProtocolHeader: udpHeader{
-			SourcePort:      1,
-			DestinationPort: 2,
-			UDPLength:       3,
-			Checksum:        0,
+		version:              0x40,
+		internetHeaderLength: 0x05,
+		dscp:                 0,
+		ecn:                  0,
+		totalLength:          0,
+		identification:      0,
+		flags:                0,
+		fragmentOffset:       0,
+		ttl:                  0,
+		protocol:             0x11,
+		headerChecksum:       0,
+		sourceIP:             [4]byte{127, 0, 0, 1},
+		destIP:               [4]byte{127, 0, 0, 2},
+		protocolHeader: udpHeader{
+			sourcePort:      1,
+			destinationPort: 2,
+			udpLength:       3,
+			checksum:        0,
 		},
 	}
@@ -142,14 +142,14 @@ func TestIPv4HeaderSwitch(t *testing.T) {
 	require.NoError(t, err)
 
 	expected := ipV4Header{
-		Version:              64,
-		InternetHeaderLength: 5,
-		Protocol:             6,
-		SourceIP:             [4]byte{127, 0, 0, 1},
-		DestIP:               [4]byte{127, 0, 0, 2},
-		ProtocolHeader: tcpHeader{
-			SourcePort:      1,
-			DestinationPort: 2,
+		version:              64,
+		internetHeaderLength: 5,
+		protocol:             6,
+		sourceIP:             [4]byte{127, 0, 0, 1},
+		destIP:               [4]byte{127, 0, 0, 2},
+		protocolHeader: tcpHeader{
+			sourcePort:      1,
+			destinationPort: 2,
 		},
 	}
@@ -194,11 +194,11 @@ func TestUnknownProtocol(t *testing.T) {
 	require.NoError(t, err)
 
 	expected := ipV4Header{
-		Version:              64,
-		InternetHeaderLength: 5,
-		Protocol:             153,
-		SourceIP:             [4]byte{127, 0, 0, 1},
-		DestIP:               [4]byte{127, 0, 0, 2},
+		version:              64,
+		internetHeaderLength: 5,
+		protocol:             153,
+		sourceIP:             [4]byte{127, 0, 0, 1},
+		destIP:               [4]byte{127, 0, 0, 2},
 	}
 	require.Equal(t, expected, actual)

View File

@@ -47,7 +47,7 @@ func (s *SFlow) Init() error {
 
 // Start starts this sFlow listener listening on the configured network for sFlow packets
 func (s *SFlow) Start(acc telegraf.Accumulator) error {
-	s.decoder.OnPacket(func(p *v5Format) {
+	s.decoder.onPacket(func(p *v5Format) {
 		metrics := makeMetrics(p)
 		for _, m := range metrics {
 			acc.AddMetric(m)
@@ -95,7 +95,7 @@ func (s *SFlow) Stop() {
 	s.wg.Wait()
 }
 
-func (s *SFlow) Address() net.Addr {
+func (s *SFlow) address() net.Addr {
 	return s.addr
 }
@@ -114,7 +114,7 @@ func (s *SFlow) read(acc telegraf.Accumulator, conn net.PacketConn) {
 }
 
 func (s *SFlow) process(acc telegraf.Accumulator, buf []byte) {
-	if err := s.decoder.Decode(bytes.NewBuffer(buf)); err != nil {
+	if err := s.decoder.decode(bytes.NewBuffer(buf)); err != nil {
 		acc.AddError(fmt.Errorf("unable to parse incoming packet: %w", err))
 	}
 }
@@ -132,7 +132,6 @@ func listenUDP(network, address string) (*net.UDPConn, error) {
 	}
 }
 
-// init registers this SFlow input plug in with the Telegraf framework
 func init() {
 	inputs.Add("sflow", func() telegraf.Input {
 		return &SFlow{}

View File

@@ -25,7 +25,7 @@ func TestSFlow(t *testing.T) {
 	require.NoError(t, err)
 	defer sflow.Stop()
 
-	client, err := net.Dial(sflow.Address().Network(), sflow.Address().String())
+	client, err := net.Dial(sflow.address().Network(), sflow.address().String())
 	require.NoError(t, err)
 
 	packetBytes, err := hex.DecodeString(
@@ -132,7 +132,7 @@ func BenchmarkSFlow(b *testing.B) {
 	require.NoError(b, err)
 	defer sflow.Stop()
 
-	client, err := net.Dial(sflow.Address().Network(), sflow.Address().String())
+	client, err := net.Dial(sflow.address().Network(), sflow.address().String())
 	require.NoError(b, err)
 
 	packetBytes, err := hex.DecodeString(

View File

@@ -23,12 +23,12 @@ type containsMetricData interface {
 // v5Format answers and decoder.Directive capable of decoding sFlow v5 packets in accordance
 // with SFlow v5 specification at https://sflow.org/sflow_version_5.txt
 type v5Format struct {
-	Version        uint32
-	AgentAddress   net.IPAddr
-	SubAgentID     uint32
-	SequenceNumber uint32
-	Uptime         uint32
-	Samples        []sample
+	version        uint32
+	agentAddress   net.IPAddr
+	subAgentID     uint32
+	sequenceNumber uint32
+	uptime         uint32
+	samples        []sample
 }
 
 type sampleType uint32
@@ -39,23 +39,23 @@ const (
 )
 
 type sample struct {
-	SampleType sampleType
-	SampleData sampleDataFlowSampleExpanded
+	smplType sampleType
+	smplData sampleDataFlowSampleExpanded
 }
 
 type sampleDataFlowSampleExpanded struct {
-	SequenceNumber  uint32
-	SourceIDType    uint32
-	SourceIDIndex   uint32
-	SamplingRate    uint32
-	SamplePool      uint32
-	Drops           uint32
-	SampleDirection string // ingress/egress
-	InputIfFormat   uint32
-	InputIfIndex    uint32
-	OutputIfFormat  uint32
-	OutputIfIndex   uint32
-	FlowRecords     []flowRecord
+	sequenceNumber  uint32
+	sourceIDType    uint32
+	sourceIDIndex   uint32
+	samplingRate    uint32
+	samplePool      uint32
+	drops           uint32
+	sampleDirection string // ingress/egress
+	inputIfFormat   uint32
+	inputIfIndex    uint32
+	outputIfFormat  uint32
+	outputIfIndex   uint32
+	flowRecords     []flowRecord
 }
 
 type flowFormatType uint32
@@ -67,8 +67,8 @@ const (
 type flowData containsMetricData
 
 type flowRecord struct {
-	FlowFormat flowFormatType
-	FlowData   flowData
+	flowFormat flowFormatType
+	flowData   flowData
 }
 
 type headerProtocolType uint32
@@ -97,64 +97,66 @@ var headerProtocolMap = map[headerProtocolType]string{
 type header containsMetricData
 
 type rawPacketHeaderFlowData struct {
-	HeaderProtocol headerProtocolType
-	FrameLength    uint32
-	Bytes          uint32
-	StrippedOctets uint32
-	HeaderLength   uint32
-	Header         header
+	headerProtocol headerProtocolType
+	frameLength    uint32
+	bytes          uint32
+	strippedOctets uint32
+	headerLength   uint32
+	header         header
 }
 
 func (h rawPacketHeaderFlowData) getTags() map[string]string {
 	var t map[string]string
-	if h.Header != nil {
-		t = h.Header.getTags()
+	if h.header != nil {
+		t = h.header.getTags()
 	} else {
 		t = make(map[string]string, 1)
 	}
-	t["header_protocol"] = headerProtocolMap[h.HeaderProtocol]
+	t["header_protocol"] = headerProtocolMap[h.headerProtocol]
 	return t
 }
 
 func (h rawPacketHeaderFlowData) getFields() map[string]interface{} {
 	var f map[string]interface{}
-	if h.Header != nil {
-		f = h.Header.getFields()
+	if h.header != nil {
		f = h.header.getFields()
 	} else {
 		f = make(map[string]interface{}, 3)
 	}
-	f["bytes"] = h.Bytes
-	f["frame_length"] = h.FrameLength
-	f["header_length"] = h.HeaderLength
+	f["bytes"] = h.bytes
+	f["frame_length"] = h.frameLength
+	f["header_length"] = h.headerLength
 	return f
 }
 
 type ipHeader containsMetricData
 
 type ethHeader struct {
-	DestinationMAC        [6]byte
-	SourceMAC             [6]byte
-	TagProtocolIdentifier uint16
-	TagControlInformation uint16
-	EtherTypeCode         uint16
-	EtherType             string
-	IPHeader              ipHeader
+	destinationMAC        [6]byte
+	sourceMAC             [6]byte
+	tagProtocolIdentifier uint16
+	tagControlInformation uint16
+	etherTypeCode         uint16
+	etherType             string
+	ipHeader              ipHeader
 }
 
 func (h ethHeader) getTags() map[string]string {
 	var t map[string]string
-	if h.IPHeader != nil {
-		t = h.IPHeader.getTags()
+	if h.ipHeader != nil {
+		t = h.ipHeader.getTags()
 	} else {
 		t = make(map[string]string, 3)
 	}
-	t["src_mac"] = net.HardwareAddr(h.SourceMAC[:]).String()
-	t["dst_mac"] = net.HardwareAddr(h.DestinationMAC[:]).String()
-	t["ether_type"] = h.EtherType
+	t["src_mac"] = net.HardwareAddr(h.sourceMAC[:]).String()
+	t["dst_mac"] = net.HardwareAddr(h.destinationMAC[:]).String()
+	t["ether_type"] = h.etherType
 	return t
 }
 
 func (h ethHeader) getFields() map[string]interface{} {
-	if h.IPHeader != nil {
-		return h.IPHeader.getFields()
+	if h.ipHeader != nil {
+		return h.ipHeader.getFields()
 	}
 	return make(map[string]interface{})
 }
@@ -163,129 +165,133 @@ type protocolHeader containsMetricData
 
 // https://en.wikipedia.org/wiki/IPv4#Header
 type ipV4Header struct {
-	Version              uint8 // 4 bit
-	InternetHeaderLength uint8 // 4 bit
-	DSCP                 uint8
-	ECN                  uint8
-	TotalLength          uint16
-	Identification       uint16
-	Flags                uint8
-	FragmentOffset       uint16
-	TTL                  uint8
-	Protocol             uint8 // https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers
-	HeaderChecksum       uint16
-	SourceIP             [4]byte
-	DestIP               [4]byte
-	ProtocolHeader       protocolHeader
+	version              uint8 // 4 bit
+	internetHeaderLength uint8 // 4 bit
+	dscp                 uint8
+	ecn                  uint8
+	totalLength          uint16
+	identification       uint16
+	flags                uint8
+	fragmentOffset       uint16
+	ttl                  uint8
+	protocol             uint8 // https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers
+	headerChecksum       uint16
+	sourceIP             [4]byte
+	destIP               [4]byte
+	protocolHeader       protocolHeader
 }
 
 func (h ipV4Header) getTags() map[string]string {
 	var t map[string]string
-	if h.ProtocolHeader != nil {
-		t = h.ProtocolHeader.getTags()
+	if h.protocolHeader != nil {
+		t = h.protocolHeader.getTags()
 	} else {
 		t = make(map[string]string, 2)
 	}
-	t["src_ip"] = net.IP(h.SourceIP[:]).String()
-	t["dst_ip"] = net.IP(h.DestIP[:]).String()
+	t["src_ip"] = net.IP(h.sourceIP[:]).String()
+	t["dst_ip"] = net.IP(h.destIP[:]).String()
 	return t
 }
 
 func (h ipV4Header) getFields() map[string]interface{} {
 	var f map[string]interface{}
-	if h.ProtocolHeader != nil {
-		f = h.ProtocolHeader.getFields()
+	if h.protocolHeader != nil {
+		f = h.protocolHeader.getFields()
 	} else {
 		f = make(map[string]interface{}, 6)
 	}
-	f["ip_dscp"] = strconv.FormatUint(uint64(h.DSCP), 10)
-	f["ip_ecn"] = strconv.FormatUint(uint64(h.ECN), 10)
-	f["ip_flags"] = h.Flags
-	f["ip_fragment_offset"] = h.FragmentOffset
-	f["ip_total_length"] = h.TotalLength
-	f["ip_ttl"] = h.TTL
+	f["ip_dscp"] = strconv.FormatUint(uint64(h.dscp), 10)
+	f["ip_ecn"] = strconv.FormatUint(uint64(h.ecn), 10)
+	f["ip_flags"] = h.flags
+	f["ip_fragment_offset"] = h.fragmentOffset
+	f["ip_total_length"] = h.totalLength
+	f["ip_ttl"] = h.ttl
 	return f
 }
 
 // https://en.wikipedia.org/wiki/IPv6_packet
 type ipV6Header struct {
-	DSCP            uint8
-	ECN             uint8
-	PayloadLength   uint16
-	NextHeaderProto uint8 // tcp/udp?
-	HopLimit        uint8
-	SourceIP        [16]byte
-	DestIP          [16]byte
-	ProtocolHeader  protocolHeader
+	dscp            uint8
+	ecn             uint8
+	payloadLength   uint16
+	nextHeaderProto uint8 // tcp/udp?
+	hopLimit        uint8
+	sourceIP        [16]byte
+	destIP          [16]byte
+	protocolHeader  protocolHeader
 }
 
 func (h ipV6Header) getTags() map[string]string {
 	var t map[string]string
-	if h.ProtocolHeader != nil {
-		t = h.ProtocolHeader.getTags()
+	if h.protocolHeader != nil {
+		t = h.protocolHeader.getTags()
 	} else {
 		t = make(map[string]string, 2)
 	}
-	t["src_ip"] = net.IP(h.SourceIP[:]).String()
-	t["dst_ip"] = net.IP(h.DestIP[:]).String()
+	t["src_ip"] = net.IP(h.sourceIP[:]).String()
+	t["dst_ip"] = net.IP(h.destIP[:]).String()
 	return t
 }
 
 func (h ipV6Header) getFields() map[string]interface{} {
 	var f map[string]interface{}
-	if h.ProtocolHeader != nil {
-		f = h.ProtocolHeader.getFields()
+	if h.protocolHeader != nil {
+		f = h.protocolHeader.getFields()
 	} else {
 		f = make(map[string]interface{}, 3)
 	}
-	f["ip_dscp"] = strconv.FormatUint(uint64(h.DSCP), 10)
-	f["ip_ecn"] = strconv.FormatUint(uint64(h.ECN), 10)
-	f["payload_length"] = h.PayloadLength
+	f["ip_dscp"] = strconv.FormatUint(uint64(h.dscp), 10)
+	f["ip_ecn"] = strconv.FormatUint(uint64(h.ecn), 10)
+	f["payload_length"] = h.payloadLength
 	return f
 }
 
 // https://en.wikipedia.org/wiki/Transmission_Control_Protocol
 type tcpHeader struct {
-	SourcePort       uint16
-	DestinationPort  uint16
-	Sequence         uint32
-	AckNumber        uint32
-	TCPHeaderLength  uint8
-	Flags            uint16
-	TCPWindowSize    uint16
-	Checksum         uint16
-	TCPUrgentPointer uint16
+	sourcePort       uint16
+	destinationPort  uint16
+	sequence         uint32
+	ackNumber        uint32
+	tcpHeaderLength  uint8
+	flags            uint16
+	tcpWindowSize    uint16
+	checksum         uint16
+	tcpUrgentPointer uint16
 }
 
 func (h tcpHeader) getTags() map[string]string {
 	t := map[string]string{
-		"dst_port": strconv.FormatUint(uint64(h.DestinationPort), 10),
-		"src_port": strconv.FormatUint(uint64(h.SourcePort), 10),
+		"dst_port": strconv.FormatUint(uint64(h.destinationPort), 10),
+		"src_port": strconv.FormatUint(uint64(h.sourcePort), 10),
} }
return t return t
} }
func (h tcpHeader) getFields() map[string]interface{} { func (h tcpHeader) getFields() map[string]interface{} {
return map[string]interface{}{ return map[string]interface{}{
"tcp_header_length": h.TCPHeaderLength, "tcp_header_length": h.tcpHeaderLength,
"tcp_urgent_pointer": h.TCPUrgentPointer, "tcp_urgent_pointer": h.tcpUrgentPointer,
"tcp_window_size": h.TCPWindowSize, "tcp_window_size": h.tcpWindowSize,
} }
} }
type udpHeader struct { type udpHeader struct {
SourcePort uint16 sourcePort uint16
DestinationPort uint16 destinationPort uint16
UDPLength uint16 udpLength uint16
Checksum uint16 checksum uint16
} }
func (h udpHeader) getTags() map[string]string { func (h udpHeader) getTags() map[string]string {
t := map[string]string{ t := map[string]string{
"dst_port": strconv.FormatUint(uint64(h.DestinationPort), 10), "dst_port": strconv.FormatUint(uint64(h.destinationPort), 10),
"src_port": strconv.FormatUint(uint64(h.SourcePort), 10), "src_port": strconv.FormatUint(uint64(h.sourcePort), 10),
} }
return t return t
} }
func (h udpHeader) getFields() map[string]interface{} { func (h udpHeader) getFields() map[string]interface{} {
return map[string]interface{}{ return map[string]interface{}{
"udp_length": h.UDPLength, "udp_length": h.udpLength,
} }
} }

@ -8,12 +8,12 @@ import (
func TestRawPacketHeaderFlowData(t *testing.T) { func TestRawPacketHeaderFlowData(t *testing.T) {
h := rawPacketHeaderFlowData{ h := rawPacketHeaderFlowData{
HeaderProtocol: headerProtocolTypeEthernetISO88023, headerProtocol: headerProtocolTypeEthernetISO88023,
FrameLength: 64, frameLength: 64,
Bytes: 64, bytes: 64,
StrippedOctets: 0, strippedOctets: 0,
HeaderLength: 0, headerLength: 0,
Header: nil, header: nil,
} }
tags := h.getTags() tags := h.getTags()
fields := h.getFields() fields := h.getFields()
@ -27,13 +27,13 @@ func TestRawPacketHeaderFlowData(t *testing.T) {
// process a raw ethernet packet without any encapsulated protocol // process a raw ethernet packet without any encapsulated protocol
func TestEthHeader(t *testing.T) { func TestEthHeader(t *testing.T) {
h := ethHeader{ h := ethHeader{
DestinationMAC: [6]byte{0xca, 0xff, 0xee, 0xff, 0xe, 0x0}, destinationMAC: [6]byte{0xca, 0xff, 0xee, 0xff, 0xe, 0x0},
SourceMAC: [6]byte{0xde, 0xad, 0xbe, 0xef, 0x0, 0x0}, sourceMAC: [6]byte{0xde, 0xad, 0xbe, 0xef, 0x0, 0x0},
TagProtocolIdentifier: 0x88B5, // IEEE Std 802 - Local Experimental Ethertype tagProtocolIdentifier: 0x88B5, // IEEE Std 802 - Local Experimental Ethertype
TagControlInformation: 0, tagControlInformation: 0,
EtherTypeCode: 0, etherTypeCode: 0,
EtherType: "", etherType: "",
IPHeader: nil, ipHeader: nil,
} }
tags := h.getTags() tags := h.getTags()
fields := h.getFields() fields := h.getFields()

@ -24,18 +24,18 @@ import (
//go:embed sample.conf //go:embed sample.conf
var sampleConfig string var sampleConfig string
type SlabStats struct { type Slab struct {
Log telegraf.Logger `toml:"-"` Log telegraf.Logger `toml:"-"`
statFile string statFile string
useSudo bool useSudo bool
} }
func (*SlabStats) SampleConfig() string { func (*Slab) SampleConfig() string {
return sampleConfig return sampleConfig
} }
func (ss *SlabStats) Gather(acc telegraf.Accumulator) error { func (ss *Slab) Gather(acc telegraf.Accumulator) error {
fields, err := ss.getSlabStats() fields, err := ss.getSlabStats()
if err != nil { if err != nil {
return err return err
@ -45,7 +45,7 @@ func (ss *SlabStats) Gather(acc telegraf.Accumulator) error {
return nil return nil
} }
func (ss *SlabStats) getSlabStats() (map[string]interface{}, error) { func (ss *Slab) getSlabStats() (map[string]interface{}, error) {
out, err := ss.runCmd("/bin/cat", []string{ss.statFile}) out, err := ss.runCmd("/bin/cat", []string{ss.statFile})
if err != nil { if err != nil {
return nil, err return nil, err
@ -85,7 +85,7 @@ func (ss *SlabStats) getSlabStats() (map[string]interface{}, error) {
return fields, nil return fields, nil
} }
func (ss *SlabStats) runCmd(cmd string, args []string) ([]byte, error) { func (ss *Slab) runCmd(cmd string, args []string) ([]byte, error) {
execCmd := exec.Command(cmd, args...) execCmd := exec.Command(cmd, args...)
if os.Geteuid() != 0 && ss.useSudo { if os.Geteuid() != 0 && ss.useSudo {
execCmd = exec.Command("sudo", append([]string{"-n", cmd}, args...)...) execCmd = exec.Command("sudo", append([]string{"-n", cmd}, args...)...)
@ -105,7 +105,7 @@ func normalizeName(name string) string {
func init() { func init() {
inputs.Add("slab", func() telegraf.Input { inputs.Add("slab", func() telegraf.Input {
return &SlabStats{ return &Slab{
statFile: path.Join(internal.GetProcPath(), "slabinfo"), statFile: path.Join(internal.GetProcPath(), "slabinfo"),
useSudo: true, useSudo: true,
} }
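
The rename only touches the type name; registration still happens through the factory passed to inputs.Add, so defaults such as statFile and useSudo are set on every instance built from config. A rough standalone sketch of that factory-registration pattern, using stand-ins for the telegraf registry rather than the real API:

package main

import "fmt"

// input and the registry map are stand-ins for telegraf.Input and inputs.Add.
type input interface{ SampleConfig() string }

var registry = map[string]func() input{}

func add(name string, factory func() input) { registry[name] = factory }

type slab struct {
	statFile string
	useSudo  bool
}

func (*slab) SampleConfig() string { return "[[inputs.slab]]" }

func init() {
	// Defaults live in the factory, so every configured instance starts with them.
	add("slab", func() input {
		return &slab{statFile: "/proc/slabinfo", useSudo: true}
	})
}

func main() {
	plugin := registry["slab"]()
	fmt.Println(plugin.SampleConfig())
}
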

@ -17,12 +17,14 @@ type Slab struct {
Log telegraf.Logger `toml:"-"` Log telegraf.Logger `toml:"-"`
} }
func (*Slab) SampleConfig() string { return sampleConfig }
func (s *Slab) Init() error { func (s *Slab) Init() error {
s.Log.Warn("current platform is not supported") s.Log.Warn("Current platform is not supported")
return nil return nil
} }
func (*Slab) SampleConfig() string { return sampleConfig }
func (*Slab) Gather(_ telegraf.Accumulator) error { return nil } func (*Slab) Gather(telegraf.Accumulator) error { return nil }
func init() { func init() {
inputs.Add("slab", func() telegraf.Input { inputs.Add("slab", func() telegraf.Input {

@ -12,7 +12,7 @@ import (
) )
func TestSlab(t *testing.T) { func TestSlab(t *testing.T) {
slabStats := SlabStats{ slabStats := Slab{
statFile: path.Join("testdata", "slabinfo"), statFile: path.Join("testdata", "slabinfo"),
useSudo: false, useSudo: false,
} }

@ -103,6 +103,74 @@ func (s *Slurm) Init() error {
return nil return nil
} }
func (s *Slurm) Gather(acc telegraf.Accumulator) (err error) {
auth := context.WithValue(
context.Background(),
goslurm.ContextAPIKeys,
map[string]goslurm.APIKey{
"user": {Key: s.Username},
"token": {Key: s.Token},
},
)
if s.endpointMap["diag"] {
diagResp, respRaw, err := s.client.SlurmAPI.SlurmV0038Diag(auth).Execute()
if err != nil {
return fmt.Errorf("error getting diag: %w", err)
}
if diag, ok := diagResp.GetStatisticsOk(); ok {
s.gatherDiagMetrics(acc, diag)
}
respRaw.Body.Close()
}
if s.endpointMap["jobs"] {
jobsResp, respRaw, err := s.client.SlurmAPI.SlurmV0038GetJobs(auth).Execute()
if err != nil {
return fmt.Errorf("error getting jobs: %w", err)
}
if jobs, ok := jobsResp.GetJobsOk(); ok {
s.gatherJobsMetrics(acc, jobs)
}
respRaw.Body.Close()
}
if s.endpointMap["nodes"] {
nodesResp, respRaw, err := s.client.SlurmAPI.SlurmV0038GetNodes(auth).Execute()
if err != nil {
return fmt.Errorf("error getting nodes: %w", err)
}
if nodes, ok := nodesResp.GetNodesOk(); ok {
s.gatherNodesMetrics(acc, nodes)
}
respRaw.Body.Close()
}
if s.endpointMap["partitions"] {
partitionsResp, respRaw, err := s.client.SlurmAPI.SlurmV0038GetPartitions(auth).Execute()
if err != nil {
return fmt.Errorf("error getting partitions: %w", err)
}
if partitions, ok := partitionsResp.GetPartitionsOk(); ok {
s.gatherPartitionsMetrics(acc, partitions)
}
respRaw.Body.Close()
}
if s.endpointMap["reservations"] {
reservationsResp, respRaw, err := s.client.SlurmAPI.SlurmV0038GetReservations(auth).Execute()
if err != nil {
return fmt.Errorf("error getting reservations: %w", err)
}
if reservations, ok := reservationsResp.GetReservationsOk(); ok {
s.gatherReservationsMetrics(acc, reservations)
}
respRaw.Body.Close()
}
return nil
}
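
Two things carry the Gather logic above: credentials travel in the request context under the generated client's key, and endpointMap gates which collections actually run. A self-contained sketch of both, where apiKey and contextAPIKeys are hypothetical stand-ins for the goslurm types:

package main

import (
	"context"
	"fmt"
)

type apiKey struct{ Key string }

type ctxKey string

const contextAPIKeys ctxKey = "apiKeys"

func main() {
	// Credentials travel in the context, as in Gather.
	auth := context.WithValue(context.Background(), contextAPIKeys,
		map[string]apiKey{
			"user":  {Key: "slurm-user"},
			"token": {Key: "jwt-token"},
		})

	// endpointMap decides which collection branches run.
	endpointMap := map[string]bool{"diag": true, "jobs": true}
	for _, endpoint := range []string{"diag", "jobs", "nodes", "partitions", "reservations"} {
		if !endpointMap[endpoint] {
			continue
		}
		keys := auth.Value(contextAPIKeys).(map[string]apiKey)
		fmt.Printf("collecting %s as %s\n", endpoint, keys["user"].Key)
	}
}
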
func parseTres(tres string) map[string]interface{} { func parseTres(tres string) map[string]interface{} {
tresKVs := strings.Split(tres, ",") tresKVs := strings.Split(tres, ",")
parsedValues := make(map[string]interface{}, len(tresKVs)) parsedValues := make(map[string]interface{}, len(tresKVs))
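
For illustration, a hedged sketch of parsing a TRES string such as "billing=2,cpu=4,mem=16G,node=1". This simplification keeps non-numeric values (like the suffixed memory size) as strings; the plugin's real parseTres may treat such suffixes differently.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Simplified TRES parser: numeric values become float64, everything else
// (e.g. the suffixed "16G") stays a string in this sketch.
func parseTresSketch(tres string) map[string]interface{} {
	kvs := strings.Split(tres, ",")
	parsed := make(map[string]interface{}, len(kvs))
	for _, kv := range kvs {
		k, v, found := strings.Cut(kv, "=")
		if !found {
			continue
		}
		if n, err := strconv.ParseFloat(v, 64); err == nil {
			parsed[k] = n
			continue
		}
		parsed[k] = v
	}
	return parsed
}

func main() {
	fmt.Println(parseTresSketch("billing=2,cpu=4,mem=16G,node=1"))
	// map[billing:2 cpu:4 mem:16G node:1]
}
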
@ -399,74 +467,6 @@ func (s *Slurm) gatherReservationsMetrics(acc telegraf.Accumulator, reservations
} }
} }
func (s *Slurm) Gather(acc telegraf.Accumulator) (err error) {
auth := context.WithValue(
context.Background(),
goslurm.ContextAPIKeys,
map[string]goslurm.APIKey{
"user": {Key: s.Username},
"token": {Key: s.Token},
},
)
if s.endpointMap["diag"] {
diagResp, respRaw, err := s.client.SlurmAPI.SlurmV0038Diag(auth).Execute()
if err != nil {
return fmt.Errorf("error getting diag: %w", err)
}
if diag, ok := diagResp.GetStatisticsOk(); ok {
s.gatherDiagMetrics(acc, diag)
}
respRaw.Body.Close()
}
if s.endpointMap["jobs"] {
jobsResp, respRaw, err := s.client.SlurmAPI.SlurmV0038GetJobs(auth).Execute()
if err != nil {
return fmt.Errorf("error getting jobs: %w", err)
}
if jobs, ok := jobsResp.GetJobsOk(); ok {
s.gatherJobsMetrics(acc, jobs)
}
respRaw.Body.Close()
}
if s.endpointMap["nodes"] {
nodesResp, respRaw, err := s.client.SlurmAPI.SlurmV0038GetNodes(auth).Execute()
if err != nil {
return fmt.Errorf("error getting nodes: %w", err)
}
if nodes, ok := nodesResp.GetNodesOk(); ok {
s.gatherNodesMetrics(acc, nodes)
}
respRaw.Body.Close()
}
if s.endpointMap["partitions"] {
partitionsResp, respRaw, err := s.client.SlurmAPI.SlurmV0038GetPartitions(auth).Execute()
if err != nil {
return fmt.Errorf("error getting partitions: %w", err)
}
if partitions, ok := partitionsResp.GetPartitionsOk(); ok {
s.gatherPartitionsMetrics(acc, partitions)
}
respRaw.Body.Close()
}
if s.endpointMap["reservations"] {
reservationsResp, respRaw, err := s.client.SlurmAPI.SlurmV0038GetReservations(auth).Execute()
if err != nil {
return fmt.Errorf("error getting reservations: %w", err)
}
if reservations, ok := reservationsResp.GetReservationsOk(); ok {
s.gatherReservationsMetrics(acc, reservations)
}
respRaw.Body.Close()
}
return nil
}
func init() { func init() {
inputs.Add("slurm", func() telegraf.Input { inputs.Add("slurm", func() telegraf.Input {
return &Slurm{ return &Slurm{

@ -25,8 +25,6 @@ import (
//go:embed sample.conf //go:embed sample.conf
var sampleConfig string var sampleConfig string
const intelVID = "0x8086"
var ( var (
// Device Model: APPLE SSD SM256E // Device Model: APPLE SSD SM256E
// Product: HUH721212AL5204 // Product: HUH721212AL5204
@ -356,8 +354,19 @@ var (
} }
knownReadMethods = []string{"concurrent", "sequential"} knownReadMethods = []string{"concurrent", "sequential"}
// Wrap with sudo
runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) {
cmd := exec.Command(command, args...)
if sudo {
cmd = exec.Command("sudo", append([]string{"-n", command}, args...)...)
}
return internal.CombinedOutputTimeout(cmd, time.Duration(timeout))
}
) )
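
Moving runCmd into the var block does not change its behavior: the invocation is re-wrapped as sudo -n <command> so a missing sudoers entry fails fast instead of prompting. A self-contained sketch of the same wrapper, substituting exec.CommandContext for telegraf's internal.CombinedOutputTimeout:

package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"
)

func runCmd(timeout time.Duration, sudo bool, command string, args ...string) ([]byte, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	cmd := exec.CommandContext(ctx, command, args...)
	if sudo {
		// -n makes sudo fail instead of prompting for a password.
		cmd = exec.CommandContext(ctx, "sudo", append([]string{"-n", command}, args...)...)
	}
	return cmd.CombinedOutput()
}

func main() {
	out, err := runCmd(5*time.Second, false, "echo", "hello")
	fmt.Printf("%s (err=%v)\n", out, err)
}
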
const intelVID = "0x8086"
// Smart plugin reads metrics from storage devices supporting S.M.A.R.T. // Smart plugin reads metrics from storage devices supporting S.M.A.R.T.
type Smart struct { type Smart struct {
Path string `toml:"path" deprecated:"1.16.0;1.35.0;use 'path_smartctl' instead"` Path string `toml:"path" deprecated:"1.16.0;1.35.0;use 'path_smartctl' instead"`
@ -382,18 +391,10 @@ type nvmeDevice struct {
serialNumber string serialNumber string
} }
func newSmart() *Smart {
return &Smart{
Timeout: config.Duration(time.Second * 30),
ReadMethod: "concurrent",
}
}
func (*Smart) SampleConfig() string { func (*Smart) SampleConfig() string {
return sampleConfig return sampleConfig
} }
// Init performs one time setup of the plugin and returns an error if the configuration is invalid.
func (m *Smart) Init() error { func (m *Smart) Init() error {
// if deprecated `path` (to smartctl binary) is provided in config and `path_smartctl` override does not exist // if deprecated `path` (to smartctl binary) is provided in config and `path_smartctl` override does not exist
if len(m.Path) > 0 && len(m.PathSmartctl) == 0 { if len(m.Path) > 0 && len(m.PathSmartctl) == 0 {
@ -436,7 +437,6 @@ func (m *Smart) Init() error {
return nil return nil
} }
// Gather takes in an accumulator and adds the metrics that the SMART tools gather.
func (m *Smart) Gather(acc telegraf.Accumulator) error { func (m *Smart) Gather(acc telegraf.Accumulator) error {
var err error var err error
var scannedNVMeDevices []string var scannedNVMeDevices []string
@ -532,15 +532,6 @@ func (m *Smart) scanDevices(ignoreExcludes bool, scanArgs ...string) ([]string,
return devices, nil return devices, nil
} }
// Wrap with sudo
var runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) {
cmd := exec.Command(command, args...)
if sudo {
cmd = exec.Command("sudo", append([]string{"-n", command}, args...)...)
}
return internal.CombinedOutputTimeout(cmd, time.Duration(timeout))
}
func excludedDev(excludes []string, deviceLine string) bool { func excludedDev(excludes []string, deviceLine string) bool {
device := strings.Split(deviceLine, " ") device := strings.Split(deviceLine, " ")
if len(device) != 0 { if len(device) != 0 {
@ -1109,6 +1100,13 @@ func validatePath(filePath string) error {
return nil return nil
} }
func newSmart() *Smart {
return &Smart{
Timeout: config.Duration(time.Second * 30),
ReadMethod: "concurrent",
}
}
func init() { func init() {
// Set LC_NUMERIC to uniform numeric output from cli tools // Set LC_NUMERIC to uniform numeric output from cli tools
_ = os.Setenv("LC_NUMERIC", "en_US.UTF-8") _ = os.Setenv("LC_NUMERIC", "en_US.UTF-8")

@ -78,7 +78,7 @@ func fakeScanExecCommand(command string, args ...string) *exec.Cmd {
return cmd return cmd
} }
func TestScanHelperProcess(_ *testing.T) { func TestScanHelperProcess(*testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return return
} }

@ -17,7 +17,6 @@ import (
//go:embed sample.conf //go:embed sample.conf
var sampleConfig string var sampleConfig string
// Snmp holds the configuration for the plugin.
type Snmp struct { type Snmp struct {
// The SNMP agent to query. Format is [SCHEME://]ADDR[:PORT] (e.g. // The SNMP agent to query. Format is [SCHEME://]ADDR[:PORT] (e.g.
// udp://1.2.3.4:161). If the scheme is not specified then "udp" is used. // udp://1.2.3.4:161). If the scheme is not specified then "udp" is used.
@ -36,21 +35,21 @@ type Snmp struct {
Name string `toml:"name"` Name string `toml:"name"`
Fields []snmp.Field `toml:"field"` Fields []snmp.Field `toml:"field"`
connectionCache []snmp.Connection
Log telegraf.Logger `toml:"-"` Log telegraf.Logger `toml:"-"`
connectionCache []snmp.Connection
translator snmp.Translator translator snmp.Translator
} }
func (s *Snmp) SetTranslator(name string) {
s.Translator = name
}
func (*Snmp) SampleConfig() string { func (*Snmp) SampleConfig() string {
return sampleConfig return sampleConfig
} }
func (s *Snmp) SetTranslator(name string) {
s.Translator = name
}
func (s *Snmp) Init() error { func (s *Snmp) Init() error {
var err error var err error
switch s.Translator { switch s.Translator {
@ -92,9 +91,6 @@ func (s *Snmp) Init() error {
return nil return nil
} }
// Gather retrieves all the configured fields and tables.
// Any error encountered does not halt the process. The errors are accumulated
// and returned at the end.
func (s *Snmp) Gather(acc telegraf.Accumulator) error { func (s *Snmp) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup var wg sync.WaitGroup
for i, agent := range s.Agents { for i, agent := range s.Agents {

@ -43,6 +43,7 @@ func (tsc *testSNMPConnection) Get(oids []string) (*gosnmp.SnmpPacket, error) {
} }
return sp, nil return sp, nil
} }
func (tsc *testSNMPConnection) Walk(oid string, wf gosnmp.WalkFunc) error { func (tsc *testSNMPConnection) Walk(oid string, wf gosnmp.WalkFunc) error {
for void, v := range tsc.values { for void, v := range tsc.values {
if void == oid || (len(void) > len(oid) && void[:len(oid)+1] == oid+".") { if void == oid || (len(void) > len(oid) && void[:len(oid)+1] == oid+".") {
@ -56,6 +57,7 @@ func (tsc *testSNMPConnection) Walk(oid string, wf gosnmp.WalkFunc) error {
} }
return nil return nil
} }
func (*testSNMPConnection) Reconnect() error { func (*testSNMPConnection) Reconnect() error {
return nil return nil
} }
@ -466,7 +468,7 @@ func TestGosnmpWrapper_walk_retry(t *testing.T) {
gsw := snmp.GosnmpWrapper{ gsw := snmp.GosnmpWrapper{
GoSNMP: gs, GoSNMP: gs,
} }
err = gsw.Walk(".1.0.0", func(_ gosnmp.SnmpPDU) error { return nil }) err = gsw.Walk(".1.0.0", func(gosnmp.SnmpPDU) error { return nil })
require.NoError(t, srvr.Close()) require.NoError(t, srvr.Close())
wg.Wait() wg.Wait()
require.Error(t, err) require.Error(t, err)

@ -39,7 +39,7 @@ type netsnmpTranslator struct {
cacheLock sync.Mutex cacheLock sync.Mutex
cache map[string]snmp.MibEntry cache map[string]snmp.MibEntry
execCmd execer execCmd execer
Timeout config.Duration timeout config.Duration
} }
func (s *netsnmpTranslator) lookup(oid string) (e snmp.MibEntry, err error) { func (s *netsnmpTranslator) lookup(oid string) (e snmp.MibEntry, err error) {
@ -59,7 +59,7 @@ func (s *netsnmpTranslator) lookup(oid string) (e snmp.MibEntry, err error) {
func (s *netsnmpTranslator) snmptranslate(oid string) (e snmp.MibEntry, err error) { func (s *netsnmpTranslator) snmptranslate(oid string) (e snmp.MibEntry, err error) {
var out []byte var out []byte
out, err = s.execCmd(s.Timeout, "snmptranslate", "-Td", "-Ob", "-m", "all", oid) out, err = s.execCmd(s.timeout, "snmptranslate", "-Td", "-Ob", "-m", "all", oid)
if err != nil { if err != nil {
return e, err return e, err
@ -86,6 +86,6 @@ func newNetsnmpTranslator(timeout config.Duration) *netsnmpTranslator {
return &netsnmpTranslator{ return &netsnmpTranslator{
execCmd: realExecCmd, execCmd: realExecCmd,
cache: make(map[string]snmp.MibEntry), cache: make(map[string]snmp.MibEntry),
Timeout: timeout, timeout: timeout,
} }
} }

@ -25,33 +25,16 @@ var defaultTimeout = config.Duration(time.Second * 5)
//go:embed sample.conf //go:embed sample.conf
var sampleConfig string var sampleConfig string
type translator interface {
lookup(oid string) (snmp.MibEntry, error)
}
type wrapLog struct {
telegraf.Logger
}
func (l wrapLog) Printf(format string, args ...interface{}) {
l.Debugf(format, args...)
}
func (l wrapLog) Print(args ...interface{}) {
l.Debug(args...)
}
type SnmpTrap struct { type SnmpTrap struct {
ServiceAddress string `toml:"service_address"` ServiceAddress string `toml:"service_address"`
Timeout config.Duration `toml:"timeout"` Timeout config.Duration `toml:"timeout"`
Version string `toml:"version"` Version string `toml:"version"`
Translator string `toml:"-"`
Path []string `toml:"path"` Path []string `toml:"path"`
// Settings for version 3 // Settings for version 3
// Values: "noAuthNoPriv", "authNoPriv", "authPriv" // Values: "noAuthNoPriv", "authNoPriv", "authPriv"
SecLevel string `toml:"sec_level"` SecLevel string `toml:"sec_level"`
SecName config.Secret `toml:"sec_name"`
SecName config.Secret `toml:"sec_name"`
// Values: "MD5", "SHA", "". Default: "" // Values: "MD5", "SHA", "". Default: ""
AuthProtocol string `toml:"auth_protocol"` AuthProtocol string `toml:"auth_protocol"`
AuthPassword config.Secret `toml:"auth_password"` AuthPassword config.Secret `toml:"auth_password"`
@ -59,38 +42,30 @@ type SnmpTrap struct {
PrivProtocol string `toml:"priv_protocol"` PrivProtocol string `toml:"priv_protocol"`
PrivPassword config.Secret `toml:"priv_password"` PrivPassword config.Secret `toml:"priv_password"`
Translator string `toml:"-"`
Log telegraf.Logger `toml:"-"`
acc telegraf.Accumulator acc telegraf.Accumulator
listener *gosnmp.TrapListener listener *gosnmp.TrapListener
timeFunc func() time.Time timeFunc func() time.Time
errCh chan error errCh chan error
makeHandlerWrapper func(gosnmp.TrapHandlerFunc) gosnmp.TrapHandlerFunc makeHandlerWrapper func(gosnmp.TrapHandlerFunc) gosnmp.TrapHandlerFunc
transl translator
}
Log telegraf.Logger `toml:"-"` type wrapLog struct {
telegraf.Logger
}
transl translator type translator interface {
lookup(oid string) (snmp.MibEntry, error)
} }
func (*SnmpTrap) SampleConfig() string { func (*SnmpTrap) SampleConfig() string {
return sampleConfig return sampleConfig
} }
func (*SnmpTrap) Gather(telegraf.Accumulator) error {
return nil
}
func init() {
inputs.Add("snmp_trap", func() telegraf.Input {
return &SnmpTrap{
timeFunc: time.Now,
ServiceAddress: "udp://:162",
Timeout: defaultTimeout,
Path: []string{"/usr/share/snmp/mibs"},
Version: "2c",
}
})
}
func (s *SnmpTrap) SetTranslator(name string) { func (s *SnmpTrap) SetTranslator(name string) {
s.Translator = name s.Translator = name
} }
@ -259,6 +234,10 @@ func (s *SnmpTrap) Start(acc telegraf.Accumulator) error {
return nil return nil
} }
func (*SnmpTrap) Gather(telegraf.Accumulator) error {
return nil
}
func (s *SnmpTrap) Stop() { func (s *SnmpTrap) Stop() {
s.listener.Close() s.listener.Close()
err := <-s.errCh err := <-s.errCh
@ -385,3 +364,23 @@ func makeTrapHandler(s *SnmpTrap) gosnmp.TrapHandlerFunc {
s.acc.AddFields("snmp_trap", fields, tags, tm) s.acc.AddFields("snmp_trap", fields, tags, tm)
} }
} }
func (l wrapLog) Printf(format string, args ...interface{}) {
l.Debugf(format, args...)
}
func (l wrapLog) Print(args ...interface{}) {
l.Debug(args...)
}
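
wrapLog exists because gosnmp's logger wants a Printf/Print pair while telegraf.Logger offers Debugf/Debug; the methods above simply forward. A standalone sketch of the adapter, with a stand-in logger interface in place of telegraf.Logger:

package main

import "fmt"

// debugLogger stands in for the two telegraf.Logger methods wrapLog uses.
type debugLogger interface {
	Debugf(format string, args ...interface{})
	Debug(args ...interface{})
}

type stdoutLogger struct{}

func (stdoutLogger) Debugf(format string, args ...interface{}) {
	fmt.Printf("D! "+format+"\n", args...)
}

func (stdoutLogger) Debug(args ...interface{}) {
	fmt.Println(append([]interface{}{"D!"}, args...)...)
}

// wrapLog adapts Debugf/Debug to the Printf/Print pair gosnmp expects.
type wrapLog struct{ debugLogger }

func (l wrapLog) Printf(format string, args ...interface{}) { l.Debugf(format, args...) }
func (l wrapLog) Print(args ...interface{})                 { l.Debug(args...) }

func main() {
	l := wrapLog{stdoutLogger{}}
	l.Printf("listening on %s", "udp://:162")
	l.Print("trap received")
}
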
func init() {
inputs.Add("snmp_trap", func() telegraf.Input {
return &SnmpTrap{
timeFunc: time.Now,
ServiceAddress: "udp://:162",
Timeout: defaultTimeout,
Path: []string{"/usr/share/snmp/mibs"},
Version: "2c",
}
})
}

@ -34,6 +34,10 @@ func (*SocketListener) SampleConfig() string {
return sampleConfig return sampleConfig
} }
func (sl *SocketListener) SetParser(parser telegraf.Parser) {
sl.parser = parser
}
func (sl *SocketListener) Init() error { func (sl *SocketListener) Init() error {
sock, err := sl.Config.NewSocket(sl.ServiceAddress, &sl.SplitConfig, sl.Log) sock, err := sl.Config.NewSocket(sl.ServiceAddress, &sl.SplitConfig, sl.Log)
if err != nil { if err != nil {
@ -44,14 +48,6 @@ func (sl *SocketListener) Init() error {
return nil return nil
} }
func (*SocketListener) Gather(telegraf.Accumulator) error {
return nil
}
func (sl *SocketListener) SetParser(parser telegraf.Parser) {
sl.parser = parser
}
func (sl *SocketListener) Start(acc telegraf.Accumulator) error { func (sl *SocketListener) Start(acc telegraf.Accumulator) error {
// Create the callbacks for parsing the data and recording issues // Create the callbacks for parsing the data and recording issues
onData := func(_ net.Addr, data []byte, receiveTime time.Time) { onData := func(_ net.Addr, data []byte, receiveTime time.Time) {
@ -93,6 +89,10 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error {
return nil return nil
} }
func (*SocketListener) Gather(telegraf.Accumulator) error {
return nil
}
func (sl *SocketListener) Stop() { func (sl *SocketListener) Stop() {
if sl.socket != nil { if sl.socket != nil {
sl.socket.Close() sl.socket.Close()

@ -27,7 +27,6 @@ var sampleConfig string
const measurement = "socketstat" const measurement = "socketstat"
// Socketstat is a telegraf plugin to gather indicators from established connections, using iproute2's `ss` command.
type Socketstat struct { type Socketstat struct {
SocketProto []string `toml:"protocols"` SocketProto []string `toml:"protocols"`
Timeout config.Duration `toml:"timeout"` Timeout config.Duration `toml:"timeout"`
@ -45,7 +44,30 @@ func (*Socketstat) SampleConfig() string {
return sampleConfig return sampleConfig
} }
// Gather gathers indicators from established connections func (ss *Socketstat) Init() error {
if len(ss.SocketProto) == 0 {
ss.SocketProto = []string{"tcp", "udp"}
}
// Initialize regexps to validate input data
validFields := "(bytes_acked|bytes_received|segs_out|segs_in|data_segs_in|data_segs_out)"
ss.validValues = regexp.MustCompile("^" + validFields + ":[0-9]+$")
ss.isNewConnection = regexp.MustCompile(`^\s+.*$`)
ss.lister = socketList
// Check that ss is installed, get its path.
// Do it last, because in test environments where `ss` might not be available,
// we still want the other Init() actions to be performed.
ssPath, err := exec.LookPath("ss")
if err != nil {
return err
}
ss.cmdName = ssPath
return nil
}
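
A quick sketch of what the validValues expression built in Init accepts: only the whitelisted counter names followed by a colon and digits match.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same construction as Init above.
	validFields := "(bytes_acked|bytes_received|segs_out|segs_in|data_segs_in|data_segs_out)"
	validValues := regexp.MustCompile("^" + validFields + ":[0-9]+$")

	for _, token := range []string{"bytes_acked:184", "cwnd:10", "segs_out:42x"} {
		fmt.Println(token, validValues.MatchString(token))
	}
	// bytes_acked:184 true, cwnd:10 false, segs_out:42x false
}
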
func (ss *Socketstat) Gather(acc telegraf.Accumulator) error { func (ss *Socketstat) Gather(acc telegraf.Accumulator) error {
// Best effort: we continue through the protocols even if an error is encountered, // Best effort: we continue through the protocols even if an error is encountered,
// but we keep track of the last error. // but we keep track of the last error.
@ -183,30 +205,6 @@ func getTagsAndState(proto string, words []string, log telegraf.Logger) (map[str
return tags, fields return tags, fields
} }
func (ss *Socketstat) Init() error {
if len(ss.SocketProto) == 0 {
ss.SocketProto = []string{"tcp", "udp"}
}
// Initialize regexps to validate input data
validFields := "(bytes_acked|bytes_received|segs_out|segs_in|data_segs_in|data_segs_out)"
ss.validValues = regexp.MustCompile("^" + validFields + ":[0-9]+$")
ss.isNewConnection = regexp.MustCompile(`^\s+.*$`)
ss.lister = socketList
// Check that ss is installed, get its path.
// Do it last, because in test environments where `ss` might not be available,
// we still want the other Init() actions to be performed.
ssPath, err := exec.LookPath("ss")
if err != nil {
return err
}
ss.cmdName = ssPath
return nil
}
func init() { func init() {
inputs.Add("socketstat", func() telegraf.Input { inputs.Add("socketstat", func() telegraf.Input {
return &Socketstat{Timeout: config.Duration(time.Second)} return &Socketstat{Timeout: config.Duration(time.Second)}

@ -16,12 +16,14 @@ type Socketstat struct {
Log telegraf.Logger `toml:"-"` Log telegraf.Logger `toml:"-"`
} }
func (*Socketstat) SampleConfig() string { return sampleConfig }
func (s *Socketstat) Init() error { func (s *Socketstat) Init() error {
s.Log.Warn("current platform is not supported") s.Log.Warn("Current platform is not supported")
return nil return nil
} }
func (*Socketstat) SampleConfig() string { return sampleConfig }
func (*Socketstat) Gather(_ telegraf.Accumulator) error { return nil } func (*Socketstat) Gather(telegraf.Accumulator) error { return nil }
func init() { func init() {
inputs.Add("socketstat", func() telegraf.Input { inputs.Add("socketstat", func() telegraf.Input {

@ -20,7 +20,6 @@ import (
//go:embed sample.conf //go:embed sample.conf
var sampleConfig string var sampleConfig string
// Solr is a plugin to read stats from one or many Solr servers
type Solr struct { type Solr struct {
Servers []string `toml:"servers"` Servers []string `toml:"servers"`
Username string `toml:"username"` Username string `toml:"username"`
@ -60,7 +59,7 @@ func (s *Solr) Init() error {
return nil return nil
} }
func (s *Solr) Start(_ telegraf.Accumulator) error { func (s *Solr) Start(telegraf.Accumulator) error {
for _, server := range s.Servers { for _, server := range s.Servers {
// Simply fill the cache for all available servers // Simply fill the cache for all available servers
_ = s.getAPIConfig(server) _ = s.getAPIConfig(server)
@ -68,8 +67,6 @@ func (s *Solr) Start(_ telegraf.Accumulator) error {
return nil return nil
} }
func (*Solr) Stop() {}
func (s *Solr) Gather(acc telegraf.Accumulator) error { func (s *Solr) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup var wg sync.WaitGroup
for _, srv := range s.Servers { for _, srv := range s.Servers {
@ -87,6 +84,8 @@ func (s *Solr) Gather(acc telegraf.Accumulator) error {
return nil return nil
} }
func (*Solr) Stop() {}
func (s *Solr) getAPIConfig(server string) *apiConfig { func (s *Solr) getAPIConfig(server string) *apiConfig {
if cfg, found := s.configs[server]; found { if cfg, found := s.configs[server]; found {
return cfg return cfg

@ -24,11 +24,28 @@ import (
//go:embed sample.conf //go:embed sample.conf
var sampleConfig string var sampleConfig string
const magicIdleCount = -int(^uint(0) >> 1)
var disconnectedServersBehavior = []string{"error", "ignore"} var disconnectedServersBehavior = []string{"error", "ignore"}
type Query struct { const magicIdleCount = -int(^uint(0) >> 1)
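
The relocated magicIdleCount sentinel is -int(^uint(0) >> 1): all bits set, shifted right once, is MaxInt, so the negation is MinInt+1, a value no user would plausibly configure for connection_max_idle. A two-line check:

package main

import (
	"fmt"
	"math"
)

func main() {
	magicIdleCount := -int(^uint(0) >> 1)
	fmt.Println(magicIdleCount == math.MinInt+1) // true
	fmt.Println(magicIdleCount == -math.MaxInt)  // true
}
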
type SQL struct {
Driver string `toml:"driver"`
Dsn config.Secret `toml:"dsn"`
Timeout config.Duration `toml:"timeout"`
MaxIdleTime config.Duration `toml:"connection_max_idle_time"`
MaxLifetime config.Duration `toml:"connection_max_life_time"`
MaxOpenConnections int `toml:"connection_max_open"`
MaxIdleConnections int `toml:"connection_max_idle"`
Queries []query `toml:"query"`
Log telegraf.Logger `toml:"-"`
DisconnectedServersBehavior string `toml:"disconnected_servers_behavior"`
driverName string
db *dbsql.DB
serverConnected bool
}
type query struct {
Query string `toml:"query"` Query string `toml:"query"`
Script string `toml:"query_script"` Script string `toml:"query_script"`
Measurement string `toml:"measurement"` Measurement string `toml:"measurement"`
@ -55,7 +72,312 @@ type Query struct {
fieldFilterString filter.Filter fieldFilterString filter.Filter
} }
func (q *Query) parse(acc telegraf.Accumulator, rows *dbsql.Rows, t time.Time, logger telegraf.Logger) (int, error) { func (*SQL) SampleConfig() string {
return sampleConfig
}
func (s *SQL) Init() error {
// Option handling
if s.Driver == "" {
return errors.New("missing SQL driver option")
}
if err := s.checkDSN(); err != nil {
return err
}
if s.Timeout <= 0 {
s.Timeout = config.Duration(5 * time.Second)
}
if s.MaxIdleConnections == magicIdleCount {
// Derive the limit from the number of queries plus the Go default value
s.MaxIdleConnections = len(s.Queries) + 2
}
for i, q := range s.Queries {
if q.Query == "" && q.Script == "" {
return errors.New("neither 'query' nor 'query_script' specified")
}
if q.Query != "" && q.Script != "" {
return errors.New("only one of 'query' and 'query_script' can be specified")
}
// In case we got a script, we should read the query now.
if q.Script != "" {
query, err := os.ReadFile(q.Script)
if err != nil {
return fmt.Errorf("reading script %q failed: %w", q.Script, err)
}
s.Queries[i].Query = string(query)
}
// Time format
if q.TimeFormat == "" {
s.Queries[i].TimeFormat = "unix"
}
// Compile the tag-filter
tagfilter, err := filter.NewIncludeExcludeFilterDefaults(q.TagColumnsInclude, q.TagColumnsExclude, false, false)
if err != nil {
return fmt.Errorf("creating tag filter failed: %w", err)
}
s.Queries[i].tagFilter = tagfilter
// Compile the explicit type field-filter
fieldfilterFloat, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsFloat, nil, false, false)
if err != nil {
return fmt.Errorf("creating field filter for float failed: %w", err)
}
s.Queries[i].fieldFilterFloat = fieldfilterFloat
fieldfilterInt, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsInt, nil, false, false)
if err != nil {
return fmt.Errorf("creating field filter for int failed: %w", err)
}
s.Queries[i].fieldFilterInt = fieldfilterInt
fieldfilterUint, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsUint, nil, false, false)
if err != nil {
return fmt.Errorf("creating field filter for uint failed: %w", err)
}
s.Queries[i].fieldFilterUint = fieldfilterUint
fieldfilterBool, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsBool, nil, false, false)
if err != nil {
return fmt.Errorf("creating field filter for bool failed: %w", err)
}
s.Queries[i].fieldFilterBool = fieldfilterBool
fieldfilterString, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsString, nil, false, false)
if err != nil {
return fmt.Errorf("creating field filter for string failed: %w", err)
}
s.Queries[i].fieldFilterString = fieldfilterString
// Compile the field-filter
fieldfilter, err := filter.NewIncludeExcludeFilter(q.FieldColumnsInclude, q.FieldColumnsExclude)
if err != nil {
return fmt.Errorf("creating field filter failed: %w", err)
}
s.Queries[i].fieldFilter = fieldfilter
if q.Measurement == "" {
s.Queries[i].Measurement = "sql"
}
}
// Derive the sql-framework driver name from our config name. This abstracts the actual driver
// from the database-type the user wants.
aliases := map[string]string{
"cockroach": "pgx",
"tidb": "mysql",
"mssql": "sqlserver",
"maria": "mysql",
"postgres": "pgx",
"oracle": "oracle",
}
s.driverName = s.Driver
if driver, ok := aliases[s.Driver]; ok {
s.driverName = driver
}
availDrivers := dbsql.Drivers()
if !choice.Contains(s.driverName, availDrivers) {
for d, r := range aliases {
if choice.Contains(r, availDrivers) {
availDrivers = append(availDrivers, d)
}
}
// Sort the list of drivers and make them unique
sort.Strings(availDrivers)
last := 0
for _, d := range availDrivers {
if d != availDrivers[last] {
last++
availDrivers[last] = d
}
}
availDrivers = availDrivers[:last+1]
return fmt.Errorf("driver %q not supported use one of %v", s.Driver, availDrivers)
}
if s.DisconnectedServersBehavior == "" {
s.DisconnectedServersBehavior = "error"
}
if !choice.Contains(s.DisconnectedServersBehavior, disconnectedServersBehavior) {
return fmt.Errorf("%q is not a valid value for disconnected_servers_behavior", s.DisconnectedServersBehavior)
}
return nil
}
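
The alias handling in Init does two jobs: map user-facing names (maria, cockroach, ...) to registered drivers, and, on failure, report a sorted, de-duplicated list that also covers aliases of whatever is registered. A standalone sketch of that logic, with a fixed slice standing in for dbsql.Drivers():

package main

import (
	"fmt"
	"sort"
)

func resolveDriver(configured string, registered []string) (string, []string) {
	aliases := map[string]string{
		"cockroach": "pgx",
		"maria":     "mysql",
		"mssql":     "sqlserver",
		"postgres":  "pgx",
		"tidb":      "mysql",
	}
	name := configured
	if d, ok := aliases[configured]; ok {
		name = d
	}
	// Extend the candidate list with aliases whose real driver is registered.
	avail := append([]string{}, registered...)
	for alias, driver := range aliases {
		for _, r := range registered {
			if r == driver {
				avail = append(avail, alias)
			}
		}
	}
	// Sort, then de-duplicate in place, mirroring the loop in Init.
	sort.Strings(avail)
	last := 0
	for _, d := range avail {
		if d != avail[last] {
			last++
			avail[last] = d
		}
	}
	return name, avail[:last+1]
}

func main() {
	name, avail := resolveDriver("maria", []string{"mysql", "pgx"})
	fmt.Println(name)  // mysql
	fmt.Println(avail) // [cockroach maria mysql pgx postgres tidb]
}
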
func (s *SQL) Start(telegraf.Accumulator) error {
if err := s.setupConnection(); err != nil {
return err
}
if err := s.ping(); err != nil {
if s.DisconnectedServersBehavior == "error" {
return err
}
s.Log.Errorf("unable to connect to database: %s", err)
}
if s.serverConnected {
s.prepareStatements()
}
return nil
}
func (s *SQL) Gather(acc telegraf.Accumulator) error {
// During plugin startup it is possible that the server was not reachable,
// so we retry pinging the server in this collection cycle.
// We only need prepareStatements to complete successfully once.
if !s.serverConnected {
if err := s.ping(); err != nil {
return err
}
s.prepareStatements()
}
var wg sync.WaitGroup
tstart := time.Now()
for _, q := range s.Queries {
wg.Add(1)
go func(q query) {
defer wg.Done()
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.Timeout))
defer cancel()
if err := s.executeQuery(ctx, acc, q, tstart); err != nil {
acc.AddError(err)
}
}(q)
}
wg.Wait()
s.Log.Debugf("Executed %d queries in %s", len(s.Queries), time.Since(tstart).String())
return nil
}
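
Gather fans out one goroutine per configured query, each with its own timeout context, and waits for all of them. A self-contained sketch of that pattern; execute is a stand-in for executeQuery and the println replaces acc.AddError:

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

func main() {
	queries := []string{"SELECT 1", "SELECT 2", "SELECT 3"}
	execute := func(ctx context.Context, q string) error {
		select {
		case <-time.After(10 * time.Millisecond): // pretend the query ran
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	}

	var wg sync.WaitGroup
	tstart := time.Now()
	for _, q := range queries {
		wg.Add(1)
		go func(q string) {
			defer wg.Done()
			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
			defer cancel()
			if err := execute(ctx, q); err != nil {
				fmt.Println("error:", err) // the plugin calls acc.AddError(err)
			}
		}(q)
	}
	wg.Wait()
	fmt.Printf("executed %d queries in %s\n", len(queries), time.Since(tstart))
}
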
func (s *SQL) Stop() {
// Free the statements
for _, q := range s.Queries {
if q.statement != nil {
if err := q.statement.Close(); err != nil {
s.Log.Errorf("closing statement for query %q failed: %v", q.Query, err)
}
}
}
// Close the connection to the server
if s.db != nil {
if err := s.db.Close(); err != nil {
s.Log.Errorf("closing database connection failed: %v", err)
}
}
}
func (s *SQL) setupConnection() error {
// Connect to the database server
dsnSecret, err := s.Dsn.Get()
if err != nil {
return fmt.Errorf("getting DSN failed: %w", err)
}
dsn := dsnSecret.String()
dsnSecret.Destroy()
s.Log.Debug("Connecting...")
s.db, err = dbsql.Open(s.driverName, dsn)
if err != nil {
// Return early since the error most likely indicates an invalid DSN string format
return err
}
// Set the connection limits
// s.db.SetConnMaxIdleTime(time.Duration(s.MaxIdleTime)) // Requires go >= 1.15
s.db.SetConnMaxLifetime(time.Duration(s.MaxLifetime))
s.db.SetMaxOpenConns(s.MaxOpenConnections)
s.db.SetMaxIdleConns(s.MaxIdleConnections)
return nil
}
func (s *SQL) ping() error {
// Test if the connection can be established
s.Log.Debug("Testing connectivity...")
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.Timeout))
err := s.db.PingContext(ctx)
cancel()
if err != nil {
return fmt.Errorf("unable to connect to database: %w", err)
}
s.serverConnected = true
return nil
}
func (s *SQL) prepareStatements() {
// Prepare the statements
for i, q := range s.Queries {
s.Log.Debugf("Preparing statement %q...", q.Query)
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.Timeout))
stmt, err := s.db.PrepareContext(ctx, q.Query)
cancel()
if err != nil {
// Some database drivers or databases do not support prepared
// statements and report an error here. However, we can still
// execute unprepared queries for those setups, so do not bail out
// here but simply leave the `statement` with a `nil` value
// indicating no prepared statement.
s.Log.Warnf("preparing query %q failed: %s; falling back to unprepared query", q.Query, err)
continue
}
s.Queries[i].statement = stmt
}
}
func (s *SQL) executeQuery(ctx context.Context, acc telegraf.Accumulator, q query, tquery time.Time) error {
// Execute the query either prepared or unprepared
var rows *dbsql.Rows
if q.statement != nil {
// Use the previously prepared query
var err error
rows, err = q.statement.QueryContext(ctx)
if err != nil {
return err
}
} else {
// Fallback to unprepared query
var err error
rows, err = s.db.Query(q.Query)
if err != nil {
return err
}
}
defer rows.Close()
// Handle the rows
columnNames, err := rows.Columns()
if err != nil {
return err
}
rowCount, err := q.parse(acc, rows, tquery, s.Log)
s.Log.Debugf("Received %d rows and %d columns for query %q", rowCount, len(columnNames), q.Query)
return err
}
func (s *SQL) checkDSN() error {
if s.Dsn.Empty() {
return errors.New("missing data source name (DSN) option")
}
return nil
}
func (q *query) parse(acc telegraf.Accumulator, rows *dbsql.Rows, t time.Time, logger telegraf.Logger) (int, error) {
columnNames, err := rows.Columns() columnNames, err := rows.Columns()
if err != nil { if err != nil {
return 0, err return 0, err
@ -214,290 +536,6 @@ func (q *Query) parse(acc telegraf.Accumulator, rows *dbsql.Rows, t time.Time, l
return rowCount, nil return rowCount, nil
} }
type SQL struct {
Driver string `toml:"driver"`
Dsn config.Secret `toml:"dsn"`
Timeout config.Duration `toml:"timeout"`
MaxIdleTime config.Duration `toml:"connection_max_idle_time"`
MaxLifetime config.Duration `toml:"connection_max_life_time"`
MaxOpenConnections int `toml:"connection_max_open"`
MaxIdleConnections int `toml:"connection_max_idle"`
Queries []Query `toml:"query"`
Log telegraf.Logger `toml:"-"`
DisconnectedServersBehavior string `toml:"disconnected_servers_behavior"`
driverName string
db *dbsql.DB
serverConnected bool
}
func (*SQL) SampleConfig() string {
return sampleConfig
}
func (s *SQL) Init() error {
// Option handling
if s.Driver == "" {
return errors.New("missing SQL driver option")
}
if err := s.checkDSN(); err != nil {
return err
}
if s.Timeout <= 0 {
s.Timeout = config.Duration(5 * time.Second)
}
if s.MaxIdleConnections == magicIdleCount {
// Derive the limit from the number of queries plus the Go default value
s.MaxIdleConnections = len(s.Queries) + 2
}
for i, q := range s.Queries {
if q.Query == "" && q.Script == "" {
return errors.New("neither 'query' nor 'query_script' specified")
}
if q.Query != "" && q.Script != "" {
return errors.New("only one of 'query' and 'query_script' can be specified")
}
// In case we got a script, we should read the query now.
if q.Script != "" {
query, err := os.ReadFile(q.Script)
if err != nil {
return fmt.Errorf("reading script %q failed: %w", q.Script, err)
}
s.Queries[i].Query = string(query)
}
// Time format
if q.TimeFormat == "" {
s.Queries[i].TimeFormat = "unix"
}
// Compile the tag-filter
tagfilter, err := filter.NewIncludeExcludeFilterDefaults(q.TagColumnsInclude, q.TagColumnsExclude, false, false)
if err != nil {
return fmt.Errorf("creating tag filter failed: %w", err)
}
s.Queries[i].tagFilter = tagfilter
// Compile the explicit type field-filter
fieldfilterFloat, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsFloat, nil, false, false)
if err != nil {
return fmt.Errorf("creating field filter for float failed: %w", err)
}
s.Queries[i].fieldFilterFloat = fieldfilterFloat
fieldfilterInt, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsInt, nil, false, false)
if err != nil {
return fmt.Errorf("creating field filter for int failed: %w", err)
}
s.Queries[i].fieldFilterInt = fieldfilterInt
fieldfilterUint, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsUint, nil, false, false)
if err != nil {
return fmt.Errorf("creating field filter for uint failed: %w", err)
}
s.Queries[i].fieldFilterUint = fieldfilterUint
fieldfilterBool, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsBool, nil, false, false)
if err != nil {
return fmt.Errorf("creating field filter for bool failed: %w", err)
}
s.Queries[i].fieldFilterBool = fieldfilterBool
fieldfilterString, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsString, nil, false, false)
if err != nil {
return fmt.Errorf("creating field filter for string failed: %w", err)
}
s.Queries[i].fieldFilterString = fieldfilterString
// Compile the field-filter
fieldfilter, err := filter.NewIncludeExcludeFilter(q.FieldColumnsInclude, q.FieldColumnsExclude)
if err != nil {
return fmt.Errorf("creating field filter failed: %w", err)
}
s.Queries[i].fieldFilter = fieldfilter
if q.Measurement == "" {
s.Queries[i].Measurement = "sql"
}
}
// Derive the sql-framework driver name from our config name. This abstracts the actual driver
// from the database-type the user wants.
aliases := map[string]string{
"cockroach": "pgx",
"tidb": "mysql",
"mssql": "sqlserver",
"maria": "mysql",
"postgres": "pgx",
"oracle": "oracle",
}
s.driverName = s.Driver
if driver, ok := aliases[s.Driver]; ok {
s.driverName = driver
}
availDrivers := dbsql.Drivers()
if !choice.Contains(s.driverName, availDrivers) {
for d, r := range aliases {
if choice.Contains(r, availDrivers) {
availDrivers = append(availDrivers, d)
}
}
// Sort the list of drivers and make them unique
sort.Strings(availDrivers)
last := 0
for _, d := range availDrivers {
if d != availDrivers[last] {
last++
availDrivers[last] = d
}
}
availDrivers = availDrivers[:last+1]
return fmt.Errorf("driver %q not supported use one of %v", s.Driver, availDrivers)
}
if s.DisconnectedServersBehavior == "" {
s.DisconnectedServersBehavior = "error"
}
if !choice.Contains(s.DisconnectedServersBehavior, disconnectedServersBehavior) {
return fmt.Errorf("%q is not a valid value for disconnected_servers_behavior", s.DisconnectedServersBehavior)
}
return nil
}
func (s *SQL) setupConnection() error {
// Connect to the database server
dsnSecret, err := s.Dsn.Get()
if err != nil {
return fmt.Errorf("getting DSN failed: %w", err)
}
dsn := dsnSecret.String()
dsnSecret.Destroy()
s.Log.Debug("Connecting...")
s.db, err = dbsql.Open(s.driverName, dsn)
if err != nil {
// Return early since the error most likely indicates an invalid DSN string format
return err
}
// Set the connection limits
// s.db.SetConnMaxIdleTime(time.Duration(s.MaxIdleTime)) // Requires go >= 1.15
s.db.SetConnMaxLifetime(time.Duration(s.MaxLifetime))
s.db.SetMaxOpenConns(s.MaxOpenConnections)
s.db.SetMaxIdleConns(s.MaxIdleConnections)
return nil
}
func (s *SQL) ping() error {
// Test if the connection can be established
s.Log.Debug("Testing connectivity...")
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.Timeout))
err := s.db.PingContext(ctx)
cancel()
if err != nil {
return fmt.Errorf("unable to connect to database: %w", err)
}
s.serverConnected = true
return nil
}
func (s *SQL) prepareStatements() {
// Prepare the statements
for i, q := range s.Queries {
s.Log.Debugf("Preparing statement %q...", q.Query)
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.Timeout))
stmt, err := s.db.PrepareContext(ctx, q.Query)
cancel()
if err != nil {
// Some database drivers or databases do not support prepared
// statements and report an error here. However, we can still
// execute unprepared queries for those setups, so do not bail out
// here but simply leave the `statement` with a `nil` value
// indicating no prepared statement.
s.Log.Warnf("preparing query %q failed: %s; falling back to unprepared query", q.Query, err)
continue
}
s.Queries[i].statement = stmt
}
}
func (s *SQL) Start(_ telegraf.Accumulator) error {
if err := s.setupConnection(); err != nil {
return err
}
if err := s.ping(); err != nil {
if s.DisconnectedServersBehavior == "error" {
return err
}
s.Log.Errorf("unable to connect to database: %s", err)
}
if s.serverConnected {
s.prepareStatements()
}
return nil
}
func (s *SQL) Stop() {
// Free the statements
for _, q := range s.Queries {
if q.statement != nil {
if err := q.statement.Close(); err != nil {
s.Log.Errorf("closing statement for query %q failed: %v", q.Query, err)
}
}
}
// Close the connection to the server
if s.db != nil {
if err := s.db.Close(); err != nil {
s.Log.Errorf("closing database connection failed: %v", err)
}
}
}
func (s *SQL) Gather(acc telegraf.Accumulator) error {
// During plugin startup it is possible that the server was not reachable,
// so we retry pinging the server in this collection cycle.
// We only need prepareStatements to complete successfully once.
if !s.serverConnected {
if err := s.ping(); err != nil {
return err
}
s.prepareStatements()
}
var wg sync.WaitGroup
tstart := time.Now()
for _, query := range s.Queries {
wg.Add(1)
go func(q Query) {
defer wg.Done()
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.Timeout))
defer cancel()
if err := s.executeQuery(ctx, acc, q, tstart); err != nil {
acc.AddError(err)
}
}(query)
}
wg.Wait()
s.Log.Debugf("Executed %d queries in %s", len(s.Queries), time.Since(tstart).String())
return nil
}
func init() { func init() {
inputs.Add("sql", func() telegraf.Input { inputs.Add("sql", func() telegraf.Input {
return &SQL{ return &SQL{
@ -508,41 +546,3 @@ func init() {
} }
}) })
} }
func (s *SQL) executeQuery(ctx context.Context, acc telegraf.Accumulator, q Query, tquery time.Time) error {
// Execute the query either prepared or unprepared
var rows *dbsql.Rows
if q.statement != nil {
// Use the previously prepared query
var err error
rows, err = q.statement.QueryContext(ctx)
if err != nil {
return err
}
} else {
// Fallback to unprepared query
var err error
rows, err = s.db.Query(q.Query)
if err != nil {
return err
}
}
defer rows.Close()
// Handle the rows
columnNames, err := rows.Columns()
if err != nil {
return err
}
rowCount, err := q.parse(acc, rows, tquery, s.Log)
s.Log.Debugf("Received %d rows and %d columns for query %q", rowCount, len(columnNames), q.Query)
return err
}
func (s *SQL) checkDSN() error {
if s.Dsn.Empty() {
return errors.New("missing data source name (DSN) option")
}
return nil
}

@ -65,12 +65,12 @@ func TestMariaDBIntegration(t *testing.T) {
// Define the testset // Define the testset
var testset = []struct { var testset = []struct {
name string name string
queries []Query queries []query
expected []telegraf.Metric expected []telegraf.Metric
}{ }{
{ {
name: "metric_one", name: "metric_one",
queries: []Query{ queries: []query{
{ {
Query: "SELECT * FROM metric_one", Query: "SELECT * FROM metric_one",
TagColumnsInclude: []string{"tag_*"}, TagColumnsInclude: []string{"tag_*"},
@ -164,12 +164,12 @@ func TestPostgreSQLIntegration(t *testing.T) {
// Define the testset // Define the testset
var testset = []struct { var testset = []struct {
name string name string
queries []Query queries []query
expected []telegraf.Metric expected []telegraf.Metric
}{ }{
{ {
name: "metric_one", name: "metric_one",
queries: []Query{ queries: []query{
{ {
Query: "SELECT * FROM metric_one", Query: "SELECT * FROM metric_one",
TagColumnsInclude: []string{"tag_*"}, TagColumnsInclude: []string{"tag_*"},
@ -259,12 +259,12 @@ func TestClickHouseIntegration(t *testing.T) {
// Define the testset // Define the testset
var testset = []struct { var testset = []struct {
name string name string
queries []Query queries []query
expected []telegraf.Metric expected []telegraf.Metric
}{ }{
{ {
name: "metric_one", name: "metric_one",
queries: []Query{ queries: []query{
{ {
Query: "SELECT * FROM default.metric_one", Query: "SELECT * FROM default.metric_one",
TagColumnsInclude: []string{"tag_*"}, TagColumnsInclude: []string{"tag_*"},

@ -23,7 +23,25 @@ import (
//go:embed sample.conf //go:embed sample.conf
var sampleConfig string var sampleConfig string
// SQLServer struct const (
defaultServer = "Server=.;app name=telegraf;log=1;"
typeAzureSQLDB = "AzureSQLDB"
typeAzureSQLManagedInstance = "AzureSQLManagedInstance"
typeAzureSQLPool = "AzureSQLPool"
typeSQLServer = "SQLServer"
typeAzureArcSQLManagedInstance = "AzureArcSQLManagedInstance"
healthMetricName = "sqlserver_telegraf_health"
healthMetricInstanceTag = "sql_instance"
healthMetricDatabaseTag = "database_name"
healthMetricAttemptedQueries = "attempted_queries"
healthMetricSuccessfulQueries = "successful_queries"
healthMetricDatabaseType = "database_type"
sqlAzureResourceID = "https://database.windows.net/"
)
type SQLServer struct { type SQLServer struct {
Servers []*config.Secret `toml:"servers"` Servers []*config.Secret `toml:"servers"`
QueryTimeout config.Duration `toml:"query_timeout"` QueryTimeout config.Duration `toml:"query_timeout"`
@ -38,213 +56,38 @@ type SQLServer struct {
Log telegraf.Logger `toml:"-"` Log telegraf.Logger `toml:"-"`
pools []*sql.DB pools []*sql.DB
queries MapQuery queries mapQuery
adalToken *adal.Token adalToken *adal.Token
muCacheLock sync.RWMutex muCacheLock sync.RWMutex
} }
// Query struct type query struct {
type Query struct {
ScriptName string ScriptName string
Script string Script string
ResultByRow bool ResultByRow bool
OrderedColumns []string OrderedColumns []string
} }
// MapQuery type type mapQuery map[string]query
type MapQuery map[string]Query
// HealthMetric struct tracking the number of attempted vs successful connections for each connection string // healthMetric struct tracking the number of attempted vs successful connections for each connection string
type HealthMetric struct { type healthMetric struct {
AttemptedQueries int attemptedQueries int
SuccessfulQueries int successfulQueries int
} }
const defaultServer = "Server=.;app name=telegraf;log=1;"
const (
typeAzureSQLDB = "AzureSQLDB"
typeAzureSQLManagedInstance = "AzureSQLManagedInstance"
typeAzureSQLPool = "AzureSQLPool"
typeSQLServer = "SQLServer"
typeAzureArcSQLManagedInstance = "AzureArcSQLManagedInstance"
)
const (
healthMetricName = "sqlserver_telegraf_health"
healthMetricInstanceTag = "sql_instance"
healthMetricDatabaseTag = "database_name"
healthMetricAttemptedQueries = "attempted_queries"
healthMetricSuccessfulQueries = "successful_queries"
healthMetricDatabaseType = "database_type"
)
// resource id for Azure SQL Database
const sqlAzureResourceID = "https://database.windows.net/"
type scanner interface { type scanner interface {
Scan(dest ...interface{}) error Scan(dest ...interface{}) error
} }
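
initQueries (below) fills the renamed mapQuery with one entry per script, selected by DatabaseType. A trimmed sketch of the shape of that selection, with placeholder scripts instead of the embedded SQL:

package main

import "fmt"

type query struct {
	ScriptName  string
	Script      string
	ResultByRow bool
}

type mapQuery map[string]query

// Placeholder selection keyed by database type; the real initQueries uses
// the embedded SQL scripts instead of these stub strings.
func initQueries(databaseType string) mapQuery {
	queries := make(mapQuery)
	switch databaseType {
	case "AzureSQLDB":
		queries["AzureSQLDBResourceStats"] = query{ScriptName: "AzureSQLDBResourceStats", Script: "SELECT /* stub */ 1"}
	case "SQLServer":
		queries["SQLServerProperties"] = query{ScriptName: "SQLServerProperties", Script: "SELECT /* stub */ 1"}
		queries["SQLServerDatabaseIO"] = query{ScriptName: "SQLServerDatabaseIO", Script: "SELECT /* stub */ 1"}
	}
	return queries
}

func main() {
	for name := range initQueries("SQLServer") {
		fmt.Println(name)
	}
}
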
func (s *SQLServer) initQueries() error {
s.queries = make(MapQuery)
queries := s.queries
s.Log.Infof("Config: database_type: %s , query_version:%d , azuredb: %t", s.DatabaseType, s.QueryVersion, s.AzureDB)
// To prevent query definition conflicts
// Constant definitions for type "AzureSQLDB" start with sqlAzureDB
// Constant definitions for type "AzureSQLManagedInstance" start with sqlAzureMI
// Constant definitions for type "AzureSQLPool" start with sqlAzurePool
// Constant definitions for type "AzureArcSQLManagedInstance" start with sqlAzureArcMI
// Constant definitions for type "SQLServer" start with sqlServer
if s.DatabaseType == typeAzureSQLDB {
queries["AzureSQLDBResourceStats"] = Query{ScriptName: "AzureSQLDBResourceStats", Script: sqlAzureDBResourceStats, ResultByRow: false}
queries["AzureSQLDBResourceGovernance"] = Query{ScriptName: "AzureSQLDBResourceGovernance", Script: sqlAzureDBResourceGovernance, ResultByRow: false}
queries["AzureSQLDBWaitStats"] = Query{ScriptName: "AzureSQLDBWaitStats", Script: sqlAzureDBWaitStats, ResultByRow: false}
queries["AzureSQLDBDatabaseIO"] = Query{ScriptName: "AzureSQLDBDatabaseIO", Script: sqlAzureDBDatabaseIO, ResultByRow: false}
queries["AzureSQLDBServerProperties"] = Query{ScriptName: "AzureSQLDBServerProperties", Script: sqlAzureDBProperties, ResultByRow: false}
queries["AzureSQLDBOsWaitstats"] = Query{ScriptName: "AzureSQLOsWaitstats", Script: sqlAzureDBOsWaitStats, ResultByRow: false}
queries["AzureSQLDBMemoryClerks"] = Query{ScriptName: "AzureSQLDBMemoryClerks", Script: sqlAzureDBMemoryClerks, ResultByRow: false}
queries["AzureSQLDBPerformanceCounters"] = Query{ScriptName: "AzureSQLDBPerformanceCounters", Script: sqlAzureDBPerformanceCounters, ResultByRow: false}
queries["AzureSQLDBRequests"] = Query{ScriptName: "AzureSQLDBRequests", Script: sqlAzureDBRequests, ResultByRow: false}
queries["AzureSQLDBSchedulers"] = Query{ScriptName: "AzureSQLDBSchedulers", Script: sqlAzureDBSchedulers, ResultByRow: false}
} else if s.DatabaseType == typeAzureSQLManagedInstance {
queries["AzureSQLMIResourceStats"] = Query{ScriptName: "AzureSQLMIResourceStats", Script: sqlAzureMIResourceStats, ResultByRow: false}
queries["AzureSQLMIResourceGovernance"] = Query{ScriptName: "AzureSQLMIResourceGovernance", Script: sqlAzureMIResourceGovernance, ResultByRow: false}
queries["AzureSQLMIDatabaseIO"] = Query{ScriptName: "AzureSQLMIDatabaseIO", Script: sqlAzureMIDatabaseIO, ResultByRow: false}
queries["AzureSQLMIServerProperties"] = Query{ScriptName: "AzureSQLMIServerProperties", Script: sqlAzureMIProperties, ResultByRow: false}
queries["AzureSQLMIOsWaitstats"] = Query{ScriptName: "AzureSQLMIOsWaitstats", Script: sqlAzureMIOsWaitStats, ResultByRow: false}
queries["AzureSQLMIMemoryClerks"] = Query{ScriptName: "AzureSQLMIMemoryClerks", Script: sqlAzureMIMemoryClerks, ResultByRow: false}
queries["AzureSQLMIPerformanceCounters"] = Query{ScriptName: "AzureSQLMIPerformanceCounters", Script: sqlAzureMIPerformanceCounters, ResultByRow: false}
queries["AzureSQLMIRequests"] = Query{ScriptName: "AzureSQLMIRequests", Script: sqlAzureMIRequests, ResultByRow: false}
queries["AzureSQLMISchedulers"] = Query{ScriptName: "AzureSQLMISchedulers", Script: sqlAzureMISchedulers, ResultByRow: false}
} else if s.DatabaseType == typeAzureSQLPool {
queries["AzureSQLPoolResourceStats"] = Query{ScriptName: "AzureSQLPoolResourceStats", Script: sqlAzurePoolResourceStats, ResultByRow: false}
queries["AzureSQLPoolResourceGovernance"] =
Query{ScriptName: "AzureSQLPoolResourceGovernance", Script: sqlAzurePoolResourceGovernance, ResultByRow: false}
queries["AzureSQLPoolDatabaseIO"] = Query{ScriptName: "AzureSQLPoolDatabaseIO", Script: sqlAzurePoolDatabaseIO, ResultByRow: false}
queries["AzureSQLPoolOsWaitStats"] = Query{ScriptName: "AzureSQLPoolOsWaitStats", Script: sqlAzurePoolOsWaitStats, ResultByRow: false}
queries["AzureSQLPoolMemoryClerks"] = Query{ScriptName: "AzureSQLPoolMemoryClerks", Script: sqlAzurePoolMemoryClerks, ResultByRow: false}
queries["AzureSQLPoolPerformanceCounters"] =
Query{ScriptName: "AzureSQLPoolPerformanceCounters", Script: sqlAzurePoolPerformanceCounters, ResultByRow: false}
queries["AzureSQLPoolSchedulers"] = Query{ScriptName: "AzureSQLPoolSchedulers", Script: sqlAzurePoolSchedulers, ResultByRow: false}
} else if s.DatabaseType == typeAzureArcSQLManagedInstance {
queries["AzureArcSQLMIDatabaseIO"] = Query{ScriptName: "AzureArcSQLMIDatabaseIO", Script: sqlAzureArcMIDatabaseIO, ResultByRow: false}
queries["AzureArcSQLMIServerProperties"] = Query{ScriptName: "AzureArcSQLMIServerProperties", Script: sqlAzureArcMIProperties, ResultByRow: false}
queries["AzureArcSQLMIOsWaitstats"] = Query{ScriptName: "AzureArcSQLMIOsWaitstats", Script: sqlAzureArcMIOsWaitStats, ResultByRow: false}
queries["AzureArcSQLMIMemoryClerks"] = Query{ScriptName: "AzureArcSQLMIMemoryClerks", Script: sqlAzureArcMIMemoryClerks, ResultByRow: false}
queries["AzureArcSQLMIPerformanceCounters"] =
Query{ScriptName: "AzureArcSQLMIPerformanceCounters", Script: sqlAzureArcMIPerformanceCounters, ResultByRow: false}
queries["AzureArcSQLMIRequests"] = Query{ScriptName: "AzureArcSQLMIRequests", Script: sqlAzureArcMIRequests, ResultByRow: false}
queries["AzureArcSQLMISchedulers"] = Query{ScriptName: "AzureArcSQLMISchedulers", Script: sqlAzureArcMISchedulers, ResultByRow: false}
} else if s.DatabaseType == typeSQLServer { // These are still V2 queries and have not been refactored yet.
queries["SQLServerPerformanceCounters"] = Query{ScriptName: "SQLServerPerformanceCounters", Script: sqlServerPerformanceCounters, ResultByRow: false}
queries["SQLServerWaitStatsCategorized"] = Query{ScriptName: "SQLServerWaitStatsCategorized", Script: sqlServerWaitStatsCategorized, ResultByRow: false}
queries["SQLServerDatabaseIO"] = Query{ScriptName: "SQLServerDatabaseIO", Script: sqlServerDatabaseIO, ResultByRow: false}
queries["SQLServerProperties"] = Query{ScriptName: "SQLServerProperties", Script: sqlServerProperties, ResultByRow: false}
queries["SQLServerMemoryClerks"] = Query{ScriptName: "SQLServerMemoryClerks", Script: sqlServerMemoryClerks, ResultByRow: false}
queries["SQLServerSchedulers"] = Query{ScriptName: "SQLServerSchedulers", Script: sqlServerSchedulers, ResultByRow: false}
queries["SQLServerRequests"] = Query{ScriptName: "SQLServerRequests", Script: sqlServerRequests, ResultByRow: false}
queries["SQLServerVolumeSpace"] = Query{ScriptName: "SQLServerVolumeSpace", Script: sqlServerVolumeSpace, ResultByRow: false}
queries["SQLServerCpu"] = Query{ScriptName: "SQLServerCpu", Script: sqlServerRingBufferCPU, ResultByRow: false}
queries["SQLServerAvailabilityReplicaStates"] =
Query{ScriptName: "SQLServerAvailabilityReplicaStates", Script: sqlServerAvailabilityReplicaStates, ResultByRow: false}
queries["SQLServerDatabaseReplicaStates"] =
Query{ScriptName: "SQLServerDatabaseReplicaStates", Script: sqlServerDatabaseReplicaStates, ResultByRow: false}
queries["SQLServerRecentBackups"] = Query{ScriptName: "SQLServerRecentBackups", Script: sqlServerRecentBackups, ResultByRow: false}
queries["SQLServerPersistentVersionStore"] =
Query{ScriptName: "SQLServerPersistentVersionStore", Script: sqlServerPersistentVersionStore, ResultByRow: false}
} else {
// If this is an AzureDB instance, grab some extra metrics
if s.AzureDB {
queries["AzureDBResourceStats"] = Query{ScriptName: "AzureDBPerformanceCounters", Script: sqlAzureDBResourceStats, ResultByRow: false}
queries["AzureDBResourceGovernance"] = Query{ScriptName: "AzureDBPerformanceCounters", Script: sqlAzureDBResourceGovernance, ResultByRow: false}
}
// Decide if we want to run version 1 or version 2 queries
if s.QueryVersion == 2 {
queries["PerformanceCounters"] = Query{ScriptName: "PerformanceCounters", Script: sqlPerformanceCountersV2, ResultByRow: true}
queries["WaitStatsCategorized"] = Query{ScriptName: "WaitStatsCategorized", Script: sqlWaitStatsCategorizedV2, ResultByRow: false}
queries["DatabaseIO"] = Query{ScriptName: "DatabaseIO", Script: sqlDatabaseIOV2, ResultByRow: false}
queries["ServerProperties"] = Query{ScriptName: "ServerProperties", Script: sqlServerPropertiesV2, ResultByRow: false}
queries["MemoryClerk"] = Query{ScriptName: "MemoryClerk", Script: sqlMemoryClerkV2, ResultByRow: false}
queries["Schedulers"] = Query{ScriptName: "Schedulers", Script: sqlServerSchedulersV2, ResultByRow: false}
queries["SqlRequests"] = Query{ScriptName: "SqlRequests", Script: sqlServerRequestsV2, ResultByRow: false}
queries["VolumeSpace"] = Query{ScriptName: "VolumeSpace", Script: sqlServerVolumeSpaceV2, ResultByRow: false}
queries["Cpu"] = Query{ScriptName: "Cpu", Script: sqlServerCPUV2, ResultByRow: false}
} else {
queries["PerformanceCounters"] = Query{ScriptName: "PerformanceCounters", Script: sqlPerformanceCounters, ResultByRow: true}
queries["WaitStatsCategorized"] = Query{ScriptName: "WaitStatsCategorized", Script: sqlWaitStatsCategorized, ResultByRow: false}
queries["CPUHistory"] = Query{ScriptName: "CPUHistory", Script: sqlCPUHistory, ResultByRow: false}
queries["DatabaseIO"] = Query{ScriptName: "DatabaseIO", Script: sqlDatabaseIO, ResultByRow: false}
queries["DatabaseSize"] = Query{ScriptName: "DatabaseSize", Script: sqlDatabaseSize, ResultByRow: false}
queries["DatabaseStats"] = Query{ScriptName: "DatabaseStats", Script: sqlDatabaseStats, ResultByRow: false}
queries["DatabaseProperties"] = Query{ScriptName: "DatabaseProperties", Script: sqlDatabaseProperties, ResultByRow: false}
queries["MemoryClerk"] = Query{ScriptName: "MemoryClerk", Script: sqlMemoryClerk, ResultByRow: false}
queries["VolumeSpace"] = Query{ScriptName: "VolumeSpace", Script: sqlVolumeSpace, ResultByRow: false}
queries["PerformanceMetrics"] = Query{ScriptName: "PerformanceMetrics", Script: sqlPerformanceMetrics, ResultByRow: false}
}
}
filterQueries, err := filter.NewIncludeExcludeFilter(s.IncludeQuery, s.ExcludeQuery)
if err != nil {
return err
}
for query := range queries {
if !filterQueries.Match(query) {
delete(queries, query)
}
}
queryList := make([]string, 0, len(queries))
for query := range queries {
queryList = append(queryList, query)
}
s.Log.Infof("Config: Effective Queries: %#v\n", queryList)
return nil
}
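For context, the trimming step above relies on Telegraf's glob filter package. A minimal, runnable sketch of the same pruning logic, assuming the `NewIncludeExcludeFilter` semantics of `github.com/influxdata/telegraf/filter` (the package imported above); the query names are illustrative:

package main

import (
	"fmt"

	"github.com/influxdata/telegraf/filter"
)

func main() {
	// Keep only query names matched by include_query and not by exclude_query.
	f, err := filter.NewIncludeExcludeFilter([]string{"*"}, []string{"DatabaseIO"})
	if err != nil {
		panic(err)
	}
	queries := map[string]bool{"DatabaseIO": true, "MemoryClerk": true}
	for name := range queries {
		if !f.Match(name) {
			delete(queries, name) // deleting while ranging over a map is safe in Go
		}
	}
	fmt.Println(queries) // map[MemoryClerk:true]
}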
func (*SQLServer) SampleConfig() string {
return sampleConfig
}
// Gather collect data from SQL Server
func (s *SQLServer) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup
var mutex sync.Mutex
var healthMetrics = make(map[string]*HealthMetric)
for i, pool := range s.pools {
dnsSecret, err := s.Servers[i].Get()
if err != nil {
acc.AddError(err)
continue
}
dsn := dnsSecret.String()
dnsSecret.Destroy()
for _, query := range s.queries {
wg.Add(1)
go func(pool *sql.DB, query Query, dsn string) {
defer wg.Done()
queryError := s.gatherServer(pool, query, acc, dsn)
if s.HealthMetric {
mutex.Lock()
gatherHealth(healthMetrics, dsn, queryError)
mutex.Unlock()
}
acc.AddError(queryError)
}(pool, query, dsn)
}
}
wg.Wait()
if s.HealthMetric {
s.accHealth(healthMetrics, acc)
}
return nil
func (s *SQLServer) Init() error {
if len(s.Servers) == 0 {
srv := config.NewSecret([]byte(defaultServer))
s.Servers = append(s.Servers, &srv)
}
return nil
@ -321,6 +164,46 @@ func (s *SQLServer) Start(acc telegraf.Accumulator) error {
return nil
}
func (s *SQLServer) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup
var mutex sync.Mutex
var healthMetrics = make(map[string]*healthMetric)
for i, pool := range s.pools {
dnsSecret, err := s.Servers[i].Get()
if err != nil {
acc.AddError(err)
continue
}
dsn := dnsSecret.String()
dnsSecret.Destroy()
for _, q := range s.queries {
wg.Add(1)
go func(pool *sql.DB, q query, dsn string) {
defer wg.Done()
queryError := s.gatherServer(pool, q, acc, dsn)
if s.HealthMetric {
mutex.Lock()
gatherHealth(healthMetrics, dsn, queryError)
mutex.Unlock()
}
acc.AddError(queryError)
}(pool, q, dsn)
}
}
wg.Wait()
if s.HealthMetric {
s.accHealth(healthMetrics, acc)
}
return nil
}
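The Gather path above fans out one goroutine per configured query and joins them with a WaitGroup; the only shared state is the health map, guarded by a mutex. A self-contained sketch of that fan-out pattern (names are illustrative, not from the plugin):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	var mu sync.Mutex
	results := make(map[string]error)
	for _, name := range []string{"DatabaseIO", "MemoryClerk"} {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			var err error // stand-in for s.gatherServer(pool, q, acc, dsn)
			mu.Lock()
			results[name] = err // one writer at a time touches the shared map
			mu.Unlock()
		}(name)
	}
	wg.Wait()
	fmt.Println(len(results)) // 2
}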
// Stop cleanup server connection pools
func (s *SQLServer) Stop() {
for _, pool := range s.pools {
@ -328,7 +211,126 @@ func (s *SQLServer) Stop() {
}
}
func (s *SQLServer) gatherServer(pool *sql.DB, query Query, acc telegraf.Accumulator, connectionString string) error {
func (s *SQLServer) initQueries() error {
s.queries = make(mapQuery)
queries := s.queries
s.Log.Infof("Config: database_type: %s , query_version:%d , azuredb: %t", s.DatabaseType, s.QueryVersion, s.AzureDB)
// To prevent query definition conflicts
// Constant definitions for type "AzureSQLDB" start with sqlAzureDB
// Constant definitions for type "AzureSQLManagedInstance" start with sqlAzureMI
// Constant definitions for type "AzureSQLPool" start with sqlAzurePool
// Constant definitions for type "AzureArcSQLManagedInstance" start with sqlAzureArcMI
// Constant definitions for type "SQLServer" start with sqlServer
if s.DatabaseType == typeAzureSQLDB {
queries["AzureSQLDBResourceStats"] = query{ScriptName: "AzureSQLDBResourceStats", Script: sqlAzureDBResourceStats, ResultByRow: false}
queries["AzureSQLDBResourceGovernance"] = query{ScriptName: "AzureSQLDBResourceGovernance", Script: sqlAzureDBResourceGovernance, ResultByRow: false}
queries["AzureSQLDBWaitStats"] = query{ScriptName: "AzureSQLDBWaitStats", Script: sqlAzureDBWaitStats, ResultByRow: false}
queries["AzureSQLDBDatabaseIO"] = query{ScriptName: "AzureSQLDBDatabaseIO", Script: sqlAzureDBDatabaseIO, ResultByRow: false}
queries["AzureSQLDBServerProperties"] = query{ScriptName: "AzureSQLDBServerProperties", Script: sqlAzureDBProperties, ResultByRow: false}
queries["AzureSQLDBOsWaitstats"] = query{ScriptName: "AzureSQLOsWaitstats", Script: sqlAzureDBOsWaitStats, ResultByRow: false}
queries["AzureSQLDBMemoryClerks"] = query{ScriptName: "AzureSQLDBMemoryClerks", Script: sqlAzureDBMemoryClerks, ResultByRow: false}
queries["AzureSQLDBPerformanceCounters"] = query{ScriptName: "AzureSQLDBPerformanceCounters", Script: sqlAzureDBPerformanceCounters, ResultByRow: false}
queries["AzureSQLDBRequests"] = query{ScriptName: "AzureSQLDBRequests", Script: sqlAzureDBRequests, ResultByRow: false}
queries["AzureSQLDBSchedulers"] = query{ScriptName: "AzureSQLDBSchedulers", Script: sqlAzureDBSchedulers, ResultByRow: false}
} else if s.DatabaseType == typeAzureSQLManagedInstance {
queries["AzureSQLMIResourceStats"] = query{ScriptName: "AzureSQLMIResourceStats", Script: sqlAzureMIResourceStats, ResultByRow: false}
queries["AzureSQLMIResourceGovernance"] = query{ScriptName: "AzureSQLMIResourceGovernance", Script: sqlAzureMIResourceGovernance, ResultByRow: false}
queries["AzureSQLMIDatabaseIO"] = query{ScriptName: "AzureSQLMIDatabaseIO", Script: sqlAzureMIDatabaseIO, ResultByRow: false}
queries["AzureSQLMIServerProperties"] = query{ScriptName: "AzureSQLMIServerProperties", Script: sqlAzureMIProperties, ResultByRow: false}
queries["AzureSQLMIOsWaitstats"] = query{ScriptName: "AzureSQLMIOsWaitstats", Script: sqlAzureMIOsWaitStats, ResultByRow: false}
queries["AzureSQLMIMemoryClerks"] = query{ScriptName: "AzureSQLMIMemoryClerks", Script: sqlAzureMIMemoryClerks, ResultByRow: false}
queries["AzureSQLMIPerformanceCounters"] = query{ScriptName: "AzureSQLMIPerformanceCounters", Script: sqlAzureMIPerformanceCounters, ResultByRow: false}
queries["AzureSQLMIRequests"] = query{ScriptName: "AzureSQLMIRequests", Script: sqlAzureMIRequests, ResultByRow: false}
queries["AzureSQLMISchedulers"] = query{ScriptName: "AzureSQLMISchedulers", Script: sqlAzureMISchedulers, ResultByRow: false}
} else if s.DatabaseType == typeAzureSQLPool {
queries["AzureSQLPoolResourceStats"] = query{ScriptName: "AzureSQLPoolResourceStats", Script: sqlAzurePoolResourceStats, ResultByRow: false}
queries["AzureSQLPoolResourceGovernance"] =
query{ScriptName: "AzureSQLPoolResourceGovernance", Script: sqlAzurePoolResourceGovernance, ResultByRow: false}
queries["AzureSQLPoolDatabaseIO"] = query{ScriptName: "AzureSQLPoolDatabaseIO", Script: sqlAzurePoolDatabaseIO, ResultByRow: false}
queries["AzureSQLPoolOsWaitStats"] = query{ScriptName: "AzureSQLPoolOsWaitStats", Script: sqlAzurePoolOsWaitStats, ResultByRow: false}
queries["AzureSQLPoolMemoryClerks"] = query{ScriptName: "AzureSQLPoolMemoryClerks", Script: sqlAzurePoolMemoryClerks, ResultByRow: false}
queries["AzureSQLPoolPerformanceCounters"] =
query{ScriptName: "AzureSQLPoolPerformanceCounters", Script: sqlAzurePoolPerformanceCounters, ResultByRow: false}
queries["AzureSQLPoolSchedulers"] = query{ScriptName: "AzureSQLPoolSchedulers", Script: sqlAzurePoolSchedulers, ResultByRow: false}
} else if s.DatabaseType == typeAzureArcSQLManagedInstance {
queries["AzureArcSQLMIDatabaseIO"] = query{ScriptName: "AzureArcSQLMIDatabaseIO", Script: sqlAzureArcMIDatabaseIO, ResultByRow: false}
queries["AzureArcSQLMIServerProperties"] = query{ScriptName: "AzureArcSQLMIServerProperties", Script: sqlAzureArcMIProperties, ResultByRow: false}
queries["AzureArcSQLMIOsWaitstats"] = query{ScriptName: "AzureArcSQLMIOsWaitstats", Script: sqlAzureArcMIOsWaitStats, ResultByRow: false}
queries["AzureArcSQLMIMemoryClerks"] = query{ScriptName: "AzureArcSQLMIMemoryClerks", Script: sqlAzureArcMIMemoryClerks, ResultByRow: false}
queries["AzureArcSQLMIPerformanceCounters"] =
query{ScriptName: "AzureArcSQLMIPerformanceCounters", Script: sqlAzureArcMIPerformanceCounters, ResultByRow: false}
queries["AzureArcSQLMIRequests"] = query{ScriptName: "AzureArcSQLMIRequests", Script: sqlAzureArcMIRequests, ResultByRow: false}
queries["AzureArcSQLMISchedulers"] = query{ScriptName: "AzureArcSQLMISchedulers", Script: sqlAzureArcMISchedulers, ResultByRow: false}
} else if s.DatabaseType == typeSQLServer { // These are still V2 queries and have not been refactored yet.
queries["SQLServerPerformanceCounters"] = query{ScriptName: "SQLServerPerformanceCounters", Script: sqlServerPerformanceCounters, ResultByRow: false}
queries["SQLServerWaitStatsCategorized"] = query{ScriptName: "SQLServerWaitStatsCategorized", Script: sqlServerWaitStatsCategorized, ResultByRow: false}
queries["SQLServerDatabaseIO"] = query{ScriptName: "SQLServerDatabaseIO", Script: sqlServerDatabaseIO, ResultByRow: false}
queries["SQLServerProperties"] = query{ScriptName: "SQLServerProperties", Script: sqlServerProperties, ResultByRow: false}
queries["SQLServerMemoryClerks"] = query{ScriptName: "SQLServerMemoryClerks", Script: sqlServerMemoryClerks, ResultByRow: false}
queries["SQLServerSchedulers"] = query{ScriptName: "SQLServerSchedulers", Script: sqlServerSchedulers, ResultByRow: false}
queries["SQLServerRequests"] = query{ScriptName: "SQLServerRequests", Script: sqlServerRequests, ResultByRow: false}
queries["SQLServerVolumeSpace"] = query{ScriptName: "SQLServerVolumeSpace", Script: sqlServerVolumeSpace, ResultByRow: false}
queries["SQLServerCpu"] = query{ScriptName: "SQLServerCpu", Script: sqlServerRingBufferCPU, ResultByRow: false}
queries["SQLServerAvailabilityReplicaStates"] =
query{ScriptName: "SQLServerAvailabilityReplicaStates", Script: sqlServerAvailabilityReplicaStates, ResultByRow: false}
queries["SQLServerDatabaseReplicaStates"] =
query{ScriptName: "SQLServerDatabaseReplicaStates", Script: sqlServerDatabaseReplicaStates, ResultByRow: false}
queries["SQLServerRecentBackups"] = query{ScriptName: "SQLServerRecentBackups", Script: sqlServerRecentBackups, ResultByRow: false}
queries["SQLServerPersistentVersionStore"] =
query{ScriptName: "SQLServerPersistentVersionStore", Script: sqlServerPersistentVersionStore, ResultByRow: false}
} else {
// If this is an AzureDB instance, grab some extra metrics
if s.AzureDB {
queries["AzureDBResourceStats"] = query{ScriptName: "AzureDBPerformanceCounters", Script: sqlAzureDBResourceStats, ResultByRow: false}
queries["AzureDBResourceGovernance"] = query{ScriptName: "AzureDBPerformanceCounters", Script: sqlAzureDBResourceGovernance, ResultByRow: false}
}
// Decide if we want to run version 1 or version 2 queries
if s.QueryVersion == 2 {
queries["PerformanceCounters"] = query{ScriptName: "PerformanceCounters", Script: sqlPerformanceCountersV2, ResultByRow: true}
queries["WaitStatsCategorized"] = query{ScriptName: "WaitStatsCategorized", Script: sqlWaitStatsCategorizedV2, ResultByRow: false}
queries["DatabaseIO"] = query{ScriptName: "DatabaseIO", Script: sqlDatabaseIOV2, ResultByRow: false}
queries["ServerProperties"] = query{ScriptName: "ServerProperties", Script: sqlServerPropertiesV2, ResultByRow: false}
queries["MemoryClerk"] = query{ScriptName: "MemoryClerk", Script: sqlMemoryClerkV2, ResultByRow: false}
queries["Schedulers"] = query{ScriptName: "Schedulers", Script: sqlServerSchedulersV2, ResultByRow: false}
queries["SqlRequests"] = query{ScriptName: "SqlRequests", Script: sqlServerRequestsV2, ResultByRow: false}
queries["VolumeSpace"] = query{ScriptName: "VolumeSpace", Script: sqlServerVolumeSpaceV2, ResultByRow: false}
queries["Cpu"] = query{ScriptName: "Cpu", Script: sqlServerCPUV2, ResultByRow: false}
} else {
queries["PerformanceCounters"] = query{ScriptName: "PerformanceCounters", Script: sqlPerformanceCounters, ResultByRow: true}
queries["WaitStatsCategorized"] = query{ScriptName: "WaitStatsCategorized", Script: sqlWaitStatsCategorized, ResultByRow: false}
queries["CPUHistory"] = query{ScriptName: "CPUHistory", Script: sqlCPUHistory, ResultByRow: false}
queries["DatabaseIO"] = query{ScriptName: "DatabaseIO", Script: sqlDatabaseIO, ResultByRow: false}
queries["DatabaseSize"] = query{ScriptName: "DatabaseSize", Script: sqlDatabaseSize, ResultByRow: false}
queries["DatabaseStats"] = query{ScriptName: "DatabaseStats", Script: sqlDatabaseStats, ResultByRow: false}
queries["DatabaseProperties"] = query{ScriptName: "DatabaseProperties", Script: sqlDatabaseProperties, ResultByRow: false}
queries["MemoryClerk"] = query{ScriptName: "MemoryClerk", Script: sqlMemoryClerk, ResultByRow: false}
queries["VolumeSpace"] = query{ScriptName: "VolumeSpace", Script: sqlVolumeSpace, ResultByRow: false}
queries["PerformanceMetrics"] = query{ScriptName: "PerformanceMetrics", Script: sqlPerformanceMetrics, ResultByRow: false}
}
}
filterQueries, err := filter.NewIncludeExcludeFilter(s.IncludeQuery, s.ExcludeQuery)
if err != nil {
return err
}
for query := range queries {
if !filterQueries.Match(query) {
delete(queries, query)
}
}
queryList := make([]string, 0, len(queries))
for query := range queries {
queryList = append(queryList, query)
}
s.Log.Infof("Config: Effective Queries: %#v\n", queryList)
return nil
}
func (s *SQLServer) gatherServer(pool *sql.DB, query query, acc telegraf.Accumulator, connectionString string) error {
// execute query
ctx := context.Background()
// Use the query timeout if any
@ -368,7 +370,7 @@ func (s *SQLServer) gatherServer(pool *sql.DB, query Query, acc telegraf.Accumul
return rows.Err()
}
func (s *SQLServer) accRow(query Query, acc telegraf.Accumulator, row scanner) error {
func (s *SQLServer) accRow(query query, acc telegraf.Accumulator, row scanner) error {
var fields = make(map[string]interface{})
// store the column name with its *interface{}
@ -425,25 +427,25 @@ func (s *SQLServer) accRow(query Query, acc telegraf.Accumulator, row scanner) e
}
// gatherHealth stores info about any query errors in the healthMetrics map
func gatherHealth(healthMetrics map[string]*HealthMetric, serv string, queryError error) {
if healthMetrics[serv] == nil {
healthMetrics[serv] = &HealthMetric{}
}
healthMetrics[serv].AttemptedQueries++
if queryError == nil {
healthMetrics[serv].SuccessfulQueries++
}
}
// accHealth accumulates the query health data contained within the healthMetrics map
func (s *SQLServer) accHealth(healthMetrics map[string]*HealthMetric, acc telegraf.Accumulator) {
for connectionString, connectionStats := range healthMetrics {
sqlInstance, databaseName := getConnectionIdentifiers(connectionString)
tags := map[string]string{healthMetricInstanceTag: sqlInstance, healthMetricDatabaseTag: databaseName}
fields := map[string]interface{}{
healthMetricAttemptedQueries: connectionStats.AttemptedQueries,
healthMetricSuccessfulQueries: connectionStats.SuccessfulQueries,
healthMetricDatabaseType: s.getDatabaseTypeToLog(),
}
// gatherHealth stores info about any query errors in the healthMetrics map
func gatherHealth(healthMetrics map[string]*healthMetric, serv string, queryError error) {
if healthMetrics[serv] == nil {
healthMetrics[serv] = &healthMetric{}
}
healthMetrics[serv].attemptedQueries++
if queryError == nil {
healthMetrics[serv].successfulQueries++
}
}
// accHealth accumulates the query health data contained within the healthMetrics map
func (s *SQLServer) accHealth(healthMetrics map[string]*healthMetric, acc telegraf.Accumulator) {
for connectionString, connectionStats := range healthMetrics {
sqlInstance, databaseName := getConnectionIdentifiers(connectionString)
tags := map[string]string{healthMetricInstanceTag: sqlInstance, healthMetricDatabaseTag: databaseName}
fields := map[string]interface{}{
healthMetricAttemptedQueries: connectionStats.attemptedQueries,
healthMetricSuccessfulQueries: connectionStats.successfulQueries,
healthMetricDatabaseType: s.getDatabaseTypeToLog(),
}
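A runnable sketch of gatherHealth's tallying behaviour, with the renamed unexported type re-declared locally for illustration (the connection string is hypothetical):

package main

import (
	"errors"
	"fmt"
)

// healthMetric mirrors the unexported counters used by the plugin.
type healthMetric struct {
	attemptedQueries  int
	successfulQueries int
}

func gatherHealth(healthMetrics map[string]*healthMetric, serv string, queryError error) {
	if healthMetrics[serv] == nil {
		healthMetrics[serv] = &healthMetric{}
	}
	healthMetrics[serv].attemptedQueries++
	if queryError == nil {
		healthMetrics[serv].successfulQueries++
	}
}

func main() {
	m := make(map[string]*healthMetric)
	dsn := "server=localhost;database=db1" // hypothetical connection string
	gatherHealth(m, dsn, nil)                      // one success
	gatherHealth(m, dsn, errors.New("timeout"))    // one failure
	fmt.Printf("%+v\n", *m[dsn]) // {attemptedQueries:2 successfulQueries:1}
}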
@ -464,15 +466,6 @@ func (s *SQLServer) getDatabaseTypeToLog() string {
return logname
}
func (s *SQLServer) Init() error {
if len(s.Servers) == 0 {
srv := config.NewSecret([]byte(defaultServer))
s.Servers = append(s.Servers, &srv)
}
return nil
}
// Get Token Provider by loading cached token or refreshed token
func (s *SQLServer) getTokenProvider() (func() (string, error), error) {
var tokenString string
@ -47,17 +47,17 @@ func TestSqlServer_QueriesInclusionExclusion(t *testing.T) {
func TestSqlServer_ParseMetrics(t *testing.T) {
var acc testutil.Accumulator
queries := make(MapQuery)
queries["PerformanceCounters"] = Query{ScriptName: "PerformanceCounters", Script: mockPerformanceCounters, ResultByRow: true}
queries["WaitStatsCategorized"] = Query{ScriptName: "WaitStatsCategorized", Script: mockWaitStatsCategorized, ResultByRow: false}
queries["CPUHistory"] = Query{ScriptName: "CPUHistory", Script: mockCPUHistory, ResultByRow: false}
queries["DatabaseIO"] = Query{ScriptName: "DatabaseIO", Script: mockDatabaseIO, ResultByRow: false}
queries["DatabaseSize"] = Query{ScriptName: "DatabaseSize", Script: mockDatabaseSize, ResultByRow: false}
queries["DatabaseStats"] = Query{ScriptName: "DatabaseStats", Script: mockDatabaseStats, ResultByRow: false}
queries["DatabaseProperties"] = Query{ScriptName: "DatabaseProperties", Script: mockDatabaseProperties, ResultByRow: false}
queries["VolumeSpace"] = Query{ScriptName: "VolumeSpace", Script: mockVolumeSpace, ResultByRow: false}
queries["MemoryClerk"] = Query{ScriptName: "MemoryClerk", Script: mockMemoryClerk, ResultByRow: false}
queries["PerformanceMetrics"] = Query{ScriptName: "PerformanceMetrics", Script: mockPerformanceMetrics, ResultByRow: false}
queries := make(mapQuery)
queries["PerformanceCounters"] = query{ScriptName: "PerformanceCounters", Script: mockPerformanceCounters, ResultByRow: true}
queries["WaitStatsCategorized"] = query{ScriptName: "WaitStatsCategorized", Script: mockWaitStatsCategorized, ResultByRow: false}
queries["CPUHistory"] = query{ScriptName: "CPUHistory", Script: mockCPUHistory, ResultByRow: false}
queries["DatabaseIO"] = query{ScriptName: "DatabaseIO", Script: mockDatabaseIO, ResultByRow: false}
queries["DatabaseSize"] = query{ScriptName: "DatabaseSize", Script: mockDatabaseSize, ResultByRow: false}
queries["DatabaseStats"] = query{ScriptName: "DatabaseStats", Script: mockDatabaseStats, ResultByRow: false}
queries["DatabaseProperties"] = query{ScriptName: "DatabaseProperties", Script: mockDatabaseProperties, ResultByRow: false}
queries["VolumeSpace"] = query{ScriptName: "VolumeSpace", Script: mockVolumeSpace, ResultByRow: false}
queries["MemoryClerk"] = query{ScriptName: "MemoryClerk", Script: mockMemoryClerk, ResultByRow: false}
queries["PerformanceMetrics"] = query{ScriptName: "PerformanceMetrics", Script: mockPerformanceMetrics, ResultByRow: false}
var headers, mock, row []string
var tags = make(map[string]string)
@ -31,19 +31,18 @@ import (
//go:embed sample.conf
var sampleConfig string
const (
defaultRateLimit = 14
)
var (
defaultCacheTTL = config.Duration(1 * time.Hour)
defaultWindow = config.Duration(1 * time.Minute)
defaultDelay = config.Duration(5 * time.Minute)
)
const (
defaultRateLimit = 14
)
type (
// stackdriver is the Google Stackdriver config info.
stackdriver struct {
Stackdriver struct {
Project string `toml:"project"`
RateLimit int `toml:"rate_limit"`
Window config.Duration `toml:"window"`
@ -55,7 +54,7 @@ type (
DistributionAggregationAligners []string `toml:"distribution_aggregation_aligners"`
Filter *listTimeSeriesFilter `toml:"filter"`
Log telegraf.Logger
Log telegraf.Logger `toml:"-"`
client metricClient
timeSeriesConfCache *timeSeriesConfCache
@ -106,9 +105,9 @@ type (
// metricClient is convenient for testing
metricClient interface {
ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest) (<-chan *metricpb.MetricDescriptor, error)
ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest) (<-chan *monitoringpb.TimeSeries, error)
Close() error
listMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest) (<-chan *metricpb.MetricDescriptor, error)
listTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest) (<-chan *monitoringpb.TimeSeries, error)
close() error
}
lockedSeriesGrouper struct {
@ -117,87 +116,11 @@ type (
}
)
func (g *lockedSeriesGrouper) Add(
measurement string,
tags map[string]string,
tm time.Time,
field string,
fieldValue interface{},
) {
g.Lock()
defer g.Unlock()
g.SeriesGrouper.Add(measurement, tags, tm, field, fieldValue)
}
// ListMetricDescriptors implements metricClient interface
func (smc *stackdriverMetricClient) ListMetricDescriptors(
ctx context.Context,
req *monitoringpb.ListMetricDescriptorsRequest,
) (<-chan *metricpb.MetricDescriptor, error) {
mdChan := make(chan *metricpb.MetricDescriptor, 1000)
go func() {
smc.log.Debugf("List metric descriptor request filter: %s", req.Filter)
defer close(mdChan)
// Iterate over metric descriptors and send them to buffered channel
mdResp := smc.conn.ListMetricDescriptors(ctx, req)
smc.listMetricDescriptorsCalls.Incr(1)
for {
mdDesc, mdErr := mdResp.Next()
if mdErr != nil {
if !errors.Is(mdErr, iterator.Done) {
smc.log.Errorf("Failed iterating metric descriptor responses: %q: %v", req.String(), mdErr)
}
break
}
mdChan <- mdDesc
}
}()
return mdChan, nil
}
// ListTimeSeries implements metricClient interface
func (smc *stackdriverMetricClient) ListTimeSeries(
ctx context.Context,
req *monitoringpb.ListTimeSeriesRequest,
) (<-chan *monitoringpb.TimeSeries, error) {
tsChan := make(chan *monitoringpb.TimeSeries, 1000)
go func() {
smc.log.Debugf("List time series request filter: %s", req.Filter)
defer close(tsChan)
// Iterate over timeseries and send them to buffered channel
tsResp := smc.conn.ListTimeSeries(ctx, req)
smc.listTimeSeriesCalls.Incr(1)
for {
tsDesc, tsErr := tsResp.Next()
if tsErr != nil {
if !errors.Is(tsErr, iterator.Done) {
smc.log.Errorf("Failed iterating time series responses: %q: %v", req.String(), tsErr)
}
break
}
tsChan <- tsDesc
}
}()
return tsChan, nil
}
// Close implements metricClient interface
func (smc *stackdriverMetricClient) Close() error {
return smc.conn.Close()
}
func (*stackdriver) SampleConfig() string {
func (*Stackdriver) SampleConfig() string {
return sampleConfig
}
// Gather implements telegraf.Input interface
func (s *stackdriver) Gather(acc telegraf.Accumulator) error {
func (s *Stackdriver) Gather(acc telegraf.Accumulator) error {
ctx := context.Background()
if s.RateLimit == 0 {
@ -212,7 +135,7 @@ func (s *stackdriver) Gather(acc telegraf.Accumulator) error {
start, end := s.updateWindow(s.prevEnd)
s.prevEnd = end
tsConfs, err := s.generatetimeSeriesConfs(ctx, start, end)
tsConfs, err := s.generateTimeSeriesConfs(ctx, start, end)
if err != nil {
return err
}
@ -242,8 +165,34 @@ func (s *stackdriver) Gather(acc telegraf.Accumulator) error {
return nil
}
func (s *Stackdriver) initializeStackdriverClient(ctx context.Context) error {
if s.client == nil {
client, err := monitoring.NewMetricClient(ctx)
if err != nil {
return fmt.Errorf("failed to create stackdriver monitoring client: %w", err)
}
tags := map[string]string{
"project_id": s.Project,
}
listMetricDescriptorsCalls := selfstat.Register(
"stackdriver", "list_metric_descriptors_calls", tags)
listTimeSeriesCalls := selfstat.Register(
"stackdriver", "list_timeseries_calls", tags)
s.client = &stackdriverMetricClient{
log: s.Log,
conn: client,
listMetricDescriptorsCalls: listMetricDescriptorsCalls,
listTimeSeriesCalls: listTimeSeriesCalls,
}
}
return nil
}
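The two counters registered here are ordinary Telegraf selfstat metrics; each client method increments its counter once per API request. A minimal usage sketch, assuming the `selfstat.Register(measurement, field, tags)` signature from `github.com/influxdata/telegraf/selfstat` (the project id is hypothetical):

package main

import (
	"fmt"

	"github.com/influxdata/telegraf/selfstat"
)

func main() {
	calls := selfstat.Register("stackdriver", "list_timeseries_calls",
		map[string]string{"project_id": "my-project"}) // hypothetical project id
	calls.Incr(1)            // what listTimeSeries does once per request
	fmt.Println(calls.Get()) // 1
}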
// Returns the start and end time for the next collection.
func (s *stackdriver) updateWindow(prevEnd time.Time) (time.Time, time.Time) {
func (s *Stackdriver) updateWindow(prevEnd time.Time) (time.Time, time.Time) {
var start time.Time
if time.Duration(s.Window) != 0 {
start = time.Now().Add(-time.Duration(s.Delay)).Add(-time.Duration(s.Window))
@ -256,8 +205,90 @@ func (s *stackdriver) updateWindow(prevEnd time.Time) (time.Time, time.Time) {
return start, end
}
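Only the fixed-window branch of updateWindow is visible in this hunk. A sketch of the windowing rule, with the continuation branch assumed from the surrounding logic (prevEnd carries the previous end time forward so no samples are skipped between gathers):

package main

import (
	"fmt"
	"time"
)

func window(prevEnd time.Time, windowDur, delay time.Duration) (time.Time, time.Time) {
	var start time.Time
	if windowDur != 0 {
		// Fixed window: always look windowDur back from (now - delay).
		start = time.Now().Add(-delay).Add(-windowDur)
	} else {
		start = prevEnd // assumed continuation branch, not shown in the hunk
	}
	end := time.Now().Add(-delay)
	return start, end
}

func main() {
	start, end := window(time.Time{}, time.Minute, 5*time.Minute)
	fmt.Println(end.Sub(start).Round(time.Second)) // 1m0s
}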
// Generate a list of timeSeriesConfig structs by making a listMetricDescriptors
// API request and filtering the result against our configuration.
func (s *Stackdriver) generateTimeSeriesConfs(ctx context.Context, startTime, endTime time.Time) ([]*timeSeriesConf, error) {
if s.timeSeriesConfCache != nil && s.timeSeriesConfCache.isValid() {
// Update interval for timeseries requests in timeseries cache
interval := &monitoringpb.TimeInterval{
EndTime: &timestamppb.Timestamp{Seconds: endTime.Unix()},
StartTime: &timestamppb.Timestamp{Seconds: startTime.Unix()},
}
for _, timeSeriesConf := range s.timeSeriesConfCache.TimeSeriesConfs {
timeSeriesConf.listTimeSeriesRequest.Interval = interval
}
return s.timeSeriesConfCache.TimeSeriesConfs, nil
}
ret := make([]*timeSeriesConf, 0)
req := &monitoringpb.ListMetricDescriptorsRequest{
Name: "projects/" + s.Project,
}
filters := s.newListMetricDescriptorsFilters()
if len(filters) == 0 {
filters = []string{""}
}
for _, filter := range filters {
// Add filter for list metric descriptors if
// includeMetricTypePrefixes is specified,
// this is more efficient than iterating over
// all metric descriptors
req.Filter = filter
mdRespChan, err := s.client.listMetricDescriptors(ctx, req)
if err != nil {
return nil, err
}
for metricDescriptor := range mdRespChan {
metricType := metricDescriptor.Type
valueType := metricDescriptor.ValueType
if filter == "" && !s.includeMetricType(metricType) {
continue
}
if valueType == metricpb.MetricDescriptor_DISTRIBUTION {
if s.GatherRawDistributionBuckets {
tsConf := s.newTimeSeriesConf(metricType, startTime, endTime)
ret = append(ret, tsConf)
}
for _, alignerStr := range s.DistributionAggregationAligners {
tsConf := s.newTimeSeriesConf(metricType, startTime, endTime)
tsConf.initForAggregate(alignerStr)
ret = append(ret, tsConf)
}
} else {
ret = append(ret, s.newTimeSeriesConf(metricType, startTime, endTime))
}
}
}
s.timeSeriesConfCache = &timeSeriesConfCache{
TimeSeriesConfs: ret,
Generated: time.Now(),
TTL: time.Duration(s.CacheTTL),
}
return ret, nil
}
// Generates filter for list metric descriptors request
func (s *Stackdriver) newListMetricDescriptorsFilters() []string {
if len(s.MetricTypePrefixInclude) == 0 {
return nil
}
metricTypeFilters := make([]string, 0, len(s.MetricTypePrefixInclude))
for _, metricTypePrefix := range s.MetricTypePrefixInclude {
metricTypeFilters = append(metricTypeFilters, fmt.Sprintf(`metric.type = starts_with(%q)`, metricTypePrefix))
}
return metricTypeFilters
}
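The produced filter is a plain Monitoring API expression per configured prefix; for example (the prefix is hypothetical):

package main

import "fmt"

func main() {
	prefix := "compute.googleapis.com/instance/cpu" // hypothetical configured prefix
	fmt.Printf(`metric.type = starts_with(%q)`+"\n", prefix)
	// metric.type = starts_with("compute.googleapis.com/instance/cpu")
}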
// Generate filter string for ListTimeSeriesRequest
func (s *stackdriver) newListTimeSeriesFilter(metricType string) string {
func (s *Stackdriver) newListTimeSeriesFilter(metricType string) string {
functions := []string{
"starts_with",
"ends_with",
@ -345,11 +376,8 @@ func (s *stackdriver) newListTimeSeriesFilter(metricType string) string {
return filterString
}
// Create and initialize a timeSeriesConf for a given GCP metric type with
// defaults taken from the gcp_stackdriver plugin configuration.
func (s *stackdriver) newTimeSeriesConf(
metricType string, startTime, endTime time.Time,
) *timeSeriesConf {
// Create and initialize a timeSeriesConf for a given GCP metric type with defaults taken from the gcp_stackdriver plugin configuration.
func (s *Stackdriver) newTimeSeriesConf(metricType string, startTime, endTime time.Time) *timeSeriesConf {
filter := s.newListTimeSeriesFilter(metricType)
interval := &monitoringpb.TimeInterval{
EndTime: &timestamppb.Timestamp{Seconds: endTime.Unix()},
@ -376,83 +404,10 @@ func (s *stackdriver) newTimeSeriesConf(
return cfg
}
// Change this configuration to query an aggregate by specifying an "aligner".
// In GCP monitoring, "aligning" is aggregation performed *within* a time
// series, to distill a pile of data points down to a single data point for
// some given time period (here, we specify 60s as our time period). This is
// especially useful for scraping GCP "distribution" metric types, whose raw
// data amounts to a ~60 bucket histogram, which is fairly hard to query and
// visualize in the TICK stack.
func (t *timeSeriesConf) initForAggregate(alignerStr string) {
// Check if alignerStr is valid
alignerInt, isValid := monitoringpb.Aggregation_Aligner_value[alignerStr]
if !isValid {
alignerStr = monitoringpb.Aggregation_Aligner_name[alignerInt]
}
aligner := monitoringpb.Aggregation_Aligner(alignerInt)
agg := &monitoringpb.Aggregation{
AlignmentPeriod: &durationpb.Duration{Seconds: 60},
PerSeriesAligner: aligner,
}
t.fieldKey = t.fieldKey + "_" + strings.ToLower(alignerStr)
t.listTimeSeriesRequest.Aggregation = agg
}
// IsValid checks timeseriesconf cache validity
func (c *timeSeriesConfCache) IsValid() bool {
return c.TimeSeriesConfs != nil && time.Since(c.Generated) < c.TTL
}
func (s *stackdriver) initializeStackdriverClient(ctx context.Context) error {
if s.client == nil {
client, err := monitoring.NewMetricClient(ctx)
if err != nil {
return fmt.Errorf("failed to create stackdriver monitoring client: %w", err)
}
tags := map[string]string{
"project_id": s.Project,
}
listMetricDescriptorsCalls := selfstat.Register(
"stackdriver", "list_metric_descriptors_calls", tags)
listTimeSeriesCalls := selfstat.Register(
"stackdriver", "list_timeseries_calls", tags)
s.client = &stackdriverMetricClient{
log: s.Log,
conn: client,
listMetricDescriptorsCalls: listMetricDescriptorsCalls,
listTimeSeriesCalls: listTimeSeriesCalls,
}
}
return nil
}
func includeExcludeHelper(key string, includes, excludes []string) bool {
if len(includes) > 0 {
for _, includeStr := range includes {
if strings.HasPrefix(key, includeStr) {
return true
}
}
return false
}
if len(excludes) > 0 {
for _, excludeStr := range excludes {
if strings.HasPrefix(key, excludeStr) {
return false
}
}
return true
}
return true
}
// Test whether a particular GCP metric type should be scraped by this plugin
// by checking the plugin name against the configuration's
// "includeMetricTypePrefixes" and "excludeMetricTypePrefixes"
func (s *stackdriver) includeMetricType(metricType string) bool {
func (s *Stackdriver) includeMetricType(metricType string) bool {
k := metricType
inc := s.MetricTypePrefixInclude
exc := s.MetricTypePrefixExclude
@ -460,98 +415,11 @@ func (s *stackdriver) includeMetricType(metricType string) bool {
return includeExcludeHelper(k, inc, exc)
}
// Generates filter for list metric descriptors request
func (s *stackdriver) newListMetricDescriptorsFilters() []string {
if len(s.MetricTypePrefixInclude) == 0 {
return nil
}
metricTypeFilters := make([]string, 0, len(s.MetricTypePrefixInclude))
for _, metricTypePrefix := range s.MetricTypePrefixInclude {
metricTypeFilters = append(metricTypeFilters, fmt.Sprintf(`metric.type = starts_with(%q)`, metricTypePrefix))
}
return metricTypeFilters
}
// Generate a list of timeSeriesConfig structs by making a ListMetricDescriptors
// API request and filtering the result against our configuration.
func (s *stackdriver) generatetimeSeriesConfs(
ctx context.Context, startTime, endTime time.Time,
) ([]*timeSeriesConf, error) {
if s.timeSeriesConfCache != nil && s.timeSeriesConfCache.IsValid() {
// Update interval for timeseries requests in timeseries cache
interval := &monitoringpb.TimeInterval{
EndTime: &timestamppb.Timestamp{Seconds: endTime.Unix()},
StartTime: &timestamppb.Timestamp{Seconds: startTime.Unix()},
}
for _, timeSeriesConf := range s.timeSeriesConfCache.TimeSeriesConfs {
timeSeriesConf.listTimeSeriesRequest.Interval = interval
}
return s.timeSeriesConfCache.TimeSeriesConfs, nil
}
ret := make([]*timeSeriesConf, 0)
req := &monitoringpb.ListMetricDescriptorsRequest{
Name: "projects/" + s.Project,
}
filters := s.newListMetricDescriptorsFilters()
if len(filters) == 0 {
filters = []string{""}
}
for _, filter := range filters {
// Add filter for list metric descriptors if
// includeMetricTypePrefixes is specified,
// this is more efficient than iterating over
// all metric descriptors
req.Filter = filter
mdRespChan, err := s.client.ListMetricDescriptors(ctx, req)
if err != nil {
return nil, err
}
for metricDescriptor := range mdRespChan {
metricType := metricDescriptor.Type
valueType := metricDescriptor.ValueType
if filter == "" && !s.includeMetricType(metricType) {
continue
}
if valueType == metricpb.MetricDescriptor_DISTRIBUTION {
if s.GatherRawDistributionBuckets {
tsConf := s.newTimeSeriesConf(metricType, startTime, endTime)
ret = append(ret, tsConf)
}
for _, alignerStr := range s.DistributionAggregationAligners {
tsConf := s.newTimeSeriesConf(metricType, startTime, endTime)
tsConf.initForAggregate(alignerStr)
ret = append(ret, tsConf)
}
} else {
ret = append(ret, s.newTimeSeriesConf(metricType, startTime, endTime))
}
}
}
s.timeSeriesConfCache = &timeSeriesConfCache{
TimeSeriesConfs: ret,
Generated: time.Now(),
TTL: time.Duration(s.CacheTTL),
}
return ret, nil
}
// Do the work to gather an individual time series. Runs inside a
// timeseries-specific goroutine.
func (s *stackdriver) gatherTimeSeries(
ctx context.Context, grouper *lockedSeriesGrouper, tsConf *timeSeriesConf,
) error {
// Do the work to gather an individual time series. Runs inside a timeseries-specific goroutine.
func (s *Stackdriver) gatherTimeSeries(ctx context.Context, grouper *lockedSeriesGrouper, tsConf *timeSeriesConf) error {
tsReq := tsConf.listTimeSeriesRequest
tsRespChan, err := s.client.ListTimeSeries(ctx, tsReq)
tsRespChan, err := s.client.listTimeSeries(ctx, tsReq)
if err != nil {
return err
}
@ -599,75 +467,10 @@ func (s *stackdriver) gatherTimeSeries(
return nil
}
type buckets interface {
Amount() int32
UpperBound(i int32) float64
}
type LinearBuckets struct {
*distributionpb.Distribution_BucketOptions_Linear
}
func (l *LinearBuckets) Amount() int32 {
return l.NumFiniteBuckets + 2
}
func (l *LinearBuckets) UpperBound(i int32) float64 {
return l.Offset + (l.Width * float64(i))
}
type ExponentialBuckets struct {
*distributionpb.Distribution_BucketOptions_Exponential
}
func (e *ExponentialBuckets) Amount() int32 {
return e.NumFiniteBuckets + 2
}
func (e *ExponentialBuckets) UpperBound(i int32) float64 {
width := math.Pow(e.GrowthFactor, float64(i))
return e.Scale * width
}
type ExplicitBuckets struct {
*distributionpb.Distribution_BucketOptions_Explicit
}
func (e *ExplicitBuckets) Amount() int32 {
return int32(len(e.Bounds)) + 1
}
func (e *ExplicitBuckets) UpperBound(i int32) float64 {
return e.Bounds[i]
}
func NewBucket(dist *distributionpb.Distribution) (buckets, error) {
linearBuckets := dist.BucketOptions.GetLinearBuckets()
if linearBuckets != nil {
var l LinearBuckets
l.Distribution_BucketOptions_Linear = linearBuckets
return &l, nil
}
exponentialBuckets := dist.BucketOptions.GetExponentialBuckets()
if exponentialBuckets != nil {
var e ExponentialBuckets
e.Distribution_BucketOptions_Exponential = exponentialBuckets
return &e, nil
}
explicitBuckets := dist.BucketOptions.GetExplicitBuckets()
if explicitBuckets != nil {
var e ExplicitBuckets
e.Distribution_BucketOptions_Explicit = explicitBuckets
return &e, nil
}
return nil, errors.New("no buckets available")
}
// addDistribution adds metrics from a distribution value type.
func addDistribution(dist *distributionpb.Distribution, tags map[string]string, ts time.Time, grouper *lockedSeriesGrouper, tsConf *timeSeriesConf) error {
func addDistribution(dist *distributionpb.Distribution, tags map[string]string, ts time.Time,
grouper *lockedSeriesGrouper, tsConf *timeSeriesConf,
) error {
field := tsConf.fieldKey
name := tsConf.measurement
@ -680,11 +483,11 @@ func addDistribution(dist *distributionpb.Distribution, tags map[string]string,
grouper.Add(name, tags, ts, field+"_range_max", dist.Range.Max)
}
bucket, err := NewBucket(dist)
bucket, err := newBucket(dist)
if err != nil {
return err
}
numBuckets := bucket.Amount()
numBuckets := bucket.amount()
var i int32
var count int64
@ -694,7 +497,7 @@ func addDistribution(dist *distributionpb.Distribution, tags map[string]string,
if i == numBuckets-1 {
tags["lt"] = "+Inf"
} else {
upperBound := bucket.UpperBound(i)
upperBound := bucket.upperBound(i)
tags["lt"] = strconv.FormatFloat(upperBound, 'f', -1, 64)
}
@ -709,9 +512,192 @@ func addDistribution(dist *distributionpb.Distribution, tags map[string]string,
return nil
}
// Add adds a field key and value to the series.
func (g *lockedSeriesGrouper) Add(measurement string, tags map[string]string, tm time.Time, field string, fieldValue interface{}) {
g.Lock()
defer g.Unlock()
g.SeriesGrouper.Add(measurement, tags, tm, field, fieldValue)
}
// listMetricDescriptors implements metricClient interface
func (smc *stackdriverMetricClient) listMetricDescriptors(ctx context.Context,
req *monitoringpb.ListMetricDescriptorsRequest,
) (<-chan *metricpb.MetricDescriptor, error) {
mdChan := make(chan *metricpb.MetricDescriptor, 1000)
go func() {
smc.log.Debugf("List metric descriptor request filter: %s", req.Filter)
defer close(mdChan)
// Iterate over metric descriptors and send them to buffered channel
mdResp := smc.conn.ListMetricDescriptors(ctx, req)
smc.listMetricDescriptorsCalls.Incr(1)
for {
mdDesc, mdErr := mdResp.Next()
if mdErr != nil {
if !errors.Is(mdErr, iterator.Done) {
smc.log.Errorf("Failed iterating metric descriptor responses: %q: %v", req.String(), mdErr)
}
break
}
mdChan <- mdDesc
}
}()
return mdChan, nil
}
// listTimeSeries implements metricClient interface
func (smc *stackdriverMetricClient) listTimeSeries(
ctx context.Context,
req *monitoringpb.ListTimeSeriesRequest,
) (<-chan *monitoringpb.TimeSeries, error) {
tsChan := make(chan *monitoringpb.TimeSeries, 1000)
go func() {
smc.log.Debugf("List time series request filter: %s", req.Filter)
defer close(tsChan)
// Iterate over timeseries and send them to buffered channel
tsResp := smc.conn.ListTimeSeries(ctx, req)
smc.listTimeSeriesCalls.Incr(1)
for {
tsDesc, tsErr := tsResp.Next()
if tsErr != nil {
if !errors.Is(tsErr, iterator.Done) {
smc.log.Errorf("Failed iterating time series responses: %q: %v", req.String(), tsErr)
}
break
}
tsChan <- tsDesc
}
}()
return tsChan, nil
}
// close implements metricClient interface
func (smc *stackdriverMetricClient) close() error {
return smc.conn.Close()
}
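Both client methods share the same shape: a producer goroutine drains a paged iterator into a buffered channel and closes it on exhaustion, so the consumer can simply range over the channel. A self-contained sketch of that pattern, with a fake iterator standing in for the GCP response iterators:

package main

import (
	"errors"
	"fmt"
)

var errDone = errors.New("done") // stands in for iterator.Done

// fakeIterator mimics the Next() style of the GCP response iterators.
type fakeIterator struct{ items []string }

func (it *fakeIterator) Next() (string, error) {
	if len(it.items) == 0 {
		return "", errDone
	}
	item := it.items[0]
	it.items = it.items[1:]
	return item, nil
}

// produce drains the iterator into a buffered channel from a goroutine,
// closing the channel when iteration ends, as the methods above do.
func produce(it *fakeIterator) <-chan string {
	ch := make(chan string, 1000)
	go func() {
		defer close(ch)
		for {
			item, err := it.Next()
			if err != nil {
				if !errors.Is(err, errDone) {
					fmt.Println("iteration failed:", err)
				}
				break
			}
			ch <- item
		}
	}()
	return ch
}

func main() {
	for item := range produce(&fakeIterator{items: []string{"a", "b"}}) {
		fmt.Println(item)
	}
}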
// Change this configuration to query an aggregate by specifying an "aligner".
// In GCP monitoring, "aligning" is aggregation performed *within* a time
// series, to distill a pile of data points down to a single data point for
// some given time period (here, we specify 60s as our time period). This is
// especially useful for scraping GCP "distribution" metric types, whose raw
// data amounts to a ~60 bucket histogram, which is fairly hard to query and
// visualize in the TICK stack.
func (t *timeSeriesConf) initForAggregate(alignerStr string) {
// Check if alignerStr is valid
alignerInt, isValid := monitoringpb.Aggregation_Aligner_value[alignerStr]
if !isValid {
alignerStr = monitoringpb.Aggregation_Aligner_name[alignerInt]
}
aligner := monitoringpb.Aggregation_Aligner(alignerInt)
agg := &monitoringpb.Aggregation{
AlignmentPeriod: &durationpb.Duration{Seconds: 60},
PerSeriesAligner: aligner,
}
t.fieldKey = t.fieldKey + "_" + strings.ToLower(alignerStr)
t.listTimeSeriesRequest.Aggregation = agg
}
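A sketch of how an aligner name from `distribution_aggregation_aligners` maps onto the proto enum and field-key suffix, mirroring initForAggregate above; the monitoringpb import path is assumed for current cloud.google.com/go/monitoring versions and the base field key is illustrative:

package main

import (
	"fmt"
	"strings"

	"cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
)

func main() {
	alignerStr := "ALIGN_MEAN"
	alignerInt, ok := monitoringpb.Aggregation_Aligner_value[alignerStr]
	fmt.Println(ok, monitoringpb.Aggregation_Aligner(alignerInt)) // true ALIGN_MEAN
	fieldKey := "value" + "_" + strings.ToLower(alignerStr) // assumed base field key
	fmt.Println(fieldKey) // value_align_mean
}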
// isValid checks timeseriesconf cache validity
func (c *timeSeriesConfCache) isValid() bool {
return c.TimeSeriesConfs != nil && time.Since(c.Generated) < c.TTL
}
func includeExcludeHelper(key string, includes, excludes []string) bool {
if len(includes) > 0 {
for _, includeStr := range includes {
if strings.HasPrefix(key, includeStr) {
return true
}
}
return false
}
if len(excludes) > 0 {
for _, excludeStr := range excludes {
if strings.HasPrefix(key, excludeStr) {
return false
}
}
return true
}
return true
}
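The precedence implemented above is: a non-empty include list wins (anything not prefixed is dropped), otherwise excludes are consulted, otherwise everything matches. A self-contained demonstration, with the helper body copied verbatim for runnability:

package main

import (
	"fmt"
	"strings"
)

// Copied from includeExcludeHelper above so the example compiles standalone.
func includeExcludeHelper(key string, includes, excludes []string) bool {
	if len(includes) > 0 {
		for _, includeStr := range includes {
			if strings.HasPrefix(key, includeStr) {
				return true
			}
		}
		return false
	}
	if len(excludes) > 0 {
		for _, excludeStr := range excludes {
			if strings.HasPrefix(key, excludeStr) {
				return false
			}
		}
		return true
	}
	return true
}

func main() {
	fmt.Println(includeExcludeHelper("telegraf/cpu/usage", []string{"telegraf/"}, nil)) // true
	fmt.Println(includeExcludeHelper("telegraf/cpu/usage", nil, []string{"telegraf/"})) // false
	fmt.Println(includeExcludeHelper("telegraf/cpu/usage", nil, nil))                   // true
}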
type buckets interface {
amount() int32
upperBound(i int32) float64
}
type linearBuckets struct {
*distributionpb.Distribution_BucketOptions_Linear
}
func (l *linearBuckets) amount() int32 {
return l.NumFiniteBuckets + 2
}
func (l *linearBuckets) upperBound(i int32) float64 {
return l.Offset + (l.Width * float64(i))
}
type exponentialBuckets struct {
*distributionpb.Distribution_BucketOptions_Exponential
}
func (e *exponentialBuckets) amount() int32 {
return e.NumFiniteBuckets + 2
}
func (e *exponentialBuckets) upperBound(i int32) float64 {
width := math.Pow(e.GrowthFactor, float64(i))
return e.Scale * width
}
type explicitBuckets struct {
*distributionpb.Distribution_BucketOptions_Explicit
}
func (e *explicitBuckets) amount() int32 {
return int32(len(e.Bounds)) + 1
}
func (e *explicitBuckets) upperBound(i int32) float64 {
return e.Bounds[i]
}
func newBucket(dist *distributionpb.Distribution) (buckets, error) {
linBuckets := dist.BucketOptions.GetLinearBuckets()
if linBuckets != nil {
var l linearBuckets
l.Distribution_BucketOptions_Linear = linBuckets
return &l, nil
}
expoBuckets := dist.BucketOptions.GetExponentialBuckets()
if expoBuckets != nil {
var e exponentialBuckets
e.Distribution_BucketOptions_Exponential = expoBuckets
return &e, nil
}
explBuckets := dist.BucketOptions.GetExplicitBuckets()
if explBuckets != nil {
var e explicitBuckets
e.Distribution_BucketOptions_Explicit = explBuckets
return &e, nil
}
return nil, errors.New("no buckets available")
}
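A worked example of the three bucketing schemes (values illustrative): linear bounds are offset + width*i, exponential bounds are scale * growthFactor^i, and explicit bounds are listed verbatim with one extra overflow bucket:

package main

import (
	"fmt"
	"math"
)

func main() {
	// linear: upperBound(i) = offset + width*i
	offset, width := 0.0, 10.0
	fmt.Println(offset + width*3) // 30
	// exponential: upperBound(i) = scale * growthFactor^i
	scale, growth := 1.0, 2.0
	fmt.Println(scale * math.Pow(growth, 3)) // 8
	// explicit: amount() = len(bounds) + 1; the final bucket is the +Inf overflow
	bounds := []float64{0.1, 0.5, 1}
	fmt.Println(len(bounds) + 1) // 4
}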
func init() {
inputs.Add("stackdriver", func() telegraf.Input {
return &stackdriver{
return &Stackdriver{
CacheTTL: defaultCacheTTL,
RateLimit: defaultRateLimit,
Delay: defaultDelay,
@ -18,52 +18,52 @@ import (
"github.com/influxdata/telegraf/testutil" "github.com/influxdata/telegraf/testutil"
) )
type Call struct {
type call struct {
name string
args []interface{}
}
type MockStackdriverClient struct {
ListMetricDescriptorsF func() (<-chan *metricpb.MetricDescriptor, error)
ListTimeSeriesF func() (<-chan *monitoringpb.TimeSeries, error)
CloseF func() error
calls []*Call
type mockStackdriverClient struct {
listMetricDescriptorsF func() (<-chan *metricpb.MetricDescriptor, error)
listTimeSeriesF func() (<-chan *monitoringpb.TimeSeries, error)
closeF func() error
calls []*call
sync.Mutex
}
func (m *MockStackdriverClient) ListMetricDescriptors(
ctx context.Context,
req *monitoringpb.ListMetricDescriptorsRequest,
) (<-chan *metricpb.MetricDescriptor, error) {
call := &Call{name: "ListMetricDescriptors", args: []interface{}{ctx, req}}
m.Lock()
m.calls = append(m.calls, call)
m.Unlock()
return m.ListMetricDescriptorsF()
}
func (m *mockStackdriverClient) listMetricDescriptors(
ctx context.Context,
req *monitoringpb.ListMetricDescriptorsRequest,
) (<-chan *metricpb.MetricDescriptor, error) {
call := &call{name: "listMetricDescriptors", args: []interface{}{ctx, req}}
m.Lock()
m.calls = append(m.calls, call)
m.Unlock()
return m.listMetricDescriptorsF()
}
func (m *MockStackdriverClient) ListTimeSeries(
ctx context.Context,
req *monitoringpb.ListTimeSeriesRequest,
) (<-chan *monitoringpb.TimeSeries, error) {
call := &Call{name: "ListTimeSeries", args: []interface{}{ctx, req}}
m.Lock()
m.calls = append(m.calls, call)
m.Unlock()
return m.ListTimeSeriesF()
}
func (m *mockStackdriverClient) listTimeSeries(
ctx context.Context,
req *monitoringpb.ListTimeSeriesRequest,
) (<-chan *monitoringpb.TimeSeries, error) {
call := &call{name: "listTimeSeries", args: []interface{}{ctx, req}}
m.Lock()
m.calls = append(m.calls, call)
m.Unlock()
return m.listTimeSeriesF()
}
func (m *MockStackdriverClient) Close() error {
call := &Call{name: "Close", args: make([]interface{}, 0)}
m.Lock()
m.calls = append(m.calls, call)
m.Unlock()
return m.CloseF()
}
func (m *mockStackdriverClient) close() error {
call := &call{name: "close", args: make([]interface{}, 0)}
m.Lock()
m.calls = append(m.calls, call)
m.Unlock()
return m.closeF()
}
func TestInitAndRegister(t *testing.T) {
expected := &stackdriver{
expected := &Stackdriver{
CacheTTL: defaultCacheTTL,
RateLimit: defaultRateLimit,
Delay: defaultDelay,
@ -731,15 +731,15 @@ func TestGather(t *testing.T) {
return ch, nil
}
s := &stackdriver{
s := &Stackdriver{
Log: testutil.Logger{},
Project: "test",
RateLimit: 10,
GatherRawDistributionBuckets: true,
client: &MockStackdriverClient{
ListMetricDescriptorsF: listMetricDescriptorsF,
ListTimeSeriesF: listTimeSeriesF,
CloseF: func() error {
client: &mockStackdriverClient{
listMetricDescriptorsF: listMetricDescriptorsF,
listTimeSeriesF: listTimeSeriesF,
closeF: func() error {
return nil
},
},
@ -839,25 +839,25 @@ func TestGatherAlign(t *testing.T) {
for listCall, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator
client := &MockStackdriverClient{
client := &mockStackdriverClient{
ListMetricDescriptorsF: func() (<-chan *metricpb.MetricDescriptor, error) {
listMetricDescriptorsF: func() (<-chan *metricpb.MetricDescriptor, error) {
ch := make(chan *metricpb.MetricDescriptor, 1)
ch <- tt.descriptor
close(ch)
return ch, nil
},
ListTimeSeriesF: func() (<-chan *monitoringpb.TimeSeries, error) {
listTimeSeriesF: func() (<-chan *monitoringpb.TimeSeries, error) {
ch := make(chan *monitoringpb.TimeSeries, 1)
ch <- tt.timeseries[listCall]
close(ch)
return ch, nil
},
CloseF: func() error {
closeF: func() error {
return nil
},
}
s := &stackdriver{
s := &Stackdriver{
Log: testutil.Logger{},
Project: "test",
RateLimit: 10,
@ -891,13 +891,13 @@ func TestListMetricDescriptorFilter(t *testing.T) {
now := time.Now().Round(time.Second)
tests := []struct {
name string
stackdriver *stackdriver
stackdriver *Stackdriver
descriptor *metricpb.MetricDescriptor
calls []call
}{
{
name: "simple",
stackdriver: &stackdriver{
stackdriver: &Stackdriver{
Project: "test",
MetricTypePrefixInclude: []string{"telegraf/cpu/usage"},
RateLimit: 1,
@ -908,17 +908,17 @@ func TestListMetricDescriptorFilter(t *testing.T) {
},
calls: []call{
{
name: "ListMetricDescriptors",
name: "listMetricDescriptors",
filter: `metric.type = starts_with("telegraf/cpu/usage")`,
}, {
name: "ListTimeSeries",
name: "listTimeSeries",
filter: `metric.type = "telegraf/cpu/usage"`,
},
},
},
{
name: "single resource labels string",
stackdriver: &stackdriver{
stackdriver: &Stackdriver{
Project: "test",
MetricTypePrefixInclude: []string{"telegraf/cpu/usage"},
Filter: &listTimeSeriesFilter{
@ -937,17 +937,17 @@ func TestListMetricDescriptorFilter(t *testing.T) {
},
calls: []call{
{
name: "ListMetricDescriptors",
name: "listMetricDescriptors",
filter: `metric.type = starts_with("telegraf/cpu/usage")`,
}, {
name: "ListTimeSeries",
name: "listTimeSeries",
filter: `metric.type = "telegraf/cpu/usage" AND resource.labels.instance_name = "localhost"`,
},
},
},
{
name: "single resource labels function",
stackdriver: &stackdriver{
stackdriver: &Stackdriver{
Project: "test",
MetricTypePrefixInclude: []string{"telegraf/cpu/usage"},
Filter: &listTimeSeriesFilter{
@ -966,17 +966,17 @@ func TestListMetricDescriptorFilter(t *testing.T) {
}, },
calls: []call{ calls: []call{
{ {
name: "ListMetricDescriptors", name: "listMetricDescriptors",
filter: `metric.type = starts_with("telegraf/cpu/usage")`, filter: `metric.type = starts_with("telegraf/cpu/usage")`,
}, { }, {
name: "ListTimeSeries", name: "listTimeSeries",
filter: `metric.type = "telegraf/cpu/usage" AND resource.labels.instance_name = starts_with("localhost")`, filter: `metric.type = "telegraf/cpu/usage" AND resource.labels.instance_name = starts_with("localhost")`,
}, },
}, },
}, },
{ {
name: "multiple resource labels", name: "multiple resource labels",
stackdriver: &stackdriver{ stackdriver: &Stackdriver{
Project: "test", Project: "test",
MetricTypePrefixInclude: []string{"telegraf/cpu/usage"}, MetricTypePrefixInclude: []string{"telegraf/cpu/usage"},
Filter: &listTimeSeriesFilter{ Filter: &listTimeSeriesFilter{
@ -999,17 +999,17 @@ func TestListMetricDescriptorFilter(t *testing.T) {
}, },
calls: []call{ calls: []call{
{ {
name: "ListMetricDescriptors", name: "listMetricDescriptors",
filter: `metric.type = starts_with("telegraf/cpu/usage")`, filter: `metric.type = starts_with("telegraf/cpu/usage")`,
}, { }, {
name: "ListTimeSeries", name: "listTimeSeries",
filter: `metric.type = "telegraf/cpu/usage" AND (resource.labels.instance_name = "localhost" OR resource.labels.zone = starts_with("us-"))`, filter: `metric.type = "telegraf/cpu/usage" AND (resource.labels.instance_name = "localhost" OR resource.labels.zone = starts_with("us-"))`,
}, },
}, },
}, },
{ {
name: "single metric label string", name: "single metric label string",
stackdriver: &stackdriver{ stackdriver: &Stackdriver{
Project: "test", Project: "test",
MetricTypePrefixInclude: []string{"telegraf/cpu/usage"}, MetricTypePrefixInclude: []string{"telegraf/cpu/usage"},
Filter: &listTimeSeriesFilter{ Filter: &listTimeSeriesFilter{
@ -1028,17 +1028,17 @@ func TestListMetricDescriptorFilter(t *testing.T) {
}, },
calls: []call{ calls: []call{
{ {
name: "ListMetricDescriptors", name: "listMetricDescriptors",
filter: `metric.type = starts_with("telegraf/cpu/usage")`, filter: `metric.type = starts_with("telegraf/cpu/usage")`,
}, { }, {
name: "ListTimeSeries", name: "listTimeSeries",
filter: `metric.type = "telegraf/cpu/usage" AND metric.labels.resource_type = "instance"`, filter: `metric.type = "telegraf/cpu/usage" AND metric.labels.resource_type = "instance"`,
}, },
}, },
}, },
{ {
name: "single metric label function", name: "single metric label function",
stackdriver: &stackdriver{ stackdriver: &Stackdriver{
Project: "test", Project: "test",
MetricTypePrefixInclude: []string{"telegraf/cpu/usage"}, MetricTypePrefixInclude: []string{"telegraf/cpu/usage"},
Filter: &listTimeSeriesFilter{ Filter: &listTimeSeriesFilter{
@ -1057,17 +1057,17 @@ func TestListMetricDescriptorFilter(t *testing.T) {
}, },
calls: []call{ calls: []call{
{ {
name: "ListMetricDescriptors", name: "listMetricDescriptors",
filter: `metric.type = starts_with("telegraf/cpu/usage")`, filter: `metric.type = starts_with("telegraf/cpu/usage")`,
}, { }, {
name: "ListTimeSeries", name: "listTimeSeries",
filter: `metric.type = "telegraf/cpu/usage" AND metric.labels.resource_id = starts_with("abc-")`, filter: `metric.type = "telegraf/cpu/usage" AND metric.labels.resource_id = starts_with("abc-")`,
}, },
}, },
}, },
{ {
name: "multiple metric labels", name: "multiple metric labels",
stackdriver: &stackdriver{ stackdriver: &Stackdriver{
Project: "test", Project: "test",
MetricTypePrefixInclude: []string{"telegraf/cpu/usage"}, MetricTypePrefixInclude: []string{"telegraf/cpu/usage"},
Filter: &listTimeSeriesFilter{ Filter: &listTimeSeriesFilter{
@ -1090,10 +1090,10 @@ func TestListMetricDescriptorFilter(t *testing.T) {
}, },
calls: []call{ calls: []call{
{ {
name: "ListMetricDescriptors", name: "listMetricDescriptors",
filter: `metric.type = starts_with("telegraf/cpu/usage")`, filter: `metric.type = starts_with("telegraf/cpu/usage")`,
}, { }, {
name: "ListTimeSeries", name: "listTimeSeries",
filter: `metric.type = "telegraf/cpu/usage" AND ` + filter: `metric.type = "telegraf/cpu/usage" AND ` +
`(metric.labels.resource_type = "instance" OR metric.labels.resource_id = starts_with("abc-"))`, `(metric.labels.resource_type = "instance" OR metric.labels.resource_id = starts_with("abc-"))`,
}, },
@ -1101,7 +1101,7 @@ func TestListMetricDescriptorFilter(t *testing.T) {
}, },
{ {
name: "all labels filters", name: "all labels filters",
stackdriver: &stackdriver{ stackdriver: &Stackdriver{
Project: "test", Project: "test",
MetricTypePrefixInclude: []string{"telegraf/cpu/usage"}, MetricTypePrefixInclude: []string{"telegraf/cpu/usage"},
Filter: &listTimeSeriesFilter{ Filter: &listTimeSeriesFilter{
@ -1154,10 +1154,10 @@ func TestListMetricDescriptorFilter(t *testing.T) {
}, },
calls: []call{ calls: []call{
{ {
name: "ListMetricDescriptors", name: "listMetricDescriptors",
filter: `metric.type = starts_with("telegraf/cpu/usage")`, filter: `metric.type = starts_with("telegraf/cpu/usage")`,
}, { }, {
name: "ListTimeSeries", name: "listTimeSeries",
filter: `metric.type = "telegraf/cpu/usage" AND ` + filter: `metric.type = "telegraf/cpu/usage" AND ` +
`(resource.labels.instance_name = "localhost" OR resource.labels.zone = starts_with("us-")) AND ` + `(resource.labels.instance_name = "localhost" OR resource.labels.zone = starts_with("us-")) AND ` +
`(metric.labels.resource_type = "instance" OR metric.labels.resource_id = starts_with("abc-")) AND ` + `(metric.labels.resource_type = "instance" OR metric.labels.resource_id = starts_with("abc-")) AND ` +
@ -1170,14 +1170,14 @@ func TestListMetricDescriptorFilter(t *testing.T) {
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
client := &MockStackdriverClient{ client := &mockStackdriverClient{
ListMetricDescriptorsF: func() (<-chan *metricpb.MetricDescriptor, error) { listMetricDescriptorsF: func() (<-chan *metricpb.MetricDescriptor, error) {
ch := make(chan *metricpb.MetricDescriptor, 1) ch := make(chan *metricpb.MetricDescriptor, 1)
ch <- tt.descriptor ch <- tt.descriptor
close(ch) close(ch)
return ch, nil return ch, nil
}, },
ListTimeSeriesF: func() (<-chan *monitoringpb.TimeSeries, error) { listTimeSeriesF: func() (<-chan *monitoringpb.TimeSeries, error) {
ch := make(chan *monitoringpb.TimeSeries, 1) ch := make(chan *monitoringpb.TimeSeries, 1)
ch <- createTimeSeries( ch <- createTimeSeries(
&monitoringpb.Point{ &monitoringpb.Point{
@ -1197,7 +1197,7 @@ func TestListMetricDescriptorFilter(t *testing.T) {
close(ch) close(ch)
return ch, nil return ch, nil
}, },
CloseF: func() error { closeF: func() error {
return nil return nil
}, },
} }

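The tests above script the Stackdriver client through function-valued fields (listMetricDescriptorsF, listTimeSeriesF, closeF), so each case decides what the API returns without a real connection. A minimal sketch of that function-field mock pattern, using illustrative names rather than the plugin's actual types:

package main

import "fmt"

// client stands in for the interface the plugin code consumes;
// the real Stackdriver client interface is much richer.
type client interface {
	list() (<-chan string, error)
	close() error
}

// mockClient satisfies client through injectable function fields,
// so each test case scripts exactly what the "API" returns.
type mockClient struct {
	listF  func() (<-chan string, error)
	closeF func() error
}

func (m *mockClient) list() (<-chan string, error) { return m.listF() }
func (m *mockClient) close() error                 { return m.closeF() }

func main() {
	var c client = &mockClient{
		listF: func() (<-chan string, error) {
			ch := make(chan string, 1)
			ch <- "telegraf/cpu/usage" // one canned descriptor
			close(ch)
			return ch, nil
		},
		closeF: func() error { return nil },
	}
	ch, _ := c.list()
	for d := range ch {
		fmt.Println(d)
	}
	_ = c.close()
}
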
View File

@ -73,7 +73,7 @@ func TestEventGather(t *testing.T) {
}, },
} }
acc := &testutil.Accumulator{} acc := &testutil.Accumulator{}
s := NewTestStatsd() s := newTestStatsd()
require.NoError(t, s.Start(acc)) require.NoError(t, s.Start(acc))
defer s.Stop() defer s.Stop()
@ -380,7 +380,7 @@ func TestEvents(t *testing.T) {
}, },
}, },
} }
s := NewTestStatsd() s := newTestStatsd()
acc := &testutil.Accumulator{} acc := &testutil.Accumulator{}
require.NoError(t, s.Start(acc)) require.NoError(t, s.Start(acc))
defer s.Stop() defer s.Stop()
@ -408,7 +408,7 @@ func TestEvents(t *testing.T) {
func TestEventError(t *testing.T) { func TestEventError(t *testing.T) {
now := time.Now() now := time.Now()
s := NewTestStatsd() s := newTestStatsd()
acc := &testutil.Accumulator{} acc := &testutil.Accumulator{}
require.NoError(t, s.Start(acc)) require.NoError(t, s.Start(acc))
defer s.Stop() defer s.Stop()

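The rename from NewTestStatsd to newTestStatsd (like MockStackdriverClient to mockStackdriverClient above) is what the revive exported check asks for: exported identifiers need a doc comment starting with their own name, and test helpers are simpler to unexport. A hedged sketch of the two compliant shapes:

package main

import "fmt"

// counter is unexported, so the exported check does not require a
// doc comment on it or on its constructor below.
type counter struct{ n int }

func newCounter() *counter { return &counter{} }

// Counter is exported, so revive demands a doc comment that begins
// with the identifier's own name, exactly like this one.
type Counter struct{ N int }

// NewCounter returns a ready-to-use Counter.
func NewCounter() *Counter { return &Counter{} }

func main() {
	fmt.Println(newCounter().n, NewCounter().N)
}
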
View File

@ -9,57 +9,57 @@ import (
const defaultPercentileLimit = 1000 const defaultPercentileLimit = 1000
const defaultMedianLimit = 1000 const defaultMedianLimit = 1000
// RunningStats calculates a running mean, variance, standard deviation, // runningStats calculates a running mean, variance, standard deviation,
// lower bound, upper bound, count, and can calculate estimated percentiles. // lower bound, upper bound, count, and can calculate estimated percentiles.
// It is based on the incremental algorithm described here: // It is based on the incremental algorithm described here:
// //
// https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
type RunningStats struct { type runningStats struct {
k float64 k float64
n int64 n int64
ex float64 ex float64
ex2 float64 ex2 float64
// Array used to calculate estimated percentiles // Array used to calculate estimated percentiles
// We will store a maximum of PercLimit values, at which point we will start // We will store a maximum of percLimit values, at which point we will start
// randomly replacing old values, hence it is an estimated percentile. // randomly replacing old values, hence it is an estimated percentile.
perc []float64 perc []float64
PercLimit int percLimit int
sum float64 totalSum float64
lower float64 lowerBound float64
upper float64 upperBound float64
// cache if we have sorted the list so that we never re-sort a sorted list, // cache if we have sorted the list so that we never re-sort a sorted list,
// which can have very bad performance. // which can have very bad performance.
SortedPerc bool sortedPerc bool
// Array used to calculate estimated median values // Array used to calculate estimated median values
// We will store a maximum of MedLimit values, at which point we will start // We will store a maximum of medLimit values, at which point we will start
// overwriting the oldest values // overwriting the oldest values
med []float64 med []float64
MedLimit int medLimit int
MedInsertIndex int medInsertIndex int
} }
func (rs *RunningStats) AddValue(v float64) { func (rs *runningStats) addValue(v float64) {
// Whenever a value is added, the list is no longer sorted. // Whenever a value is added, the list is no longer sorted.
rs.SortedPerc = false rs.sortedPerc = false
if rs.n == 0 { if rs.n == 0 {
rs.k = v rs.k = v
rs.upper = v rs.upperBound = v
rs.lower = v rs.lowerBound = v
if rs.PercLimit == 0 { if rs.percLimit == 0 {
rs.PercLimit = defaultPercentileLimit rs.percLimit = defaultPercentileLimit
} }
if rs.MedLimit == 0 { if rs.medLimit == 0 {
rs.MedLimit = defaultMedianLimit rs.medLimit = defaultMedianLimit
rs.MedInsertIndex = 0 rs.medInsertIndex = 0
} }
rs.perc = make([]float64, 0, rs.PercLimit) rs.perc = make([]float64, 0, rs.percLimit)
rs.med = make([]float64, 0, rs.MedLimit) rs.med = make([]float64, 0, rs.medLimit)
} }
// These are used for the running mean and variance // These are used for the running mean and variance
@ -68,36 +68,36 @@ func (rs *RunningStats) AddValue(v float64) {
rs.ex2 += (v - rs.k) * (v - rs.k) rs.ex2 += (v - rs.k) * (v - rs.k)
// add to running sum // add to running sum
rs.sum += v rs.totalSum += v
// track upper and lower bounds // track upper and lower bounds
if v > rs.upper { if v > rs.upperBound {
rs.upper = v rs.upperBound = v
} else if v < rs.lower { } else if v < rs.lowerBound {
rs.lower = v rs.lowerBound = v
} }
if len(rs.perc) < rs.PercLimit { if len(rs.perc) < rs.percLimit {
rs.perc = append(rs.perc, v) rs.perc = append(rs.perc, v)
} else { } else {
// Reached limit, choose random index to overwrite in the percentile array // Reached limit, choose random index to overwrite in the percentile array
rs.perc[rand.Intn(len(rs.perc))] = v //nolint:gosec // G404: not security critical rs.perc[rand.Intn(len(rs.perc))] = v //nolint:gosec // G404: not security critical
} }
if len(rs.med) < rs.MedLimit { if len(rs.med) < rs.medLimit {
rs.med = append(rs.med, v) rs.med = append(rs.med, v)
} else { } else {
// Reached limit, start over // Reached limit, start over
rs.med[rs.MedInsertIndex] = v rs.med[rs.medInsertIndex] = v
} }
rs.MedInsertIndex = (rs.MedInsertIndex + 1) % rs.MedLimit rs.medInsertIndex = (rs.medInsertIndex + 1) % rs.medLimit
} }
func (rs *RunningStats) Mean() float64 { func (rs *runningStats) mean() float64 {
return rs.k + rs.ex/float64(rs.n) return rs.k + rs.ex/float64(rs.n)
} }
func (rs *RunningStats) Median() float64 { func (rs *runningStats) median() float64 {
// Need to sort for median, but keep temporal order // Need to sort for median, but keep temporal order
var values []float64 var values []float64
values = append(values, rs.med...) values = append(values, rs.med...)
@ -111,38 +111,38 @@ func (rs *RunningStats) Median() float64 {
return values[count/2] return values[count/2]
} }
func (rs *RunningStats) Variance() float64 { func (rs *runningStats) variance() float64 {
return (rs.ex2 - (rs.ex*rs.ex)/float64(rs.n)) / float64(rs.n) return (rs.ex2 - (rs.ex*rs.ex)/float64(rs.n)) / float64(rs.n)
} }
func (rs *RunningStats) Stddev() float64 { func (rs *runningStats) stddev() float64 {
return math.Sqrt(rs.Variance()) return math.Sqrt(rs.variance())
} }
func (rs *RunningStats) Sum() float64 { func (rs *runningStats) sum() float64 {
return rs.sum return rs.totalSum
} }
func (rs *RunningStats) Upper() float64 { func (rs *runningStats) upper() float64 {
return rs.upper return rs.upperBound
} }
func (rs *RunningStats) Lower() float64 { func (rs *runningStats) lower() float64 {
return rs.lower return rs.lowerBound
} }
func (rs *RunningStats) Count() int64 { func (rs *runningStats) count() int64 {
return rs.n return rs.n
} }
func (rs *RunningStats) Percentile(n float64) float64 { func (rs *runningStats) percentile(n float64) float64 {
if n > 100 { if n > 100 {
n = 100 n = 100
} }
if !rs.SortedPerc { if !rs.sortedPerc {
sort.Float64s(rs.perc) sort.Float64s(rs.perc)
rs.SortedPerc = true rs.sortedPerc = true
} }
i := float64(len(rs.perc)) * n / float64(100) i := float64(len(rs.perc)) * n / float64(100)

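runningStats keeps only the first value k as a shift plus n, ex = Σ(v − k) and ex2 = Σ(v − k)², so mean() = k + ex/n and variance() = (ex2 − ex²/n)/n; shifting by k keeps the accumulated differences small. A standalone sketch (not part of this commit) checking that identity against a naive two-pass computation, using the same sample values as the tests below:

package main

import "fmt"

func main() {
	values := []float64{10, 20, 10, 30, 20, 11, 12, 32, 45, 9, 5, 5, 5, 10, 23, 8}

	// One-pass, shifted accumulation, as in runningStats.addValue.
	k := values[0]
	var n, ex, ex2 float64
	for _, v := range values {
		n++
		ex += v - k
		ex2 += (v - k) * (v - k)
	}
	mean := k + ex/n
	variance := (ex2 - ex*ex/n) / n

	// Naive two-pass computation for comparison.
	var sum float64
	for _, v := range values {
		sum += v
	}
	m2 := sum / n
	var ss float64
	for _, v := range values {
		ss += (v - m2) * (v - m2)
	}

	fmt.Println(mean, m2)       // both 15.9375
	fmt.Println(variance, ss/n) // both ≈ 124.93359
}
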
View File

@ -7,163 +7,163 @@ import (
// Test that a single metric is handled correctly // Test that a single metric is handled correctly
func TestRunningStats_Single(t *testing.T) { func TestRunningStats_Single(t *testing.T) {
rs := RunningStats{} rs := runningStats{}
values := []float64{10.1} values := []float64{10.1}
for _, v := range values { for _, v := range values {
rs.AddValue(v) rs.addValue(v)
} }
if rs.Mean() != 10.1 { if rs.mean() != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Mean()) t.Errorf("Expected %v, got %v", 10.1, rs.mean())
} }
if rs.Median() != 10.1 { if rs.median() != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Median()) t.Errorf("Expected %v, got %v", 10.1, rs.median())
} }
if rs.Upper() != 10.1 { if rs.upper() != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Upper()) t.Errorf("Expected %v, got %v", 10.1, rs.upper())
} }
if rs.Lower() != 10.1 { if rs.lower() != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Lower()) t.Errorf("Expected %v, got %v", 10.1, rs.lower())
} }
if rs.Percentile(100) != 10.1 { if rs.percentile(100) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(100)) t.Errorf("Expected %v, got %v", 10.1, rs.percentile(100))
} }
if rs.Percentile(99.95) != 10.1 { if rs.percentile(99.95) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(99.95)) t.Errorf("Expected %v, got %v", 10.1, rs.percentile(99.95))
} }
if rs.Percentile(90) != 10.1 { if rs.percentile(90) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(90)) t.Errorf("Expected %v, got %v", 10.1, rs.percentile(90))
} }
if rs.Percentile(50) != 10.1 { if rs.percentile(50) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(50)) t.Errorf("Expected %v, got %v", 10.1, rs.percentile(50))
} }
if rs.Percentile(0) != 10.1 { if rs.percentile(0) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(0)) t.Errorf("Expected %v, got %v", 10.1, rs.percentile(0))
} }
if rs.Count() != 1 { if rs.count() != 1 {
t.Errorf("Expected %v, got %v", 1, rs.Count()) t.Errorf("Expected %v, got %v", 1, rs.count())
} }
if rs.Variance() != 0 { if rs.variance() != 0 {
t.Errorf("Expected %v, got %v", 0, rs.Variance()) t.Errorf("Expected %v, got %v", 0, rs.variance())
} }
if rs.Stddev() != 0 { if rs.stddev() != 0 {
t.Errorf("Expected %v, got %v", 0, rs.Stddev()) t.Errorf("Expected %v, got %v", 0, rs.stddev())
} }
} }
// Test that duplicate values are handled correctly // Test that duplicate values are handled correctly
func TestRunningStats_Duplicate(t *testing.T) { func TestRunningStats_Duplicate(t *testing.T) {
rs := RunningStats{} rs := runningStats{}
values := []float64{10.1, 10.1, 10.1, 10.1} values := []float64{10.1, 10.1, 10.1, 10.1}
for _, v := range values { for _, v := range values {
rs.AddValue(v) rs.addValue(v)
} }
if rs.Mean() != 10.1 { if rs.mean() != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Mean()) t.Errorf("Expected %v, got %v", 10.1, rs.mean())
} }
if rs.Median() != 10.1 { if rs.median() != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Median()) t.Errorf("Expected %v, got %v", 10.1, rs.median())
} }
if rs.Upper() != 10.1 { if rs.upper() != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Upper()) t.Errorf("Expected %v, got %v", 10.1, rs.upper())
} }
if rs.Lower() != 10.1 { if rs.lower() != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Lower()) t.Errorf("Expected %v, got %v", 10.1, rs.lower())
} }
if rs.Percentile(100) != 10.1 { if rs.percentile(100) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(100)) t.Errorf("Expected %v, got %v", 10.1, rs.percentile(100))
} }
if rs.Percentile(99.95) != 10.1 { if rs.percentile(99.95) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(99.95)) t.Errorf("Expected %v, got %v", 10.1, rs.percentile(99.95))
} }
if rs.Percentile(90) != 10.1 { if rs.percentile(90) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(90)) t.Errorf("Expected %v, got %v", 10.1, rs.percentile(90))
} }
if rs.Percentile(50) != 10.1 { if rs.percentile(50) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(50)) t.Errorf("Expected %v, got %v", 10.1, rs.percentile(50))
} }
if rs.Percentile(0) != 10.1 { if rs.percentile(0) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(0)) t.Errorf("Expected %v, got %v", 10.1, rs.percentile(0))
} }
if rs.Count() != 4 { if rs.count() != 4 {
t.Errorf("Expected %v, got %v", 4, rs.Count()) t.Errorf("Expected %v, got %v", 4, rs.count())
} }
if rs.Variance() != 0 { if rs.variance() != 0 {
t.Errorf("Expected %v, got %v", 0, rs.Variance()) t.Errorf("Expected %v, got %v", 0, rs.variance())
} }
if rs.Stddev() != 0 { if rs.stddev() != 0 {
t.Errorf("Expected %v, got %v", 0, rs.Stddev()) t.Errorf("Expected %v, got %v", 0, rs.stddev())
} }
} }
// Test a list of sample values, returns all correct values // Test a list of sample values, returns all correct values
func TestRunningStats(t *testing.T) { func TestRunningStats(t *testing.T) {
rs := RunningStats{} rs := runningStats{}
values := []float64{10, 20, 10, 30, 20, 11, 12, 32, 45, 9, 5, 5, 5, 10, 23, 8} values := []float64{10, 20, 10, 30, 20, 11, 12, 32, 45, 9, 5, 5, 5, 10, 23, 8}
for _, v := range values { for _, v := range values {
rs.AddValue(v) rs.addValue(v)
} }
if rs.Mean() != 15.9375 { if rs.mean() != 15.9375 {
t.Errorf("Expected %v, got %v", 15.9375, rs.Mean()) t.Errorf("Expected %v, got %v", 15.9375, rs.mean())
} }
if rs.Median() != 10.5 { if rs.median() != 10.5 {
t.Errorf("Expected %v, got %v", 10.5, rs.Median()) t.Errorf("Expected %v, got %v", 10.5, rs.median())
} }
if rs.Upper() != 45 { if rs.upper() != 45 {
t.Errorf("Expected %v, got %v", 45, rs.Upper()) t.Errorf("Expected %v, got %v", 45, rs.upper())
} }
if rs.Lower() != 5 { if rs.lower() != 5 {
t.Errorf("Expected %v, got %v", 5, rs.Lower()) t.Errorf("Expected %v, got %v", 5, rs.lower())
} }
if rs.Percentile(100) != 45 { if rs.percentile(100) != 45 {
t.Errorf("Expected %v, got %v", 45, rs.Percentile(100)) t.Errorf("Expected %v, got %v", 45, rs.percentile(100))
} }
if rs.Percentile(99.98) != 45 { if rs.percentile(99.98) != 45 {
t.Errorf("Expected %v, got %v", 45, rs.Percentile(99.98)) t.Errorf("Expected %v, got %v", 45, rs.percentile(99.98))
} }
if rs.Percentile(90) != 32 { if rs.percentile(90) != 32 {
t.Errorf("Expected %v, got %v", 32, rs.Percentile(90)) t.Errorf("Expected %v, got %v", 32, rs.percentile(90))
} }
if rs.Percentile(50.1) != 11 { if rs.percentile(50.1) != 11 {
t.Errorf("Expected %v, got %v", 11, rs.Percentile(50.1)) t.Errorf("Expected %v, got %v", 11, rs.percentile(50.1))
} }
if rs.Percentile(50) != 11 { if rs.percentile(50) != 11 {
t.Errorf("Expected %v, got %v", 11, rs.Percentile(50)) t.Errorf("Expected %v, got %v", 11, rs.percentile(50))
} }
if rs.Percentile(49.9) != 10 { if rs.percentile(49.9) != 10 {
t.Errorf("Expected %v, got %v", 10, rs.Percentile(49.9)) t.Errorf("Expected %v, got %v", 10, rs.percentile(49.9))
} }
if rs.Percentile(0) != 5 { if rs.percentile(0) != 5 {
t.Errorf("Expected %v, got %v", 5, rs.Percentile(0)) t.Errorf("Expected %v, got %v", 5, rs.percentile(0))
} }
if rs.Count() != 16 { if rs.count() != 16 {
t.Errorf("Expected %v, got %v", 4, rs.Count()) t.Errorf("Expected %v, got %v", 4, rs.count())
} }
if !fuzzyEqual(rs.Variance(), 124.93359, .00001) { if !fuzzyEqual(rs.variance(), 124.93359, .00001) {
t.Errorf("Expected %v, got %v", 124.93359, rs.Variance()) t.Errorf("Expected %v, got %v", 124.93359, rs.variance())
} }
if !fuzzyEqual(rs.Stddev(), 11.17736, .00001) { if !fuzzyEqual(rs.stddev(), 11.17736, .00001) {
t.Errorf("Expected %v, got %v", 11.17736, rs.Stddev()) t.Errorf("Expected %v, got %v", 11.17736, rs.stddev())
} }
} }
// Test that the percentile limit is respected. // Test that the percentile limit is respected.
func TestRunningStats_PercentileLimit(t *testing.T) { func TestRunningStats_PercentileLimit(t *testing.T) {
rs := RunningStats{} rs := runningStats{}
rs.PercLimit = 10 rs.percLimit = 10
values := []float64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} values := []float64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
for _, v := range values { for _, v := range values {
rs.AddValue(v) rs.addValue(v)
} }
if rs.Count() != 11 { if rs.count() != 11 {
t.Errorf("Expected %v, got %v", 11, rs.Count()) t.Errorf("Expected %v, got %v", 11, rs.count())
} }
if len(rs.perc) != 10 { if len(rs.perc) != 10 {
t.Errorf("Expected %v, got %v", 10, len(rs.perc)) t.Errorf("Expected %v, got %v", 10, len(rs.perc))
@ -174,23 +174,23 @@ func fuzzyEqual(a, b, epsilon float64) bool {
return math.Abs(a-b) <= epsilon return math.Abs(a-b) <= epsilon
} }
// Test that the median limit is respected and MedInsertIndex is incremented properly. // Test that the median limit is respected and medInsertIndex is incremented properly.
func TestRunningStats_MedianLimitIndex(t *testing.T) { func TestRunningStats_MedianLimitIndex(t *testing.T) {
rs := RunningStats{} rs := runningStats{}
rs.MedLimit = 10 rs.medLimit = 10
values := []float64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} values := []float64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
for _, v := range values { for _, v := range values {
rs.AddValue(v) rs.addValue(v)
} }
if rs.Count() != 11 { if rs.count() != 11 {
t.Errorf("Expected %v, got %v", 11, rs.Count()) t.Errorf("Expected %v, got %v", 11, rs.count())
} }
if len(rs.med) != 10 { if len(rs.med) != 10 {
t.Errorf("Expected %v, got %v", 10, len(rs.med)) t.Errorf("Expected %v, got %v", 10, len(rs.med))
} }
if rs.MedInsertIndex != 1 { if rs.medInsertIndex != 1 {
t.Errorf("Expected %v, got %v", 0, rs.MedInsertIndex) t.Errorf("Expected %v, got %v", 0, rs.medInsertIndex)
} }
} }

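The last test pins down the median reservoir's ring-buffer behavior: with medLimit = 10, ten values fill the slice, the eleventh overwrites slot 0, and medInsertIndex ends at (10 + 1) % 10 = 1. A tiny standalone sketch of that wrap-around:

package main

import "fmt"

func main() {
	const medLimit = 10
	med := make([]float64, 0, medLimit)
	insert := 0

	for i := 1; i <= 11; i++ {
		v := float64(i)
		if len(med) < medLimit {
			med = append(med, v) // still filling the reservoir
		} else {
			med[insert] = v // full: overwrite the oldest slot
		}
		insert = (insert + 1) % medLimit // wraps 9 -> 0 -> 1 ...
	}

	fmt.Println(len(med), insert) // 10 1, as the test asserts
	fmt.Println(med[0])           // 11: the 11th value replaced the 1st
}
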
View File

@ -26,35 +26,19 @@ import (
//go:embed sample.conf //go:embed sample.conf
var sampleConfig string var sampleConfig string
var errParsing = errors.New("error parsing statsd line")
const ( const (
// UDPMaxPacketSize is the UDP packet limit, see // udpMaxPacketSize is the UDP packet limit, see
// https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure // https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure
UDPMaxPacketSize int = 64 * 1024 udpMaxPacketSize int = 64 * 1024
defaultFieldName = "value"
defaultProtocol = "udp"
defaultFieldName = "value"
defaultProtocol = "udp"
defaultSeparator = "_" defaultSeparator = "_"
defaultAllowPendingMessage = 10000 defaultAllowPendingMessage = 10000
) )
var errParsing = errors.New("error parsing statsd line")
// Number will get parsed as an int or float depending on what is passed
type Number float64
func (n *Number) UnmarshalTOML(b []byte) error {
value, err := strconv.ParseFloat(string(b), 64)
if err != nil {
return err
}
*n = Number(value)
return nil
}
// Statsd allows the importing of statsd and dogstatsd data.
type Statsd struct { type Statsd struct {
// Protocol used on listener - udp or tcp // Protocol used on listener - udp or tcp
Protocol string `toml:"protocol"` Protocol string `toml:"protocol"`
@ -69,7 +53,7 @@ type Statsd struct {
// Percentiles specifies the percentiles that will be calculated for timing // Percentiles specifies the percentiles that will be calculated for timing
// and histogram stats. // and histogram stats.
Percentiles []Number `toml:"percentiles"` Percentiles []number `toml:"percentiles"`
PercentileLimit int `toml:"percentile_limit"` PercentileLimit int `toml:"percentile_limit"`
DeleteGauges bool `toml:"delete_gauges"` DeleteGauges bool `toml:"delete_gauges"`
DeleteCounters bool `toml:"delete_counters"` DeleteCounters bool `toml:"delete_counters"`
@ -171,6 +155,19 @@ type Statsd struct {
lastGatherTime time.Time lastGatherTime time.Time
} }
// number will get parsed as an int or float depending on what is passed
type number float64
func (n *number) UnmarshalTOML(b []byte) error {
value, err := strconv.ParseFloat(string(b), 64)
if err != nil {
return err
}
*n = number(value)
return nil
}
type input struct { type input struct {
*bytes.Buffer *bytes.Buffer
time.Time time.Time
@ -215,7 +212,7 @@ type cachedcounter struct {
type cachedtimings struct { type cachedtimings struct {
name string name string
fields map[string]RunningStats fields map[string]runningStats
tags map[string]string tags map[string]string
expiresAt time.Time expiresAt time.Time
} }
@ -230,110 +227,6 @@ func (*Statsd) SampleConfig() string {
return sampleConfig return sampleConfig
} }
func (s *Statsd) Gather(acc telegraf.Accumulator) error {
s.Lock()
defer s.Unlock()
now := time.Now()
for _, m := range s.distributions {
fields := map[string]interface{}{
defaultFieldName: m.value,
}
if s.EnableAggregationTemporality {
fields["start_time"] = s.lastGatherTime.Format(time.RFC3339)
}
acc.AddFields(m.name, fields, m.tags, now)
}
s.distributions = make([]cacheddistributions, 0)
for _, m := range s.timings {
// Defining a template to parse field names for timers allows us to split
// out multiple fields per timer. In this case we prefix each stat with the
// field name and store these all in a single measurement.
fields := make(map[string]interface{})
for fieldName, stats := range m.fields {
var prefix string
if fieldName != defaultFieldName {
prefix = fieldName + "_"
}
fields[prefix+"mean"] = stats.Mean()
fields[prefix+"median"] = stats.Median()
fields[prefix+"stddev"] = stats.Stddev()
fields[prefix+"sum"] = stats.Sum()
fields[prefix+"upper"] = stats.Upper()
fields[prefix+"lower"] = stats.Lower()
if s.FloatTimings {
fields[prefix+"count"] = float64(stats.Count())
} else {
fields[prefix+"count"] = stats.Count()
}
for _, percentile := range s.Percentiles {
name := fmt.Sprintf("%s%v_percentile", prefix, percentile)
fields[name] = stats.Percentile(float64(percentile))
}
}
if s.EnableAggregationTemporality {
fields["start_time"] = s.lastGatherTime.Format(time.RFC3339)
}
acc.AddFields(m.name, fields, m.tags, now)
}
if s.DeleteTimings {
s.timings = make(map[string]cachedtimings)
}
for _, m := range s.gauges {
if s.EnableAggregationTemporality && m.fields != nil {
m.fields["start_time"] = s.lastGatherTime.Format(time.RFC3339)
}
acc.AddGauge(m.name, m.fields, m.tags, now)
}
if s.DeleteGauges {
s.gauges = make(map[string]cachedgauge)
}
for _, m := range s.counters {
if s.EnableAggregationTemporality && m.fields != nil {
m.fields["start_time"] = s.lastGatherTime.Format(time.RFC3339)
}
if s.FloatCounters {
for key := range m.fields {
m.fields[key] = float64(m.fields[key].(int64))
}
}
acc.AddCounter(m.name, m.fields, m.tags, now)
}
if s.DeleteCounters {
s.counters = make(map[string]cachedcounter)
}
for _, m := range s.sets {
fields := make(map[string]interface{})
for field, set := range m.fields {
if s.FloatSets {
fields[field] = float64(len(set))
} else {
fields[field] = int64(len(set))
}
}
if s.EnableAggregationTemporality {
fields["start_time"] = s.lastGatherTime.Format(time.RFC3339)
}
acc.AddFields(m.name, fields, m.tags, now)
}
if s.DeleteSets {
s.sets = make(map[string]cachedset)
}
s.expireCachedMetrics()
s.lastGatherTime = now
return nil
}
func (s *Statsd) Start(ac telegraf.Accumulator) error { func (s *Statsd) Start(ac telegraf.Accumulator) error {
if s.ParseDataDogTags { if s.ParseDataDogTags {
s.DataDogExtensions = true s.DataDogExtensions = true
@ -444,6 +337,147 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error {
return nil return nil
} }
func (s *Statsd) Gather(acc telegraf.Accumulator) error {
s.Lock()
defer s.Unlock()
now := time.Now()
for _, m := range s.distributions {
fields := map[string]interface{}{
defaultFieldName: m.value,
}
if s.EnableAggregationTemporality {
fields["start_time"] = s.lastGatherTime.Format(time.RFC3339)
}
acc.AddFields(m.name, fields, m.tags, now)
}
s.distributions = make([]cacheddistributions, 0)
for _, m := range s.timings {
// Defining a template to parse field names for timers allows us to split
// out multiple fields per timer. In this case we prefix each stat with the
// field name and store these all in a single measurement.
fields := make(map[string]interface{})
for fieldName, stats := range m.fields {
var prefix string
if fieldName != defaultFieldName {
prefix = fieldName + "_"
}
fields[prefix+"mean"] = stats.mean()
fields[prefix+"median"] = stats.median()
fields[prefix+"stddev"] = stats.stddev()
fields[prefix+"sum"] = stats.sum()
fields[prefix+"upper"] = stats.upper()
fields[prefix+"lower"] = stats.lower()
if s.FloatTimings {
fields[prefix+"count"] = float64(stats.count())
} else {
fields[prefix+"count"] = stats.count()
}
for _, percentile := range s.Percentiles {
name := fmt.Sprintf("%s%v_percentile", prefix, percentile)
fields[name] = stats.percentile(float64(percentile))
}
}
if s.EnableAggregationTemporality {
fields["start_time"] = s.lastGatherTime.Format(time.RFC3339)
}
acc.AddFields(m.name, fields, m.tags, now)
}
if s.DeleteTimings {
s.timings = make(map[string]cachedtimings)
}
for _, m := range s.gauges {
if s.EnableAggregationTemporality && m.fields != nil {
m.fields["start_time"] = s.lastGatherTime.Format(time.RFC3339)
}
acc.AddGauge(m.name, m.fields, m.tags, now)
}
if s.DeleteGauges {
s.gauges = make(map[string]cachedgauge)
}
for _, m := range s.counters {
if s.EnableAggregationTemporality && m.fields != nil {
m.fields["start_time"] = s.lastGatherTime.Format(time.RFC3339)
}
if s.FloatCounters {
for key := range m.fields {
m.fields[key] = float64(m.fields[key].(int64))
}
}
acc.AddCounter(m.name, m.fields, m.tags, now)
}
if s.DeleteCounters {
s.counters = make(map[string]cachedcounter)
}
for _, m := range s.sets {
fields := make(map[string]interface{})
for field, set := range m.fields {
if s.FloatSets {
fields[field] = float64(len(set))
} else {
fields[field] = int64(len(set))
}
}
if s.EnableAggregationTemporality {
fields["start_time"] = s.lastGatherTime.Format(time.RFC3339)
}
acc.AddFields(m.name, fields, m.tags, now)
}
if s.DeleteSets {
s.sets = make(map[string]cachedset)
}
s.expireCachedMetrics()
s.lastGatherTime = now
return nil
}
func (s *Statsd) Stop() {
s.Lock()
s.Log.Infof("Stopping the statsd service")
close(s.done)
if s.isUDP() {
if s.UDPlistener != nil {
s.UDPlistener.Close()
}
} else {
if s.TCPlistener != nil {
s.TCPlistener.Close()
}
// Close all open TCP connections
// - get all conns from the s.conns map and put into slice
// - this is so the forget() function doesn't conflict with looping // - this is so the forget() function doesn't conflict with looping
// over the s.conns map
var conns []*net.TCPConn
s.cleanup.Lock()
for _, conn := range s.conns {
conns = append(conns, conn)
}
s.cleanup.Unlock()
for _, conn := range conns {
conn.Close()
}
}
s.Unlock()
s.wg.Wait()
s.Lock()
close(s.in)
s.Log.Infof("Stopped listener service on %q", s.ServiceAddress)
s.Unlock()
}
// tcpListen() starts listening for TCP packets on the configured port. // tcpListen() starts listening for TCP packets on the configured port.
func (s *Statsd) tcpListen(listener *net.TCPListener) error { func (s *Statsd) tcpListen(listener *net.TCPListener) error {
for { for {
@ -497,7 +531,7 @@ func (s *Statsd) udpListen(conn *net.UDPConn) error {
} }
} }
buf := make([]byte, UDPMaxPacketSize) buf := make([]byte, udpMaxPacketSize)
for { for {
select { select {
case <-s.done: case <-s.done:
@ -838,7 +872,7 @@ func (s *Statsd) aggregate(m metric) {
if !ok { if !ok {
cached = cachedtimings{ cached = cachedtimings{
name: m.name, name: m.name,
fields: make(map[string]RunningStats), fields: make(map[string]runningStats),
tags: m.tags, tags: m.tags,
} }
} }
@ -846,16 +880,16 @@ func (s *Statsd) aggregate(m metric) {
// this will be the default field name, eg. "value" // this will be the default field name, eg. "value"
field, ok := cached.fields[m.field] field, ok := cached.fields[m.field]
if !ok { if !ok {
field = RunningStats{ field = runningStats{
PercLimit: s.PercentileLimit, percLimit: s.PercentileLimit,
} }
} }
if m.samplerate > 0 { if m.samplerate > 0 {
for i := 0; i < int(1.0/m.samplerate); i++ { for i := 0; i < int(1.0/m.samplerate); i++ {
field.AddValue(m.floatvalue) field.addValue(m.floatvalue)
} }
} else { } else {
field.AddValue(m.floatvalue) field.addValue(m.floatvalue)
} }
cached.fields[m.field] = field cached.fields[m.field] = field
cached.expiresAt = time.Now().Add(time.Duration(s.MaxTTL)) cached.expiresAt = time.Now().Add(time.Duration(s.MaxTTL))
@ -1000,43 +1034,6 @@ func (s *Statsd) remember(id string, conn *net.TCPConn) {
s.conns[id] = conn s.conns[id] = conn
} }
func (s *Statsd) Stop() {
s.Lock()
s.Log.Infof("Stopping the statsd service")
close(s.done)
if s.isUDP() {
if s.UDPlistener != nil {
s.UDPlistener.Close()
}
} else {
if s.TCPlistener != nil {
s.TCPlistener.Close()
}
// Close all open TCP connections
// - get all conns from the s.conns map and put into slice
// - this is so the forget() function doesn't conflict with looping
// over the s.conns map
var conns []*net.TCPConn
s.cleanup.Lock()
for _, conn := range s.conns {
conns = append(conns, conn)
}
s.cleanup.Unlock()
for _, conn := range conns {
conn.Close()
}
}
s.Unlock()
s.wg.Wait()
s.Lock()
close(s.in)
s.Log.Infof("Stopped listener service on %q", s.ServiceAddress)
s.Unlock()
}
// isUDP returns true if the protocol is UDP, false otherwise. // isUDP returns true if the protocol is UDP, false otherwise.
func (s *Statsd) isUDP() bool { func (s *Statsd) isUDP() bool {
return strings.HasPrefix(s.Protocol, "udp") return strings.HasPrefix(s.Protocol, "udp")

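In aggregate above, a timing that arrives with a sample rate is replayed 1/rate times so the cached distribution approximates the unsampled stream; a line such as load:12|ms|@0.1 therefore contributes ten values. A small sketch of just that amplification step (the helper name is illustrative):

package main

import "fmt"

// addSampled replays v 1/rate times, mirroring the loop in
// Statsd.aggregate; rate <= 0 means "unsampled, add once".
func addSampled(stats []float64, v, rate float64) []float64 {
	if rate > 0 {
		for i := 0; i < int(1.0/rate); i++ {
			stats = append(stats, v)
		}
		return stats
	}
	return append(stats, v)
}

func main() {
	var stats []float64
	stats = addSampled(stats, 12, 0.1) // "load:12|ms|@0.1"
	fmt.Println(len(stats))            // 10
}
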
View File

@ -19,7 +19,7 @@ const (
producerThreads = 10 producerThreads = 10
) )
func NewTestStatsd() *Statsd { func newTestStatsd() *Statsd {
s := Statsd{ s := Statsd{
Log: testutil.Logger{}, Log: testutil.Logger{},
NumberWorkerThreads: 5, NumberWorkerThreads: 5,
@ -339,7 +339,7 @@ func BenchmarkTCP(b *testing.B) {
// Valid lines should be parsed and their values should be cached // Valid lines should be parsed and their values should be cached
func TestParse_ValidLines(t *testing.T) { func TestParse_ValidLines(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
validLines := []string{ validLines := []string{
"valid:45|c", "valid:45|c",
"valid:45|s", "valid:45|s",
@ -355,7 +355,7 @@ func TestParse_ValidLines(t *testing.T) {
// Tests low-level functionality of gauges // Tests low-level functionality of gauges
func TestParse_Gauges(t *testing.T) { func TestParse_Gauges(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
// Test that gauge +- values work // Test that gauge +- values work
validLines := []string{ validLines := []string{
@ -425,7 +425,7 @@ func TestParse_Gauges(t *testing.T) {
// Tests low-level functionality of sets // Tests low-level functionality of sets
func TestParse_Sets(t *testing.T) { func TestParse_Sets(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
// Test that sets work // Test that sets work
validLines := []string{ validLines := []string{
@ -480,7 +480,7 @@ func TestParse_Sets(t *testing.T) {
} }
func TestParse_Sets_SetsAsFloat(t *testing.T) { func TestParse_Sets_SetsAsFloat(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
s.FloatSets = true s.FloatSets = true
// Test that sets work // Test that sets work
@ -526,7 +526,7 @@ func TestParse_Sets_SetsAsFloat(t *testing.T) {
// Tests low-level functionality of counters // Tests low-level functionality of counters
func TestParse_Counters(t *testing.T) { func TestParse_Counters(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
// Test that counters work // Test that counters work
validLines := []string{ validLines := []string{
@ -584,7 +584,7 @@ func TestParse_Counters(t *testing.T) {
} }
func TestParse_CountersAsFloat(t *testing.T) { func TestParse_CountersAsFloat(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
s.FloatCounters = true s.FloatCounters = true
// Test that counters work // Test that counters work
@ -694,8 +694,8 @@ func TestParse_CountersAsFloat(t *testing.T) {
// Tests low-level functionality of timings // Tests low-level functionality of timings
func TestParse_Timings(t *testing.T) { func TestParse_Timings(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
s.Percentiles = []Number{90.0} s.Percentiles = []number{90.0}
acc := &testutil.Accumulator{} acc := &testutil.Accumulator{}
// Test that timings work // Test that timings work
@ -728,9 +728,9 @@ func TestParse_Timings(t *testing.T) {
} }
func TestParse_Timings_TimingsAsFloat(t *testing.T) { func TestParse_Timings_TimingsAsFloat(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
s.FloatTimings = true s.FloatTimings = true
s.Percentiles = []Number{90.0} s.Percentiles = []number{90.0}
acc := &testutil.Accumulator{} acc := &testutil.Accumulator{}
// Test that timings work // Test that timings work
@ -760,7 +760,7 @@ func TestParse_Timings_TimingsAsFloat(t *testing.T) {
// Tests low-level functionality of distributions // Tests low-level functionality of distributions
func TestParse_Distributions(t *testing.T) { func TestParse_Distributions(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
acc := &testutil.Accumulator{} acc := &testutil.Accumulator{}
parseMetrics := func() { parseMetrics := func() {
@ -813,7 +813,7 @@ func TestParse_Distributions(t *testing.T) {
} }
func TestParseScientificNotation(t *testing.T) { func TestParseScientificNotation(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
sciNotationLines := []string{ sciNotationLines := []string{
"scientific.notation:4.6968460083008E-5|ms", "scientific.notation:4.6968460083008E-5|ms",
"scientific.notation:4.6968460083008E-5|g", "scientific.notation:4.6968460083008E-5|g",
@ -827,7 +827,7 @@ func TestParseScientificNotation(t *testing.T) {
// Invalid lines should return an error // Invalid lines should return an error
func TestParse_InvalidLines(t *testing.T) { func TestParse_InvalidLines(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
invalidLines := []string{ invalidLines := []string{
"i.dont.have.a.pipe:45g", "i.dont.have.a.pipe:45g",
"i.dont.have.a.colon45|c", "i.dont.have.a.colon45|c",
@ -846,7 +846,7 @@ func TestParse_InvalidLines(t *testing.T) {
// Invalid sample rates should be ignored and not applied // Invalid sample rates should be ignored and not applied
func TestParse_InvalidSampleRate(t *testing.T) { func TestParse_InvalidSampleRate(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
invalidLines := []string{ invalidLines := []string{
"invalid.sample.rate:45|c|0.1", "invalid.sample.rate:45|c|0.1",
"invalid.sample.rate.2:45|c|@foo", "invalid.sample.rate.2:45|c|@foo",
@ -886,7 +886,7 @@ func TestParse_InvalidSampleRate(t *testing.T) {
// Names should be parsed like . -> _ // Names should be parsed like . -> _
func TestParse_DefaultNameParsing(t *testing.T) { func TestParse_DefaultNameParsing(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
validLines := []string{ validLines := []string{
"valid:1|c", "valid:1|c",
"valid.foo-bar:11|c", "valid.foo-bar:11|c",
@ -917,7 +917,7 @@ func TestParse_DefaultNameParsing(t *testing.T) {
// Test that template name transformation works // Test that template name transformation works
func TestParse_Template(t *testing.T) { func TestParse_Template(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
s.Templates = []string{ s.Templates = []string{
"measurement.measurement.host.service", "measurement.measurement.host.service",
} }
@ -953,7 +953,7 @@ func TestParse_Template(t *testing.T) {
// Test that template filters properly // Test that template filters properly
func TestParse_TemplateFilter(t *testing.T) { func TestParse_TemplateFilter(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
s.Templates = []string{ s.Templates = []string{
"cpu.idle.* measurement.measurement.host", "cpu.idle.* measurement.measurement.host",
} }
@ -989,7 +989,7 @@ func TestParse_TemplateFilter(t *testing.T) {
// Test that most specific template is chosen // Test that most specific template is chosen
func TestParse_TemplateSpecificity(t *testing.T) { func TestParse_TemplateSpecificity(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
s.Templates = []string{ s.Templates = []string{
"cpu.* measurement.foo.host", "cpu.* measurement.foo.host",
"cpu.idle.* measurement.measurement.host", "cpu.idle.* measurement.measurement.host",
@ -1021,7 +1021,7 @@ func TestParse_TemplateSpecificity(t *testing.T) {
// Test that most specific template is chosen // Test that most specific template is chosen
func TestParse_TemplateFields(t *testing.T) { func TestParse_TemplateFields(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
s.Templates = []string{ s.Templates = []string{
"* measurement.measurement.field", "* measurement.measurement.field",
} }
@ -1123,7 +1123,7 @@ func TestParse_Fields(t *testing.T) {
// Test that tags within the bucket are parsed correctly // Test that tags within the bucket are parsed correctly
func TestParse_Tags(t *testing.T) { func TestParse_Tags(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
tests := []struct { tests := []struct {
bucket string bucket string
@ -1276,7 +1276,7 @@ func TestParse_DataDogTags(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
s := NewTestStatsd() s := newTestStatsd()
s.DataDogExtensions = true s.DataDogExtensions = true
require.NoError(t, s.parseStatsdLine(tt.line)) require.NoError(t, s.parseStatsdLine(tt.line))
@ -1426,7 +1426,7 @@ func TestParse_DataDogContainerID(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
s := NewTestStatsd() s := newTestStatsd()
s.DataDogExtensions = true s.DataDogExtensions = true
s.DataDogKeepContainerTag = tt.keep s.DataDogKeepContainerTag = tt.keep
@ -1441,7 +1441,7 @@ func TestParse_DataDogContainerID(t *testing.T) {
// Test that statsd buckets are parsed to measurement names properly // Test that statsd buckets are parsed to measurement names properly
func TestParseName(t *testing.T) { func TestParseName(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
tests := []struct { tests := []struct {
inName string inName string
@ -1496,7 +1496,7 @@ func TestParseName(t *testing.T) {
// Test that measurements with the same name, but different tags, are treated // Test that measurements with the same name, but different tags, are treated
// as different outputs // as different outputs
func TestParse_MeasurementsWithSameName(t *testing.T) { func TestParse_MeasurementsWithSameName(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
// Test that counters work // Test that counters work
validLines := []string{ validLines := []string{
@ -1513,7 +1513,7 @@ func TestParse_MeasurementsWithSameName(t *testing.T) {
// Test that the metric caches expire (clear) an entry after the entry hasn't been updated for the configurable MaxTTL duration. // Test that the metric caches expire (clear) an entry after the entry hasn't been updated for the configurable MaxTTL duration.
func TestCachesExpireAfterMaxTTL(t *testing.T) { func TestCachesExpireAfterMaxTTL(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
s.MaxTTL = config.Duration(10 * time.Millisecond) s.MaxTTL = config.Duration(10 * time.Millisecond)
acc := &testutil.Accumulator{} acc := &testutil.Accumulator{}
@ -1611,8 +1611,8 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) {
"valid.multiple.mixed:1|c:1|ms:2|s:1|g", "valid.multiple.mixed:1|c:1|ms:2|s:1|g",
} }
sSingle := NewTestStatsd() sSingle := newTestStatsd()
sMultiple := NewTestStatsd() sMultiple := newTestStatsd()
for _, line := range singleLines { for _, line := range singleLines {
require.NoErrorf(t, sSingle.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) require.NoErrorf(t, sSingle.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line)
@ -1634,7 +1634,7 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) {
// which adds up to 12 individual datapoints to be cached // which adds up to 12 individual datapoints to be cached
require.EqualValuesf(t, 12, cachedtiming.fields[defaultFieldName].n, "Expected 12 additions, got %d", cachedtiming.fields[defaultFieldName].n) require.EqualValuesf(t, 12, cachedtiming.fields[defaultFieldName].n, "Expected 12 additions, got %d", cachedtiming.fields[defaultFieldName].n)
require.InDelta(t, 1, cachedtiming.fields[defaultFieldName].upper, testutil.DefaultDelta) require.InDelta(t, 1, cachedtiming.fields[defaultFieldName].upperBound, testutil.DefaultDelta)
// test if sSingle and sMultiple did compute the same stats for valid.multiple.duplicate // test if sSingle and sMultiple did compute the same stats for valid.multiple.duplicate
require.NoError(t, testValidateSet("valid_multiple_duplicate", 2, sSingle.sets)) require.NoError(t, testValidateSet("valid_multiple_duplicate", 2, sSingle.sets))
@ -1666,9 +1666,9 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) {
// Tests low-level functionality of timings when multiple fields is enabled // Tests low-level functionality of timings when multiple fields is enabled
// and a measurement template has been defined which can parse field names // and a measurement template has been defined which can parse field names
func TestParse_TimingsMultipleFieldsWithTemplate(t *testing.T) { func TestParse_TimingsMultipleFieldsWithTemplate(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
s.Templates = []string{"measurement.field"} s.Templates = []string{"measurement.field"}
s.Percentiles = []Number{90.0} s.Percentiles = []number{90.0}
acc := &testutil.Accumulator{} acc := &testutil.Accumulator{}
validLines := []string{ validLines := []string{
@ -1716,9 +1716,9 @@ func TestParse_TimingsMultipleFieldsWithTemplate(t *testing.T) {
// but a measurement template hasn't been defined so we can't parse field names // but a measurement template hasn't been defined so we can't parse field names
// In this case the behaviour should be the same as normal behaviour // In this case the behaviour should be the same as normal behaviour
func TestParse_TimingsMultipleFieldsWithoutTemplate(t *testing.T) { func TestParse_TimingsMultipleFieldsWithoutTemplate(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
s.Templates = make([]string, 0) s.Templates = make([]string, 0)
s.Percentiles = []Number{90.0} s.Percentiles = []number{90.0}
acc := &testutil.Accumulator{} acc := &testutil.Accumulator{}
validLines := []string{ validLines := []string{
@ -1765,7 +1765,7 @@ func TestParse_TimingsMultipleFieldsWithoutTemplate(t *testing.T) {
} }
func BenchmarkParse(b *testing.B) { func BenchmarkParse(b *testing.B) {
s := NewTestStatsd() s := newTestStatsd()
validLines := []string{ validLines := []string{
"test.timing.success:1|ms", "test.timing.success:1|ms",
"test.timing.success:11|ms", "test.timing.success:11|ms",
@ -1789,7 +1789,7 @@ func BenchmarkParse(b *testing.B) {
} }
func BenchmarkParseWithTemplate(b *testing.B) { func BenchmarkParseWithTemplate(b *testing.B) {
s := NewTestStatsd() s := newTestStatsd()
s.Templates = []string{"measurement.measurement.field"} s.Templates = []string{"measurement.measurement.field"}
validLines := []string{ validLines := []string{
"test.timing.success:1|ms", "test.timing.success:1|ms",
@ -1814,7 +1814,7 @@ func BenchmarkParseWithTemplate(b *testing.B) {
} }
func BenchmarkParseWithTemplateAndFilter(b *testing.B) { func BenchmarkParseWithTemplateAndFilter(b *testing.B) {
s := NewTestStatsd() s := newTestStatsd()
s.Templates = []string{"cpu* measurement.measurement.field"} s.Templates = []string{"cpu* measurement.measurement.field"}
validLines := []string{ validLines := []string{
"test.timing.success:1|ms", "test.timing.success:1|ms",
@ -1839,7 +1839,7 @@ func BenchmarkParseWithTemplateAndFilter(b *testing.B) {
} }
func BenchmarkParseWith2TemplatesAndFilter(b *testing.B) { func BenchmarkParseWith2TemplatesAndFilter(b *testing.B) {
s := NewTestStatsd() s := newTestStatsd()
s.Templates = []string{ s.Templates = []string{
"cpu1* measurement.measurement.field", "cpu1* measurement.measurement.field",
"cpu2* measurement.measurement.field", "cpu2* measurement.measurement.field",
@ -1867,7 +1867,7 @@ func BenchmarkParseWith2TemplatesAndFilter(b *testing.B) {
} }
func BenchmarkParseWith2Templates3TagsAndFilter(b *testing.B) { func BenchmarkParseWith2Templates3TagsAndFilter(b *testing.B) {
s := NewTestStatsd() s := newTestStatsd()
s.Templates = []string{ s.Templates = []string{
"cpu1* measurement.measurement.region.city.rack.field", "cpu1* measurement.measurement.region.city.rack.field",
"cpu2* measurement.measurement.region.city.rack.field", "cpu2* measurement.measurement.region.city.rack.field",
@ -1895,7 +1895,7 @@ func BenchmarkParseWith2Templates3TagsAndFilter(b *testing.B) {
} }
func TestParse_Timings_Delete(t *testing.T) { func TestParse_Timings_Delete(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
s.DeleteTimings = true s.DeleteTimings = true
fakeacc := &testutil.Accumulator{} fakeacc := &testutil.Accumulator{}
@ -1911,7 +1911,7 @@ func TestParse_Timings_Delete(t *testing.T) {
// Tests the delete_gauges option // Tests the delete_gauges option
func TestParse_Gauges_Delete(t *testing.T) { func TestParse_Gauges_Delete(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
s.DeleteGauges = true s.DeleteGauges = true
fakeacc := &testutil.Accumulator{} fakeacc := &testutil.Accumulator{}
@ -1927,7 +1927,7 @@ func TestParse_Gauges_Delete(t *testing.T) {
// Tests the delete_sets option // Tests the delete_sets option
func TestParse_Sets_Delete(t *testing.T) { func TestParse_Sets_Delete(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
s.DeleteSets = true s.DeleteSets = true
fakeacc := &testutil.Accumulator{} fakeacc := &testutil.Accumulator{}
@ -1943,7 +1943,7 @@ func TestParse_Sets_Delete(t *testing.T) {
// Tests the delete_counters option // Tests the delete_counters option
func TestParse_Counters_Delete(t *testing.T) { func TestParse_Counters_Delete(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
s.DeleteCounters = true s.DeleteCounters = true
fakeacc := &testutil.Accumulator{} fakeacc := &testutil.Accumulator{}
@ -2186,12 +2186,12 @@ func TestUdpFillQueue(t *testing.T) {
} }
func TestParse_Ints(t *testing.T) { func TestParse_Ints(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
s.Percentiles = []Number{90} s.Percentiles = []number{90}
acc := &testutil.Accumulator{} acc := &testutil.Accumulator{}
require.NoError(t, s.Gather(acc)) require.NoError(t, s.Gather(acc))
require.Equal(t, []Number{90.0}, s.Percentiles) require.Equal(t, []number{90.0}, s.Percentiles)
} }
func TestParse_KeyValue(t *testing.T) { func TestParse_KeyValue(t *testing.T) {
@ -2222,7 +2222,7 @@ func TestParse_KeyValue(t *testing.T) {
} }
func TestParseSanitize(t *testing.T) { func TestParseSanitize(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
s.SanitizeNamesMethod = "upstream" s.SanitizeNamesMethod = "upstream"
tests := []struct { tests := []struct {
@ -2254,7 +2254,7 @@ func TestParseSanitize(t *testing.T) {
} }
func TestParseNoSanitize(t *testing.T) { func TestParseNoSanitize(t *testing.T) {
s := NewTestStatsd() s := newTestStatsd()
s.SanitizeNamesMethod = "" s.SanitizeNamesMethod = ""
tests := []struct { tests := []struct {

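Several tests above set s.Percentiles = []number{90.0}; the unexported number type exists so the TOML config accepts integer and float spellings alike (percentiles = [90, 99.9]). A self-contained sketch of that parsing behavior, using strconv the same way the plugin does:

package main

import (
	"fmt"
	"strconv"
)

// number mirrors the plugin's TOML shim: every value is parsed
// through ParseFloat, so 90 and 90.0 land on the same float64.
type number float64

func (n *number) UnmarshalTOML(b []byte) error {
	value, err := strconv.ParseFloat(string(b), 64)
	if err != nil {
		return err
	}
	*n = number(value)
	return nil
}

func main() {
	var a, b number
	_ = a.UnmarshalTOML([]byte("90"))   // integer form
	_ = b.UnmarshalTOML([]byte("99.9")) // float form
	fmt.Println(a, b)                   // 90 99.9
}
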
View File

@ -14,6 +14,9 @@ import (
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
) )
//go:embed sample.conf
var sampleConfig string
type Supervisor struct { type Supervisor struct {
Server string `toml:"url"` Server string `toml:"url"`
MetricsInc []string `toml:"metrics_include"` MetricsInc []string `toml:"metrics_include"`
@ -46,13 +49,29 @@ type supervisorInfo struct {
Ident string Ident string
} }
//go:embed sample.conf
var sampleConfig string
func (*Supervisor) SampleConfig() string { func (*Supervisor) SampleConfig() string {
return sampleConfig return sampleConfig
} }
func (s *Supervisor) Init() error {
// Using default server URL if none was specified in config
if s.Server == "" {
s.Server = "http://localhost:9001/RPC2"
}
var err error
// Initializing XML-RPC client
s.rpcClient, err = xmlrpc.NewClient(s.Server, nil)
if err != nil {
return fmt.Errorf("failed to initialize XML-RPC client: %w", err)
}
// Setting filter for additional metrics
s.fieldFilter, err = filter.NewIncludeExcludeFilter(s.MetricsInc, s.MetricsExc)
if err != nil {
return fmt.Errorf("metrics filter setup failed: %w", err)
}
return nil
}
func (s *Supervisor) Gather(acc telegraf.Accumulator) error { func (s *Supervisor) Gather(acc telegraf.Accumulator) error {
// API call to get information about all running processes // API call to get information about all running processes
var rawProcessData []processInfo var rawProcessData []processInfo
@ -134,33 +153,6 @@ func (s *Supervisor) parseInstanceData(status supervisorInfo) (map[string]string
return tags, fields, nil return tags, fields, nil
} }
func (s *Supervisor) Init() error {
// Using default server URL if none was specified in config
if s.Server == "" {
s.Server = "http://localhost:9001/RPC2"
}
var err error
// Initializing XML-RPC client
s.rpcClient, err = xmlrpc.NewClient(s.Server, nil)
if err != nil {
return fmt.Errorf("failed to initialize XML-RPC client: %w", err)
}
// Setting filter for additional metrics
s.fieldFilter, err = filter.NewIncludeExcludeFilter(s.MetricsInc, s.MetricsExc)
if err != nil {
return fmt.Errorf("metrics filter setup failed: %w", err)
}
return nil
}
func init() {
inputs.Add("supervisor", func() telegraf.Input {
return &Supervisor{
MetricsExc: []string{"pid", "rc"},
}
})
}
// beautifyServerString extracts only the address and port from the URL // beautifyServerString extracts only the address and port from the URL
func beautifyServerString(rawurl string) ([]string, error) { func beautifyServerString(rawurl string) ([]string, error) {
parsedURL, err := url.Parse(rawurl) parsedURL, err := url.Parse(rawurl)
@ -177,3 +169,11 @@ func beautifyServerString(rawurl string) ([]string, error) {
} }
return splittedURL, nil return splittedURL, nil
} }
func init() {
inputs.Add("supervisor", func() telegraf.Input {
return &Supervisor{
MetricsExc: []string{"pid", "rc"},
}
})
}

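Init turns metrics_include / metrics_exclude into one matcher, and the init() registration excludes pid and rc by default. A hedged sketch of how such an include/exclude filter behaves, assuming (as the code above suggests) that filter.NewIncludeExcludeFilter treats an empty include list as match-all and returns a Filter with a Match method:

package main

import (
	"fmt"

	"github.com/influxdata/telegraf/filter"
)

func main() {
	// An empty include list means "include everything", then the
	// exclude list is applied, matching the plugin's defaults.
	f, err := filter.NewIncludeExcludeFilter(nil, []string{"pid", "rc"})
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Match("mem")) // true: field is collected
	fmt.Println(f.Match("pid")) // false: field is filtered out
}
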
View File

@ -23,13 +23,12 @@ import (
var sampleConfig string var sampleConfig string
const ( const (
// InBufSize is the input buffer size for JSON received via socket. // inBufSize is the input buffer size for JSON received via socket.
// Set to 10MB, as depending on the number of threads the output might be // Set to 10MB, as depending on the number of threads the output might be
// large. // large.
InBufSize = 10 * 1024 * 1024 inBufSize = 10 * 1024 * 1024
) )
// Suricata is a Telegraf input plugin for Suricata runtime statistics.
type Suricata struct { type Suricata struct {
Source string `toml:"source"` Source string `toml:"source"`
Delimiter string `toml:"delimiter"` Delimiter string `toml:"delimiter"`
@ -68,8 +67,7 @@ func (s *Suricata) Init() error {
return nil return nil
} }
// Start initiates background collection of JSON data from the socket // Start initiates background collection of JSON data from the socket provided to Suricata.
// provided to Suricata.
func (s *Suricata) Start(acc telegraf.Accumulator) error { func (s *Suricata) Start(acc telegraf.Accumulator) error {
var err error var err error
s.inputListener, err = net.ListenUnix("unix", &net.UnixAddr{ s.inputListener, err = net.ListenUnix("unix", &net.UnixAddr{
@ -90,8 +88,13 @@ func (s *Suricata) Start(acc telegraf.Accumulator) error {
return nil return nil
} }
// Stop causes the plugin to cease collecting JSON data from the socket provided // Gather measures and submits one full set of telemetry to Telegraf.
// to Suricata. // Not used here, submission is completely input-driven.
func (*Suricata) Gather(telegraf.Accumulator) error {
return nil
}
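The no-op Gather is the usual shape of a Telegraf service input: Start launches a goroutine that pushes metrics as data arrives, leaving the agent's periodic Gather call with nothing to do. A minimal sketch of that push-driven flow, using a stand-in for telegraf.Accumulator:

package main

import (
	"fmt"
	"time"
)

// accumulator stands in for telegraf.Accumulator; only AddFields is
// needed to show the flow.
type accumulator interface {
	AddFields(measurement string, fields map[string]interface{}, tags map[string]string)
}

type printAcc struct{}

func (printAcc) AddFields(m string, f map[string]interface{}, _ map[string]string) {
	fmt.Println(m, f)
}

// start mirrors the service-input pattern: the background goroutine
// submits metrics whenever the source produces data, so Gather stays empty.
func start(acc accumulator, done <-chan struct{}) {
	go func() {
		ticker := time.NewTicker(10 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-done:
				return
			case <-ticker.C:
				acc.AddFields("suricata", map[string]interface{}{"uptime": 42}, nil)
			}
		}
	}()
}

func main() {
	done := make(chan struct{})
	start(printAcc{}, done)
	time.Sleep(35 * time.Millisecond)
	close(done)
}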
// Stop causes the plugin to cease collecting JSON data from the socket provided to Suricata.
func (s *Suricata) Stop() { func (s *Suricata) Stop() {
s.inputListener.Close() s.inputListener.Close()
if s.cancel != nil { if s.cancel != nil {
@ -101,7 +104,7 @@ func (s *Suricata) Stop() {
} }
func (s *Suricata) readInput(ctx context.Context, acc telegraf.Accumulator, conn net.Conn) error { func (s *Suricata) readInput(ctx context.Context, acc telegraf.Accumulator, conn net.Conn) error {
reader := bufio.NewReaderSize(conn, InBufSize) reader := bufio.NewReaderSize(conn, inBufSize)
for { for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
@ -342,12 +345,6 @@ func (s *Suricata) parse(acc telegraf.Accumulator, sjson []byte) error {
return nil return nil
} }
// Gather measures and submits one full set of telemetry to Telegraf.
// Not used here, submission is completely input-driven.
func (*Suricata) Gather(telegraf.Accumulator) error {
return nil
}
func init() { func init() {
inputs.Add("suricata", func() telegraf.Input { inputs.Add("suricata", func() telegraf.Input {
return &Suricata{} return &Suricata{}

View File

@ -13,15 +13,15 @@ import (
//go:embed sample.conf //go:embed sample.conf
var sampleConfig string var sampleConfig string
type SwapStats struct { type Swap struct {
ps system.PS ps system.PS
} }
func (*SwapStats) SampleConfig() string { func (*Swap) SampleConfig() string {
return sampleConfig return sampleConfig
} }
func (ss *SwapStats) Gather(acc telegraf.Accumulator) error { func (ss *Swap) Gather(acc telegraf.Accumulator) error {
swap, err := ss.ps.SwapStat() swap, err := ss.ps.SwapStat()
if err != nil { if err != nil {
return fmt.Errorf("error getting swap memory info: %w", err) return fmt.Errorf("error getting swap memory info: %w", err)
@ -46,6 +46,6 @@ func (ss *SwapStats) Gather(acc telegraf.Accumulator) error {
func init() { func init() {
ps := system.NewSystemPS() ps := system.NewSystemPS()
inputs.Add("swap", func() telegraf.Input { inputs.Add("swap", func() telegraf.Input {
return &SwapStats{ps: ps} return &Swap{ps: ps}
}) })
} }

View File

@ -26,7 +26,7 @@ func TestSwapStats(t *testing.T) {
mps.On("SwapStat").Return(sms, nil) mps.On("SwapStat").Return(sms, nil)
err = (&SwapStats{&mps}).Gather(&acc) err = (&Swap{&mps}).Gather(&acc)
require.NoError(t, err) require.NoError(t, err)
swapfields := map[string]interface{}{ swapfields := map[string]interface{}{

View File

@ -1,9 +1,17 @@
//go:generate ../../../tools/readme_config_includer/generator //go:generate ../../../tools/readme_config_includer/generator
//go:build linux
package synproxy package synproxy
import ( import (
"bufio"
_ "embed" _ "embed"
"errors"
"fmt"
"os"
"path" "path"
"strconv"
"strings"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal"
@ -24,6 +32,83 @@ func (*Synproxy) SampleConfig() string {
return sampleConfig return sampleConfig
} }
func (s *Synproxy) Gather(acc telegraf.Accumulator) error {
data, err := s.getSynproxyStat()
if err != nil {
return err
}
acc.AddCounter("synproxy", data, make(map[string]string))
return nil
}
func (s *Synproxy) getSynproxyStat() (map[string]interface{}, error) {
var hname []string
counters := []string{"entries", "syn_received", "cookie_invalid", "cookie_valid", "cookie_retrans", "conn_reopened"}
fields := make(map[string]interface{})
// Open synproxy file in proc filesystem
file, err := os.Open(s.statFile)
if err != nil {
return nil, err
}
defer file.Close()
// Initialise expected fields
for _, val := range counters {
fields[val] = uint32(0)
}
scanner := bufio.NewScanner(file)
// Read header row
if scanner.Scan() {
line := scanner.Text()
// Parse fields separated by whitespace
dataFields := strings.Fields(line)
for _, val := range dataFields {
if !inSlice(counters, val) {
val = ""
}
hname = append(hname, val)
}
}
if len(hname) == 0 {
return nil, errors.New("invalid data")
}
// Read data rows
for scanner.Scan() {
line := scanner.Text()
// Parse fields separated by whitespace
dataFields := strings.Fields(line)
// If the number of data fields does not match the number of header fields
if len(dataFields) != len(hname) {
return nil, fmt.Errorf("invalid number of columns in data, expected %d found %d", len(hname),
len(dataFields))
}
for i, val := range dataFields {
// Convert from hex string to uint32
x, err := strconv.ParseUint(val, 16, 32)
// If the field is not a valid hex string
if err != nil {
return nil, fmt.Errorf("invalid value %q found", val)
}
if hname[i] != "" {
fields[hname[i]] = fields[hname[i]].(uint32) + uint32(x)
}
}
}
return fields, nil
}
func inSlice(haystack []string, needle string) bool {
for _, val := range haystack {
if needle == val {
return true
}
}
return false
}
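The parsing above follows the layout of /proc/net/stat/synproxy: a header row of counter names followed by one row of hexadecimal values per CPU, summed into totals. A self-contained sketch over a plausible two-CPU sample (the sample values are made up):

package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

// A plausible two-CPU excerpt of /proc/net/stat/synproxy: one header
// row, then one row of hexadecimal counters per CPU.
const sample = `entries syn_received cookie_invalid cookie_valid cookie_retrans conn_reopened
00000000 00000002 00000000 0000000a 00000000 00000000
00000000 00000001 00000000 00000004 00000000 00000000
`

func main() {
	scanner := bufio.NewScanner(strings.NewReader(sample))
	scanner.Scan()
	header := strings.Fields(scanner.Text())
	totals := make(map[string]uint32, len(header))
	for scanner.Scan() {
		for i, v := range strings.Fields(scanner.Text()) {
			x, err := strconv.ParseUint(v, 16, 32)
			if err != nil {
				panic(err)
			}
			// Sum the per-CPU rows into one counter per column.
			totals[header[i]] += uint32(x)
		}
	}
	fmt.Println(totals["syn_received"], totals["cookie_valid"]) // 3 14
}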
func init() { func init() {
inputs.Add("synproxy", func() telegraf.Input { inputs.Add("synproxy", func() telegraf.Input {
return &Synproxy{ return &Synproxy{

View File

@ -1,91 +0,0 @@
//go:build linux
package synproxy
import (
"bufio"
"errors"
"fmt"
"os"
"strconv"
"strings"
"github.com/influxdata/telegraf"
)
func (k *Synproxy) Gather(acc telegraf.Accumulator) error {
data, err := k.getSynproxyStat()
if err != nil {
return err
}
acc.AddCounter("synproxy", data, make(map[string]string))
return nil
}
func inSlice(haystack []string, needle string) bool {
for _, val := range haystack {
if needle == val {
return true
}
}
return false
}
func (k *Synproxy) getSynproxyStat() (map[string]interface{}, error) {
var hname []string
counters := []string{"entries", "syn_received", "cookie_invalid", "cookie_valid", "cookie_retrans", "conn_reopened"}
fields := make(map[string]interface{})
// Open synproxy file in proc filesystem
file, err := os.Open(k.statFile)
if err != nil {
return nil, err
}
defer file.Close()
// Initialise expected fields
for _, val := range counters {
fields[val] = uint32(0)
}
scanner := bufio.NewScanner(file)
// Read header row
if scanner.Scan() {
line := scanner.Text()
// Parse fields separated by whitespace
dataFields := strings.Fields(line)
for _, val := range dataFields {
if !inSlice(counters, val) {
val = ""
}
hname = append(hname, val)
}
}
if len(hname) == 0 {
return nil, errors.New("invalid data")
}
// Read data rows
for scanner.Scan() {
line := scanner.Text()
// Parse fields separated by whitespace
dataFields := strings.Fields(line)
// If number of data fields do not match number of header fields
if len(dataFields) != len(hname) {
return nil, fmt.Errorf("invalid number of columns in data, expected %d found %d", len(hname),
len(dataFields))
}
for i, val := range dataFields {
// Convert from hexstring to int32
x, err := strconv.ParseUint(val, 16, 32)
// If field is not a valid hexstring
if err != nil {
return nil, fmt.Errorf("invalid value %q found", val)
}
if hname[i] != "" {
fields[hname[i]] = fields[hname[i]].(uint32) + uint32(x)
}
}
}
return fields, nil
}

View File

@ -1,23 +1,33 @@
//go:generate ../../../tools/readme_config_includer/generator
//go:build !linux //go:build !linux
package synproxy package synproxy
import ( import (
_ "embed"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
) )
func (k *Synproxy) Init() error { //go:embed sample.conf
k.Log.Warn("Current platform is not supported") var sampleConfig string
type Synproxy struct {
Log telegraf.Logger `toml:"-"`
}
func (*Synproxy) SampleConfig() string { return sampleConfig }
func (s *Synproxy) Init() error {
s.Log.Warn("Current platform is not supported")
return nil return nil
} }
func (*Synproxy) Gather(_ telegraf.Accumulator) error { func (*Synproxy) Gather(telegraf.Accumulator) error { return nil }
return nil
}
func init() { func init() {
inputs.Add("synproxy", func() telegraf.Input { inputs.Add("slab", func() telegraf.Input {
return &Synproxy{} return &Synproxy{}
}) })
} }
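The same stub shape recurs for sysstat and systemd_units below: on unsupported platforms Init only warns, Gather is a no-op, and init() still registers the plugin so existing configurations keep parsing. Condensed into one hypothetical plugin (the "demo" name and the embedded sample.conf are placeholders):

//go:build !linux

package demo

import (
	_ "embed"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

//go:embed sample.conf
var sampleConfig string

// Demo is a placeholder plugin illustrating the unsupported-platform stub.
type Demo struct {
	Log telegraf.Logger `toml:"-"`
}

func (*Demo) SampleConfig() string { return sampleConfig }

// Init warns once instead of failing, so a shared config can be
// deployed to mixed platforms.
func (d *Demo) Init() error {
	d.Log.Warn("Current platform is not supported")
	return nil
}

// Gather does nothing on unsupported platforms.
func (*Demo) Gather(telegraf.Accumulator) error { return nil }

func init() {
	inputs.Add("demo", func() telegraf.Input { return &Demo{} })
}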

View File

@ -29,7 +29,6 @@ var sampleConfig string
const readTimeoutMsg = "Read timeout set! Connections, inactive for the set duration, will be closed!" const readTimeoutMsg = "Read timeout set! Connections, inactive for the set duration, will be closed!"
// Syslog is a syslog plugin
type Syslog struct { type Syslog struct {
Address string `toml:"server"` Address string `toml:"server"`
Framing string `toml:"framing"` Framing string `toml:"framing"`
@ -113,12 +112,6 @@ func (s *Syslog) Init() error {
return nil return nil
} }
// Gather ...
func (*Syslog) Gather(_ telegraf.Accumulator) error {
return nil
}
// Start starts the service.
func (s *Syslog) Start(acc telegraf.Accumulator) error { func (s *Syslog) Start(acc telegraf.Accumulator) error {
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()
@ -148,7 +141,10 @@ func (s *Syslog) Start(acc telegraf.Accumulator) error {
return nil return nil
} }
// Stop cleans up all resources func (*Syslog) Gather(telegraf.Accumulator) error {
return nil
}
func (s *Syslog) Stop() { func (s *Syslog) Stop() {
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()

View File

@ -31,7 +31,10 @@ var (
dfltActivities = []string{"DISK"} dfltActivities = []string{"DISK"}
) )
const parseInterval = 1 // parseInterval is the interval (in seconds) where the parsing of the binary file takes place. const (
parseInterval = 1 // parseInterval is the interval (in seconds) where the parsing of the binary file takes place.
cmd = "sadf"
)
type Sysstat struct { type Sysstat struct {
// Sadc represents the path to the sadc collector utility. // Sadc represents the path to the sadc collector utility.
@ -46,7 +49,7 @@ type Sysstat struct {
// Activities is a list of activities that are passed as argument to the // Activities is a list of activities that are passed as argument to the
// collector utility (e.g: DISK, SNMP etc...) // collector utility (e.g: DISK, SNMP etc...)
// The more activities that are added, the more data is collected. // The more activities that are added, the more data is collected.
Activities []string Activities []string `toml:"activities"`
// Options is a map of options. // Options is a map of options.
// //
@ -62,23 +65,21 @@ type Sysstat struct {
// and represents itself a measurement. // and represents itself a measurement.
// //
// If Group is true, metrics are grouped to a single measurement with the corresponding description as name. // If Group is true, metrics are grouped to a single measurement with the corresponding description as name.
Options map[string]string Options map[string]string `toml:"options"`
// Group determines if metrics are grouped or not. // Group determines if metrics are grouped or not.
Group bool Group bool `toml:"group"`
// DeviceTags adds the possibility to add additional tags for devices. // DeviceTags adds the possibility to add additional tags for devices.
DeviceTags map[string][]map[string]string `toml:"device_tags"` DeviceTags map[string][]map[string]string `toml:"device_tags"`
Log telegraf.Logger Log telegraf.Logger `toml:"-"`
// Used to autodetect how long the sadc command should run for // Used to autodetect how long the sadc command should run for
interval int interval int
firstTimestamp time.Time firstTimestamp time.Time
} }
const cmd = "sadf"
func (*Sysstat) SampleConfig() string { func (*Sysstat) SampleConfig() string {
return sampleConfig return sampleConfig
} }

View File

@ -17,12 +17,14 @@ type Sysstat struct {
Log telegraf.Logger `toml:"-"` Log telegraf.Logger `toml:"-"`
} }
func (*Sysstat) SampleConfig() string { return sampleConfig }
func (s *Sysstat) Init() error { func (s *Sysstat) Init() error {
s.Log.Warn("current platform is not supported") s.Log.Warn("Current platform is not supported")
return nil return nil
} }
func (*Sysstat) SampleConfig() string { return sampleConfig }
func (*Sysstat) Gather(_ telegraf.Accumulator) error { return nil } func (*Sysstat) Gather(telegraf.Accumulator) error { return nil }
func init() { func init() {
inputs.Add("sysstat", func() telegraf.Input { inputs.Add("sysstat", func() telegraf.Input {

View File

@ -1,8 +1,18 @@
//go:generate ../../../tools/readme_config_includer/generator //go:generate ../../../tools/readme_config_includer/generator
//go:build linux
package systemd_units package systemd_units
import ( import (
"context"
_ "embed" _ "embed"
"fmt"
"math"
"os/user"
"path"
"strings"
"time" "time"
"github.com/coreos/go-systemd/v22/dbus"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
@ -13,6 +23,107 @@ import (
//go:embed sample.conf //go:embed sample.conf
var sampleConfig string var sampleConfig string
var (
// Below are mappings of systemd state tables as defined in
// https://github.com/systemd/systemd/blob/c87700a1335f489be31cd3549927da68b5638819/src/basic/unit-def.c
// Duplicate strings are removed from this list.
// This map is used by `subcommand_show` and `subcommand_list`. Changes must be
// compatible with both subcommands.
loadMap = map[string]int{
"loaded": 0,
"stub": 1,
"not-found": 2,
"bad-setting": 3,
"error": 4,
"merged": 5,
"masked": 6,
}
activeMap = map[string]int{
"active": 0,
"reloading": 1,
"inactive": 2,
"failed": 3,
"activating": 4,
"deactivating": 5,
}
subMap = map[string]int{
// service_state_table, offset 0x0000
"running": 0x0000,
"dead": 0x0001,
"start-pre": 0x0002,
"start": 0x0003,
"exited": 0x0004,
"reload": 0x0005,
"stop": 0x0006,
"stop-watchdog": 0x0007,
"stop-sigterm": 0x0008,
"stop-sigkill": 0x0009,
"stop-post": 0x000a,
"final-sigterm": 0x000b,
"failed": 0x000c,
"auto-restart": 0x000d,
"condition": 0x000e,
"cleaning": 0x000f,
// automount_state_table, offset 0x0010
// continuation of service_state_table
"waiting": 0x0010,
"reload-signal": 0x0011,
"reload-notify": 0x0012,
"final-watchdog": 0x0013,
"dead-before-auto-restart": 0x0014,
"failed-before-auto-restart": 0x0015,
"dead-resources-pinned": 0x0016,
"auto-restart-queued": 0x0017,
// device_state_table, offset 0x0020
"tentative": 0x0020,
"plugged": 0x0021,
// mount_state_table, offset 0x0030
"mounting": 0x0030,
"mounting-done": 0x0031,
"mounted": 0x0032,
"remounting": 0x0033,
"unmounting": 0x0034,
"remounting-sigterm": 0x0035,
"remounting-sigkill": 0x0036,
"unmounting-sigterm": 0x0037,
"unmounting-sigkill": 0x0038,
// path_state_table, offset 0x0040
// scope_state_table, offset 0x0050
"abandoned": 0x0050,
// slice_state_table, offset 0x0060
"active": 0x0060,
// socket_state_table, offset 0x0070
"start-chown": 0x0070,
"start-post": 0x0071,
"listening": 0x0072,
"stop-pre": 0x0073,
"stop-pre-sigterm": 0x0074,
"stop-pre-sigkill": 0x0075,
"final-sigkill": 0x0076,
// swap_state_table, offset 0x0080
"activating": 0x0080,
"activating-done": 0x0081,
"deactivating": 0x0082,
"deactivating-sigterm": 0x0083,
"deactivating-sigkill": 0x0084,
// target_state_table, offset 0x0090
// timer_state_table, offset 0x00a0
"elapsed": 0x00a0,
}
)
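Since the sub-state codes are packed into 0x10-wide blocks, one block per systemd state table, the owning table can mostly be read off the high nibble of a sub_code field. A small sketch; note the 0x0010 block is shared by the automount table and the tail of the service table, so the rule is approximate there:

package main

import "fmt"

// tables maps the high nibble of a sub-state code to the state table
// that owns that 0x10-wide block (per the offsets in the map above).
var tables = map[int]string{
	0x0: "service",
	0x1: "automount (and service continuation)",
	0x2: "device",
	0x3: "mount",
	0x4: "path",
	0x5: "scope",
	0x6: "slice",
	0x7: "socket",
	0x8: "swap",
	0x9: "target",
	0xa: "timer",
}

func main() {
	for _, code := range []int{0x0003, 0x0032, 0x00a0} {
		fmt.Printf("sub_code 0x%04x -> %s table\n", code, tables[code>>4])
	}
	// sub_code 0x0003 -> service table ("start")
	// sub_code 0x0032 -> mount table ("mounted")
	// sub_code 0x00a0 -> timer table ("elapsed")
}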
// SystemdUnits is a telegraf plugin to gather systemd unit status // SystemdUnits is a telegraf plugin to gather systemd unit status
type SystemdUnits struct { type SystemdUnits struct {
Pattern string `toml:"pattern"` Pattern string `toml:"pattern"`
@ -25,10 +136,330 @@ type SystemdUnits struct {
archParams archParams
} }
type archParams struct {
client client
pattern []string
filter filter.Filter
unitTypeDBus string
scope string
user string
warnUnitProps map[string]bool
}
type client interface {
// Connected returns whether client is connected
Connected() bool
// Close closes an established connection.
Close()
// ListUnitFilesByPatternsContext returns an array of all available units on disk matched the patterns.
ListUnitFilesByPatternsContext(ctx context.Context, states, pattern []string) ([]dbus.UnitFile, error)
// ListUnitsByNamesContext returns an array with units.
ListUnitsByNamesContext(ctx context.Context, units []string) ([]dbus.UnitStatus, error)
// GetUnitTypePropertiesContext returns the extra properties for a unit, specific to the unit type.
GetUnitTypePropertiesContext(ctx context.Context, unit, unitType string) (map[string]interface{}, error)
// GetUnitPropertiesContext takes the (unescaped) unit name and returns all of its dbus object properties.
GetUnitPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error)
// ListUnitsContext returns an array with all currently loaded units.
ListUnitsContext(ctx context.Context) ([]dbus.UnitStatus, error)
}
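The client interface exists so tests can swap out the real dbus.Conn. A hypothetical test double satisfying it, meant to live alongside the plugin; the compile-time assertion proves conformance, and the returned property values are made up:

type fakeClient struct {
	units []dbus.UnitStatus
}

// Compile-time check that the double satisfies the client interface.
var _ client = (*fakeClient)(nil)

func (*fakeClient) Connected() bool { return true }
func (*fakeClient) Close()          {}

func (*fakeClient) ListUnitFilesByPatternsContext(context.Context, []string, []string) ([]dbus.UnitFile, error) {
	return nil, nil
}

func (f *fakeClient) ListUnitsByNamesContext(context.Context, []string) ([]dbus.UnitStatus, error) {
	return f.units, nil
}

func (*fakeClient) GetUnitTypePropertiesContext(context.Context, string, string) (map[string]interface{}, error) {
	return map[string]interface{}{"StatusErrno": int64(0), "NRestarts": uint64(0)}, nil
}

func (*fakeClient) GetUnitPropertiesContext(context.Context, string) (map[string]interface{}, error) {
	return map[string]interface{}{}, nil
}

func (f *fakeClient) ListUnitsContext(context.Context) ([]dbus.UnitStatus, error) {
	return f.units, nil
}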
func (*SystemdUnits) SampleConfig() string { func (*SystemdUnits) SampleConfig() string {
return sampleConfig return sampleConfig
} }
func (s *SystemdUnits) Init() error {
// Set default pattern
if s.Pattern == "" {
s.Pattern = "*"
}
// Check unit-type and convert the first letter to uppercase as this is
// what dbus expects.
switch s.UnitType {
case "":
s.UnitType = "service"
case "service", "socket", "target", "device", "mount", "automount", "swap",
"timer", "path", "slice", "scope":
default:
return fmt.Errorf("invalid 'unittype' %q", s.UnitType)
}
s.unitTypeDBus = strings.ToUpper(s.UnitType[0:1]) + strings.ToLower(s.UnitType[1:])
s.pattern = strings.Split(s.Pattern, " ")
f, err := filter.Compile(s.pattern)
if err != nil {
return fmt.Errorf("compiling filter failed: %w", err)
}
s.filter = f
switch s.Scope {
case "", "system":
s.scope = "system"
case "user":
u, err := user.Current()
if err != nil {
return fmt.Errorf("unable to determine user: %w", err)
}
s.scope = "user"
s.user = u.Username
default:
return fmt.Errorf("invalid 'scope' %q", s.Scope)
}
s.warnUnitProps = make(map[string]bool)
return nil
}
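The capitalization step is easy to miss: the dbus property queries expect the unit type with an uppercase first letter ("Service", "Socket", ...). A quick illustration of the conversion used above:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Same expression as in Init: first letter up, rest down.
	for _, t := range []string{"service", "SOCKET", "timer"} {
		fmt.Println(strings.ToUpper(t[0:1]) + strings.ToLower(t[1:]))
	}
	// Output:
	// Service
	// Socket
	// Timer
}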
func (s *SystemdUnits) Start(telegraf.Accumulator) error {
ctx := context.Background()
var client *dbus.Conn
var err error
if s.scope == "user" {
client, err = dbus.NewUserConnectionContext(ctx)
} else {
client, err = dbus.NewSystemConnectionContext(ctx)
}
if err != nil {
return err
}
s.client = client
return nil
}
func (s *SystemdUnits) Gather(acc telegraf.Accumulator) error {
// Reconnect in case the connection was lost
if !s.client.Connected() {
s.Log.Debug("Connection to systemd daemon lost, trying to reconnect...")
s.Stop()
if err := s.Start(acc); err != nil {
return err
}
}
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.Timeout))
defer cancel()
// List all loaded units to handle multi-instance units correctly
loaded, err := s.client.ListUnitsContext(ctx)
if err != nil {
return fmt.Errorf("listing loaded units failed: %w", err)
}
var files []dbus.UnitFile
if s.CollectDisabled {
// List all unit files matching the pattern to also get disabled units
list := []string{"enabled", "disabled", "static"}
files, err = s.client.ListUnitFilesByPatternsContext(ctx, list, s.pattern)
if err != nil {
return fmt.Errorf("listing unit files failed: %w", err)
}
}
// Collect all matching units, the loaded ones and the disabled ones
states := make([]dbus.UnitStatus, 0, len(loaded))
// Match all loaded units first
seen := make(map[string]bool)
for _, u := range loaded {
if !s.filter.Match(u.Name) {
continue
}
states = append(states, u)
// Remember multi-instance units to remove duplicates from files
instance := u.Name
if strings.Contains(u.Name, "@") {
prefix, _, _ := strings.Cut(u.Name, "@")
suffix := path.Ext(u.Name)
instance = prefix + "@" + suffix
}
seen[instance] = true
}
// Now split the unit-files into disabled ones and static ones, ignore
// enabled units as those are already contained in the "loaded" list.
if len(files) > 0 {
disabled := make([]string, 0, len(files))
static := make([]string, 0, len(files))
for _, f := range files {
name := path.Base(f.Path)
switch f.Type {
case "disabled":
if seen[name] {
continue
}
seen[name] = true
// Detect disabled multi-instance units and declare them as static
_, suffix, found := strings.Cut(name, "@")
instance, _, _ := strings.Cut(suffix, ".")
if found && instance == "" {
static = append(static, name)
continue
}
disabled = append(disabled, name)
case "static":
// Make sure we filter already loaded static multi-instance units
instance := name
if strings.Contains(name, "@") {
prefix, _, _ := strings.Cut(name, "@")
suffix := path.Ext(name)
instance = prefix + "@" + suffix
}
if seen[instance] || seen[name] {
continue
}
seen[instance] = true
static = append(static, name)
}
}
// Resolve the disabled and remaining static units
disabledStates, err := s.client.ListUnitsByNamesContext(ctx, disabled)
if err != nil {
return fmt.Errorf("listing unit states failed: %w", err)
}
states = append(states, disabledStates...)
// Add special information about unused static units
for _, name := range static {
if !strings.EqualFold(strings.TrimPrefix(path.Ext(name), "."), s.UnitType) {
continue
}
states = append(states, dbus.UnitStatus{
Name: name,
LoadState: "stub",
ActiveState: "inactive",
SubState: "dead",
})
}
}
// Merge the unit information into one struct
for _, state := range states {
// Filter units of the wrong type
if idx := strings.LastIndex(state.Name, "."); idx < 0 || state.Name[idx+1:] != s.UnitType {
continue
}
// Map the state names to numerical values
load, ok := loadMap[state.LoadState]
if !ok {
acc.AddError(fmt.Errorf("parsing field 'load' failed, value not in map: %s", state.LoadState))
continue
}
active, ok := activeMap[state.ActiveState]
if !ok {
acc.AddError(fmt.Errorf("parsing field 'active' failed, value not in map: %s", state.ActiveState))
continue
}
subState, ok := subMap[state.SubState]
if !ok {
acc.AddError(fmt.Errorf("parsing field 'sub' failed, value not in map: %s", state.SubState))
continue
}
// Create the metric
tags := map[string]string{
"name": state.Name,
"load": state.LoadState,
"active": state.ActiveState,
"sub": state.SubState,
}
if s.scope == "user" {
tags["user"] = s.user
}
fields := map[string]interface{}{
"load_code": load,
"active_code": active,
"sub_code": subState,
}
if s.Details {
properties, err := s.client.GetUnitTypePropertiesContext(ctx, state.Name, s.unitTypeDBus)
if err != nil {
// Skip units returning "Unknown interface" errors as those indicate
// that the unit is of the wrong type.
if strings.Contains(err.Error(), "Unknown interface") {
continue
}
// For other units we make up properties, usually those are
// disabled multi-instance units
properties = map[string]interface{}{
"StatusErrno": int64(-1),
"NRestarts": uint64(0),
}
}
// Get required unit file properties
unitProperties, err := s.client.GetUnitPropertiesContext(ctx, state.Name)
if err != nil && !s.warnUnitProps[state.Name] {
s.Log.Warnf("Cannot read unit properties for %q: %v", state.Name, err)
s.warnUnitProps[state.Name] = true
}
// Set tags
if v, found := unitProperties["UnitFileState"]; found {
tags["state"] = v.(string)
}
if v, found := unitProperties["UnitFilePreset"]; found {
tags["preset"] = v.(string)
}
// Set fields
if v, found := unitProperties["ActiveEnterTimestamp"]; found {
fields["active_enter_timestamp_us"] = v
}
fields["status_errno"] = properties["StatusErrno"]
fields["restarts"] = properties["NRestarts"]
fields["pid"] = properties["MainPID"]
fields["mem_current"] = properties["MemoryCurrent"]
fields["mem_peak"] = properties["MemoryPeak"]
fields["mem_avail"] = properties["MemoryAvailable"]
fields["swap_current"] = properties["MemorySwapCurrent"]
fields["swap_peak"] = properties["MemorySwapPeak"]
// Sanitize unset memory fields
for k, value := range fields {
switch {
case strings.HasPrefix(k, "mem_"), strings.HasPrefix(k, "swap_"):
v, ok := value.(uint64)
if ok && v == math.MaxUint64 || value == nil {
fields[k] = uint64(0)
}
}
}
}
acc.AddFields("systemd_units", fields, tags)
}
return nil
}
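The instance bookkeeping in Gather collapses instantiated units onto their template so that, for example, every getty@ttyN.service counts against a single seen entry. The same logic extracted into a standalone helper for illustration (collapse is not a function in the plugin):

package main

import (
	"fmt"
	"path"
	"strings"
)

// collapse reduces an instantiated unit name to its template, mirroring
// the dedup logic in Gather: "getty@tty1.service" and
// "getty@tty2.service" both map to "getty@.service".
func collapse(name string) string {
	if !strings.Contains(name, "@") {
		return name
	}
	prefix, _, _ := strings.Cut(name, "@")
	return prefix + "@" + path.Ext(name)
}

func main() {
	fmt.Println(collapse("getty@tty1.service")) // getty@.service
	fmt.Println(collapse("ssh.service"))        // ssh.service
}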
func (s *SystemdUnits) Stop() {
if s.client != nil && s.client.Connected() {
s.client.Close()
}
s.client = nil
}
func init() { func init() {
inputs.Add("systemd_units", func() telegraf.Input { inputs.Add("systemd_units", func() telegraf.Input {
return &SystemdUnits{Timeout: config.Duration(5 * time.Second)} return &SystemdUnits{Timeout: config.Duration(5 * time.Second)}

View File

@ -1,425 +0,0 @@
//go:build linux
package systemd_units
import (
"context"
"fmt"
"math"
"os/user"
"path"
"strings"
"time"
"github.com/coreos/go-systemd/v22/dbus"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
)
// Below are mappings of systemd state tables as defined in
// https://github.com/systemd/systemd/blob/c87700a1335f489be31cd3549927da68b5638819/src/basic/unit-def.c
// Duplicate strings are removed from this list.
// This map is used by `subcommand_show` and `subcommand_list`. Changes must be
// compatible with both subcommands.
var loadMap = map[string]int{
"loaded": 0,
"stub": 1,
"not-found": 2,
"bad-setting": 3,
"error": 4,
"merged": 5,
"masked": 6,
}
var activeMap = map[string]int{
"active": 0,
"reloading": 1,
"inactive": 2,
"failed": 3,
"activating": 4,
"deactivating": 5,
}
var subMap = map[string]int{
// service_state_table, offset 0x0000
"running": 0x0000,
"dead": 0x0001,
"start-pre": 0x0002,
"start": 0x0003,
"exited": 0x0004,
"reload": 0x0005,
"stop": 0x0006,
"stop-watchdog": 0x0007,
"stop-sigterm": 0x0008,
"stop-sigkill": 0x0009,
"stop-post": 0x000a,
"final-sigterm": 0x000b,
"failed": 0x000c,
"auto-restart": 0x000d,
"condition": 0x000e,
"cleaning": 0x000f,
// automount_state_table, offset 0x0010
// continuation of service_state_table
"waiting": 0x0010,
"reload-signal": 0x0011,
"reload-notify": 0x0012,
"final-watchdog": 0x0013,
"dead-before-auto-restart": 0x0014,
"failed-before-auto-restart": 0x0015,
"dead-resources-pinned": 0x0016,
"auto-restart-queued": 0x0017,
// device_state_table, offset 0x0020
"tentative": 0x0020,
"plugged": 0x0021,
// mount_state_table, offset 0x0030
"mounting": 0x0030,
"mounting-done": 0x0031,
"mounted": 0x0032,
"remounting": 0x0033,
"unmounting": 0x0034,
"remounting-sigterm": 0x0035,
"remounting-sigkill": 0x0036,
"unmounting-sigterm": 0x0037,
"unmounting-sigkill": 0x0038,
// path_state_table, offset 0x0040
// scope_state_table, offset 0x0050
"abandoned": 0x0050,
// slice_state_table, offset 0x0060
"active": 0x0060,
// socket_state_table, offset 0x0070
"start-chown": 0x0070,
"start-post": 0x0071,
"listening": 0x0072,
"stop-pre": 0x0073,
"stop-pre-sigterm": 0x0074,
"stop-pre-sigkill": 0x0075,
"final-sigkill": 0x0076,
// swap_state_table, offset 0x0080
"activating": 0x0080,
"activating-done": 0x0081,
"deactivating": 0x0082,
"deactivating-sigterm": 0x0083,
"deactivating-sigkill": 0x0084,
// target_state_table, offset 0x0090
// timer_state_table, offset 0x00a0
"elapsed": 0x00a0,
}
type client interface {
Connected() bool
Close()
ListUnitFilesByPatternsContext(ctx context.Context, states, pattern []string) ([]dbus.UnitFile, error)
ListUnitsByNamesContext(ctx context.Context, units []string) ([]dbus.UnitStatus, error)
GetUnitTypePropertiesContext(ctx context.Context, unit, unitType string) (map[string]interface{}, error)
GetUnitPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error)
ListUnitsContext(ctx context.Context) ([]dbus.UnitStatus, error)
}
type archParams struct {
client client
pattern []string
filter filter.Filter
unitTypeDBus string
scope string
user string
warnUnitProps map[string]bool
}
func (s *SystemdUnits) Init() error {
// Set default pattern
if s.Pattern == "" {
s.Pattern = "*"
}
// Check unit-type and convert the first letter to uppercase as this is
// what dbus expects.
switch s.UnitType {
case "":
s.UnitType = "service"
case "service", "socket", "target", "device", "mount", "automount", "swap",
"timer", "path", "slice", "scope":
default:
return fmt.Errorf("invalid 'unittype' %q", s.UnitType)
}
s.unitTypeDBus = strings.ToUpper(s.UnitType[0:1]) + strings.ToLower(s.UnitType[1:])
s.pattern = strings.Split(s.Pattern, " ")
f, err := filter.Compile(s.pattern)
if err != nil {
return fmt.Errorf("compiling filter failed: %w", err)
}
s.filter = f
switch s.Scope {
case "", "system":
s.scope = "system"
case "user":
u, err := user.Current()
if err != nil {
return fmt.Errorf("unable to determine user: %w", err)
}
s.scope = "user"
s.user = u.Username
default:
return fmt.Errorf("invalid 'scope' %q", s.Scope)
}
s.warnUnitProps = make(map[string]bool)
return nil
}
func (s *SystemdUnits) Start(telegraf.Accumulator) error {
ctx := context.Background()
var client *dbus.Conn
var err error
if s.scope == "user" {
client, err = dbus.NewUserConnectionContext(ctx)
} else {
client, err = dbus.NewSystemConnectionContext(ctx)
}
if err != nil {
return err
}
s.client = client
return nil
}
func (s *SystemdUnits) Stop() {
if s.client != nil && s.client.Connected() {
s.client.Close()
}
s.client = nil
}
func (s *SystemdUnits) Gather(acc telegraf.Accumulator) error {
// Reconnect in case the connection was lost
if !s.client.Connected() {
s.Log.Debug("Connection to systemd daemon lost, trying to reconnect...")
s.Stop()
if err := s.Start(acc); err != nil {
return err
}
}
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.Timeout))
defer cancel()
// List all loaded units to handle multi-instance units correctly
loaded, err := s.client.ListUnitsContext(ctx)
if err != nil {
return fmt.Errorf("listing loaded units failed: %w", err)
}
var files []dbus.UnitFile
if s.CollectDisabled {
// List all unit files matching the pattern to also get disabled units
list := []string{"enabled", "disabled", "static"}
files, err = s.client.ListUnitFilesByPatternsContext(ctx, list, s.pattern)
if err != nil {
return fmt.Errorf("listing unit files failed: %w", err)
}
}
// Collect all matching units, the loaded ones and the disabled ones
states := make([]dbus.UnitStatus, 0, len(loaded))
// Match all loaded units first
seen := make(map[string]bool)
for _, u := range loaded {
if !s.filter.Match(u.Name) {
continue
}
states = append(states, u)
// Remember multi-instance units to remove duplicates from files
instance := u.Name
if strings.Contains(u.Name, "@") {
prefix, _, _ := strings.Cut(u.Name, "@")
suffix := path.Ext(u.Name)
instance = prefix + "@" + suffix
}
seen[instance] = true
}
// Now split the unit-files into disabled ones and static ones, ignore
// enabled units as those are already contained in the "loaded" list.
if len(files) > 0 {
disabled := make([]string, 0, len(files))
static := make([]string, 0, len(files))
for _, f := range files {
name := path.Base(f.Path)
switch f.Type {
case "disabled":
if seen[name] {
continue
}
seen[name] = true
// Detect disabled multi-instance units and declare them as static
_, suffix, found := strings.Cut(name, "@")
instance, _, _ := strings.Cut(suffix, ".")
if found && instance == "" {
static = append(static, name)
continue
}
disabled = append(disabled, name)
case "static":
// Make sure we filter already loaded static multi-instance units
instance := name
if strings.Contains(name, "@") {
prefix, _, _ := strings.Cut(name, "@")
suffix := path.Ext(name)
instance = prefix + "@" + suffix
}
if seen[instance] || seen[name] {
continue
}
seen[instance] = true
static = append(static, name)
}
}
// Resolve the disabled and remaining static units
disabledStates, err := s.client.ListUnitsByNamesContext(ctx, disabled)
if err != nil {
return fmt.Errorf("listing unit states failed: %w", err)
}
states = append(states, disabledStates...)
// Add special information about unused static units
for _, name := range static {
if !strings.EqualFold(strings.TrimPrefix(path.Ext(name), "."), s.UnitType) {
continue
}
states = append(states, dbus.UnitStatus{
Name: name,
LoadState: "stub",
ActiveState: "inactive",
SubState: "dead",
})
}
}
// Merge the unit information into one struct
for _, state := range states {
// Filter units of the wrong type
if idx := strings.LastIndex(state.Name, "."); idx < 0 || state.Name[idx+1:] != s.UnitType {
continue
}
// Map the state names to numerical values
load, ok := loadMap[state.LoadState]
if !ok {
acc.AddError(fmt.Errorf("parsing field 'load' failed, value not in map: %s", state.LoadState))
continue
}
active, ok := activeMap[state.ActiveState]
if !ok {
acc.AddError(fmt.Errorf("parsing field 'active' failed, value not in map: %s", state.ActiveState))
continue
}
subState, ok := subMap[state.SubState]
if !ok {
acc.AddError(fmt.Errorf("parsing field 'sub' failed, value not in map: %s", state.SubState))
continue
}
// Create the metric
tags := map[string]string{
"name": state.Name,
"load": state.LoadState,
"active": state.ActiveState,
"sub": state.SubState,
}
if s.scope == "user" {
tags["user"] = s.user
}
fields := map[string]interface{}{
"load_code": load,
"active_code": active,
"sub_code": subState,
}
if s.Details {
properties, err := s.client.GetUnitTypePropertiesContext(ctx, state.Name, s.unitTypeDBus)
if err != nil {
// Skip units returning "Unknown interface" errors as those indicate
// that the unit is of the wrong type.
if strings.Contains(err.Error(), "Unknown interface") {
continue
}
// For other units we make up properties, usually those are
// disabled multi-instance units
properties = map[string]interface{}{
"StatusErrno": int64(-1),
"NRestarts": uint64(0),
}
}
// Get required unit file properties
unitProperties, err := s.client.GetUnitPropertiesContext(ctx, state.Name)
if err != nil && !s.warnUnitProps[state.Name] {
s.Log.Warnf("Cannot read unit properties for %q: %v", state.Name, err)
s.warnUnitProps[state.Name] = true
}
// Set tags
if v, found := unitProperties["UnitFileState"]; found {
tags["state"] = v.(string)
}
if v, found := unitProperties["UnitFilePreset"]; found {
tags["preset"] = v.(string)
}
// Set fields
if v, found := unitProperties["ActiveEnterTimestamp"]; found {
fields["active_enter_timestamp_us"] = v
}
fields["status_errno"] = properties["StatusErrno"]
fields["restarts"] = properties["NRestarts"]
fields["pid"] = properties["MainPID"]
fields["mem_current"] = properties["MemoryCurrent"]
fields["mem_peak"] = properties["MemoryPeak"]
fields["mem_avail"] = properties["MemoryAvailable"]
fields["swap_current"] = properties["MemorySwapCurrent"]
fields["swap_peak"] = properties["MemorySwapPeak"]
// Sanitize unset memory fields
for k, value := range fields {
switch {
case strings.HasPrefix(k, "mem_"), strings.HasPrefix(k, "swap_"):
v, ok := value.(uint64)
if ok && v == math.MaxUint64 || value == nil {
fields[k] = uint64(0)
}
}
}
}
acc.AddFields("systemd_units", fields, tags)
}
return nil
}

View File

@ -1,20 +1,33 @@
//go:generate ../../../tools/readme_config_includer/generator
//go:build !linux //go:build !linux
package systemd_units package systemd_units
import "github.com/influxdata/telegraf" import (
_ "embed"
type archParams struct{} "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
//go:embed sample.conf
var sampleConfig string
type SystemdUnits struct {
Log telegraf.Logger `toml:"-"`
}
func (*SystemdUnits) SampleConfig() string { return sampleConfig }
func (s *SystemdUnits) Init() error { func (s *SystemdUnits) Init() error {
s.Log.Info("Skipping plugin as it is not supported by this platform!") s.Log.Warn("Current platform is not supported")
// Required to remove linter-warning on unused struct member
_ = s.archParams
return nil return nil
} }
func (*SystemdUnits) Gather(_ telegraf.Accumulator) error { func (*SystemdUnits) Gather(telegraf.Accumulator) error { return nil }
return nil
func init() {
inputs.Add("systemd_units", func() telegraf.Input {
return &SystemdUnits{}
})
} }