chore: Fix linter findings for `revive:exported` in `plugins/inputs/[t-v]*` (#16408)

This commit is contained in:
Paweł Żak 2025-01-21 16:17:01 +01:00 committed by GitHub
parent 7a65a4d355
commit e57f48f608
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
28 changed files with 1084 additions and 1098 deletions

View File

@ -19,6 +19,9 @@ import (
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
) )
//go:embed sample.conf
var sampleConfig string
type Tacacs struct { type Tacacs struct {
Servers []string `toml:"servers"` Servers []string `toml:"servers"`
Username config.Secret `toml:"username"` Username config.Secret `toml:"username"`
@ -31,9 +34,6 @@ type Tacacs struct {
authStart tacplus.AuthenStart authStart tacplus.AuthenStart
} }
//go:embed sample.conf
var sampleConfig string
func (*Tacacs) SampleConfig() string { func (*Tacacs) SampleConfig() string {
return sampleConfig return sampleConfig
} }
@ -74,7 +74,22 @@ func (t *Tacacs) Init() error {
return nil return nil
} }
func AuthenReplyToString(code uint8) string { func (t *Tacacs) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup
for idx := range t.clients {
wg.Add(1)
go func(client *tacplus.Client) {
defer wg.Done()
acc.AddError(t.pollServer(acc, client))
}(&t.clients[idx])
}
wg.Wait()
return nil
}
func authenReplyToString(code uint8) string {
switch code { switch code {
case tacplus.AuthenStatusPass: case tacplus.AuthenStatusPass:
return `AuthenStatusPass` return `AuthenStatusPass`
@ -96,21 +111,6 @@ func AuthenReplyToString(code uint8) string {
return "AuthenStatusUnknown(" + strconv.FormatUint(uint64(code), 10) + ")" return "AuthenStatusUnknown(" + strconv.FormatUint(uint64(code), 10) + ")"
} }
func (t *Tacacs) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup
for idx := range t.clients {
wg.Add(1)
go func(client *tacplus.Client) {
defer wg.Done()
acc.AddError(t.pollServer(acc, client))
}(&t.clients[idx])
}
wg.Wait()
return nil
}
func (t *Tacacs) pollServer(acc telegraf.Accumulator, client *tacplus.Client) error { func (t *Tacacs) pollServer(acc telegraf.Accumulator, client *tacplus.Client) error {
// Create the fields for this metric // Create the fields for this metric
tags := map[string]string{"source": client.Addr} tags := map[string]string{"source": client.Addr}
@ -157,7 +157,7 @@ func (t *Tacacs) pollServer(acc telegraf.Accumulator, client *tacplus.Client) er
defer session.Close() defer session.Close()
if reply.Status != tacplus.AuthenStatusGetUser { if reply.Status != tacplus.AuthenStatusGetUser {
fields["responsetime_ms"] = time.Since(startTime).Milliseconds() fields["responsetime_ms"] = time.Since(startTime).Milliseconds()
fields["response_status"] = AuthenReplyToString(reply.Status) fields["response_status"] = authenReplyToString(reply.Status)
acc.AddFields("tacacs", fields, tags) acc.AddFields("tacacs", fields, tags)
return nil return nil
} }
@ -174,7 +174,7 @@ func (t *Tacacs) pollServer(acc telegraf.Accumulator, client *tacplus.Client) er
} }
if reply.Status != tacplus.AuthenStatusGetPass { if reply.Status != tacplus.AuthenStatusGetPass {
fields["responsetime_ms"] = time.Since(startTime).Milliseconds() fields["responsetime_ms"] = time.Since(startTime).Milliseconds()
fields["response_status"] = AuthenReplyToString(reply.Status) fields["response_status"] = authenReplyToString(reply.Status)
acc.AddFields("tacacs", fields, tags) acc.AddFields("tacacs", fields, tags)
return nil return nil
} }
@ -191,13 +191,13 @@ func (t *Tacacs) pollServer(acc telegraf.Accumulator, client *tacplus.Client) er
} }
if reply.Status != tacplus.AuthenStatusPass { if reply.Status != tacplus.AuthenStatusPass {
fields["responsetime_ms"] = time.Since(startTime).Milliseconds() fields["responsetime_ms"] = time.Since(startTime).Milliseconds()
fields["response_status"] = AuthenReplyToString(reply.Status) fields["response_status"] = authenReplyToString(reply.Status)
acc.AddFields("tacacs", fields, tags) acc.AddFields("tacacs", fields, tags)
return nil return nil
} }
fields["responsetime_ms"] = time.Since(startTime).Milliseconds() fields["responsetime_ms"] = time.Since(startTime).Milliseconds()
fields["response_status"] = AuthenReplyToString(reply.Status) fields["response_status"] = authenReplyToString(reply.Status)
acc.AddFields("tacacs", fields, tags) acc.AddFields("tacacs", fields, tags)
return nil return nil
} }

View File

@ -10,34 +10,103 @@ import (
"github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/config"
) )
// Indicates relation to the multiline event: previous or next const (
type MultilineMatchWhichLine int // previous => Append current line to previous line
previous multilineMatchWhichLine = iota
// next => next line will be appended to current line
next
)
type Multiline struct { // Indicates relation to the multiline event: previous or next
config *MultilineConfig type multilineMatchWhichLine int
type multiline struct {
config *multilineConfig
enabled bool enabled bool
patternRegexp *regexp.Regexp patternRegexp *regexp.Regexp
quote byte quote byte
inQuote bool inQuote bool
} }
type MultilineConfig struct { type multilineConfig struct {
Pattern string `toml:"pattern"` Pattern string `toml:"pattern"`
MatchWhichLine MultilineMatchWhichLine `toml:"match_which_line"` MatchWhichLine multilineMatchWhichLine `toml:"match_which_line"`
InvertMatch bool `toml:"invert_match"` InvertMatch bool `toml:"invert_match"`
PreserveNewline bool `toml:"preserve_newline"` PreserveNewline bool `toml:"preserve_newline"`
Quotation string `toml:"quotation"` Quotation string `toml:"quotation"`
Timeout *config.Duration `toml:"timeout"` Timeout *config.Duration `toml:"timeout"`
} }
const ( func (m *multiline) isEnabled() bool {
// Previous => Append current line to previous line return m.enabled
Previous MultilineMatchWhichLine = iota }
// Next => Next line will be appended to current line
Next
)
func (m *MultilineConfig) NewMultiline() (*Multiline, error) { func (m *multiline) processLine(text string, buffer *bytes.Buffer) string {
if m.matchQuotation(text) || m.matchString(text) {
// Restore the newline removed by tail's scanner
if buffer.Len() > 0 && m.config.PreserveNewline {
buffer.WriteString("\n")
}
buffer.WriteString(text)
return ""
}
if m.config.MatchWhichLine == previous {
previousText := buffer.String()
buffer.Reset()
buffer.WriteString(text)
text = previousText
} else {
// next
if buffer.Len() > 0 {
if m.config.PreserveNewline {
buffer.WriteString("\n")
}
buffer.WriteString(text)
text = buffer.String()
buffer.Reset()
}
}
return text
}
func (m *multiline) matchQuotation(text string) bool {
if m.config.Quotation == "ignore" {
return false
}
escaped := 0
count := 0
for i := 0; i < len(text); i++ {
if text[i] == '\\' {
escaped++
continue
}
// If we do encounter a backslash-quote combination, we interpret this
// as an escaped quote and should not count the quote. However,
// backslash-backslash combinations (or any even number of backslashes)
// are interpreted as a literal backslash not escaping the quote.
if text[i] == m.quote && escaped%2 == 0 {
count++
}
// If we encounter any non-quote, non-backslash character we can
// safely reset the escape state.
escaped = 0
}
even := count%2 == 0
m.inQuote = (m.inQuote && even) || (!m.inQuote && !even)
return m.inQuote
}
func (m *multiline) matchString(text string) bool {
if m.patternRegexp != nil {
return m.patternRegexp.MatchString(text) != m.config.InvertMatch
}
return false
}
func (m *multilineConfig) newMultiline() (*multiline, error) {
var r *regexp.Regexp var r *regexp.Regexp
if m.Pattern != "" { if m.Pattern != "" {
@ -67,7 +136,7 @@ func (m *MultilineConfig) NewMultiline() (*Multiline, error) {
m.Timeout = &d m.Timeout = &d
} }
return &Multiline{ return &multiline{
config: m, config: m,
enabled: enabled, enabled: enabled,
patternRegexp: r, patternRegexp: r,
@ -75,41 +144,7 @@ func (m *MultilineConfig) NewMultiline() (*Multiline, error) {
}, nil }, nil
} }
func (m *Multiline) IsEnabled() bool { func flush(buffer *bytes.Buffer) string {
return m.enabled
}
func (m *Multiline) ProcessLine(text string, buffer *bytes.Buffer) string {
if m.matchQuotation(text) || m.matchString(text) {
// Restore the newline removed by tail's scanner
if buffer.Len() > 0 && m.config.PreserveNewline {
buffer.WriteString("\n")
}
buffer.WriteString(text)
return ""
}
if m.config.MatchWhichLine == Previous {
previousText := buffer.String()
buffer.Reset()
buffer.WriteString(text)
text = previousText
} else {
// Next
if buffer.Len() > 0 {
if m.config.PreserveNewline {
buffer.WriteString("\n")
}
buffer.WriteString(text)
text = buffer.String()
buffer.Reset()
}
}
return text
}
func Flush(buffer *bytes.Buffer) string {
if buffer.Len() == 0 { if buffer.Len() == 0 {
return "" return ""
} }
@ -118,66 +153,31 @@ func Flush(buffer *bytes.Buffer) string {
return text return text
} }
func (m *Multiline) matchQuotation(text string) bool { func (w multilineMatchWhichLine) String() string {
if m.config.Quotation == "ignore" {
return false
}
escaped := 0
count := 0
for i := 0; i < len(text); i++ {
if text[i] == '\\' {
escaped++
continue
}
// If we do encounter a backslash-quote combination, we interpret this
// as an escaped quote and should not count the quote. However,
// backslash-backslash combinations (or any even number of backslashes)
// are interpreted as a literal backslash not escaping the quote.
if text[i] == m.quote && escaped%2 == 0 {
count++
}
// If we encounter any non-quote, non-backslash character we can
// safely reset the escape state.
escaped = 0
}
even := count%2 == 0
m.inQuote = (m.inQuote && even) || (!m.inQuote && !even)
return m.inQuote
}
func (m *Multiline) matchString(text string) bool {
if m.patternRegexp != nil {
return m.patternRegexp.MatchString(text) != m.config.InvertMatch
}
return false
}
func (w MultilineMatchWhichLine) String() string {
switch w { switch w {
case Previous: case previous:
return "previous" return "previous"
case Next: case next:
return "next" return "next"
} }
return "" return ""
} }
// UnmarshalTOML implements ability to unmarshal MultilineMatchWhichLine from TOML files. // UnmarshalTOML implements ability to unmarshal multilineMatchWhichLine from TOML files.
func (w *MultilineMatchWhichLine) UnmarshalTOML(data []byte) (err error) { func (w *multilineMatchWhichLine) UnmarshalTOML(data []byte) (err error) {
return w.UnmarshalText(data) return w.UnmarshalText(data)
} }
// UnmarshalText implements encoding.TextUnmarshaler // UnmarshalText implements encoding.TextUnmarshaler
func (w *MultilineMatchWhichLine) UnmarshalText(data []byte) (err error) { func (w *multilineMatchWhichLine) UnmarshalText(data []byte) (err error) {
s := string(data) s := string(data)
switch strings.ToUpper(s) { switch strings.ToUpper(s) {
case `PREVIOUS`, `"PREVIOUS"`, `'PREVIOUS'`: case `PREVIOUS`, `"PREVIOUS"`, `'PREVIOUS'`:
*w = Previous *w = previous
return nil return nil
case `NEXT`, `"NEXT"`, `'NEXT'`: case `NEXT`, `"NEXT"`, `'NEXT'`:
*w = Next *w = next
return nil return nil
} }
*w = -1 *w = -1
@ -185,7 +185,7 @@ func (w *MultilineMatchWhichLine) UnmarshalText(data []byte) (err error) {
} }
// MarshalText implements encoding.TextMarshaler // MarshalText implements encoding.TextMarshaler
func (w MultilineMatchWhichLine) MarshalText() ([]byte, error) { func (w multilineMatchWhichLine) MarshalText() ([]byte, error) {
s := w.String() s := w.String()
if s != "" { if s != "" {
return []byte(s), nil return []byte(s), nil

View File

@ -15,35 +15,35 @@ import (
) )
func TestMultilineConfigOK(t *testing.T) { func TestMultilineConfigOK(t *testing.T) {
c := &MultilineConfig{ c := &multilineConfig{
Pattern: ".*", Pattern: ".*",
MatchWhichLine: Previous, MatchWhichLine: previous,
} }
_, err := c.NewMultiline() _, err := c.newMultiline()
require.NoError(t, err, "Configuration was OK.") require.NoError(t, err, "Configuration was OK.")
} }
func TestMultilineConfigError(t *testing.T) { func TestMultilineConfigError(t *testing.T) {
c := &MultilineConfig{ c := &multilineConfig{
Pattern: "\xA0", Pattern: "\xA0",
MatchWhichLine: Previous, MatchWhichLine: previous,
} }
_, err := c.NewMultiline() _, err := c.newMultiline()
require.Error(t, err, "The pattern was invalid") require.Error(t, err, "The pattern was invalid")
} }
func TestMultilineConfigTimeoutSpecified(t *testing.T) { func TestMultilineConfigTimeoutSpecified(t *testing.T) {
duration := config.Duration(10 * time.Second) duration := config.Duration(10 * time.Second)
c := &MultilineConfig{ c := &multilineConfig{
Pattern: ".*", Pattern: ".*",
MatchWhichLine: Previous, MatchWhichLine: previous,
Timeout: &duration, Timeout: &duration,
} }
m, err := c.NewMultiline() m, err := c.newMultiline()
require.NoError(t, err, "Configuration was OK.") require.NoError(t, err, "Configuration was OK.")
require.Equal(t, duration, *m.config.Timeout) require.Equal(t, duration, *m.config.Timeout)
@ -51,44 +51,44 @@ func TestMultilineConfigTimeoutSpecified(t *testing.T) {
func TestMultilineConfigDefaultTimeout(t *testing.T) { func TestMultilineConfigDefaultTimeout(t *testing.T) {
duration := config.Duration(5 * time.Second) duration := config.Duration(5 * time.Second)
c := &MultilineConfig{ c := &multilineConfig{
Pattern: ".*", Pattern: ".*",
MatchWhichLine: Previous, MatchWhichLine: previous,
} }
m, err := c.NewMultiline() m, err := c.newMultiline()
require.NoError(t, err, "Configuration was OK.") require.NoError(t, err, "Configuration was OK.")
require.Equal(t, duration, *m.config.Timeout) require.Equal(t, duration, *m.config.Timeout)
} }
func TestMultilineIsEnabled(t *testing.T) { func TestMultilineIsEnabled(t *testing.T) {
c := &MultilineConfig{ c := &multilineConfig{
Pattern: ".*", Pattern: ".*",
MatchWhichLine: Previous, MatchWhichLine: previous,
} }
m, err := c.NewMultiline() m, err := c.newMultiline()
require.NoError(t, err, "Configuration was OK.") require.NoError(t, err, "Configuration was OK.")
isEnabled := m.IsEnabled() isEnabled := m.isEnabled()
require.True(t, isEnabled, "Should have been enabled") require.True(t, isEnabled, "Should have been enabled")
} }
func TestMultilineIsDisabled(t *testing.T) { func TestMultilineIsDisabled(t *testing.T) {
c := &MultilineConfig{ c := &multilineConfig{
MatchWhichLine: Previous, MatchWhichLine: previous,
} }
m, err := c.NewMultiline() m, err := c.newMultiline()
require.NoError(t, err, "Configuration was OK.") require.NoError(t, err, "Configuration was OK.")
isEnabled := m.IsEnabled() isEnabled := m.isEnabled()
require.False(t, isEnabled, "Should have been disabled") require.False(t, isEnabled, "Should have been disabled")
} }
func TestMultilineFlushEmpty(t *testing.T) { func TestMultilineFlushEmpty(t *testing.T) {
var buffer bytes.Buffer var buffer bytes.Buffer
text := Flush(&buffer) text := flush(&buffer)
require.Empty(t, text) require.Empty(t, text)
} }
@ -97,78 +97,78 @@ func TestMultilineFlush(t *testing.T) {
var buffer bytes.Buffer var buffer bytes.Buffer
buffer.WriteString("foo") buffer.WriteString("foo")
text := Flush(&buffer) text := flush(&buffer)
require.Equal(t, "foo", text) require.Equal(t, "foo", text)
require.Zero(t, buffer.Len()) require.Zero(t, buffer.Len())
} }
func TestMultiLineProcessLinePrevious(t *testing.T) { func TestMultiLineProcessLinePrevious(t *testing.T) {
c := &MultilineConfig{ c := &multilineConfig{
Pattern: "^=>", Pattern: "^=>",
MatchWhichLine: Previous, MatchWhichLine: previous,
} }
m, err := c.NewMultiline() m, err := c.newMultiline()
require.NoError(t, err, "Configuration was OK.") require.NoError(t, err, "Configuration was OK.")
var buffer bytes.Buffer var buffer bytes.Buffer
text := m.ProcessLine("1", &buffer) text := m.processLine("1", &buffer)
require.Empty(t, text) require.Empty(t, text)
require.NotZero(t, buffer.Len()) require.NotZero(t, buffer.Len())
text = m.ProcessLine("=>2", &buffer) text = m.processLine("=>2", &buffer)
require.Empty(t, text) require.Empty(t, text)
require.NotZero(t, buffer.Len()) require.NotZero(t, buffer.Len())
text = m.ProcessLine("=>3", &buffer) text = m.processLine("=>3", &buffer)
require.Empty(t, text) require.Empty(t, text)
require.NotZero(t, buffer.Len()) require.NotZero(t, buffer.Len())
text = m.ProcessLine("4", &buffer) text = m.processLine("4", &buffer)
require.Equal(t, "1=>2=>3", text) require.Equal(t, "1=>2=>3", text)
require.NotZero(t, buffer.Len()) require.NotZero(t, buffer.Len())
text = m.ProcessLine("5", &buffer) text = m.processLine("5", &buffer)
require.Equal(t, "4", text) require.Equal(t, "4", text)
require.Equal(t, "5", buffer.String()) require.Equal(t, "5", buffer.String())
} }
func TestMultiLineProcessLineNext(t *testing.T) { func TestMultiLineProcessLineNext(t *testing.T) {
c := &MultilineConfig{ c := &multilineConfig{
Pattern: "=>$", Pattern: "=>$",
MatchWhichLine: Next, MatchWhichLine: next,
} }
m, err := c.NewMultiline() m, err := c.newMultiline()
require.NoError(t, err, "Configuration was OK.") require.NoError(t, err, "Configuration was OK.")
var buffer bytes.Buffer var buffer bytes.Buffer
text := m.ProcessLine("1=>", &buffer) text := m.processLine("1=>", &buffer)
require.Empty(t, text) require.Empty(t, text)
require.NotZero(t, buffer.Len()) require.NotZero(t, buffer.Len())
text = m.ProcessLine("2=>", &buffer) text = m.processLine("2=>", &buffer)
require.Empty(t, text) require.Empty(t, text)
require.NotZero(t, buffer.Len()) require.NotZero(t, buffer.Len())
text = m.ProcessLine("3=>", &buffer) text = m.processLine("3=>", &buffer)
require.Empty(t, text) require.Empty(t, text)
require.NotZero(t, buffer.Len()) require.NotZero(t, buffer.Len())
text = m.ProcessLine("4", &buffer) text = m.processLine("4", &buffer)
require.Equal(t, "1=>2=>3=>4", text) require.Equal(t, "1=>2=>3=>4", text)
require.Zero(t, buffer.Len()) require.Zero(t, buffer.Len())
text = m.ProcessLine("5", &buffer) text = m.processLine("5", &buffer)
require.Equal(t, "5", text) require.Equal(t, "5", text)
require.Zero(t, buffer.Len()) require.Zero(t, buffer.Len())
} }
func TestMultiLineMatchStringWithInvertMatchFalse(t *testing.T) { func TestMultiLineMatchStringWithInvertMatchFalse(t *testing.T) {
c := &MultilineConfig{ c := &multilineConfig{
Pattern: "=>$", Pattern: "=>$",
MatchWhichLine: Next, MatchWhichLine: next,
InvertMatch: false, InvertMatch: false,
} }
m, err := c.NewMultiline() m, err := c.newMultiline()
require.NoError(t, err, "Configuration was OK.") require.NoError(t, err, "Configuration was OK.")
matches1 := m.matchString("t=>") matches1 := m.matchString("t=>")
@ -179,12 +179,12 @@ func TestMultiLineMatchStringWithInvertMatchFalse(t *testing.T) {
} }
func TestMultiLineMatchStringWithInvertTrue(t *testing.T) { func TestMultiLineMatchStringWithInvertTrue(t *testing.T) {
c := &MultilineConfig{ c := &multilineConfig{
Pattern: "=>$", Pattern: "=>$",
MatchWhichLine: Next, MatchWhichLine: next,
InvertMatch: true, InvertMatch: true,
} }
m, err := c.NewMultiline() m, err := c.newMultiline()
require.NoError(t, err, "Configuration was OK.") require.NoError(t, err, "Configuration was OK.")
matches1 := m.matchString("t=>") matches1 := m.matchString("t=>")
@ -195,33 +195,33 @@ func TestMultiLineMatchStringWithInvertTrue(t *testing.T) {
} }
func TestMultilineWhat(t *testing.T) { func TestMultilineWhat(t *testing.T) {
var w1 MultilineMatchWhichLine var w1 multilineMatchWhichLine
require.NoError(t, w1.UnmarshalTOML([]byte(`"previous"`))) require.NoError(t, w1.UnmarshalTOML([]byte(`"previous"`)))
require.Equal(t, Previous, w1) require.Equal(t, previous, w1)
var w2 MultilineMatchWhichLine var w2 multilineMatchWhichLine
require.NoError(t, w2.UnmarshalTOML([]byte(`previous`))) require.NoError(t, w2.UnmarshalTOML([]byte(`previous`)))
require.Equal(t, Previous, w2) require.Equal(t, previous, w2)
var w3 MultilineMatchWhichLine var w3 multilineMatchWhichLine
require.NoError(t, w3.UnmarshalTOML([]byte(`'previous'`))) require.NoError(t, w3.UnmarshalTOML([]byte(`'previous'`)))
require.Equal(t, Previous, w3) require.Equal(t, previous, w3)
var w4 MultilineMatchWhichLine var w4 multilineMatchWhichLine
require.NoError(t, w4.UnmarshalTOML([]byte(`"next"`))) require.NoError(t, w4.UnmarshalTOML([]byte(`"next"`)))
require.Equal(t, Next, w4) require.Equal(t, next, w4)
var w5 MultilineMatchWhichLine var w5 multilineMatchWhichLine
require.NoError(t, w5.UnmarshalTOML([]byte(`next`))) require.NoError(t, w5.UnmarshalTOML([]byte(`next`)))
require.Equal(t, Next, w5) require.Equal(t, next, w5)
var w6 MultilineMatchWhichLine var w6 multilineMatchWhichLine
require.NoError(t, w6.UnmarshalTOML([]byte(`'next'`))) require.NoError(t, w6.UnmarshalTOML([]byte(`'next'`)))
require.Equal(t, Next, w6) require.Equal(t, next, w6)
var w7 MultilineMatchWhichLine var w7 multilineMatchWhichLine
require.Error(t, w7.UnmarshalTOML([]byte(`nope`))) require.Error(t, w7.UnmarshalTOML([]byte(`nope`)))
require.Equal(t, MultilineMatchWhichLine(-1), w7) require.Equal(t, multilineMatchWhichLine(-1), w7)
} }
func TestMultilineQuoted(t *testing.T) { func TestMultilineQuoted(t *testing.T) {
@ -265,12 +265,12 @@ func TestMultilineQuoted(t *testing.T) {
fmt.Sprintf("1660819827450,5,all of %sthis%s should %sbasically%s work...,E", tt.quote, tt.quote, tt.quote, tt.quote), fmt.Sprintf("1660819827450,5,all of %sthis%s should %sbasically%s work...,E", tt.quote, tt.quote, tt.quote, tt.quote),
} }
c := &MultilineConfig{ c := &multilineConfig{
MatchWhichLine: Next, MatchWhichLine: next,
Quotation: tt.quotation, Quotation: tt.quotation,
PreserveNewline: true, PreserveNewline: true,
} }
m, err := c.NewMultiline() m, err := c.newMultiline()
require.NoError(t, err) require.NoError(t, err)
f, err := os.Open(filepath.Join("testdata", tt.filename)) f, err := os.Open(filepath.Join("testdata", tt.filename))
@ -283,13 +283,13 @@ func TestMultilineQuoted(t *testing.T) {
for scanner.Scan() { for scanner.Scan() {
line := scanner.Text() line := scanner.Text()
text := m.ProcessLine(line, &buffer) text := m.processLine(line, &buffer)
if text == "" { if text == "" {
continue continue
} }
result = append(result, text) result = append(result, text)
} }
if text := Flush(&buffer); text != "" { if text := flush(&buffer); text != "" {
result = append(result, text) result = append(result, text)
} }
@ -327,12 +327,12 @@ func TestMultilineQuotedError(t *testing.T) {
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c := &MultilineConfig{ c := &multilineConfig{
MatchWhichLine: Next, MatchWhichLine: next,
Quotation: tt.quotation, Quotation: tt.quotation,
PreserveNewline: true, PreserveNewline: true,
} }
m, err := c.NewMultiline() m, err := c.newMultiline()
require.NoError(t, err) require.NoError(t, err)
f, err := os.Open(filepath.Join("testdata", tt.filename)) f, err := os.Open(filepath.Join("testdata", tt.filename))
@ -345,13 +345,13 @@ func TestMultilineQuotedError(t *testing.T) {
for scanner.Scan() { for scanner.Scan() {
line := scanner.Text() line := scanner.Text()
text := m.ProcessLine(line, &buffer) text := m.processLine(line, &buffer)
if text == "" { if text == "" {
continue continue
} }
result = append(result, text) result = append(result, text)
} }
if text := Flush(&buffer); text != "" { if text := flush(&buffer); text != "" {
result = append(result, text) result = append(result, text)
} }
@ -364,12 +364,12 @@ func TestMultilineNewline(t *testing.T) {
tests := []struct { tests := []struct {
name string name string
filename string filename string
cfg *MultilineConfig cfg *multilineConfig
expected []string expected []string
}{ }{
{ {
name: "do not preserve newline", name: "do not preserve newline",
cfg: &MultilineConfig{ cfg: &multilineConfig{
Pattern: `\[[0-9]{2}/[A-Za-z]{3}/[0-9]{4}:[0-9]{2}:[0-9]{2}:[0-9]{2} \+[0-9]{4}\]`, Pattern: `\[[0-9]{2}/[A-Za-z]{3}/[0-9]{4}:[0-9]{2}:[0-9]{2}:[0-9]{2} \+[0-9]{4}\]`,
InvertMatch: true, InvertMatch: true,
}, },
@ -386,7 +386,7 @@ func TestMultilineNewline(t *testing.T) {
}, },
{ {
name: "preserve newline", name: "preserve newline",
cfg: &MultilineConfig{ cfg: &multilineConfig{
Pattern: `\[[0-9]{2}/[A-Za-z]{3}/[0-9]{4}:[0-9]{2}:[0-9]{2}:[0-9]{2} \+[0-9]{4}\]`, Pattern: `\[[0-9]{2}/[A-Za-z]{3}/[0-9]{4}:[0-9]{2}:[0-9]{2}:[0-9]{2} \+[0-9]{4}\]`,
InvertMatch: true, InvertMatch: true,
PreserveNewline: true, PreserveNewline: true,
@ -406,7 +406,7 @@ java.lang.ArithmeticException: / by zero
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
m, err := tt.cfg.NewMultiline() m, err := tt.cfg.newMultiline()
require.NoError(t, err) require.NoError(t, err)
f, err := os.Open(filepath.Join("testdata", tt.filename)) f, err := os.Open(filepath.Join("testdata", tt.filename))
@ -419,13 +419,13 @@ java.lang.ArithmeticException: / by zero
for scanner.Scan() { for scanner.Scan() {
line := scanner.Text() line := scanner.Text()
text := m.ProcessLine(line, &buffer) text := m.processLine(line, &buffer)
if text == "" { if text == "" {
continue continue
} }
result = append(result, text) result = append(result, text)
} }
if text := Flush(&buffer); text != "" { if text := flush(&buffer); text != "" {
result = append(result, text) result = append(result, text)
} }
@ -435,41 +435,41 @@ java.lang.ArithmeticException: / by zero
} }
func TestMultiLineQuotedAndPattern(t *testing.T) { func TestMultiLineQuotedAndPattern(t *testing.T) {
c := &MultilineConfig{ c := &multilineConfig{
Pattern: "=>$", Pattern: "=>$",
MatchWhichLine: Next, MatchWhichLine: next,
Quotation: "double-quotes", Quotation: "double-quotes",
PreserveNewline: true, PreserveNewline: true,
} }
m, err := c.NewMultiline() m, err := c.newMultiline()
require.NoError(t, err, "Configuration was OK.") require.NoError(t, err, "Configuration was OK.")
var buffer bytes.Buffer var buffer bytes.Buffer
text := m.ProcessLine("1=>", &buffer) text := m.processLine("1=>", &buffer)
require.Empty(t, text) require.Empty(t, text)
require.NotZero(t, buffer.Len()) require.NotZero(t, buffer.Len())
text = m.ProcessLine("2=>", &buffer) text = m.processLine("2=>", &buffer)
require.Empty(t, text) require.Empty(t, text)
require.NotZero(t, buffer.Len()) require.NotZero(t, buffer.Len())
text = m.ProcessLine(`"a quoted`, &buffer) text = m.processLine(`"a quoted`, &buffer)
require.Empty(t, text) require.Empty(t, text)
require.NotZero(t, buffer.Len()) require.NotZero(t, buffer.Len())
text = m.ProcessLine(`multiline string"=>`, &buffer) text = m.processLine(`multiline string"=>`, &buffer)
require.Empty(t, text) require.Empty(t, text)
require.NotZero(t, buffer.Len()) require.NotZero(t, buffer.Len())
text = m.ProcessLine("3=>", &buffer) text = m.processLine("3=>", &buffer)
require.Empty(t, text) require.Empty(t, text)
require.NotZero(t, buffer.Len()) require.NotZero(t, buffer.Len())
text = m.ProcessLine("4", &buffer) text = m.processLine("4", &buffer)
require.Equal(t, "1=>\n2=>\n\"a quoted\nmultiline string\"=>\n3=>\n4", text) require.Equal(t, "1=>\n2=>\n\"a quoted\nmultiline string\"=>\n3=>\n4", text)
require.Zero(t, buffer.Len()) require.Zero(t, buffer.Len())
text = m.ProcessLine("5", &buffer) text = m.processLine("5", &buffer)
require.Equal(t, "5", text) require.Equal(t, "5", text)
require.Zero(t, buffer.Len()) require.Zero(t, buffer.Len())
} }

View File

@ -28,16 +28,13 @@ import (
//go:embed sample.conf //go:embed sample.conf
var sampleConfig string var sampleConfig string
var once sync.Once
var ( var (
once sync.Once
offsets = make(map[string]int64) offsets = make(map[string]int64)
offsetsMutex = new(sync.Mutex) offsetsMutex = new(sync.Mutex)
) )
type empty struct{}
type semaphore chan empty
type Tail struct { type Tail struct {
Files []string `toml:"files"` Files []string `toml:"files"`
FromBeginning bool `toml:"from_beginning"` FromBeginning bool `toml:"from_beginning"`
@ -58,8 +55,8 @@ type Tail struct {
acc telegraf.TrackingAccumulator acc telegraf.TrackingAccumulator
MultilineConfig MultilineConfig `toml:"multiline"` MultilineConfig multilineConfig `toml:"multiline"`
multiline *Multiline multiline *multiline
ctx context.Context ctx context.Context
cancel context.CancelFunc cancel context.CancelFunc
@ -67,26 +64,17 @@ type Tail struct {
decoder *encoding.Decoder decoder *encoding.Decoder
} }
func NewTail() *Tail { type empty struct{}
offsetsMutex.Lock() type semaphore chan empty
offsetsCopy := make(map[string]int64, len(offsets))
for k, v := range offsets {
offsetsCopy[k] = v
}
offsetsMutex.Unlock()
return &Tail{
FromBeginning: false,
MaxUndeliveredLines: 1000,
offsets: offsetsCopy,
PathTag: "path",
}
}
func (*Tail) SampleConfig() string { func (*Tail) SampleConfig() string {
return sampleConfig return sampleConfig
} }
func (t *Tail) SetParserFunc(fn telegraf.ParserFunc) {
t.parserFunc = fn
}
func (t *Tail) Init() error { func (t *Tail) Init() error {
if t.MaxUndeliveredLines == 0 { if t.MaxUndeliveredLines == 0 {
return errors.New("max_undelivered_lines must be positive") return errors.New("max_undelivered_lines must be positive")
@ -106,6 +94,43 @@ func (t *Tail) Init() error {
return err return err
} }
func (t *Tail) Start(acc telegraf.Accumulator) error {
t.acc = acc.WithTracking(t.MaxUndeliveredLines)
t.ctx, t.cancel = context.WithCancel(context.Background())
t.wg.Add(1)
go func() {
defer t.wg.Done()
for {
select {
case <-t.ctx.Done():
return
case <-t.acc.Delivered():
<-t.sem
}
}
}()
var err error
t.multiline, err = t.MultilineConfig.newMultiline()
if err != nil {
return err
}
t.tailers = make(map[string]*tail.Tail)
err = t.tailNewFiles(t.FromBeginning)
// assumption that once Start is called, all parallel plugins have already been initialized
offsetsMutex.Lock()
offsets = make(map[string]int64)
offsetsMutex.Unlock()
return err
}
func (t *Tail) GetState() interface{} { func (t *Tail) GetState() interface{} {
return t.offsets return t.offsets
} }
@ -125,41 +150,33 @@ func (t *Tail) Gather(_ telegraf.Accumulator) error {
return t.tailNewFiles(true) return t.tailNewFiles(true)
} }
func (t *Tail) Start(acc telegraf.Accumulator) error { func (t *Tail) Stop() {
t.acc = acc.WithTracking(t.MaxUndeliveredLines) for _, tailer := range t.tailers {
if !t.Pipe && !t.FromBeginning {
t.ctx, t.cancel = context.WithCancel(context.Background()) // store offset for resume
offset, err := tailer.Tell()
t.wg.Add(1) if err == nil {
go func() { t.Log.Debugf("Recording offset %d for %q", offset, tailer.Filename)
defer t.wg.Done() t.offsets[tailer.Filename] = offset
for { } else {
select { t.Log.Errorf("Recording offset for %q: %s", tailer.Filename, err.Error())
case <-t.ctx.Done():
return
case <-t.acc.Delivered():
<-t.sem
} }
} }
}() err := tailer.Stop()
var err error
t.multiline, err = t.MultilineConfig.NewMultiline()
if err != nil { if err != nil {
return err t.Log.Errorf("Stopping tail on %q: %s", tailer.Filename, err.Error())
}
} }
t.tailers = make(map[string]*tail.Tail) t.cancel()
t.wg.Wait()
err = t.tailNewFiles(t.FromBeginning) // persist offsets
// assumption that once Start is called, all parallel plugins have already been initialized
offsetsMutex.Lock() offsetsMutex.Lock()
offsets = make(map[string]int64) for k, v := range t.offsets {
offsets[k] = v
}
offsetsMutex.Unlock() offsetsMutex.Unlock()
return err
} }
func (t *Tail) tailNewFiles(fromBeginning bool) error { func (t *Tail) tailNewFiles(fromBeginning bool) error {
@ -249,7 +266,6 @@ func (t *Tail) tailNewFiles(fromBeginning bool) error {
return nil return nil
} }
// ParseLine parses a line of text.
func parseLine(parser telegraf.Parser, line string) ([]telegraf.Metric, error) { func parseLine(parser telegraf.Parser, line string) ([]telegraf.Metric, error) {
m, err := parser.Parse([]byte(line)) m, err := parser.Parse([]byte(line))
if err != nil { if err != nil {
@ -261,8 +277,8 @@ func parseLine(parser telegraf.Parser, line string) ([]telegraf.Metric, error) {
return m, err return m, err
} }
// Receiver is launched as a goroutine to continuously watch a tailed logfile // receiver is launched as a goroutine to continuously watch a tailed logfile
// for changes, parse any incoming msgs, and add to the accumulator. // for changes, parse any incoming messages, and add to the accumulator.
func (t *Tail) receiver(parser telegraf.Parser, tailer *tail.Tail) { func (t *Tail) receiver(parser telegraf.Parser, tailer *tail.Tail) {
// holds the individual lines of multi-line log entries. // holds the individual lines of multi-line log entries.
var buffer bytes.Buffer var buffer bytes.Buffer
@ -272,7 +288,7 @@ func (t *Tail) receiver(parser telegraf.Parser, tailer *tail.Tail) {
// The multiline mode requires a timer in order to flush the multiline buffer // The multiline mode requires a timer in order to flush the multiline buffer
// if no new lines are incoming. // if no new lines are incoming.
if t.multiline.IsEnabled() { if t.multiline.isEnabled() {
timer = time.NewTimer(time.Duration(*t.MultilineConfig.Timeout)) timer = time.NewTimer(time.Duration(*t.MultilineConfig.Timeout))
timeout = timer.C timeout = timer.C
} }
@ -304,14 +320,14 @@ func (t *Tail) receiver(parser telegraf.Parser, tailer *tail.Tail) {
// Fix up files with Windows line endings. // Fix up files with Windows line endings.
text = strings.TrimRight(line.Text, "\r") text = strings.TrimRight(line.Text, "\r")
if t.multiline.IsEnabled() { if t.multiline.isEnabled() {
if text = t.multiline.ProcessLine(text, &buffer); text == "" { if text = t.multiline.processLine(text, &buffer); text == "" {
continue continue
} }
} }
} }
if line == nil || !channelOpen || !tailerOpen { if line == nil || !channelOpen || !tailerOpen {
if text += Flush(&buffer); text == "" { if text += flush(&buffer); text == "" {
if !channelOpen { if !channelOpen {
return return
} }
@ -377,41 +393,24 @@ func (t *Tail) receiver(parser telegraf.Parser, tailer *tail.Tail) {
} }
} }
func (t *Tail) Stop() { func newTail() *Tail {
for _, tailer := range t.tailers {
if !t.Pipe && !t.FromBeginning {
// store offset for resume
offset, err := tailer.Tell()
if err == nil {
t.Log.Debugf("Recording offset %d for %q", offset, tailer.Filename)
t.offsets[tailer.Filename] = offset
} else {
t.Log.Errorf("Recording offset for %q: %s", tailer.Filename, err.Error())
}
}
err := tailer.Stop()
if err != nil {
t.Log.Errorf("Stopping tail on %q: %s", tailer.Filename, err.Error())
}
}
t.cancel()
t.wg.Wait()
// persist offsets
offsetsMutex.Lock() offsetsMutex.Lock()
for k, v := range t.offsets { offsetsCopy := make(map[string]int64, len(offsets))
offsets[k] = v for k, v := range offsets {
offsetsCopy[k] = v
} }
offsetsMutex.Unlock() offsetsMutex.Unlock()
}
func (t *Tail) SetParserFunc(fn telegraf.ParserFunc) { return &Tail{
t.parserFunc = fn FromBeginning: false,
MaxUndeliveredLines: 1000,
offsets: offsetsCopy,
PathTag: "path",
}
} }
func init() { func init() {
inputs.Add("tail", func() telegraf.Input { inputs.Add("tail", func() telegraf.Input {
return NewTail() return newTail()
}) })
} }

View File

@ -3,3 +3,34 @@
//go:build solaris //go:build solaris
package tail package tail
import (
_ "embed"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
//go:embed sample.conf
var sampleConfig string
type Tail struct {
Log telegraf.Logger `toml:"-"`
}
func (*Tail) SampleConfig() string {
return sampleConfig
}
func (h *Tail) Init() error {
h.Log.Warn("Current platform is not supported")
return nil
}
func (*Tail) Gather(telegraf.Accumulator) error { return nil }
func init() {
inputs.Add("tail", func() telegraf.Input {
return &Tail{}
})
}

View File

@ -29,7 +29,7 @@ func newInfluxParser() (telegraf.Parser, error) {
return parser, nil return parser, nil
} }
func NewTestTail() *Tail { func newTestTail() *Tail {
offsetsMutex.Lock() offsetsMutex.Lock()
offsetsCopy := make(map[string]int64, len(offsets)) offsetsCopy := make(map[string]int64, len(offsets))
for k, v := range offsets { for k, v := range offsets {
@ -62,7 +62,7 @@ cpu usage_idle=100
logger := &testutil.CaptureLogger{} logger := &testutil.CaptureLogger{}
tt := NewTestTail() tt := newTestTail()
tt.Log = logger tt.Log = logger
tt.FromBeginning = true tt.FromBeginning = true
tt.Files = []string{tmpfile} tt.Files = []string{tmpfile}
@ -86,7 +86,7 @@ func TestColoredLine(t *testing.T) {
tmpfile := filepath.Join(t.TempDir(), "input.csv") tmpfile := filepath.Join(t.TempDir(), "input.csv")
require.NoError(t, os.WriteFile(tmpfile, []byte(content), 0600)) require.NoError(t, os.WriteFile(tmpfile, []byte(content), 0600))
tt := NewTestTail() tt := newTestTail()
tt.Log = testutil.Logger{} tt.Log = testutil.Logger{}
tt.FromBeginning = true tt.FromBeginning = true
tt.Filters = []string{"ansi_color"} tt.Filters = []string{"ansi_color"}
@ -116,7 +116,7 @@ func TestTailDosLineEndings(t *testing.T) {
tmpfile := filepath.Join(t.TempDir(), "input.csv") tmpfile := filepath.Join(t.TempDir(), "input.csv")
require.NoError(t, os.WriteFile(tmpfile, []byte(content), 0600)) require.NoError(t, os.WriteFile(tmpfile, []byte(content), 0600))
tt := NewTestTail() tt := newTestTail()
tt.Log = testutil.Logger{} tt.Log = testutil.Logger{}
tt.FromBeginning = true tt.FromBeginning = true
tt.Files = []string{tmpfile} tt.Files = []string{tmpfile}
@ -144,13 +144,13 @@ func TestGrokParseLogFilesWithMultiline(t *testing.T) {
d, err := time.ParseDuration("100s") d, err := time.ParseDuration("100s")
require.NoError(t, err) require.NoError(t, err)
duration := config.Duration(d) duration := config.Duration(d)
tt := NewTail() tt := newTail()
tt.Log = testutil.Logger{} tt.Log = testutil.Logger{}
tt.FromBeginning = true tt.FromBeginning = true
tt.Files = []string{filepath.Join("testdata", "test_multiline.log")} tt.Files = []string{filepath.Join("testdata", "test_multiline.log")}
tt.MultilineConfig = MultilineConfig{ tt.MultilineConfig = multilineConfig{
Pattern: `^[^\[]`, Pattern: `^[^\[]`,
MatchWhichLine: Previous, MatchWhichLine: previous,
InvertMatch: false, InvertMatch: false,
Timeout: &duration, Timeout: &duration,
} }
@ -207,14 +207,14 @@ func TestGrokParseLogFilesWithMultilineTimeout(t *testing.T) {
// set tight timeout for tests // set tight timeout for tests
d := 10 * time.Millisecond d := 10 * time.Millisecond
duration := config.Duration(d) duration := config.Duration(d)
tt := NewTail() tt := newTail()
tt.Log = testutil.Logger{} tt.Log = testutil.Logger{}
tt.FromBeginning = true tt.FromBeginning = true
tt.Files = []string{tmpfile.Name()} tt.Files = []string{tmpfile.Name()}
tt.MultilineConfig = MultilineConfig{ tt.MultilineConfig = multilineConfig{
Pattern: `^[^\[]`, Pattern: `^[^\[]`,
MatchWhichLine: Previous, MatchWhichLine: previous,
InvertMatch: false, InvertMatch: false,
Timeout: &duration, Timeout: &duration,
} }
@ -259,13 +259,13 @@ func TestGrokParseLogFilesWithMultilineTailerCloseFlushesMultilineBuffer(t *test
// we make sure the timeout won't kick in // we make sure the timeout won't kick in
duration := config.Duration(100 * time.Second) duration := config.Duration(100 * time.Second)
tt := NewTestTail() tt := newTestTail()
tt.Log = testutil.Logger{} tt.Log = testutil.Logger{}
tt.FromBeginning = true tt.FromBeginning = true
tt.Files = []string{filepath.Join("testdata", "test_multiline.log")} tt.Files = []string{filepath.Join("testdata", "test_multiline.log")}
tt.MultilineConfig = MultilineConfig{ tt.MultilineConfig = multilineConfig{
Pattern: `^[^\[]`, Pattern: `^[^\[]`,
MatchWhichLine: Previous, MatchWhichLine: previous,
InvertMatch: false, InvertMatch: false,
Timeout: &duration, Timeout: &duration,
} }
@ -312,7 +312,7 @@ cpu,42
tmpfile := filepath.Join(t.TempDir(), "input.csv") tmpfile := filepath.Join(t.TempDir(), "input.csv")
require.NoError(t, os.WriteFile(tmpfile, []byte(content), 0600)) require.NoError(t, os.WriteFile(tmpfile, []byte(content), 0600))
plugin := NewTestTail() plugin := newTestTail()
plugin.Log = testutil.Logger{} plugin.Log = testutil.Logger{}
plugin.FromBeginning = true plugin.FromBeginning = true
plugin.Files = []string{tmpfile} plugin.Files = []string{tmpfile}
@ -386,7 +386,7 @@ skip2,mem,100
time.Unix(0, 0)), time.Unix(0, 0)),
} }
plugin := NewTestTail() plugin := newTestTail()
plugin.Log = testutil.Logger{} plugin.Log = testutil.Logger{}
plugin.FromBeginning = true plugin.FromBeginning = true
plugin.Files = []string{tmpfile} plugin.Files = []string{tmpfile}
@ -444,7 +444,7 @@ func TestMultipleMetricsOnFirstLine(t *testing.T) {
time.Unix(0, 0)), time.Unix(0, 0)),
} }
plugin := NewTestTail() plugin := newTestTail()
plugin.Log = testutil.Logger{} plugin.Log = testutil.Logger{}
plugin.FromBeginning = true plugin.FromBeginning = true
plugin.Files = []string{tmpfile} plugin.Files = []string{tmpfile}
@ -613,7 +613,7 @@ func TestTailEOF(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
require.NoError(t, tmpfile.Sync()) require.NoError(t, tmpfile.Sync())
tt := NewTestTail() tt := newTestTail()
tt.Log = testutil.Logger{} tt.Log = testutil.Logger{}
tt.FromBeginning = true tt.FromBeginning = true
tt.Files = []string{tmpfile.Name()} tt.Files = []string{tmpfile.Name()}

View File

@ -15,45 +15,16 @@ import (
var sampleConfig string var sampleConfig string
type Teamspeak struct { type Teamspeak struct {
Server string Server string `toml:"server"`
Username string Username string `toml:"username"`
Password string Password string `toml:"password"`
Nickname string Nickname string `toml:"nickname"`
VirtualServers []int `toml:"virtual_servers"` VirtualServers []int `toml:"virtual_servers"`
client *ts3.Client client *ts3.Client
connected bool connected bool
} }
func (ts *Teamspeak) connect() error {
var err error
ts.client, err = ts3.NewClient(ts.Server)
if err != nil {
return err
}
err = ts.client.Login(ts.Username, ts.Password)
if err != nil {
return err
}
if len(ts.Nickname) > 0 {
for _, vserver := range ts.VirtualServers {
if err := ts.client.Use(vserver); err != nil {
return err
}
if err := ts.client.SetNick(ts.Nickname); err != nil {
return err
}
}
}
ts.connected = true
return nil
}
func (*Teamspeak) SampleConfig() string { func (*Teamspeak) SampleConfig() string {
return sampleConfig return sampleConfig
} }
@ -109,6 +80,35 @@ func (ts *Teamspeak) Gather(acc telegraf.Accumulator) error {
return nil return nil
} }
func (ts *Teamspeak) connect() error {
var err error
ts.client, err = ts3.NewClient(ts.Server)
if err != nil {
return err
}
err = ts.client.Login(ts.Username, ts.Password)
if err != nil {
return err
}
if len(ts.Nickname) > 0 {
for _, vserver := range ts.VirtualServers {
if err := ts.client.Use(vserver); err != nil {
return err
}
if err := ts.client.SetNick(ts.Nickname); err != nil {
return err
}
}
}
ts.connected = true
return nil
}
func init() { func init() {
inputs.Add("teamspeak", func() telegraf.Input { inputs.Add("teamspeak", func() telegraf.Input {
return &Teamspeak{ return &Teamspeak{

View File

@ -16,12 +16,12 @@ import (
const scalingFactor = float64(1000.0) const scalingFactor = float64(1000.0)
type TemperatureStat struct { type temperatureStat struct {
Name string name string
Label string label string
Device string device string
Temperature float64 temperature float64
Additional map[string]interface{} additional map[string]interface{}
} }
func (t *Temperature) Init() error { func (t *Temperature) Init() error {
@ -64,48 +64,48 @@ func (t *Temperature) Gather(acc telegraf.Accumulator) error {
return nil return nil
} }
func (t *Temperature) createMetricsV1(acc telegraf.Accumulator, temperatures []TemperatureStat) { func (t *Temperature) createMetricsV1(acc telegraf.Accumulator, temperatures []temperatureStat) {
for _, temp := range temperatures { for _, temp := range temperatures {
sensor := temp.Name sensor := temp.name
if temp.Label != "" { if temp.label != "" {
sensor += "_" + strings.ReplaceAll(temp.Label, " ", "") sensor += "_" + strings.ReplaceAll(temp.label, " ", "")
} }
// Mandatory measurement value // Mandatory measurement value
tags := map[string]string{"sensor": sensor + "_input"} tags := map[string]string{"sensor": sensor + "_input"}
if t.DeviceTag { if t.DeviceTag {
tags["device"] = temp.Device tags["device"] = temp.device
} }
acc.AddFields("temp", map[string]interface{}{"temp": temp.Temperature}, tags) acc.AddFields("temp", map[string]interface{}{"temp": temp.temperature}, tags)
// Optional values values // Optional values values
for measurement, value := range temp.Additional { for measurement, value := range temp.additional {
tags := map[string]string{"sensor": sensor + "_" + measurement} tags := map[string]string{"sensor": sensor + "_" + measurement}
if t.DeviceTag { if t.DeviceTag {
tags["device"] = temp.Device tags["device"] = temp.device
} }
acc.AddFields("temp", map[string]interface{}{"temp": value}, tags) acc.AddFields("temp", map[string]interface{}{"temp": value}, tags)
} }
} }
} }
func (t *Temperature) createMetricsV2(acc telegraf.Accumulator, temperatures []TemperatureStat) { func (t *Temperature) createMetricsV2(acc telegraf.Accumulator, temperatures []temperatureStat) {
for _, temp := range temperatures { for _, temp := range temperatures {
sensor := temp.Name sensor := temp.name
if temp.Label != "" { if temp.label != "" {
sensor += "_" + strings.ReplaceAll(temp.Label, " ", "_") sensor += "_" + strings.ReplaceAll(temp.label, " ", "_")
} }
// Mandatory measurement value // Mandatory measurement value
tags := map[string]string{"sensor": sensor} tags := map[string]string{"sensor": sensor}
if t.DeviceTag { if t.DeviceTag {
tags["device"] = temp.Device tags["device"] = temp.device
} }
acc.AddFields("temp", map[string]interface{}{"temp": temp.Temperature}, tags) acc.AddFields("temp", map[string]interface{}{"temp": temp.temperature}, tags)
} }
} }
func (t *Temperature) gatherHwmon(syspath string) ([]TemperatureStat, error) { func (t *Temperature) gatherHwmon(syspath string) ([]temperatureStat, error) {
// Get all hwmon devices // Get all hwmon devices
sensors, err := filepath.Glob(filepath.Join(syspath, "class", "hwmon", "hwmon*", "temp*_input")) sensors, err := filepath.Glob(filepath.Join(syspath, "class", "hwmon", "hwmon*", "temp*_input"))
if err != nil { if err != nil {
@ -127,7 +127,7 @@ func (t *Temperature) gatherHwmon(syspath string) ([]TemperatureStat, error) {
} }
// Collect the sensor information // Collect the sensor information
stats := make([]TemperatureStat, 0, len(sensors)) stats := make([]temperatureStat, 0, len(sensors))
for _, s := range sensors { for _, s := range sensors {
// Get the sensor directory and the temperature prefix from the path // Get the sensor directory and the temperature prefix from the path
path := filepath.Dir(s) path := filepath.Dir(s)
@ -153,11 +153,11 @@ func (t *Temperature) gatherHwmon(syspath string) ([]TemperatureStat, error) {
} }
// Do the actual sensor readings // Do the actual sensor readings
temp := TemperatureStat{ temp := temperatureStat{
Name: name, name: name,
Label: strings.ToLower(label), label: strings.ToLower(label),
Device: deviceName, device: deviceName,
Additional: make(map[string]interface{}), additional: make(map[string]interface{}),
} }
// Temperature (mandatory) // Temperature (mandatory)
@ -168,7 +168,7 @@ func (t *Temperature) gatherHwmon(syspath string) ([]TemperatureStat, error) {
continue continue
} }
if v, err := strconv.ParseFloat(strings.TrimSpace(string(buf)), 64); err == nil { if v, err := strconv.ParseFloat(strings.TrimSpace(string(buf)), 64); err == nil {
temp.Temperature = v / scalingFactor temp.temperature = v / scalingFactor
} }
// Read all possible values of the sensor // Read all possible values of the sensor
@ -198,7 +198,7 @@ func (t *Temperature) gatherHwmon(syspath string) ([]TemperatureStat, error) {
if err != nil { if err != nil {
continue continue
} }
temp.Additional[measurement] = v / scalingFactor temp.additional[measurement] = v / scalingFactor
} }
stats = append(stats, temp) stats = append(stats, temp)
@ -207,7 +207,7 @@ func (t *Temperature) gatherHwmon(syspath string) ([]TemperatureStat, error) {
return stats, nil return stats, nil
} }
func (t *Temperature) gatherThermalZone(syspath string) ([]TemperatureStat, error) { func (t *Temperature) gatherThermalZone(syspath string) ([]temperatureStat, error) {
// For file layout see https://www.kernel.org/doc/Documentation/thermal/sysfs-api.txt // For file layout see https://www.kernel.org/doc/Documentation/thermal/sysfs-api.txt
zones, err := filepath.Glob(filepath.Join(syspath, "class", "thermal", "thermal_zone*")) zones, err := filepath.Glob(filepath.Join(syspath, "class", "thermal", "thermal_zone*"))
if err != nil { if err != nil {
@ -220,7 +220,7 @@ func (t *Temperature) gatherThermalZone(syspath string) ([]TemperatureStat, erro
} }
// Collect the sensor information // Collect the sensor information
stats := make([]TemperatureStat, 0, len(zones)) stats := make([]temperatureStat, 0, len(zones))
for _, path := range zones { for _, path := range zones {
// Type of the zone corresponding to the sensor name in our nomenclature // Type of the zone corresponding to the sensor name in our nomenclature
buf, err := os.ReadFile(filepath.Join(path, "type")) buf, err := os.ReadFile(filepath.Join(path, "type"))
@ -241,7 +241,7 @@ func (t *Temperature) gatherThermalZone(syspath string) ([]TemperatureStat, erro
continue continue
} }
temp := TemperatureStat{Name: name, Temperature: v / scalingFactor} temp := temperatureStat{name: name, temperature: v / scalingFactor}
stats = append(stats, temp) stats = append(stats, temp)
} }

View File

@ -25,13 +25,46 @@ import (
var sampleConfig string var sampleConfig string
type Tengine struct { type Tengine struct {
Urls []string Urls []string `toml:"urls"`
ResponseTimeout config.Duration ResponseTimeout config.Duration `toml:"response_timeout"`
tls.ClientConfig tls.ClientConfig
client *http.Client client *http.Client
} }
type tengineStatus struct {
host string
bytesIn uint64
bytesOut uint64
connTotal uint64
reqTotal uint64
http2xx uint64
http3xx uint64
http4xx uint64
http5xx uint64
httpOtherStatus uint64
rt uint64
upsReq uint64
upsRt uint64
upsTries uint64
http200 uint64
http206 uint64
http302 uint64
http304 uint64
http403 uint64
http404 uint64
http416 uint64
http499 uint64
http500 uint64
http502 uint64
http503 uint64
http504 uint64
http508 uint64
httpOtherDetailStatus uint64
httpUps4xx uint64
httpUps5xx uint64
}
func (*Tengine) SampleConfig() string { func (*Tengine) SampleConfig() string {
return sampleConfig return sampleConfig
} }
@ -87,41 +120,8 @@ func (n *Tengine) createHTTPClient() (*http.Client, error) {
return client, nil return client, nil
} }
type TengineStatus struct {
host string
bytesIn uint64
bytesOut uint64
connTotal uint64
reqTotal uint64
http2xx uint64
http3xx uint64
http4xx uint64
http5xx uint64
httpOtherStatus uint64
rt uint64
upsReq uint64
upsRt uint64
upsTries uint64
http200 uint64
http206 uint64
http302 uint64
http304 uint64
http403 uint64
http404 uint64
http416 uint64
http499 uint64
http500 uint64
http502 uint64
http503 uint64
http504 uint64
http508 uint64
httpOtherDetailStatus uint64
httpUps4xx uint64
httpUps5xx uint64
}
func (n *Tengine) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { func (n *Tengine) gatherURL(addr *url.URL, acc telegraf.Accumulator) error {
var tengineStatus TengineStatus var tStatus tengineStatus
resp, err := n.client.Get(addr.String()) resp, err := n.client.Get(addr.String())
if err != nil { if err != nil {
return fmt.Errorf("error making HTTP request to %q: %w", addr.String(), err) return fmt.Errorf("error making HTTP request to %q: %w", addr.String(), err)
@ -142,157 +142,157 @@ func (n *Tengine) gatherURL(addr *url.URL, acc telegraf.Accumulator) error {
if len(lineSplit) != 30 { if len(lineSplit) != 30 {
continue continue
} }
tengineStatus.host = lineSplit[0] tStatus.host = lineSplit[0]
if err != nil { if err != nil {
return err return err
} }
tengineStatus.bytesIn, err = strconv.ParseUint(lineSplit[1], 10, 64) tStatus.bytesIn, err = strconv.ParseUint(lineSplit[1], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.bytesOut, err = strconv.ParseUint(lineSplit[2], 10, 64) tStatus.bytesOut, err = strconv.ParseUint(lineSplit[2], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.connTotal, err = strconv.ParseUint(lineSplit[3], 10, 64) tStatus.connTotal, err = strconv.ParseUint(lineSplit[3], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.reqTotal, err = strconv.ParseUint(lineSplit[4], 10, 64) tStatus.reqTotal, err = strconv.ParseUint(lineSplit[4], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.http2xx, err = strconv.ParseUint(lineSplit[5], 10, 64) tStatus.http2xx, err = strconv.ParseUint(lineSplit[5], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.http3xx, err = strconv.ParseUint(lineSplit[6], 10, 64) tStatus.http3xx, err = strconv.ParseUint(lineSplit[6], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.http4xx, err = strconv.ParseUint(lineSplit[7], 10, 64) tStatus.http4xx, err = strconv.ParseUint(lineSplit[7], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.http5xx, err = strconv.ParseUint(lineSplit[8], 10, 64) tStatus.http5xx, err = strconv.ParseUint(lineSplit[8], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.httpOtherStatus, err = strconv.ParseUint(lineSplit[9], 10, 64) tStatus.httpOtherStatus, err = strconv.ParseUint(lineSplit[9], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.rt, err = strconv.ParseUint(lineSplit[10], 10, 64) tStatus.rt, err = strconv.ParseUint(lineSplit[10], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.upsReq, err = strconv.ParseUint(lineSplit[11], 10, 64) tStatus.upsReq, err = strconv.ParseUint(lineSplit[11], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.upsRt, err = strconv.ParseUint(lineSplit[12], 10, 64) tStatus.upsRt, err = strconv.ParseUint(lineSplit[12], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.upsTries, err = strconv.ParseUint(lineSplit[13], 10, 64) tStatus.upsTries, err = strconv.ParseUint(lineSplit[13], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.http200, err = strconv.ParseUint(lineSplit[14], 10, 64) tStatus.http200, err = strconv.ParseUint(lineSplit[14], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.http206, err = strconv.ParseUint(lineSplit[15], 10, 64) tStatus.http206, err = strconv.ParseUint(lineSplit[15], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.http302, err = strconv.ParseUint(lineSplit[16], 10, 64) tStatus.http302, err = strconv.ParseUint(lineSplit[16], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.http304, err = strconv.ParseUint(lineSplit[17], 10, 64) tStatus.http304, err = strconv.ParseUint(lineSplit[17], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.http403, err = strconv.ParseUint(lineSplit[18], 10, 64) tStatus.http403, err = strconv.ParseUint(lineSplit[18], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.http404, err = strconv.ParseUint(lineSplit[19], 10, 64) tStatus.http404, err = strconv.ParseUint(lineSplit[19], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.http416, err = strconv.ParseUint(lineSplit[20], 10, 64) tStatus.http416, err = strconv.ParseUint(lineSplit[20], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.http499, err = strconv.ParseUint(lineSplit[21], 10, 64) tStatus.http499, err = strconv.ParseUint(lineSplit[21], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.http500, err = strconv.ParseUint(lineSplit[22], 10, 64) tStatus.http500, err = strconv.ParseUint(lineSplit[22], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.http502, err = strconv.ParseUint(lineSplit[23], 10, 64) tStatus.http502, err = strconv.ParseUint(lineSplit[23], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.http503, err = strconv.ParseUint(lineSplit[24], 10, 64) tStatus.http503, err = strconv.ParseUint(lineSplit[24], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.http504, err = strconv.ParseUint(lineSplit[25], 10, 64) tStatus.http504, err = strconv.ParseUint(lineSplit[25], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.http508, err = strconv.ParseUint(lineSplit[26], 10, 64) tStatus.http508, err = strconv.ParseUint(lineSplit[26], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.httpOtherDetailStatus, err = strconv.ParseUint(lineSplit[27], 10, 64) tStatus.httpOtherDetailStatus, err = strconv.ParseUint(lineSplit[27], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.httpUps4xx, err = strconv.ParseUint(lineSplit[28], 10, 64) tStatus.httpUps4xx, err = strconv.ParseUint(lineSplit[28], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tengineStatus.httpUps5xx, err = strconv.ParseUint(lineSplit[29], 10, 64) tStatus.httpUps5xx, err = strconv.ParseUint(lineSplit[29], 10, 64)
if err != nil { if err != nil {
return err return err
} }
tags := getTags(addr, tengineStatus.host) tags := getTags(addr, tStatus.host)
fields := map[string]interface{}{ fields := map[string]interface{}{
"bytes_in": tengineStatus.bytesIn, "bytes_in": tStatus.bytesIn,
"bytes_out": tengineStatus.bytesOut, "bytes_out": tStatus.bytesOut,
"conn_total": tengineStatus.connTotal, "conn_total": tStatus.connTotal,
"req_total": tengineStatus.reqTotal, "req_total": tStatus.reqTotal,
"http_2xx": tengineStatus.http2xx, "http_2xx": tStatus.http2xx,
"http_3xx": tengineStatus.http3xx, "http_3xx": tStatus.http3xx,
"http_4xx": tengineStatus.http4xx, "http_4xx": tStatus.http4xx,
"http_5xx": tengineStatus.http5xx, "http_5xx": tStatus.http5xx,
"http_other_status": tengineStatus.httpOtherStatus, "http_other_status": tStatus.httpOtherStatus,
"rt": tengineStatus.rt, "rt": tStatus.rt,
"ups_req": tengineStatus.upsReq, "ups_req": tStatus.upsReq,
"ups_rt": tengineStatus.upsRt, "ups_rt": tStatus.upsRt,
"ups_tries": tengineStatus.upsTries, "ups_tries": tStatus.upsTries,
"http_200": tengineStatus.http200, "http_200": tStatus.http200,
"http_206": tengineStatus.http206, "http_206": tStatus.http206,
"http_302": tengineStatus.http302, "http_302": tStatus.http302,
"http_304": tengineStatus.http304, "http_304": tStatus.http304,
"http_403": tengineStatus.http403, "http_403": tStatus.http403,
"http_404": tengineStatus.http404, "http_404": tStatus.http404,
"http_416": tengineStatus.http416, "http_416": tStatus.http416,
"http_499": tengineStatus.http499, "http_499": tStatus.http499,
"http_500": tengineStatus.http500, "http_500": tStatus.http500,
"http_502": tengineStatus.http502, "http_502": tStatus.http502,
"http_503": tengineStatus.http503, "http_503": tStatus.http503,
"http_504": tengineStatus.http504, "http_504": tStatus.http504,
"http_508": tengineStatus.http508, "http_508": tStatus.http508,
"http_other_detail_status": tengineStatus.httpOtherDetailStatus, "http_other_detail_status": tStatus.httpOtherDetailStatus,
"http_ups_4xx": tengineStatus.httpUps4xx, "http_ups_4xx": tStatus.httpUps4xx,
"http_ups_5xx": tengineStatus.httpUps5xx, "http_ups_5xx": tStatus.httpUps5xx,
} }
acc.AddFields("tengine", fields, tags) acc.AddFields("tengine", fields, tags)
} }

View File

@ -19,6 +19,17 @@ import (
//go:embed sample.conf //go:embed sample.conf
var sampleConfig string var sampleConfig string
type Tomcat struct {
URL string `toml:"url"`
Username string `toml:"username"`
Password string `toml:"password"`
Timeout config.Duration `toml:"timeout"`
tls.ClientConfig
client *http.Client
request *http.Request
}
type tomcatStatus struct { type tomcatStatus struct {
TomcatJvm tomcatJvm `xml:"jvm"` TomcatJvm tomcatJvm `xml:"jvm"`
TomcatConnectors []tomcatConnector `xml:"connector"` TomcatConnectors []tomcatConnector `xml:"connector"`
@ -55,6 +66,7 @@ type threadInfo struct {
CurrentThreadCount int64 `xml:"currentThreadCount,attr"` CurrentThreadCount int64 `xml:"currentThreadCount,attr"`
CurrentThreadsBusy int64 `xml:"currentThreadsBusy,attr"` CurrentThreadsBusy int64 `xml:"currentThreadsBusy,attr"`
} }
type requestInfo struct { type requestInfo struct {
MaxTime int `xml:"maxTime,attr"` MaxTime int `xml:"maxTime,attr"`
ProcessingTime int `xml:"processingTime,attr"` ProcessingTime int `xml:"processingTime,attr"`
@ -64,17 +76,6 @@ type requestInfo struct {
BytesSent int64 `xml:"bytesSent,attr"` BytesSent int64 `xml:"bytesSent,attr"`
} }
type Tomcat struct {
URL string
Username string
Password string
Timeout config.Duration
tls.ClientConfig
client *http.Client
request *http.Request
}
func (*Tomcat) SampleConfig() string { func (*Tomcat) SampleConfig() string {
return sampleConfig return sampleConfig
} }

View File

@ -13,8 +13,8 @@ import (
var sampleConfig string var sampleConfig string
type Trig struct { type Trig struct {
Amplitude float64 `toml:"amplitude"`
x float64 x float64
Amplitude float64
} }
func (*Trig) SampleConfig() string { func (*Trig) SampleConfig() string {

View File

@ -17,15 +17,14 @@ import (
var sampleConfig string var sampleConfig string
type Twemproxy struct { type Twemproxy struct {
Addr string Addr string `toml:"addr"`
Pools []string Pools []string `toml:"pools"`
} }
func (*Twemproxy) SampleConfig() string { func (*Twemproxy) SampleConfig() string {
return sampleConfig return sampleConfig
} }
// Gather data from all Twemproxy instances
func (t *Twemproxy) Gather(acc telegraf.Accumulator) error { func (t *Twemproxy) Gather(acc telegraf.Accumulator) error {
conn, err := net.DialTimeout("tcp", t.Addr, 1*time.Second) conn, err := net.DialTimeout("tcp", t.Addr, 1*time.Second)
if err != nil { if err != nil {
@ -49,11 +48,7 @@ func (t *Twemproxy) Gather(acc telegraf.Accumulator) error {
} }
// Process Twemproxy server stats // Process Twemproxy server stats
func (t *Twemproxy) processStat( func (t *Twemproxy) processStat(acc telegraf.Accumulator, tags map[string]string, data map[string]interface{}) {
acc telegraf.Accumulator,
tags map[string]string,
data map[string]interface{},
) {
if source, ok := data["source"]; ok { if source, ok := data["source"]; ok {
if val, ok := source.(string); ok { if val, ok := source.(string); ok {
tags["source"] = val tags["source"] = val

View File

@ -23,9 +23,11 @@ import (
//go:embed sample.conf //go:embed sample.conf
var sampleConfig string var sampleConfig string
type runner func(unbound Unbound) (*bytes.Buffer, error) var (
defaultBinary = "/usr/sbin/unbound-control"
defaultTimeout = config.Duration(time.Second)
)
// Unbound is used to store configuration values
type Unbound struct { type Unbound struct {
Binary string `toml:"binary"` Binary string `toml:"binary"`
Timeout config.Duration `toml:"timeout"` Timeout config.Duration `toml:"timeout"`
@ -37,61 +39,8 @@ type Unbound struct {
run runner run runner
} }
var defaultBinary = "/usr/sbin/unbound-control" type runner func(unbound Unbound) (*bytes.Buffer, error)
var defaultTimeout = config.Duration(time.Second)
// Shell out to unbound_stat and return the output
func unboundRunner(unbound Unbound) (*bytes.Buffer, error) {
cmdArgs := []string{"stats_noreset"}
if unbound.Server != "" {
host, port, err := net.SplitHostPort(unbound.Server)
if err != nil { // No port was specified
host = unbound.Server
port = ""
}
// Unbound control requires an IP address, and we want to be nice to the user
resolver := net.Resolver{}
ctx, lookUpCancel := context.WithTimeout(context.Background(), time.Duration(unbound.Timeout))
defer lookUpCancel()
serverIps, err := resolver.LookupIPAddr(ctx, host)
if err != nil {
return nil, fmt.Errorf("error looking up ip for server %q: %w", unbound.Server, err)
}
if len(serverIps) == 0 {
return nil, fmt.Errorf("error no ip for server %q: %w", unbound.Server, err)
}
server := serverIps[0].IP.String()
if port != "" {
server = server + "@" + port
}
cmdArgs = append([]string{"-s", server}, cmdArgs...)
}
if unbound.ConfigFile != "" {
cmdArgs = append([]string{"-c", unbound.ConfigFile}, cmdArgs...)
}
cmd := exec.Command(unbound.Binary, cmdArgs...)
if unbound.UseSudo {
cmdArgs = append([]string{unbound.Binary}, cmdArgs...)
cmd = exec.Command("sudo", cmdArgs...)
}
var out bytes.Buffer
cmd.Stdout = &out
err := internal.RunTimeout(cmd, time.Duration(unbound.Timeout))
if err != nil {
return &out, fmt.Errorf("error running unbound-control %q %q: %w", unbound.Binary, cmdArgs, err)
}
return &out, nil
}
// Gather collects stats from unbound-control and adds them to the Accumulator
func (*Unbound) SampleConfig() string { func (*Unbound) SampleConfig() string {
return sampleConfig return sampleConfig
} }
@ -175,6 +124,57 @@ func (s *Unbound) Gather(acc telegraf.Accumulator) error {
return nil return nil
} }
// Shell out to unbound_stat and return the output
func unboundRunner(unbound Unbound) (*bytes.Buffer, error) {
cmdArgs := []string{"stats_noreset"}
if unbound.Server != "" {
host, port, err := net.SplitHostPort(unbound.Server)
if err != nil { // No port was specified
host = unbound.Server
port = ""
}
// Unbound control requires an IP address, and we want to be nice to the user
resolver := net.Resolver{}
ctx, lookUpCancel := context.WithTimeout(context.Background(), time.Duration(unbound.Timeout))
defer lookUpCancel()
serverIps, err := resolver.LookupIPAddr(ctx, host)
if err != nil {
return nil, fmt.Errorf("error looking up ip for server %q: %w", unbound.Server, err)
}
if len(serverIps) == 0 {
return nil, fmt.Errorf("error no ip for server %q: %w", unbound.Server, err)
}
server := serverIps[0].IP.String()
if port != "" {
server = server + "@" + port
}
cmdArgs = append([]string{"-s", server}, cmdArgs...)
}
if unbound.ConfigFile != "" {
cmdArgs = append([]string{"-c", unbound.ConfigFile}, cmdArgs...)
}
cmd := exec.Command(unbound.Binary, cmdArgs...)
if unbound.UseSudo {
cmdArgs = append([]string{unbound.Binary}, cmdArgs...)
cmd = exec.Command("sudo", cmdArgs...)
}
var out bytes.Buffer
cmd.Stdout = &out
err := internal.RunTimeout(cmd, time.Duration(unbound.Timeout))
if err != nil {
return &out, fmt.Errorf("error running unbound-control %q %q: %w", unbound.Binary, cmdArgs, err)
}
return &out, nil
}
func init() { func init() {
inputs.Add("unbound", func() telegraf.Input { inputs.Add("unbound", func() telegraf.Input {
return &Unbound{ return &Unbound{

View File

@ -9,7 +9,7 @@ import (
"github.com/influxdata/telegraf/testutil" "github.com/influxdata/telegraf/testutil"
) )
func UnboundControl(output string) func(Unbound) (*bytes.Buffer, error) { func unboundControl(output string) func(Unbound) (*bytes.Buffer, error) {
return func(Unbound) (*bytes.Buffer, error) { return func(Unbound) (*bytes.Buffer, error) {
return bytes.NewBufferString(output), nil return bytes.NewBufferString(output), nil
} }
@ -18,7 +18,7 @@ func UnboundControl(output string) func(Unbound) (*bytes.Buffer, error) {
func TestParseFullOutput(t *testing.T) { func TestParseFullOutput(t *testing.T) {
acc := &testutil.Accumulator{} acc := &testutil.Accumulator{}
v := &Unbound{ v := &Unbound{
run: UnboundControl(fullOutput), run: unboundControl(fullOutput),
} }
err := v.Gather(acc) err := v.Gather(acc)
@ -35,7 +35,7 @@ func TestParseFullOutput(t *testing.T) {
func TestParseFullOutputThreadAsTag(t *testing.T) { func TestParseFullOutputThreadAsTag(t *testing.T) {
acc := &testutil.Accumulator{} acc := &testutil.Accumulator{}
v := &Unbound{ v := &Unbound{
run: UnboundControl(fullOutput), run: unboundControl(fullOutput),
ThreadAsTag: true, ThreadAsTag: true,
} }
err := v.Gather(acc) err := v.Gather(acc)
@ -133,6 +133,7 @@ var parsedFullOutputThreadAsTagMeasurementUnboundThreads = map[string]interface{
"recursion_time_avg": float64(0.015020), "recursion_time_avg": float64(0.015020),
"recursion_time_median": float64(0.00292343), "recursion_time_median": float64(0.00292343),
} }
var parsedFullOutputThreadAsTagMeasurementUnbound = map[string]interface{}{ var parsedFullOutputThreadAsTagMeasurementUnbound = map[string]interface{}{
"total_num_queries": float64(11907596), "total_num_queries": float64(11907596),
"total_num_cachehits": float64(11489288), "total_num_cachehits": float64(11489288),

View File

@ -18,12 +18,9 @@ import (
//go:embed sample.conf //go:embed sample.conf
var sampleConfig string var sampleConfig string
// see: https://networkupstools.org/docs/developer-guide.chunked/index.html var (
const defaultAddress = "127.0.0.1" // Define the set of variables _always_ included in a metric
const defaultPort = 3493 mandatoryVariableSet = map[string]bool{
// Define the set of variables _always_ included in a metric
var mandatoryVariableSet = map[string]bool{
"battery.date": true, "battery.date": true,
"battery.mfr.date": true, "battery.mfr.date": true,
"battery.runtime": true, "battery.runtime": true,
@ -31,10 +28,9 @@ var mandatoryVariableSet = map[string]bool{
"device.serial": true, "device.serial": true,
"ups.firmware": true, "ups.firmware": true,
"ups.status": true, "ups.status": true,
} }
// Define the default field set to add if existing
// Define the default field set to add if existing defaultFieldSet = map[string]string{
var defaultFieldSet = map[string]string{
"battery.charge": "battery_charge_percent", "battery.charge": "battery_charge_percent",
"battery.runtime.low": "battery_runtime_low", "battery.runtime.low": "battery_runtime_low",
"battery.voltage": "battery_voltage", "battery.voltage": "battery_voltage",
@ -51,7 +47,13 @@ var defaultFieldSet = map[string]string{
"ups.realpower": "real_power", "ups.realpower": "real_power",
"ups.delay.shutdown": "ups_delay_shutdown", "ups.delay.shutdown": "ups_delay_shutdown",
"ups.delay.start": "ups_delay_start", "ups.delay.start": "ups_delay_start",
} }
)
const (
defaultAddress = "127.0.0.1"
defaultPort = 3493
)
type Upsd struct { type Upsd struct {
Server string `toml:"server"` Server string `toml:"server"`

View File

@ -106,13 +106,13 @@ func TestCases(t *testing.T) {
} }
type interaction struct { type interaction struct {
Expected string expected string
Response string response string
} }
type variable struct { type variable struct {
Name string name string
Value string value string
} }
type mockServer struct { type mockServer struct {
@ -122,32 +122,32 @@ type mockServer struct {
func (s *mockServer) init() { func (s *mockServer) init() {
s.protocol = []interaction{ s.protocol = []interaction{
{ {
Expected: "VER\n", expected: "VER\n",
Response: "1\n", response: "1\n",
}, },
{ {
Expected: "NETVER\n", expected: "NETVER\n",
Response: "1\n", response: "1\n",
}, },
{ {
Expected: "LIST UPS\n", expected: "LIST UPS\n",
Response: "BEGIN LIST UPS\nUPS fake \"fake UPS\"\nEND LIST UPS\n", response: "BEGIN LIST UPS\nUPS fake \"fake UPS\"\nEND LIST UPS\n",
}, },
{ {
Expected: "LIST CLIENT fake\n", expected: "LIST CLIENT fake\n",
Response: "BEGIN LIST CLIENT fake\nCLIENT fake 127.0.0.1\nEND LIST CLIENT fake\n", response: "BEGIN LIST CLIENT fake\nCLIENT fake 127.0.0.1\nEND LIST CLIENT fake\n",
}, },
{ {
Expected: "LIST CMD fake\n", expected: "LIST CMD fake\n",
Response: "BEGIN LIST CMD fake\nEND LIST CMD fake\n", response: "BEGIN LIST CMD fake\nEND LIST CMD fake\n",
}, },
{ {
Expected: "GET UPSDESC fake\n", expected: "GET UPSDESC fake\n",
Response: "UPSDESC fake \"stub-ups-description\"\n", response: "UPSDESC fake \"stub-ups-description\"\n",
}, },
{ {
Expected: "GET NUMLOGINS fake\n", expected: "GET NUMLOGINS fake\n",
Response: "NUMLOGINS fake 1\n", response: "NUMLOGINS fake 1\n",
}, },
} }
} }
@ -156,29 +156,29 @@ func (s *mockServer) addVariables(variables []variable, types map[string]string)
// Add a VAR entries for the variables // Add a VAR entries for the variables
values := make([]string, 0, len(variables)) values := make([]string, 0, len(variables))
for _, v := range variables { for _, v := range variables {
values = append(values, fmt.Sprintf("VAR fake %s %q", v.Name, v.Value)) values = append(values, fmt.Sprintf("VAR fake %s %q", v.name, v.value))
} }
s.protocol = append(s.protocol, interaction{ s.protocol = append(s.protocol, interaction{
Expected: "LIST VAR fake\n", expected: "LIST VAR fake\n",
Response: "BEGIN LIST VAR fake\n" + strings.Join(values, "\n") + "\nEND LIST VAR fake\n", response: "BEGIN LIST VAR fake\n" + strings.Join(values, "\n") + "\nEND LIST VAR fake\n",
}) })
// Add a description and type interaction for the variable // Add a description and type interaction for the variable
for _, v := range variables { for _, v := range variables {
variableType, found := types[v.Name] variableType, found := types[v.name]
if !found { if !found {
return fmt.Errorf("type for variable %q not found", v.Name) return fmt.Errorf("type for variable %q not found", v.name)
} }
s.protocol = append(s.protocol, s.protocol = append(s.protocol,
interaction{ interaction{
Expected: "GET DESC fake " + v.Name + "\n", expected: "GET DESC fake " + v.name + "\n",
Response: "DESC fake" + v.Name + " \"No description here\"\n", response: "DESC fake" + v.name + " \"No description here\"\n",
}, },
interaction{ interaction{
Expected: "GET TYPE fake " + v.Name + "\n", expected: "GET TYPE fake " + v.name + "\n",
Response: "TYPE fake " + v.Name + " " + variableType + "\n", response: "TYPE fake " + v.name + " " + variableType + "\n",
}, },
) )
} }
@ -217,12 +217,12 @@ func (s *mockServer) listen(ctx context.Context) (*net.TCPAddr, error) {
} }
request := in[:n] request := in[:n]
if !bytes.Equal([]byte(interaction.Expected), request) { if !bytes.Equal([]byte(interaction.expected), request) {
fmt.Printf("Unexpected request %q, expected %q\n", string(request), interaction.Expected) fmt.Printf("Unexpected request %q, expected %q\n", string(request), interaction.expected)
return return
} }
if _, err := conn.Write([]byte(interaction.Response)); err != nil { if _, err := conn.Write([]byte(interaction.response)); err != nil {
fmt.Printf("Cannot write answer for request %q: %v\n", string(request), err) fmt.Printf("Cannot write answer for request %q: %v\n", string(request), err)
return return
} }

View File

@ -25,7 +25,6 @@ import (
//go:embed sample.conf //go:embed sample.conf
var sampleConfig string var sampleConfig string
// Uwsgi server struct
type Uwsgi struct { type Uwsgi struct {
Servers []string `toml:"servers"` Servers []string `toml:"servers"`
Timeout config.Duration `toml:"timeout"` Timeout config.Duration `toml:"timeout"`
@ -33,11 +32,82 @@ type Uwsgi struct {
client *http.Client client *http.Client
} }
// statsServer defines the stats server structure.
type statsServer struct {
// Tags
source string
PID int `json:"pid"`
UID int `json:"uid"`
GID int `json:"gid"`
Version string `json:"version"`
// Fields
ListenQueue int `json:"listen_queue"`
ListenQueueErrors int `json:"listen_queue_errors"`
SignalQueue int `json:"signal_queue"`
Load int `json:"load"`
Workers []*worker `json:"workers"`
}
// worker defines the worker metric structure.
type worker struct {
// Tags
WorkerID int `json:"id"`
PID int `json:"pid"`
// Fields
Accepting int `json:"accepting"`
Requests int `json:"requests"`
DeltaRequests int `json:"delta_requests"`
Exceptions int `json:"exceptions"`
HarakiriCount int `json:"harakiri_count"`
Signals int `json:"signals"`
SignalQueue int `json:"signal_queue"`
Status string `json:"status"`
Rss int `json:"rss"`
Vsz int `json:"vsz"`
RunningTime int `json:"running_time"`
LastSpawn int `json:"last_spawn"`
RespawnCount int `json:"respawn_count"`
Tx int `json:"tx"`
AvgRt int `json:"avg_rt"`
Apps []*app `json:"apps"`
Cores []*core `json:"cores"`
}
// app defines the app metric structure.
type app struct {
// Tags
AppID int `json:"id"`
// Fields
Modifier1 int `json:"modifier1"`
Requests int `json:"requests"`
StartupTime int `json:"startup_time"`
Exceptions int `json:"exceptions"`
}
// core defines the core metric structure.
type core struct {
// Tags
CoreID int `json:"id"`
// Fields
Requests int `json:"requests"`
StaticRequests int `json:"static_requests"`
RoutedRequests int `json:"routed_requests"`
OffloadedRequests int `json:"offloaded_requests"`
WriteErrors int `json:"write_errors"`
ReadErrors int `json:"read_errors"`
InRequest int `json:"in_request"`
}
func (*Uwsgi) SampleConfig() string { func (*Uwsgi) SampleConfig() string {
return sampleConfig return sampleConfig
} }
// Gather collect data from uWSGI Server
func (u *Uwsgi) Gather(acc telegraf.Accumulator) error { func (u *Uwsgi) Gather(acc telegraf.Accumulator) error {
if u.client == nil { if u.client == nil {
u.client = &http.Client{ u.client = &http.Client{
@ -71,7 +141,7 @@ func (u *Uwsgi) Gather(acc telegraf.Accumulator) error {
func (u *Uwsgi) gatherServer(acc telegraf.Accumulator, address *url.URL) error { func (u *Uwsgi) gatherServer(acc telegraf.Accumulator, address *url.URL) error {
var err error var err error
var r io.ReadCloser var r io.ReadCloser
var s StatsServer var s statsServer
switch address.Scheme { switch address.Scheme {
case "tcp": case "tcp":
@ -111,7 +181,7 @@ func (u *Uwsgi) gatherServer(acc telegraf.Accumulator, address *url.URL) error {
return err return err
} }
func gatherStatServer(acc telegraf.Accumulator, s *StatsServer) { func gatherStatServer(acc telegraf.Accumulator, s *statsServer) {
fields := map[string]interface{}{ fields := map[string]interface{}{
"listen_queue": s.ListenQueue, "listen_queue": s.ListenQueue,
"listen_queue_errors": s.ListenQueueErrors, "listen_queue_errors": s.ListenQueueErrors,
@ -133,7 +203,7 @@ func gatherStatServer(acc telegraf.Accumulator, s *StatsServer) {
gatherCores(acc, s) gatherCores(acc, s)
} }
func gatherWorkers(acc telegraf.Accumulator, s *StatsServer) { func gatherWorkers(acc telegraf.Accumulator, s *statsServer) {
for _, w := range s.Workers { for _, w := range s.Workers {
fields := map[string]interface{}{ fields := map[string]interface{}{
"requests": w.Requests, "requests": w.Requests,
@ -162,7 +232,7 @@ func gatherWorkers(acc telegraf.Accumulator, s *StatsServer) {
} }
} }
func gatherApps(acc telegraf.Accumulator, s *StatsServer) { func gatherApps(acc telegraf.Accumulator, s *statsServer) {
for _, w := range s.Workers { for _, w := range s.Workers {
for _, a := range w.Apps { for _, a := range w.Apps {
fields := map[string]interface{}{ fields := map[string]interface{}{
@ -181,7 +251,7 @@ func gatherApps(acc telegraf.Accumulator, s *StatsServer) {
} }
} }
func gatherCores(acc telegraf.Accumulator, s *StatsServer) { func gatherCores(acc telegraf.Accumulator, s *statsServer) {
for _, w := range s.Workers { for _, w := range s.Workers {
for _, c := range w.Cores { for _, c := range w.Cores {
fields := map[string]interface{}{ fields := map[string]interface{}{
@ -210,75 +280,3 @@ func init() {
} }
}) })
} }
// StatsServer defines the stats server structure.
type StatsServer struct {
// Tags
source string
PID int `json:"pid"`
UID int `json:"uid"`
GID int `json:"gid"`
Version string `json:"version"`
// Fields
ListenQueue int `json:"listen_queue"`
ListenQueueErrors int `json:"listen_queue_errors"`
SignalQueue int `json:"signal_queue"`
Load int `json:"load"`
Workers []*Worker `json:"workers"`
}
// Worker defines the worker metric structure.
type Worker struct {
// Tags
WorkerID int `json:"id"`
PID int `json:"pid"`
// Fields
Accepting int `json:"accepting"`
Requests int `json:"requests"`
DeltaRequests int `json:"delta_requests"`
Exceptions int `json:"exceptions"`
HarakiriCount int `json:"harakiri_count"`
Signals int `json:"signals"`
SignalQueue int `json:"signal_queue"`
Status string `json:"status"`
Rss int `json:"rss"`
Vsz int `json:"vsz"`
RunningTime int `json:"running_time"`
LastSpawn int `json:"last_spawn"`
RespawnCount int `json:"respawn_count"`
Tx int `json:"tx"`
AvgRt int `json:"avg_rt"`
Apps []*App `json:"apps"`
Cores []*Core `json:"cores"`
}
// App defines the app metric structure.
type App struct {
// Tags
AppID int `json:"id"`
// Fields
Modifier1 int `json:"modifier1"`
Requests int `json:"requests"`
StartupTime int `json:"startup_time"`
Exceptions int `json:"exceptions"`
}
// Core defines the core metric structure.
type Core struct {
// Tags
CoreID int `json:"id"`
// Fields
Requests int `json:"requests"`
StaticRequests int `json:"static_requests"`
RoutedRequests int `json:"routed_requests"`
OffloadedRequests int `json:"offloaded_requests"`
WriteErrors int `json:"write_errors"`
ReadErrors int `json:"read_errors"`
InRequest int `json:"in_request"`
}

View File

@ -22,6 +22,8 @@ import (
//go:embed sample.conf //go:embed sample.conf
var sampleConfig string var sampleConfig string
const timeLayout = "2006-01-02 15:04:05 -0700 MST"
// Vault configuration object // Vault configuration object
type Vault struct { type Vault struct {
URL string `toml:"url"` URL string `toml:"url"`
@ -33,8 +35,6 @@ type Vault struct {
client *http.Client client *http.Client
} }
const timeLayout = "2006-01-02 15:04:05 -0700 MST"
func (*Vault) SampleConfig() string { func (*Vault) SampleConfig() string {
return sampleConfig return sampleConfig
} }
@ -74,7 +74,7 @@ func (*Vault) Start(telegraf.Accumulator) error {
return nil return nil
} }
// Gather, collects metrics from Vault endpoint // Gather collects metrics from Vault endpoint
func (n *Vault) Gather(acc telegraf.Accumulator) error { func (n *Vault) Gather(acc telegraf.Accumulator) error {
sysMetrics, err := n.loadJSON(n.URL + "/v1/sys/metrics") sysMetrics, err := n.loadJSON(n.URL + "/v1/sys/metrics")
if err != nil { if err != nil {
@ -90,7 +90,7 @@ func (n *Vault) Stop() {
} }
} }
func (n *Vault) loadJSON(url string) (*SysMetrics, error) { func (n *Vault) loadJSON(url string) (*sysMetrics, error) {
req, err := http.NewRequest("GET", url, nil) req, err := http.NewRequest("GET", url, nil)
if err != nil { if err != nil {
return nil, err return nil, err
@ -109,7 +109,7 @@ func (n *Vault) loadJSON(url string) (*SysMetrics, error) {
return nil, fmt.Errorf("%s returned HTTP status %s", url, resp.Status) return nil, fmt.Errorf("%s returned HTTP status %s", url, resp.Status)
} }
var metrics SysMetrics var metrics sysMetrics
err = json.NewDecoder(resp.Body).Decode(&metrics) err = json.NewDecoder(resp.Body).Decode(&metrics)
if err != nil { if err != nil {
return nil, fmt.Errorf("error parsing json response: %w", err) return nil, fmt.Errorf("error parsing json response: %w", err)
@ -119,7 +119,7 @@ func (n *Vault) loadJSON(url string) (*SysMetrics, error) {
} }
// buildVaultMetrics, it builds all the metrics and adds them to the accumulator // buildVaultMetrics, it builds all the metrics and adds them to the accumulator
func buildVaultMetrics(acc telegraf.Accumulator, sysMetrics *SysMetrics) error { func buildVaultMetrics(acc telegraf.Accumulator, sysMetrics *sysMetrics) error {
t, err := internal.ParseTimestamp(timeLayout, sysMetrics.Timestamp, nil) t, err := internal.ParseTimestamp(timeLayout, sysMetrics.Timestamp, nil)
if err != nil { if err != nil {
return fmt.Errorf("error parsing time: %w", err) return fmt.Errorf("error parsing time: %w", err)

View File

@ -1,6 +1,6 @@
package vault package vault
type SysMetrics struct { type sysMetrics struct {
Timestamp string `json:"timestamp"` Timestamp string `json:"timestamp"`
Gauges []gauge `json:"Gauges"` Gauges []gauge `json:"Gauges"`
Counters []counter `json:"Counters"` Counters []counter `json:"Counters"`

View File

@ -27,47 +27,47 @@ import (
// and server say. // and server say.
const absoluteMaxMetrics = 10000 const absoluteMaxMetrics = 10000
// ClientFactory is used to obtain Clients to be used throughout the plugin. Typically, // clientFactory is used to obtain Clients to be used throughout the plugin. Typically,
// a single Client is reused across all functions and goroutines, but the client // a single client is reused across all functions and goroutines, but the client
// is periodically recycled to avoid authentication expiration issues. // is periodically recycled to avoid authentication expiration issues.
type ClientFactory struct { type clientFactory struct {
client *Client client *client
mux sync.Mutex mux sync.Mutex
vSphereURL *url.URL vSphereURL *url.URL
parent *VSphere parent *VSphere
} }
// Client represents a connection to vSphere and is backed by a govmomi connection // client represents a connection to vSphere and is backed by a govmomi connection
type Client struct { type client struct {
Client *govmomi.Client client *govmomi.Client
Views *view.Manager views *view.Manager
Root *view.ContainerView root *view.ContainerView
Perf *performance.Manager perf *performance.Manager
Valid bool valid bool
Timeout time.Duration timeout time.Duration
closeGate sync.Once closeGate sync.Once
log telegraf.Logger log telegraf.Logger
} }
// NewClientFactory creates a new ClientFactory and prepares it for use. // newClientFactory creates a new clientFactory and prepares it for use.
func NewClientFactory(vSphereURL *url.URL, parent *VSphere) *ClientFactory { func newClientFactory(vSphereURL *url.URL, parent *VSphere) *clientFactory {
return &ClientFactory{ return &clientFactory{
client: nil, client: nil,
parent: parent, parent: parent,
vSphereURL: vSphereURL, vSphereURL: vSphereURL,
} }
} }
// GetClient returns a client. The caller is responsible for calling Release() // getClient returns a client. The caller is responsible for calling Release()
// on the client once it's done using it. // on the client once it's done using it.
func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) { func (cf *clientFactory) getClient(ctx context.Context) (*client, error) {
cf.mux.Lock() cf.mux.Lock()
defer cf.mux.Unlock() defer cf.mux.Unlock()
retrying := false retrying := false
for { for {
if cf.client == nil { if cf.client == nil {
var err error var err error
if cf.client, err = NewClient(ctx, cf.vSphereURL, cf.parent); err != nil { if cf.client, err = newClient(ctx, cf.vSphereURL, cf.parent); err != nil {
return nil, err return nil, err
} }
} }
@ -89,13 +89,13 @@ func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) {
} }
} }
func (cf *ClientFactory) testClient(ctx context.Context) error { func (cf *clientFactory) testClient(ctx context.Context) error {
// Execute a dummy call against the server to make sure the client is // Execute a dummy call against the server to make sure the client is
// still functional. If not, try to log back in. If that doesn't work, // still functional. If not, try to log back in. If that doesn't work,
// we give up. // we give up.
ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(cf.parent.Timeout)) ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(cf.parent.Timeout))
defer cancel1() defer cancel1()
if _, err := methods.GetCurrentTime(ctx1, cf.client.Client); err != nil { if _, err := methods.GetCurrentTime(ctx1, cf.client.client); err != nil {
cf.parent.Log.Info("Client session seems to have time out. Reauthenticating!") cf.parent.Log.Info("Client session seems to have time out. Reauthenticating!")
ctx2, cancel2 := context.WithTimeout(ctx, time.Duration(cf.parent.Timeout)) ctx2, cancel2 := context.WithTimeout(ctx, time.Duration(cf.parent.Timeout))
defer cancel2() defer cancel2()
@ -113,7 +113,7 @@ func (cf *ClientFactory) testClient(ctx context.Context) error {
defer password.Destroy() defer password.Destroy()
auth := url.UserPassword(username.String(), password.String()) auth := url.UserPassword(username.String(), password.String())
if err := cf.client.Client.SessionManager.Login(ctx2, auth); err != nil { if err := cf.client.client.SessionManager.Login(ctx2, auth); err != nil {
return fmt.Errorf("renewing authentication failed: %w", err) return fmt.Errorf("renewing authentication failed: %w", err)
} }
} }
@ -121,10 +121,10 @@ func (cf *ClientFactory) testClient(ctx context.Context) error {
return nil return nil
} }
// NewClient creates a new vSphere client based on the url and setting passed as parameters. // newClient creates a new vSphere client based on the url and setting passed as parameters.
func NewClient(ctx context.Context, vSphereURL *url.URL, vs *VSphere) (*Client, error) { func newClient(ctx context.Context, vSphereURL *url.URL, vs *VSphere) (*client, error) {
sw := NewStopwatch("connect", vSphereURL.Host) sw := newStopwatch("connect", vSphereURL.Host)
defer sw.Stop() defer sw.stop()
tlsCfg, err := vs.ClientConfig.TLSConfig() tlsCfg, err := vs.ClientConfig.TLSConfig()
if err != nil { if err != nil {
@ -215,19 +215,19 @@ func NewClient(ctx context.Context, vSphereURL *url.URL, vs *VSphere) (*Client,
p := performance.NewManager(c.Client) p := performance.NewManager(c.Client)
client := &Client{ client := &client{
log: vs.Log, log: vs.Log,
Client: c, client: c,
Views: m, views: m,
Root: v, root: v,
Perf: p, perf: p,
Valid: true, valid: true,
Timeout: time.Duration(vs.Timeout), timeout: time.Duration(vs.Timeout),
} }
// Adjust max query size if needed // Adjust max query size if needed
ctx3, cancel3 := context.WithTimeout(ctx, time.Duration(vs.Timeout)) ctx3, cancel3 := context.WithTimeout(ctx, time.Duration(vs.Timeout))
defer cancel3() defer cancel3()
n, err := client.GetMaxQueryMetrics(ctx3) n, err := client.getMaxQueryMetrics(ctx3)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -239,8 +239,8 @@ func NewClient(ctx context.Context, vSphereURL *url.URL, vs *VSphere) (*Client,
return client, nil return client, nil
} }
// Close shuts down a ClientFactory and releases any resources associated with it. // close shuts down a clientFactory and releases any resources associated with it.
func (cf *ClientFactory) Close() { func (cf *clientFactory) close() {
cf.mux.Lock() cf.mux.Lock()
defer cf.mux.Unlock() defer cf.mux.Unlock()
if cf.client != nil { if cf.client != nil {
@ -248,37 +248,37 @@ func (cf *ClientFactory) Close() {
} }
} }
func (c *Client) close() { func (c *client) close() {
// Use a Once to prevent us from panics stemming from trying // Use a Once to prevent us from panics stemming from trying
// to close it multiple times. // to close it multiple times.
c.closeGate.Do(func() { c.closeGate.Do(func() {
ctx, cancel := context.WithTimeout(context.Background(), c.Timeout) ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
defer cancel() defer cancel()
if c.Client != nil { if c.client != nil {
if err := c.Client.Logout(ctx); err != nil { if err := c.client.Logout(ctx); err != nil {
c.log.Errorf("Logout: %s", err.Error()) c.log.Errorf("Logout: %s", err.Error())
} }
} }
}) })
} }
// GetServerTime returns the time at the vCenter server // getServerTime returns the time at the vCenter server
func (c *Client) GetServerTime(ctx context.Context) (time.Time, error) { func (c *client) getServerTime(ctx context.Context) (time.Time, error) {
ctx, cancel := context.WithTimeout(ctx, c.Timeout) ctx, cancel := context.WithTimeout(ctx, c.timeout)
defer cancel() defer cancel()
t, err := methods.GetCurrentTime(ctx, c.Client) t, err := methods.GetCurrentTime(ctx, c.client)
if err != nil { if err != nil {
return time.Time{}, err return time.Time{}, err
} }
return *t, nil return *t, nil
} }
// GetMaxQueryMetrics returns the max_query_metrics setting as configured in vCenter // getMaxQueryMetrics returns the max_query_metrics setting as configured in vCenter
func (c *Client) GetMaxQueryMetrics(ctx context.Context) (int, error) { func (c *client) getMaxQueryMetrics(ctx context.Context) (int, error) {
ctx, cancel := context.WithTimeout(ctx, c.Timeout) ctx, cancel := context.WithTimeout(ctx, c.timeout)
defer cancel() defer cancel()
om := object.NewOptionManager(c.Client.Client, *c.Client.Client.ServiceContent.Setting) om := object.NewOptionManager(c.client.Client, *c.client.Client.ServiceContent.Setting)
res, err := om.Query(ctx, "config.vpxd.stats.maxQueryMetrics") res, err := om.Query(ctx, "config.vpxd.stats.maxQueryMetrics")
if err == nil { if err == nil {
if len(res) > 0 { if len(res) > 0 {
@ -300,7 +300,7 @@ func (c *Client) GetMaxQueryMetrics(ctx context.Context) (int, error) {
} }
// No usable maxQueryMetrics setting. Infer based on version // No usable maxQueryMetrics setting. Infer based on version
ver := c.Client.Client.ServiceContent.About.Version ver := c.client.Client.ServiceContent.About.Version
parts := strings.Split(ver, ".") parts := strings.Split(ver, ".")
if len(parts) < 2 { if len(parts) < 2 {
c.log.Warnf("vCenter returned an invalid version string: %s. Using default query size=64", ver) c.log.Warnf("vCenter returned an invalid version string: %s. Using default query size=64", ver)
@ -317,45 +317,38 @@ func (c *Client) GetMaxQueryMetrics(ctx context.Context) (int, error) {
return 256, nil return 256, nil
} }
// QueryMetrics wraps performance.Query to give it proper timeouts // queryMetrics wraps performance.Query to give it proper timeouts
func (c *Client) QueryMetrics(ctx context.Context, pqs []types.PerfQuerySpec) ([]performance.EntityMetric, error) { func (c *client) queryMetrics(ctx context.Context, pqs []types.PerfQuerySpec) ([]performance.EntityMetric, error) {
ctx1, cancel1 := context.WithTimeout(ctx, c.Timeout) ctx1, cancel1 := context.WithTimeout(ctx, c.timeout)
defer cancel1() defer cancel1()
metrics, err := c.Perf.Query(ctx1, pqs) metrics, err := c.perf.Query(ctx1, pqs)
if err != nil { if err != nil {
return nil, err return nil, err
} }
ctx2, cancel2 := context.WithTimeout(ctx, c.Timeout) ctx2, cancel2 := context.WithTimeout(ctx, c.timeout)
defer cancel2() defer cancel2()
return c.Perf.ToMetricSeries(ctx2, metrics) return c.perf.ToMetricSeries(ctx2, metrics)
} }
// CounterInfoByName wraps performance.CounterInfoByName to give it proper timeouts // counterInfoByName wraps performance.counterInfoByName to give it proper timeouts
func (c *Client) CounterInfoByName(ctx context.Context) (map[string]*types.PerfCounterInfo, error) { func (c *client) counterInfoByName(ctx context.Context) (map[string]*types.PerfCounterInfo, error) {
ctx1, cancel1 := context.WithTimeout(ctx, c.Timeout) ctx1, cancel1 := context.WithTimeout(ctx, c.timeout)
defer cancel1() defer cancel1()
return c.Perf.CounterInfoByName(ctx1) return c.perf.CounterInfoByName(ctx1)
} }
// CounterInfoByKey wraps performance.CounterInfoByKey to give it proper timeouts // counterInfoByKey wraps performance.counterInfoByKey to give it proper timeouts
func (c *Client) CounterInfoByKey(ctx context.Context) (map[int32]*types.PerfCounterInfo, error) { func (c *client) counterInfoByKey(ctx context.Context) (map[int32]*types.PerfCounterInfo, error) {
ctx1, cancel1 := context.WithTimeout(ctx, c.Timeout) ctx1, cancel1 := context.WithTimeout(ctx, c.timeout)
defer cancel1() defer cancel1()
return c.Perf.CounterInfoByKey(ctx1) return c.perf.CounterInfoByKey(ctx1)
} }
// ListResources wraps property.Collector.Retrieve to give it proper timeouts func (c *client) getCustomFields(ctx context.Context) (map[int32]string, error) {
func (c *Client) ListResources(ctx context.Context, root *view.ContainerView, kind, ps []string, dst interface{}) error { ctx1, cancel1 := context.WithTimeout(ctx, c.timeout)
ctx1, cancel1 := context.WithTimeout(ctx, c.Timeout)
defer cancel1() defer cancel1()
return root.Retrieve(ctx1, kind, ps, dst) cfm := object.NewCustomFieldsManager(c.client.Client)
}
func (c *Client) GetCustomFields(ctx context.Context) (map[int32]string, error) {
ctx1, cancel1 := context.WithTimeout(ctx, c.Timeout)
defer cancel1()
cfm := object.NewCustomFieldsManager(c.Client.Client)
fields, err := cfm.Field(ctx1) fields, err := cfm.Field(ctx1)
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -41,18 +41,17 @@ type queryChunk []types.PerfQuerySpec
type queryJob func(queryChunk) type queryJob func(queryChunk)
// Endpoint is a high-level representation of a connected vCenter endpoint. It is backed by the lower // endpoint is a high-level representation of a connected vCenter endpoint. It is backed by the lower level client type.
// level Client type. type endpoint struct {
type Endpoint struct { parent *VSphere
Parent *VSphere url *url.URL
URL *url.URL
resourceKinds map[string]*resourceKind resourceKinds map[string]*resourceKind
hwMarks *TSCache hwMarks *tsCache
lun2ds map[string]string lun2ds map[string]string
discoveryTicker *time.Ticker discoveryTicker *time.Ticker
collectMux sync.RWMutex collectMux sync.RWMutex
initialized bool initialized bool
clientFactory *ClientFactory clientFactory *clientFactory
busy sync.Mutex busy sync.Mutex
customFields map[int32]string customFields map[int32]string
customAttrFilter filter.Filter customAttrFilter filter.Filter
@ -76,7 +75,7 @@ type resourceKind struct {
paths []string paths []string
excludePaths []string excludePaths []string
collectInstances bool collectInstances bool
getObjects func(context.Context, *Endpoint, *ResourceFilter) (objectMap, error) getObjects func(context.Context, *endpoint, *resourceFilter) (objectMap, error)
include []string include []string
simple bool simple bool
metrics performance.MetricList metrics performance.MetricList
@ -108,7 +107,7 @@ type objectRef struct {
lookup map[string]string lookup map[string]string
} }
func (e *Endpoint) getParent(obj *objectRef, res *resourceKind) (*objectRef, bool) { func (e *endpoint) getParent(obj *objectRef, res *resourceKind) (*objectRef, bool) {
if pKind, ok := e.resourceKinds[res.parent]; ok { if pKind, ok := e.resourceKinds[res.parent]; ok {
if p, ok := pKind.objects[obj.parentRef.Value]; ok { if p, ok := pKind.objects[obj.parentRef.Value]; ok {
return p, true return p, true
@ -117,16 +116,16 @@ func (e *Endpoint) getParent(obj *objectRef, res *resourceKind) (*objectRef, boo
return nil, false return nil, false
} }
// NewEndpoint returns a new connection to a vCenter based on the URL and configuration passed // newEndpoint returns a new connection to a vCenter based on the URL and configuration passed
// as parameters. // as parameters.
func NewEndpoint(ctx context.Context, parent *VSphere, address *url.URL, log telegraf.Logger) (*Endpoint, error) { func newEndpoint(ctx context.Context, parent *VSphere, address *url.URL, log telegraf.Logger) (*endpoint, error) {
e := Endpoint{ e := endpoint{
URL: address, url: address,
Parent: parent, parent: parent,
hwMarks: NewTSCache(hwMarkTTL, log), hwMarks: newTSCache(hwMarkTTL, log),
lun2ds: make(map[string]string), lun2ds: make(map[string]string),
initialized: false, initialized: false,
clientFactory: NewClientFactory(address, parent), clientFactory: newClientFactory(address, parent),
customAttrFilter: newFilterOrPanic(parent.CustomAttributeInclude, parent.CustomAttributeExclude), customAttrFilter: newFilterOrPanic(parent.CustomAttributeInclude, parent.CustomAttributeExclude),
customAttrEnabled: anythingEnabled(parent.CustomAttributeExclude), customAttrEnabled: anythingEnabled(parent.CustomAttributeExclude),
log: log, log: log,
@ -294,18 +293,18 @@ func isSimple(include, exclude []string) bool {
return true return true
} }
func (e *Endpoint) startDiscovery(ctx context.Context) { func (e *endpoint) startDiscovery(ctx context.Context) {
e.discoveryTicker = time.NewTicker(time.Duration(e.Parent.ObjectDiscoveryInterval)) e.discoveryTicker = time.NewTicker(time.Duration(e.parent.ObjectDiscoveryInterval))
go func() { go func() {
for { for {
select { select {
case <-e.discoveryTicker.C: case <-e.discoveryTicker.C:
err := e.discover(ctx) err := e.discover(ctx)
if err != nil && !errors.Is(err, context.Canceled) { if err != nil && !errors.Is(err, context.Canceled) {
e.log.Errorf("Discovery for %s: %s", e.URL.Host, err.Error()) e.log.Errorf("Discovery for %s: %s", e.url.Host, err.Error())
} }
case <-ctx.Done(): case <-ctx.Done():
e.log.Debugf("Exiting discovery goroutine for %s", e.URL.Host) e.log.Debugf("Exiting discovery goroutine for %s", e.url.Host)
e.discoveryTicker.Stop() e.discoveryTicker.Stop()
return return
} }
@ -313,18 +312,18 @@ func (e *Endpoint) startDiscovery(ctx context.Context) {
}() }()
} }
func (e *Endpoint) initialDiscovery(ctx context.Context) { func (e *endpoint) initialDiscovery(ctx context.Context) {
err := e.discover(ctx) err := e.discover(ctx)
if err != nil && !errors.Is(err, context.Canceled) { if err != nil && !errors.Is(err, context.Canceled) {
e.log.Errorf("Discovery for %s: %s", e.URL.Host, err.Error()) e.log.Errorf("Discovery for %s: %s", e.url.Host, err.Error())
} }
e.startDiscovery(ctx) e.startDiscovery(ctx)
} }
func (e *Endpoint) init(ctx context.Context) error { func (e *endpoint) init(ctx context.Context) error {
client, err := e.clientFactory.GetClient(ctx) client, err := e.clientFactory.getClient(ctx)
if err != nil { if err != nil {
switch e.Parent.DisconnectedServersBehavior { switch e.parent.DisconnectedServersBehavior {
case "error": case "error":
return err return err
case "ignore": case "ignore":
@ -333,13 +332,13 @@ func (e *Endpoint) init(ctx context.Context) error {
return nil return nil
default: default:
return fmt.Errorf("%q is not a valid value for disconnected_servers_behavior", return fmt.Errorf("%q is not a valid value for disconnected_servers_behavior",
e.Parent.DisconnectedServersBehavior) e.parent.DisconnectedServersBehavior)
} }
} }
// Initial load of custom field metadata // Initial load of custom field metadata
if e.customAttrEnabled { if e.customAttrEnabled {
fields, err := client.GetCustomFields(ctx) fields, err := client.getCustomFields(ctx)
if err != nil { if err != nil {
e.log.Warn("Could not load custom field metadata") e.log.Warn("Could not load custom field metadata")
} else { } else {
@ -347,29 +346,29 @@ func (e *Endpoint) init(ctx context.Context) error {
} }
} }
if time.Duration(e.Parent.ObjectDiscoveryInterval) > 0 { if time.Duration(e.parent.ObjectDiscoveryInterval) > 0 {
e.Parent.Log.Debug("Running initial discovery") e.parent.Log.Debug("Running initial discovery")
e.initialDiscovery(ctx) e.initialDiscovery(ctx)
} }
e.initialized = true e.initialized = true
return nil return nil
} }
func (e *Endpoint) getMetricNameForID(id int32) string { func (e *endpoint) getMetricNameForID(id int32) string {
e.metricNameMux.RLock() e.metricNameMux.RLock()
defer e.metricNameMux.RUnlock() defer e.metricNameMux.RUnlock()
return e.metricNameLookup[id] return e.metricNameLookup[id]
} }
func (e *Endpoint) reloadMetricNameMap(ctx context.Context) error { func (e *endpoint) reloadMetricNameMap(ctx context.Context) error {
e.metricNameMux.Lock() e.metricNameMux.Lock()
defer e.metricNameMux.Unlock() defer e.metricNameMux.Unlock()
client, err := e.clientFactory.GetClient(ctx) client, err := e.clientFactory.getClient(ctx)
if err != nil { if err != nil {
return err return err
} }
mn, err := client.CounterInfoByKey(ctx) mn, err := client.counterInfoByKey(ctx)
if err != nil { if err != nil {
return err return err
} }
@ -380,28 +379,28 @@ func (e *Endpoint) reloadMetricNameMap(ctx context.Context) error {
return nil return nil
} }
func (e *Endpoint) getMetadata(ctx context.Context, obj *objectRef, sampling int32) (performance.MetricList, error) { func (e *endpoint) getMetadata(ctx context.Context, obj *objectRef, sampling int32) (performance.MetricList, error) {
client, err := e.clientFactory.GetClient(ctx) client, err := e.clientFactory.getClient(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.parent.Timeout))
defer cancel1() defer cancel1()
metrics, err := client.Perf.AvailableMetric(ctx1, obj.ref.Reference(), sampling) metrics, err := client.perf.AvailableMetric(ctx1, obj.ref.Reference(), sampling)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return metrics, nil return metrics, nil
} }
func (e *Endpoint) getDatacenterName(ctx context.Context, client *Client, cache map[string]string, r types.ManagedObjectReference) (string, bool) { func (e *endpoint) getDatacenterName(ctx context.Context, client *client, cache map[string]string, r types.ManagedObjectReference) (string, bool) {
return e.getAncestorName(ctx, client, "Datacenter", cache, r) return e.getAncestorName(ctx, client, "Datacenter", cache, r)
} }
func (e *Endpoint) getAncestorName( func (e *endpoint) getAncestorName(
ctx context.Context, ctx context.Context,
client *Client, client *client,
resourceType string, resourceType string,
cache map[string]string, cache map[string]string,
r types.ManagedObjectReference, r types.ManagedObjectReference,
@ -418,13 +417,13 @@ func (e *Endpoint) getAncestorName(
return true return true
} }
path = append(path, here.Reference().String()) path = append(path, here.Reference().String())
o := object.NewCommon(client.Client.Client, r) o := object.NewCommon(client.client.Client, r)
var result mo.ManagedEntity var result mo.ManagedEntity
ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.parent.Timeout))
defer cancel1() defer cancel1()
err := o.Properties(ctx1, here, []string{"parent", "name"}, &result) err := o.Properties(ctx1, here, []string{"parent", "name"}, &result)
if err != nil { if err != nil {
e.Parent.Log.Warnf("Error while resolving parent. Assuming no parent exists. Error: %s", err.Error()) e.parent.Log.Warnf("Error while resolving parent. Assuming no parent exists. Error: %s", err.Error())
return true return true
} }
if result.Reference().Type == resourceType { if result.Reference().Type == resourceType {
@ -433,7 +432,7 @@ func (e *Endpoint) getAncestorName(
return true return true
} }
if result.Parent == nil { if result.Parent == nil {
e.Parent.Log.Debugf("No parent found for %s (ascending from %s)", here.Reference(), r.Reference()) e.parent.Log.Debugf("No parent found for %s (ascending from %s)", here.Reference(), r.Reference())
return true return true
} }
here = result.Parent.Reference() here = result.Parent.Reference()
@ -446,7 +445,7 @@ func (e *Endpoint) getAncestorName(
return returnVal, returnVal != "" return returnVal, returnVal != ""
} }
func (e *Endpoint) discover(ctx context.Context) error { func (e *endpoint) discover(ctx context.Context) error {
e.busy.Lock() e.busy.Lock()
defer e.busy.Unlock() defer e.busy.Unlock()
if ctx.Err() != nil { if ctx.Err() != nil {
@ -458,17 +457,17 @@ func (e *Endpoint) discover(ctx context.Context) error {
return err return err
} }
sw := NewStopwatch("discover", e.URL.Host) sw := newStopwatch("discover", e.url.Host)
client, err := e.clientFactory.GetClient(ctx) client, err := e.clientFactory.getClient(ctx)
if err != nil { if err != nil {
return err return err
} }
// get the vSphere API version // get the vSphere API version
e.apiVersion = client.Client.ServiceContent.About.ApiVersion e.apiVersion = client.client.ServiceContent.About.ApiVersion
e.Parent.Log.Debugf("Discover new objects for %s", e.URL.Host) e.parent.Log.Debugf("Discover new objects for %s", e.url.Host)
dcNameCache := make(map[string]string) dcNameCache := make(map[string]string)
numRes := int64(0) numRes := int64(0)
@ -479,13 +478,13 @@ func (e *Endpoint) discover(ctx context.Context) error {
e.log.Debugf("Discovering resources for %s", res.name) e.log.Debugf("Discovering resources for %s", res.name)
// Need to do this for all resource types even if they are not enabled // Need to do this for all resource types even if they are not enabled
if res.enabled || (k != "vm" && k != "vsan") { if res.enabled || (k != "vm" && k != "vsan") {
rf := ResourceFilter{ rf := resourceFilter{
finder: &Finder{client}, finder: &finder{client},
resType: res.vcName, resType: res.vcName,
paths: res.paths, paths: res.paths,
excludePaths: res.excludePaths} excludePaths: res.excludePaths}
ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.parent.Timeout))
objects, err := res.getObjects(ctx1, e, &rf) objects, err := res.getObjects(ctx1, e, &rf)
cancel1() cancel1()
if err != nil { if err != nil {
@ -513,7 +512,7 @@ func (e *Endpoint) discover(ctx context.Context) error {
} }
newObjects[k] = objects newObjects[k] = objects
SendInternalCounterWithTags("discovered_objects", e.URL.Host, map[string]string{"type": res.name}, int64(len(objects))) sendInternalCounterWithTags("discovered_objects", e.url.Host, map[string]string{"type": res.name}, int64(len(objects)))
numRes += int64(len(objects)) numRes += int64(len(objects))
} }
} }
@ -532,7 +531,7 @@ func (e *Endpoint) discover(ctx context.Context) error {
// Load custom field metadata // Load custom field metadata
var fields map[int32]string var fields map[int32]string
if e.customAttrEnabled { if e.customAttrEnabled {
fields, err = client.GetCustomFields(ctx) fields, err = client.getCustomFields(ctx)
if err != nil { if err != nil {
e.log.Warn("Could not load custom field metadata") e.log.Warn("Could not load custom field metadata")
fields = nil fields = nil
@ -552,14 +551,14 @@ func (e *Endpoint) discover(ctx context.Context) error {
e.customFields = fields e.customFields = fields
} }
sw.Stop() sw.stop()
SendInternalCounterWithTags("discovered_objects", e.URL.Host, map[string]string{"type": "instance-total"}, numRes) sendInternalCounterWithTags("discovered_objects", e.url.Host, map[string]string{"type": "instance-total"}, numRes)
return nil return nil
} }
func (e *Endpoint) simpleMetadataSelect(ctx context.Context, client *Client, res *resourceKind) { func (e *endpoint) simpleMetadataSelect(ctx context.Context, client *client, res *resourceKind) {
e.log.Debugf("Using fast metric metadata selection for %s", res.name) e.log.Debugf("Using fast metric metadata selection for %s", res.name)
m, err := client.CounterInfoByName(ctx) m, err := client.counterInfoByName(ctx)
if err != nil { if err != nil {
e.log.Errorf("Getting metric metadata. Discovery will be incomplete. Error: %s", err.Error()) e.log.Errorf("Getting metric metadata. Discovery will be incomplete. Error: %s", err.Error())
return return
@ -582,7 +581,7 @@ func (e *Endpoint) simpleMetadataSelect(ctx context.Context, client *Client, res
} }
} }
func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, objects objectMap) { func (e *endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, objects objectMap) {
// We're only going to get metadata from maxMetadataSamples resources. If we have // We're only going to get metadata from maxMetadataSamples resources. If we have
// more resources than that, we pick maxMetadataSamples samples at random. // more resources than that, we pick maxMetadataSamples samples at random.
sampledObjects := make([]*objectRef, 0, len(objects)) sampledObjects := make([]*objectRef, 0, len(objects))
@ -602,10 +601,10 @@ func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind,
} }
instInfoMux := sync.Mutex{} instInfoMux := sync.Mutex{}
te := NewThrottledExecutor(e.Parent.DiscoverConcurrency) te := newThrottledExecutor(e.parent.DiscoverConcurrency)
for _, obj := range sampledObjects { for _, obj := range sampledObjects {
func(obj *objectRef) { func(obj *objectRef) {
te.Run(ctx, func() { te.run(ctx, func() {
metrics, err := e.getMetadata(ctx, obj, res.sampling) metrics, err := e.getMetadata(ctx, obj, res.sampling)
if err != nil { if err != nil {
e.log.Errorf("Getting metric metadata. Discovery will be incomplete. Error: %s", err.Error()) e.log.Errorf("Getting metric metadata. Discovery will be incomplete. Error: %s", err.Error())
@ -635,14 +634,14 @@ func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind,
}) })
}(obj) }(obj)
} }
te.Wait() te.wait()
} }
func getDatacenters(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (objectMap, error) { func getDatacenters(ctx context.Context, e *endpoint, resourceFilter *resourceFilter) (objectMap, error) {
var resources []mo.Datacenter var resources []mo.Datacenter
ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.parent.Timeout))
defer cancel1() defer cancel1()
err := resourceFilter.FindAll(ctx1, &resources) err := resourceFilter.findAll(ctx1, &resources)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -661,11 +660,11 @@ func getDatacenters(ctx context.Context, e *Endpoint, resourceFilter *ResourceFi
return m, nil return m, nil
} }
func getClusters(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (objectMap, error) { func getClusters(ctx context.Context, e *endpoint, resourceFilter *resourceFilter) (objectMap, error) {
var resources []mo.ClusterComputeResource var resources []mo.ClusterComputeResource
ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.parent.Timeout))
defer cancel1() defer cancel1()
err := resourceFilter.FindAll(ctx1, &resources) err := resourceFilter.findAll(ctx1, &resources)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -679,19 +678,19 @@ func getClusters(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilte
// We're not interested in the immediate parent (a folder), but the data center. // We're not interested in the immediate parent (a folder), but the data center.
p, ok := cache[r.Parent.Value] p, ok := cache[r.Parent.Value]
if !ok { if !ok {
ctx2, cancel2 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) ctx2, cancel2 := context.WithTimeout(ctx, time.Duration(e.parent.Timeout))
defer cancel2() defer cancel2()
client, err := e.clientFactory.GetClient(ctx2) client, err := e.clientFactory.getClient(ctx2)
if err != nil { if err != nil {
return err return err
} }
o := object.NewFolder(client.Client.Client, *r.Parent) o := object.NewFolder(client.client.Client, *r.Parent)
var folder mo.Folder var folder mo.Folder
ctx3, cancel3 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) ctx3, cancel3 := context.WithTimeout(ctx, time.Duration(e.parent.Timeout))
defer cancel3() defer cancel3()
err = o.Properties(ctx3, *r.Parent, []string{"parent"}, &folder) err = o.Properties(ctx3, *r.Parent, []string{"parent"}, &folder)
if err != nil { if err != nil {
e.Parent.Log.Warnf("Error while getting folder parent: %s", err.Error()) e.parent.Log.Warnf("Error while getting folder parent: %s", err.Error())
p = nil p = nil
} else { } else {
pp := folder.Parent.Reference() pp := folder.Parent.Reference()
@ -715,9 +714,9 @@ func getClusters(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilte
} }
// noinspection GoUnusedParameter // noinspection GoUnusedParameter
func getResourcePools(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (objectMap, error) { func getResourcePools(ctx context.Context, e *endpoint, resourceFilter *resourceFilter) (objectMap, error) {
var resources []mo.ResourcePool var resources []mo.ResourcePool
err := resourceFilter.FindAll(ctx, &resources) err := resourceFilter.findAll(ctx, &resources)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -746,9 +745,9 @@ func getResourcePoolName(rp types.ManagedObjectReference, rps objectMap) string
} }
// noinspection GoUnusedParameter // noinspection GoUnusedParameter
func getHosts(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (objectMap, error) { func getHosts(ctx context.Context, e *endpoint, resourceFilter *resourceFilter) (objectMap, error) {
var resources []mo.HostSystem var resources []mo.HostSystem
err := resourceFilter.FindAll(ctx, &resources) err := resourceFilter.findAll(ctx, &resources)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -766,22 +765,22 @@ func getHosts(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter)
return m, nil return m, nil
} }
func getVMs(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (objectMap, error) { func getVMs(ctx context.Context, e *endpoint, rf *resourceFilter) (objectMap, error) {
var resources []mo.VirtualMachine var resources []mo.VirtualMachine
ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.parent.Timeout))
defer cancel1() defer cancel1()
err := resourceFilter.FindAll(ctx1, &resources) err := rf.findAll(ctx1, &resources)
if err != nil { if err != nil {
return nil, err return nil, err
} }
m := make(objectMap) m := make(objectMap)
client, err := e.clientFactory.GetClient(ctx) client, err := e.clientFactory.getClient(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Create a ResourcePool Filter and get the list of Resource Pools // Create a ResourcePool Filter and get the list of Resource Pools
rprf := ResourceFilter{ rprf := resourceFilter{
finder: &Finder{client}, finder: &finder{client},
resType: "ResourcePool", resType: "ResourcePool",
paths: []string{"/*/host/**"}, paths: []string{"/*/host/**"},
excludePaths: nil} excludePaths: nil}
@ -798,7 +797,7 @@ func getVMs(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (o
guest := "unknown" guest := "unknown"
uuid := "" uuid := ""
lookup := make(map[string]string) lookup := make(map[string]string)
// Get the name of the VM resource pool // get the name of the VM resource pool
rpname := getResourcePoolName(*r.ResourcePool, resourcePools) rpname := getResourcePoolName(*r.ResourcePool, resourcePools)
// Extract host name // Extract host name
@ -817,7 +816,7 @@ func getVMs(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (o
ips := make(map[string][]string) ips := make(map[string][]string)
for _, ip := range net.IpConfig.IpAddress { for _, ip := range net.IpConfig.IpAddress {
addr := ip.IpAddress addr := ip.IpAddress
for _, ipType := range e.Parent.IPAddresses { for _, ipType := range e.parent.IPAddresses {
if !(ipType == "ipv4" && isIPv4.MatchString(addr) || if !(ipType == "ipv4" && isIPv4.MatchString(addr) ||
ipType == "ipv6" && isIPv6.MatchString(addr)) { ipType == "ipv6" && isIPv6.MatchString(addr)) {
continue continue
@ -881,11 +880,11 @@ func getVMs(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (o
return m, nil return m, nil
} }
func getDatastores(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (objectMap, error) { func getDatastores(ctx context.Context, e *endpoint, resourceFilter *resourceFilter) (objectMap, error) {
var resources []mo.Datastore var resources []mo.Datastore
ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.parent.Timeout))
defer cancel1() defer cancel1()
err := resourceFilter.FindAll(ctx1, &resources) err := resourceFilter.findAll(ctx1, &resources)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -911,7 +910,7 @@ func getDatastores(ctx context.Context, e *Endpoint, resourceFilter *ResourceFil
return m, nil return m, nil
} }
func (e *Endpoint) loadCustomAttributes(entity mo.ManagedEntity) map[string]string { func (e *endpoint) loadCustomAttributes(entity mo.ManagedEntity) map[string]string {
if !e.customAttrEnabled { if !e.customAttrEnabled {
return make(map[string]string) return make(map[string]string)
} }
@ -919,12 +918,12 @@ func (e *Endpoint) loadCustomAttributes(entity mo.ManagedEntity) map[string]stri
for _, v := range entity.CustomValue { for _, v := range entity.CustomValue {
cv, ok := v.(*types.CustomFieldStringValue) cv, ok := v.(*types.CustomFieldStringValue)
if !ok { if !ok {
e.Parent.Log.Warnf("Metadata for custom field %d not of string type. Skipping", cv.Key) e.parent.Log.Warnf("Metadata for custom field %d not of string type. Skipping", cv.Key)
continue continue
} }
key, ok := e.customFields[cv.Key] key, ok := e.customFields[cv.Key]
if !ok { if !ok {
e.Parent.Log.Warnf("Metadata for custom field %d not found. Skipping", cv.Key) e.parent.Log.Warnf("Metadata for custom field %d not found. Skipping", cv.Key)
continue continue
} }
if e.customAttrFilter.Match(key) { if e.customAttrFilter.Match(key) {
@ -934,13 +933,13 @@ func (e *Endpoint) loadCustomAttributes(entity mo.ManagedEntity) map[string]stri
return cvs return cvs
} }
// Close shuts down an Endpoint and releases any resources associated with it. // close shuts down an endpoint and releases any resources associated with it.
func (e *Endpoint) Close() { func (e *endpoint) close() {
e.clientFactory.Close() e.clientFactory.close()
} }
// Collect runs a round of data collections as specified in the configuration. // collect runs a round of data collections as specified in the configuration.
func (e *Endpoint) Collect(ctx context.Context, acc telegraf.Accumulator) error { func (e *endpoint) collect(ctx context.Context, acc telegraf.Accumulator) error {
// Connection could have failed on init, so we need to check for a deferred // Connection could have failed on init, so we need to check for a deferred
// init request. // init request.
if !e.initialized { if !e.initialized {
@ -953,7 +952,7 @@ func (e *Endpoint) Collect(ctx context.Context, acc telegraf.Accumulator) error
// If we never managed to do a discovery, collection will be a no-op. Therefore, // If we never managed to do a discovery, collection will be a no-op. Therefore,
// we need to check that a connection is available, or the collection will // we need to check that a connection is available, or the collection will
// silently fail. // silently fail.
if _, err := e.clientFactory.GetClient(ctx); err != nil { if _, err := e.clientFactory.getClient(ctx); err != nil {
return err return err
} }
@ -965,7 +964,7 @@ func (e *Endpoint) Collect(ctx context.Context, acc telegraf.Accumulator) error
} }
// If discovery interval is disabled (0), discover on each collection cycle // If discovery interval is disabled (0), discover on each collection cycle
if time.Duration(e.Parent.ObjectDiscoveryInterval) == 0 { if time.Duration(e.parent.ObjectDiscoveryInterval) == 0 {
err := e.discover(ctx) err := e.discover(ctx)
if err != nil { if err != nil {
return err return err
@ -991,21 +990,21 @@ func (e *Endpoint) Collect(ctx context.Context, acc telegraf.Accumulator) error
} }
wg.Wait() wg.Wait()
// Purge old timestamps from the cache // purge old timestamps from the cache
e.hwMarks.Purge() e.hwMarks.purge()
return nil return nil
} }
// Workaround to make sure pqs is a copy of the loop variable and won't change. // Workaround to make sure pqs is a copy of the loop variable and won't change.
func submitChunkJob(ctx context.Context, te *ThrottledExecutor, job queryJob, pqs queryChunk) { func submitChunkJob(ctx context.Context, te *throttledExecutor, job queryJob, pqs queryChunk) {
te.Run(ctx, func() { te.run(ctx, func() {
job(pqs) job(pqs)
}) })
} }
func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now, latest time.Time, job queryJob) { func (e *endpoint) chunkify(ctx context.Context, res *resourceKind, now, latest time.Time, job queryJob) {
te := NewThrottledExecutor(e.Parent.CollectConcurrency) te := newThrottledExecutor(e.parent.CollectConcurrency)
maxMetrics := e.Parent.MaxQueryMetrics maxMetrics := e.parent.MaxQueryMetrics
if maxMetrics < 1 { if maxMetrics < 1 {
maxMetrics = 1 maxMetrics = 1
} }
@ -1017,7 +1016,7 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now, latest
maxMetrics = 10 maxMetrics = 10
} }
pqs := make(queryChunk, 0, e.Parent.MaxQueryObjects) pqs := make(queryChunk, 0, e.parent.MaxQueryObjects)
numQs := 0 numQs := 0
for _, obj := range res.objects { for _, obj := range res.objects {
@ -1029,9 +1028,9 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now, latest
e.log.Debugf("Unable to find metric name for id %d. Skipping!", metric.CounterId) e.log.Debugf("Unable to find metric name for id %d. Skipping!", metric.CounterId)
continue continue
} }
start, ok := e.hwMarks.Get(obj.ref.Value, metricName) start, ok := e.hwMarks.get(obj.ref.Value, metricName)
if !ok { if !ok {
start = latest.Add(time.Duration(-res.sampling) * time.Second * (time.Duration(e.Parent.MetricLookback) - 1)) start = latest.Add(time.Duration(-res.sampling) * time.Second * (time.Duration(e.parent.MetricLookback) - 1))
} }
if !start.Truncate(time.Second).Before(now.Truncate(time.Second)) { if !start.Truncate(time.Second).Before(now.Truncate(time.Second)) {
@ -1064,14 +1063,14 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now, latest
// OR if we're past the absolute maximum limit // OR if we're past the absolute maximum limit
if (!res.realTime && len(bucket.MetricId) >= maxMetrics) || len(bucket.MetricId) > maxRealtimeMetrics { if (!res.realTime && len(bucket.MetricId) >= maxMetrics) || len(bucket.MetricId) > maxRealtimeMetrics {
e.log.Debugf("Submitting partial query: %d metrics (%d remaining) of type %s for %s. Total objects %d", e.log.Debugf("Submitting partial query: %d metrics (%d remaining) of type %s for %s. Total objects %d",
len(bucket.MetricId), len(res.metrics)-metricIdx, res.name, e.URL.Host, len(res.objects)) len(bucket.MetricId), len(res.metrics)-metricIdx, res.name, e.url.Host, len(res.objects))
// Don't send work items if the context has been cancelled. // Don't send work items if the context has been cancelled.
if errors.Is(ctx.Err(), context.Canceled) { if errors.Is(ctx.Err(), context.Canceled) {
return return
} }
// Run collection job // run collection job
delete(timeBuckets, start.Unix()) delete(timeBuckets, start.Unix())
submitChunkJob(ctx, te, job, queryChunk{*bucket}) submitChunkJob(ctx, te, job, queryChunk{*bucket})
} }
@ -1080,10 +1079,10 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now, latest
for _, bucket := range timeBuckets { for _, bucket := range timeBuckets {
pqs = append(pqs, *bucket) pqs = append(pqs, *bucket)
numQs += len(bucket.MetricId) numQs += len(bucket.MetricId)
if (!res.realTime && numQs > e.Parent.MaxQueryObjects) || numQs > maxRealtimeMetrics { if (!res.realTime && numQs > e.parent.MaxQueryObjects) || numQs > maxRealtimeMetrics {
e.log.Debugf("Submitting final bucket job for %s: %d metrics", res.name, numQs) e.log.Debugf("Submitting final bucket job for %s: %d metrics", res.name, numQs)
submitChunkJob(ctx, te, job, pqs) submitChunkJob(ctx, te, job, pqs)
pqs = make(queryChunk, 0, e.Parent.MaxQueryObjects) pqs = make(queryChunk, 0, e.parent.MaxQueryObjects)
numQs = 0 numQs = 0
} }
} }
@ -1095,16 +1094,16 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now, latest
} }
// Wait for background collection to finish // Wait for background collection to finish
te.Wait() te.wait()
} }
func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc telegraf.Accumulator) error { func (e *endpoint) collectResource(ctx context.Context, resourceType string, acc telegraf.Accumulator) error {
res := e.resourceKinds[resourceType] res := e.resourceKinds[resourceType]
client, err := e.clientFactory.GetClient(ctx) client, err := e.clientFactory.getClient(ctx)
if err != nil { if err != nil {
return err return err
} }
now, err := client.GetServerTime(ctx) now, err := client.getServerTime(ctx)
if err != nil { if err != nil {
return err return err
} }
@ -1133,7 +1132,7 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc
if !res.realTime && elapsed < float64(res.sampling) { if !res.realTime && elapsed < float64(res.sampling) {
// No new data would be available. We're outta here! // No new data would be available. We're outta here!
e.log.Debugf("Sampling period for %s of %d has not elapsed on %s", e.log.Debugf("Sampling period for %s of %d has not elapsed on %s",
resourceType, res.sampling, e.URL.Host) resourceType, res.sampling, e.url.Host)
return nil return nil
} }
} else { } else {
@ -1141,10 +1140,10 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc
} }
internalTags := map[string]string{"resourcetype": resourceType} internalTags := map[string]string{"resourcetype": resourceType}
sw := NewStopwatchWithTags("gather_duration", e.URL.Host, internalTags) sw := newStopwatchWithTags("gather_duration", e.url.Host, internalTags)
e.log.Debugf("Collecting metrics for %d objects of type %s for %s", e.log.Debugf("Collecting metrics for %d objects of type %s for %s",
len(res.objects), resourceType, e.URL.Host) len(res.objects), resourceType, e.url.Host)
count := int64(0) count := int64(0)
@ -1160,7 +1159,7 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc
acc.AddError(errors.New("while collecting " + res.name + ": " + err.Error())) acc.AddError(errors.New("while collecting " + res.name + ": " + err.Error()))
return return
} }
e.Parent.Log.Debugf("CollectChunk for %s returned %d metrics", resourceType, n) e.parent.Log.Debugf("CollectChunk for %s returned %d metrics", resourceType, n)
atomic.AddInt64(&count, int64(n)) atomic.AddInt64(&count, int64(n))
tsMux.Lock() tsMux.Lock()
defer tsMux.Unlock() defer tsMux.Unlock()
@ -1173,12 +1172,12 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc
if !latestSample.IsZero() { if !latestSample.IsZero() {
res.latestSample = latestSample res.latestSample = latestSample
} }
sw.Stop() sw.stop()
SendInternalCounterWithTags("gather_count", e.URL.Host, internalTags, count) sendInternalCounterWithTags("gather_count", e.url.Host, internalTags, count)
return nil return nil
} }
func (e *Endpoint) alignSamples(info []types.PerfSampleInfo, values []int64, interval time.Duration) ([]types.PerfSampleInfo, []float64) { func (e *endpoint) alignSamples(info []types.PerfSampleInfo, values []int64, interval time.Duration) ([]types.PerfSampleInfo, []float64) {
rInfo := make([]types.PerfSampleInfo, 0, len(info)) rInfo := make([]types.PerfSampleInfo, 0, len(info))
rValues := make([]float64, 0, len(values)) rValues := make([]float64, 0, len(values))
bi := 1.0 bi := 1.0
@ -1216,7 +1215,7 @@ func (e *Endpoint) alignSamples(info []types.PerfSampleInfo, values []int64, int
return rInfo, rValues return rInfo, rValues
} }
func (e *Endpoint) collectChunk( func (e *endpoint) collectChunk(
ctx context.Context, ctx context.Context,
pqs queryChunk, pqs queryChunk,
res *resourceKind, res *resourceKind,
@ -1227,19 +1226,19 @@ func (e *Endpoint) collectChunk(
latestSample := time.Time{} latestSample := time.Time{}
count := 0 count := 0
resourceType := res.name resourceType := res.name
prefix := "vsphere" + e.Parent.Separator + resourceType prefix := "vsphere" + e.parent.Separator + resourceType
client, err := e.clientFactory.GetClient(ctx) client, err := e.clientFactory.getClient(ctx)
if err != nil { if err != nil {
return count, latestSample, err return count, latestSample, err
} }
metricInfo, err := client.CounterInfoByName(ctx) metricInfo, err := client.counterInfoByName(ctx)
if err != nil { if err != nil {
return count, latestSample, err return count, latestSample, err
} }
ems, err := client.QueryMetrics(ctx, pqs) ems, err := client.queryMetrics(ctx, pqs)
if err != nil { if err != nil {
return count, latestSample, err return count, latestSample, err
} }
@ -1258,7 +1257,7 @@ func (e *Endpoint) collectChunk(
for _, v := range em.Value { for _, v := range em.Value {
name := v.Name name := v.Name
t := map[string]string{ t := map[string]string{
"vcenter": e.URL.Host, "vcenter": e.url.Host,
"source": instInfo.name, "source": instInfo.name,
"moid": moid, "moid": moid,
} }
@ -1310,7 +1309,7 @@ func (e *Endpoint) collectChunk(
if info.UnitInfo.GetElementDescription().Key == "percent" { if info.UnitInfo.GetElementDescription().Key == "percent" {
bucket.fields[fn] = v / 100.0 bucket.fields[fn] = v / 100.0
} else { } else {
if e.Parent.UseIntSamples { if e.parent.UseIntSamples {
bucket.fields[fn] = int64(round(v)) bucket.fields[fn] = int64(round(v))
} else { } else {
bucket.fields[fn] = v bucket.fields[fn] = v
@ -1320,7 +1319,7 @@ func (e *Endpoint) collectChunk(
// Update hiwater marks // Update hiwater marks
adjTs := ts.Add(interval).Truncate(interval).Add(-time.Second) adjTs := ts.Add(interval).Truncate(interval).Add(-time.Second)
e.hwMarks.Put(moid, name, adjTs) e.hwMarks.put(moid, name, adjTs)
} }
if nValues == 0 { if nValues == 0 {
e.log.Debugf("Missing value for: %s, %s", name, objectRef.name) e.log.Debugf("Missing value for: %s, %s", name, objectRef.name)
@ -1336,7 +1335,7 @@ func (e *Endpoint) collectChunk(
return count, latestSample, nil return count, latestSample, nil
} }
func (e *Endpoint) populateTags(objectRef *objectRef, resourceType string, resource *resourceKind, t map[string]string, v performance.MetricSeries) { func (e *endpoint) populateTags(objectRef *objectRef, resourceType string, resource *resourceKind, t map[string]string, v performance.MetricSeries) {
// Map name of object. // Map name of object.
if resource.pKey != "" { if resource.pKey != "" {
t[resource.pKey] = objectRef.name t[resource.pKey] = objectRef.name
@ -1424,7 +1423,7 @@ func (e *Endpoint) populateTags(objectRef *objectRef, resourceType string, resou
} }
} }
func (e *Endpoint) populateGlobalFields(objectRef *objectRef, resourceType, prefix string) map[string]interface{} { func (e *endpoint) populateGlobalFields(objectRef *objectRef, resourceType, prefix string) map[string]interface{} {
globalFields := make(map[string]interface{}) globalFields := make(map[string]interface{})
if resourceType == "vm" && objectRef.memorySizeMB != 0 { if resourceType == "vm" && objectRef.memorySizeMB != 0 {
_, fieldName := e.makeMetricIdentifier(prefix, "memorySizeMB") _, fieldName := e.makeMetricIdentifier(prefix, "memorySizeMB")
@ -1437,12 +1436,12 @@ func (e *Endpoint) populateGlobalFields(objectRef *objectRef, resourceType, pref
return globalFields return globalFields
} }
func (e *Endpoint) makeMetricIdentifier(prefix, metric string) (metricName, fieldName string) { func (e *endpoint) makeMetricIdentifier(prefix, metric string) (metricName, fieldName string) {
parts := strings.Split(metric, ".") parts := strings.Split(metric, ".")
if len(parts) == 1 { if len(parts) == 1 {
return prefix, parts[0] return prefix, parts[0]
} }
return prefix + e.Parent.Separator + parts[0], strings.Join(parts[1:], e.Parent.Separator) return prefix + e.parent.Separator + parts[0], strings.Join(parts[1:], e.parent.Separator)
} }
func cleanGuestID(id string) string { func cleanGuestID(id string) string {

View File

@ -17,22 +17,22 @@ var addFields map[string][]string
var containers map[string]interface{} var containers map[string]interface{}
// Finder allows callers to find resources in vCenter given a query string. // finder allows callers to find resources in vCenter given a query string.
type Finder struct { type finder struct {
client *Client client *client
} }
// ResourceFilter is a convenience class holding a finder and a set of paths. It is useful when you need a // resourceFilter is a convenience class holding a finder and a set of paths. It is useful when you need a
// self contained object capable of returning a certain set of resources. // self contained object capable of returning a certain set of resources.
type ResourceFilter struct { type resourceFilter struct {
finder *Finder finder *finder
resType string resType string
paths []string paths []string
excludePaths []string excludePaths []string
} }
// FindAll returns the union of resources found given the supplied resource type and paths. // findAll returns the union of resources found given the supplied resource type and paths.
func (f *Finder) FindAll(ctx context.Context, resType string, paths, excludePaths []string, dst interface{}) error { func (f *finder) findAll(ctx context.Context, resType string, paths, excludePaths []string, dst interface{}) error {
objs := make(map[string]types.ObjectContent) objs := make(map[string]types.ObjectContent)
for _, p := range paths { for _, p := range paths {
if err := f.findResources(ctx, resType, p, objs); err != nil { if err := f.findResources(ctx, resType, p, objs); err != nil {
@ -53,8 +53,8 @@ func (f *Finder) FindAll(ctx context.Context, resType string, paths, excludePath
return objectContentToTypedArray(objs, dst) return objectContentToTypedArray(objs, dst)
} }
// Find returns the resources matching the specified path. // find returns the resources matching the specified path.
func (f *Finder) Find(ctx context.Context, resType, path string, dst interface{}) error { func (f *finder) find(ctx context.Context, resType, path string, dst interface{}) error {
objs := make(map[string]types.ObjectContent) objs := make(map[string]types.ObjectContent)
err := f.findResources(ctx, resType, path, objs) err := f.findResources(ctx, resType, path, objs)
if err != nil { if err != nil {
@ -63,13 +63,13 @@ func (f *Finder) Find(ctx context.Context, resType, path string, dst interface{}
return objectContentToTypedArray(objs, dst) return objectContentToTypedArray(objs, dst)
} }
func (f *Finder) findResources(ctx context.Context, resType, path string, objs map[string]types.ObjectContent) error { func (f *finder) findResources(ctx context.Context, resType, path string, objs map[string]types.ObjectContent) error {
p := strings.Split(path, "/") p := strings.Split(path, "/")
flt := make([]property.Match, len(p)-1) flt := make([]property.Match, len(p)-1)
for i := 1; i < len(p); i++ { for i := 1; i < len(p); i++ {
flt[i-1] = property.Match{"name": p[i]} flt[i-1] = property.Match{"name": p[i]}
} }
err := f.descend(ctx, f.client.Client.ServiceContent.RootFolder, resType, flt, 0, objs) err := f.descend(ctx, f.client.client.ServiceContent.RootFolder, resType, flt, 0, objs)
if err != nil { if err != nil {
return err return err
} }
@ -77,7 +77,7 @@ func (f *Finder) findResources(ctx context.Context, resType, path string, objs m
return nil return nil
} }
func (f *Finder) descend(ctx context.Context, root types.ManagedObjectReference, resType string, func (f *finder) descend(ctx context.Context, root types.ManagedObjectReference, resType string,
tokens []property.Match, pos int, objs map[string]types.ObjectContent) error { tokens []property.Match, pos int, objs map[string]types.ObjectContent) error {
isLeaf := pos == len(tokens)-1 isLeaf := pos == len(tokens)-1
@ -94,7 +94,7 @@ func (f *Finder) descend(ctx context.Context, root types.ManagedObjectReference,
return nil return nil
} }
m := view.NewManager(f.client.Client.Client) m := view.NewManager(f.client.client.Client)
v, err := m.CreateContainerView(ctx, root, ct, false) v, err := m.CreateContainerView(ctx, root, ct, false)
if err != nil { if err != nil {
return err return err
@ -222,10 +222,9 @@ func objectContentToTypedArray(objs map[string]types.ObjectContent, dst interfac
return nil return nil
} }
// FindAll finds all resources matching the paths that were specified upon creation of // findAll finds all resources matching the paths that were specified upon creation of the resourceFilter.
// the ResourceFilter. func (r *resourceFilter) findAll(ctx context.Context, dst interface{}) error {
func (r *ResourceFilter) FindAll(ctx context.Context, dst interface{}) error { return r.finder.findAll(ctx, r.resType, r.paths, r.excludePaths, dst)
return r.finder.FindAll(ctx, r.resType, r.paths, r.excludePaths, dst)
} }
func matchName(f property.Match, props []types.DynamicProperty) bool { func matchName(f property.Match, props []types.DynamicProperty) bool {

View File

@ -6,47 +6,36 @@ import (
"github.com/influxdata/telegraf/selfstat" "github.com/influxdata/telegraf/selfstat"
) )
// Stopwatch is a simple helper for recording timing information, // stopwatch is a simple helper for recording timing information, such as gather times and discovery times.
// such as gather times and discovery times. type stopwatch struct {
type Stopwatch struct {
stat selfstat.Stat stat selfstat.Stat
start time.Time start time.Time
} }
// NewStopwatch creates a new StopWatch and starts measuring time // newStopwatch creates a new StopWatch and starts measuring time its creation.
// its creation. func newStopwatch(name, vCenter string) *stopwatch {
func NewStopwatch(name, vCenter string) *Stopwatch { return &stopwatch{
return &Stopwatch{
stat: selfstat.RegisterTiming("vsphere", name+"_ns", map[string]string{"vcenter": vCenter}), stat: selfstat.RegisterTiming("vsphere", name+"_ns", map[string]string{"vcenter": vCenter}),
start: time.Now(), start: time.Now(),
} }
} }
// NewStopwatchWithTags creates a new StopWatch and starts measuring time // newStopwatchWithTags creates a new StopWatch and starts measuring time its creation. Allows additional tags.
// its creation. Allows additional tags. func newStopwatchWithTags(name, vCenter string, tags map[string]string) *stopwatch {
func NewStopwatchWithTags(name, vCenter string, tags map[string]string) *Stopwatch {
tags["vcenter"] = vCenter tags["vcenter"] = vCenter
return &Stopwatch{ return &stopwatch{
stat: selfstat.RegisterTiming("vsphere", name+"_ns", tags), stat: selfstat.RegisterTiming("vsphere", name+"_ns", tags),
start: time.Now(), start: time.Now(),
} }
} }
// Stop stops a Stopwatch and records the time. // stop stops a stopwatch and records the time.
func (s *Stopwatch) Stop() { func (s *stopwatch) stop() {
s.stat.Set(time.Since(s.start).Nanoseconds()) s.stat.Set(time.Since(s.start).Nanoseconds())
} }
// SendInternalCounter is a convenience method for sending // sendInternalCounterWithTags is a convenience method for sending non-timing internal metrics. Allows additional tags
// non-timing internal metrics. func sendInternalCounterWithTags(name, vCenter string, tags map[string]string, value int64) {
func SendInternalCounter(name, vCenter string, value int64) {
s := selfstat.Register("vsphere", name, map[string]string{"vcenter": vCenter})
s.Set(value)
}
// SendInternalCounterWithTags is a convenience method for sending
// non-timing internal metrics. Allows additional tags
func SendInternalCounterWithTags(name, vCenter string, tags map[string]string, value int64) {
tags["vcenter"] = vCenter tags["vcenter"] = vCenter
s := selfstat.Register("vsphere", name, tags) s := selfstat.Register("vsphere", name, tags)
s.Set(value) s.Set(value)

View File

@ -5,25 +5,24 @@ import (
"sync" "sync"
) )
// ThrottledExecutor provides a simple mechanism for running jobs in separate // throttledExecutor provides a simple mechanism for running jobs in separate
// goroutines while limit the number of concurrent jobs running at any given time. // goroutines while limit the number of concurrent jobs running at any given time.
type ThrottledExecutor struct { type throttledExecutor struct {
limiter chan struct{} limiter chan struct{}
wg sync.WaitGroup wg sync.WaitGroup
} }
// NewThrottledExecutor creates a new ThrottlesExecutor with a specified maximum // newThrottledExecutor creates a new ThrottlesExecutor with a specified maximum
// number of concurrent jobs // number of concurrent jobs
func NewThrottledExecutor(limit int) *ThrottledExecutor { func newThrottledExecutor(limit int) *throttledExecutor {
if limit == 0 { if limit == 0 {
panic("Limit must be > 0") panic("Limit must be > 0")
} }
return &ThrottledExecutor{limiter: make(chan struct{}, limit)} return &throttledExecutor{limiter: make(chan struct{}, limit)}
} }
// Run schedules a job for execution as soon as possible while respecting the // run schedules a job for execution as soon as possible while respecting the maximum concurrency limit.
// maximum concurrency limit. func (t *throttledExecutor) run(ctx context.Context, job func()) {
func (t *ThrottledExecutor) Run(ctx context.Context, job func()) {
t.wg.Add(1) t.wg.Add(1)
go func() { go func() {
defer t.wg.Done() defer t.wg.Done()
@ -39,7 +38,7 @@ func (t *ThrottledExecutor) Run(ctx context.Context, job func()) {
}() }()
} }
// Wait blocks until all scheduled jobs have finished // wait blocks until all scheduled jobs have finished
func (t *ThrottledExecutor) Wait() { func (t *throttledExecutor) wait() {
t.wg.Wait() t.wg.Wait()
} }

View File

@ -7,25 +7,25 @@ import (
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
) )
// TSCache is a cache of timestamps used to determine the validity of datapoints // tsCache is a cache of timestamps used to determine the validity of datapoints
type TSCache struct { type tsCache struct {
ttl time.Duration ttl time.Duration
table map[string]time.Time table map[string]time.Time
mux sync.RWMutex mux sync.RWMutex
log telegraf.Logger log telegraf.Logger
} }
// NewTSCache creates a new TSCache with a specified time-to-live after which timestamps are discarded. // newTSCache creates a new tsCache with a specified time-to-live after which timestamps are discarded.
func NewTSCache(ttl time.Duration, log telegraf.Logger) *TSCache { func newTSCache(ttl time.Duration, log telegraf.Logger) *tsCache {
return &TSCache{ return &tsCache{
ttl: ttl, ttl: ttl,
table: make(map[string]time.Time), table: make(map[string]time.Time),
log: log, log: log,
} }
} }
// Purge removes timestamps that are older than the time-to-live // purge removes timestamps that are older than the time-to-live
func (t *TSCache) Purge() { func (t *tsCache) purge() {
t.mux.Lock() t.mux.Lock()
defer t.mux.Unlock() defer t.mux.Unlock()
n := 0 n := 0
@ -38,28 +38,16 @@ func (t *TSCache) Purge() {
t.log.Debugf("purged timestamp cache. %d deleted with %d remaining", n, len(t.table)) t.log.Debugf("purged timestamp cache. %d deleted with %d remaining", n, len(t.table))
} }
// IsNew returns true if the supplied timestamp for the supplied key is more recent than the // get returns a timestamp (if present)
// timestamp we have on record. func (t *tsCache) get(key, metricName string) (time.Time, bool) {
func (t *TSCache) IsNew(key, metricName string, tm time.Time) bool {
t.mux.RLock()
defer t.mux.RUnlock()
v, ok := t.table[makeKey(key, metricName)]
if !ok {
return true // We've never seen this before, so consider everything a new sample
}
return !tm.Before(v)
}
// Get returns a timestamp (if present)
func (t *TSCache) Get(key, metricName string) (time.Time, bool) {
t.mux.RLock() t.mux.RLock()
defer t.mux.RUnlock() defer t.mux.RUnlock()
ts, ok := t.table[makeKey(key, metricName)] ts, ok := t.table[makeKey(key, metricName)]
return ts, ok return ts, ok
} }
// Put updates the latest timestamp for the supplied key. // put updates the latest timestamp for the supplied key.
func (t *TSCache) Put(key, metricName string, timestamp time.Time) { func (t *tsCache) put(key, metricName string, timestamp time.Time) {
t.mux.Lock() t.mux.Lock()
defer t.mux.Unlock() defer t.mux.Unlock()
k := makeKey(key, metricName) k := makeKey(key, metricName)

View File

@ -39,36 +39,36 @@ var (
) )
// collectVsan is the entry point for vsan metrics collection // collectVsan is the entry point for vsan metrics collection
func (e *Endpoint) collectVsan(ctx context.Context, acc telegraf.Accumulator) error { func (e *endpoint) collectVsan(ctx context.Context, acc telegraf.Accumulator) error {
lower := versionLowerThan(e.apiVersion, 5, 5) lower := versionLowerThan(e.apiVersion, 5, 5)
if lower { if lower {
return fmt.Errorf("a minimum API version of 5.5 is required for vSAN. Found: %s. Skipping vCenter: %s", e.apiVersion, e.URL.Host) return fmt.Errorf("a minimum API version of 5.5 is required for vSAN. Found: %s. Skipping vCenter: %s", e.apiVersion, e.url.Host)
} }
vsanPerfMetricsName = strings.Join([]string{"vsphere", "vsan", "performance"}, e.Parent.Separator) vsanPerfMetricsName = strings.Join([]string{"vsphere", "vsan", "performance"}, e.parent.Separator)
vsanSummaryMetricsName = strings.Join([]string{"vsphere", "vsan", "summary"}, e.Parent.Separator) vsanSummaryMetricsName = strings.Join([]string{"vsphere", "vsan", "summary"}, e.parent.Separator)
res := e.resourceKinds["vsan"] res := e.resourceKinds["vsan"]
client, err := e.clientFactory.GetClient(ctx) client, err := e.clientFactory.getClient(ctx)
if err != nil { if err != nil {
return fmt.Errorf("fail to get client when collect vsan: %w", err) return fmt.Errorf("fail to get client when collect vsan: %w", err)
} }
// Create vSAN client // Create vSAN client
vimClient := client.Client.Client vimClient := client.client.Client
vsanClient := vimClient.NewServiceClient(vsanPath, vsanNamespace) vsanClient := vimClient.NewServiceClient(vsanPath, vsanNamespace)
// vSAN Metrics to collect // vSAN Metrics to collect
metrics := e.getVsanMetadata(ctx, vsanClient, res) metrics := e.getVsanMetadata(ctx, vsanClient, res)
// Iterate over all clusters, run a goroutine for each cluster // Iterate over all clusters, run a goroutine for each cluster
te := NewThrottledExecutor(e.Parent.CollectConcurrency) te := newThrottledExecutor(e.parent.CollectConcurrency)
for _, obj := range res.objects { for _, obj := range res.objects {
te.Run(ctx, func() { te.run(ctx, func() {
e.collectVsanPerCluster(ctx, obj, vimClient, vsanClient, metrics, acc) e.collectVsanPerCluster(ctx, obj, vimClient, vsanClient, metrics, acc)
}) })
} }
te.Wait() te.wait()
return nil return nil
} }
// collectVsanPerCluster is called by goroutines in collectVsan function. // collectVsanPerCluster is called by goroutines in collectVsan function.
func (e *Endpoint) collectVsanPerCluster(ctx context.Context, clusterRef *objectRef, vimClient *vim25.Client, vsanClient *soap.Client, func (e *endpoint) collectVsanPerCluster(ctx context.Context, clusterRef *objectRef, vimClient *vim25.Client, vsanClient *soap.Client,
metrics map[string]string, acc telegraf.Accumulator) { metrics map[string]string, acc telegraf.Accumulator) {
// Construct a map for cmmds // Construct a map for cmmds
cluster := object.NewClusterComputeResource(vimClient, clusterRef.ref) cluster := object.NewClusterComputeResource(vimClient, clusterRef.ref)
@ -94,8 +94,8 @@ func (e *Endpoint) collectVsanPerCluster(ctx context.Context, clusterRef *object
} }
cmmds, err := getCmmdsMap(ctx, vimClient, cluster) cmmds, err := getCmmdsMap(ctx, vimClient, cluster)
if err != nil { if err != nil {
e.Parent.Log.Errorf("[vSAN] Error while query cmmds data. Error: %s. Skipping", err) e.parent.Log.Errorf("[vSAN] Error while query cmmds data. Error: %s. Skipping", err)
cmmds = make(map[string]CmmdsEntity) cmmds = make(map[string]cmmdsEntity)
} }
if err := e.queryPerformance(ctx, vsanClient, clusterRef, metrics, cmmds, acc); err != nil { if err := e.queryPerformance(ctx, vsanClient, clusterRef, metrics, cmmds, acc); err != nil {
acc.AddError(fmt.Errorf("error querying performance metrics for cluster %s: %w", clusterRef.name, err)) acc.AddError(fmt.Errorf("error querying performance metrics for cluster %s: %w", clusterRef.name, err))
@ -114,12 +114,12 @@ func vsanEnabled(ctx context.Context, clusterObj *object.ClusterComputeResource)
// getVsanMetadata returns a string list of the entity types that will be queried. // getVsanMetadata returns a string list of the entity types that will be queried.
// e.g ["summary.health", "summary.disk-usage", "summary.resync", "performance.cluster-domclient", "performance.host-domclient"] // e.g ["summary.health", "summary.disk-usage", "summary.resync", "performance.cluster-domclient", "performance.host-domclient"]
func (e *Endpoint) getVsanMetadata(ctx context.Context, vsanClient *soap.Client, res *resourceKind) map[string]string { func (e *endpoint) getVsanMetadata(ctx context.Context, vsanClient *soap.Client, res *resourceKind) map[string]string {
metrics := make(map[string]string) metrics := make(map[string]string)
if res.simple { // Skip getting supported Entity types from vCenter. Using user defined metrics without verifying. if res.simple { // Skip getting supported Entity types from vCenter. Using user defined metrics without verifying.
for _, entity := range res.include { for _, entity := range res.include {
if strings.Contains(entity, "*") { if strings.Contains(entity, "*") {
e.Parent.Log.Infof("[vSAN] Won't use wildcard match \"*\" when vsan_metric_skip_verify = true. Skipping") e.parent.Log.Infof("[vSAN] Won't use wildcard match \"*\" when vsan_metric_skip_verify = true. Skipping")
continue continue
} }
metrics[entity] = "" metrics[entity] = ""
@ -137,7 +137,7 @@ func (e *Endpoint) getVsanMetadata(ctx context.Context, vsanClient *soap.Client,
This: perfManagerRef, This: perfManagerRef,
}) })
if err != nil { if err != nil {
e.Parent.Log.Errorf("[vSAN] Fail to get supported entities: %v. Skipping vsan performance data.", err) e.parent.Log.Errorf("[vSAN] Fail to get supported entities: %v. Skipping vsan performance data.", err)
return metrics return metrics
} }
// Use the include & exclude configuration to filter all supported performance metrics // Use the include & exclude configuration to filter all supported performance metrics
@ -149,15 +149,15 @@ func (e *Endpoint) getVsanMetadata(ctx context.Context, vsanClient *soap.Client,
return metrics return metrics
} }
// getCmmdsMap returns a map which maps a uuid to a CmmdsEntity // getCmmdsMap returns a map which maps a uuid to a cmmdsEntity
func getCmmdsMap(ctx context.Context, client *vim25.Client, clusterObj *object.ClusterComputeResource) (map[string]CmmdsEntity, error) { func getCmmdsMap(ctx context.Context, client *vim25.Client, clusterObj *object.ClusterComputeResource) (map[string]cmmdsEntity, error) {
hosts, err := clusterObj.Hosts(ctx) hosts, err := clusterObj.Hosts(ctx)
if err != nil { if err != nil {
return nil, fmt.Errorf("fail to get host: %w", err) return nil, fmt.Errorf("fail to get host: %w", err)
} }
if len(hosts) == 0 { if len(hosts) == 0 {
return make(map[string]CmmdsEntity), nil return make(map[string]cmmdsEntity), nil
} }
queries := []types.HostVsanInternalSystemCmmdsQuery{ queries := []types.HostVsanInternalSystemCmmdsQuery{
@ -186,12 +186,12 @@ func getCmmdsMap(ctx context.Context, client *vim25.Client, clusterObj *object.C
if resp == nil { if resp == nil {
return nil, errors.New("all hosts fail to query cmmds") return nil, errors.New("all hosts fail to query cmmds")
} }
var clusterCmmds Cmmds var clusterCmmds cmmds
if err := json.Unmarshal([]byte(resp.Returnval), &clusterCmmds); err != nil { if err := json.Unmarshal([]byte(resp.Returnval), &clusterCmmds); err != nil {
return nil, fmt.Errorf("fail to convert cmmds to json: %w", err) return nil, fmt.Errorf("fail to convert cmmds to json: %w", err)
} }
cmmdsMap := make(map[string]CmmdsEntity) cmmdsMap := make(map[string]cmmdsEntity)
for _, entity := range clusterCmmds.Res { for _, entity := range clusterCmmds.Res {
cmmdsMap[entity.UUID] = entity cmmdsMap[entity.UUID] = entity
} }
@ -199,17 +199,17 @@ func getCmmdsMap(ctx context.Context, client *vim25.Client, clusterObj *object.C
} }
// queryPerformance adds performance metrics to telegraf accumulator // queryPerformance adds performance metrics to telegraf accumulator
func (e *Endpoint) queryPerformance(ctx context.Context, vsanClient *soap.Client, clusterRef *objectRef, metrics map[string]string, func (e *endpoint) queryPerformance(ctx context.Context, vsanClient *soap.Client, clusterRef *objectRef, metrics map[string]string,
cmmds map[string]CmmdsEntity, acc telegraf.Accumulator) error { cmmds map[string]cmmdsEntity, acc telegraf.Accumulator) error {
end := time.Now().UTC() end := time.Now().UTC()
// We're using a fake metric key, since we only store one highwater mark per resource // We're using a fake metric key, since we only store one highwater mark per resource
start, ok := e.hwMarks.Get(hwMarksKeyPrefix+clusterRef.ref.Value, "generic") start, ok := e.hwMarks.get(hwMarksKeyPrefix+clusterRef.ref.Value, "generic")
if !ok { if !ok {
// Look back 3 sampling periods by default // Look back 3 sampling periods by default
start = end.Add(time.Duration(e.Parent.MetricLookback) * time.Duration(-e.resourceKinds["vsan"].sampling) * time.Second) start = end.Add(time.Duration(e.parent.MetricLookback) * time.Duration(-e.resourceKinds["vsan"].sampling) * time.Second)
} }
e.Parent.Log.Debugf("[vSAN] Query vsan performance for time interval: %s ~ %s", start, end) e.parent.Log.Debugf("[vSAN] Query vsan performance for time interval: %s ~ %s", start, end)
latest := start latest := start
var commonError error var commonError error
@ -235,14 +235,14 @@ func (e *Endpoint) queryPerformance(ctx context.Context, vsanClient *soap.Client
resp, err := vsanmethods.VsanPerfQueryPerf(ctx, vsanClient, &perfRequest) resp, err := vsanmethods.VsanPerfQueryPerf(ctx, vsanClient, &perfRequest)
if err != nil { if err != nil {
if err.Error() == "ServerFaultCode: NotFound" { if err.Error() == "ServerFaultCode: NotFound" {
e.Parent.Log.Errorf("[vSAN] Is vSAN performance service enabled for %s? Skipping ...", clusterRef.name) e.parent.Log.Errorf("[vSAN] Is vSAN performance service enabled for %s? Skipping ...", clusterRef.name)
commonError = err commonError = err
break break
} }
e.Parent.Log.Errorf("[vSAN] Error querying performance data for %s: %s: %s.", clusterRef.name, entityRefID, err) e.parent.Log.Errorf("[vSAN] Error querying performance data for %s: %s: %s.", clusterRef.name, entityRefID, err)
continue continue
} }
tags := populateClusterTags(make(map[string]string), clusterRef, e.URL.Host) tags := populateClusterTags(make(map[string]string), clusterRef, e.url.Host)
count := 0 count := 0
for _, em := range resp.Returnval { for _, em := range resp.Returnval {
@ -263,7 +263,7 @@ func (e *Endpoint) queryPerformance(ctx context.Context, vsanClient *soap.Client
// Parse the input string to a time.Time object // Parse the input string to a time.Time object
utcTimeStamp, err := time.Parse("2006-01-02 15:04:05", t) utcTimeStamp, err := time.Parse("2006-01-02 15:04:05", t)
if err != nil { if err != nil {
e.Parent.Log.Errorf("[vSAN] Failed to parse a timestamp: %s. Skipping", utcTimeStamp) e.parent.Log.Errorf("[vSAN] Failed to parse a timestamp: %s. Skipping", utcTimeStamp)
timeStamps = append(timeStamps, time.Time{}) timeStamps = append(timeStamps, time.Time{})
continue continue
} }
@ -282,7 +282,7 @@ func (e *Endpoint) queryPerformance(ctx context.Context, vsanClient *soap.Client
bKey := em.EntityRefId + " " + strconv.FormatInt(ts.UnixNano(), 10) bKey := em.EntityRefId + " " + strconv.FormatInt(ts.UnixNano(), 10)
bucket, found := buckets[bKey] bucket, found := buckets[bKey]
if !found { if !found {
mn := vsanPerfMetricsName + e.Parent.Separator + formattedEntityName mn := vsanPerfMetricsName + e.parent.Separator + formattedEntityName
bucket = metricEntry{name: mn, ts: ts, fields: make(map[string]interface{}), tags: tags} bucket = metricEntry{name: mn, ts: ts, fields: make(map[string]interface{}), tags: tags}
buckets[bKey] = bucket buckets[bKey] = bucket
} }
@ -304,12 +304,12 @@ func (e *Endpoint) queryPerformance(ctx context.Context, vsanClient *soap.Client
count += len(buckets) count += len(buckets)
} }
} }
e.hwMarks.Put(hwMarksKeyPrefix+clusterRef.ref.Value, "generic", latest) e.hwMarks.put(hwMarksKeyPrefix+clusterRef.ref.Value, "generic", latest)
return commonError return commonError
} }
// queryDiskUsage adds 'FreeCapacityB' and 'TotalCapacityB' metrics to telegraf accumulator // queryDiskUsage adds 'FreeCapacityB' and 'TotalCapacityB' metrics to telegraf accumulator
func (e *Endpoint) queryDiskUsage(ctx context.Context, vsanClient *soap.Client, clusterRef *objectRef, acc telegraf.Accumulator) error { func (e *endpoint) queryDiskUsage(ctx context.Context, vsanClient *soap.Client, clusterRef *objectRef, acc telegraf.Accumulator) error {
spaceManagerRef := types.ManagedObjectReference{ spaceManagerRef := types.ManagedObjectReference{
Type: "VsanSpaceReportSystem", Type: "VsanSpaceReportSystem",
Value: "vsan-cluster-space-report-system", Value: "vsan-cluster-space-report-system",
@ -326,13 +326,13 @@ func (e *Endpoint) queryDiskUsage(ctx context.Context, vsanClient *soap.Client,
"free_capacity_byte": resp.Returnval.FreeCapacityB, "free_capacity_byte": resp.Returnval.FreeCapacityB,
"total_capacity_byte": resp.Returnval.TotalCapacityB, "total_capacity_byte": resp.Returnval.TotalCapacityB,
} }
tags := populateClusterTags(make(map[string]string), clusterRef, e.URL.Host) tags := populateClusterTags(make(map[string]string), clusterRef, e.url.Host)
acc.AddFields(vsanSummaryMetricsName, fields, tags) acc.AddFields(vsanSummaryMetricsName, fields, tags)
return nil return nil
} }
// queryDiskUsage adds 'OverallHealth' metric to telegraf accumulator // queryHealthSummary adds 'OverallHealth' metric to telegraf accumulator
func (e *Endpoint) queryHealthSummary(ctx context.Context, vsanClient *soap.Client, clusterRef *objectRef, acc telegraf.Accumulator) error { func (e *endpoint) queryHealthSummary(ctx context.Context, vsanClient *soap.Client, clusterRef *objectRef, acc telegraf.Accumulator) error {
healthSystemRef := types.ManagedObjectReference{ healthSystemRef := types.ManagedObjectReference{
Type: "VsanVcClusterHealthSystem", Type: "VsanVcClusterHealthSystem",
Value: "vsan-cluster-health-system", Value: "vsan-cluster-health-system",
@ -354,17 +354,17 @@ func (e *Endpoint) queryHealthSummary(ctx context.Context, vsanClient *soap.Clie
if val, ok := healthMap[healthStr]; ok { if val, ok := healthMap[healthStr]; ok {
fields["overall_health"] = val fields["overall_health"] = val
} }
tags := populateClusterTags(make(map[string]string), clusterRef, e.URL.Host) tags := populateClusterTags(make(map[string]string), clusterRef, e.url.Host)
acc.AddFields(vsanSummaryMetricsName, fields, tags) acc.AddFields(vsanSummaryMetricsName, fields, tags)
return nil return nil
} }
// queryResyncSummary adds resync information to accumulator // queryResyncSummary adds resync information to accumulator
func (e *Endpoint) queryResyncSummary(ctx context.Context, vsanClient *soap.Client, clusterObj *object.ClusterComputeResource, func (e *endpoint) queryResyncSummary(ctx context.Context, vsanClient *soap.Client, clusterObj *object.ClusterComputeResource,
clusterRef *objectRef, acc telegraf.Accumulator) error { clusterRef *objectRef, acc telegraf.Accumulator) error {
if lower := versionLowerThan(e.apiVersion, 6, 7); lower { if lower := versionLowerThan(e.apiVersion, 6, 7); lower {
e.Parent.Log.Infof("I! [inputs.vsphere][vSAN] Minimum API Version 6.7 required for resync summary. Found: %s. Skipping VCenter: %s", e.parent.Log.Infof("I! [inputs.vsphere][vSAN] Minimum API Version 6.7 required for resync summary. Found: %s. Skipping VCenter: %s",
e.apiVersion, e.URL.Host) e.apiVersion, e.url.Host)
return nil return nil
} }
hosts, err := clusterObj.Hosts(ctx) hosts, err := clusterObj.Hosts(ctx)
@ -377,7 +377,7 @@ func (e *Endpoint) queryResyncSummary(ctx context.Context, vsanClient *soap.Clie
hostRefValue := hosts[0].Reference().Value hostRefValue := hosts[0].Reference().Value
hostRefValueParts := strings.Split(hostRefValue, "-") hostRefValueParts := strings.Split(hostRefValue, "-")
if len(hostRefValueParts) != 2 { if len(hostRefValueParts) != 2 {
e.Parent.Log.Errorf("[vSAN] Host reference value does not match expected pattern: host-<num>. Actual Value %s", hostRefValue) e.parent.Log.Errorf("[vSAN] Host reference value does not match expected pattern: host-<num>. Actual Value %s", hostRefValue)
return err return err
} }
vsanSystemEx := types.ManagedObjectReference{ vsanSystemEx := types.ManagedObjectReference{
@ -401,7 +401,7 @@ func (e *Endpoint) queryResyncSummary(ctx context.Context, vsanClient *soap.Clie
fields["total_bytes_to_sync"] = resp.Returnval.TotalBytesToSync fields["total_bytes_to_sync"] = resp.Returnval.TotalBytesToSync
fields["total_objects_to_sync"] = resp.Returnval.TotalObjectsToSync fields["total_objects_to_sync"] = resp.Returnval.TotalObjectsToSync
fields["total_recovery_eta"] = resp.Returnval.TotalRecoveryETA fields["total_recovery_eta"] = resp.Returnval.TotalRecoveryETA
tags := populateClusterTags(make(map[string]string), clusterRef, e.URL.Host) tags := populateClusterTags(make(map[string]string), clusterRef, e.url.Host)
acc.AddFields(vsanSummaryMetricsName, fields, tags) acc.AddFields(vsanSummaryMetricsName, fields, tags)
return nil return nil
} }
@ -422,7 +422,7 @@ func populateClusterTags(tags map[string]string, clusterRef *objectRef, vcenter
} }
// populateCMMDSTags takes in a tag map, makes a copy, adds more tags using a cmmds map and returns the copy. // populateCMMDSTags takes in a tag map, makes a copy, adds more tags using a cmmds map and returns the copy.
func populateCMMDSTags(tags map[string]string, entityName, uuid string, cmmds map[string]CmmdsEntity) map[string]string { func populateCMMDSTags(tags map[string]string, entityName, uuid string, cmmds map[string]cmmdsEntity) map[string]string {
newTags := make(map[string]string) newTags := make(map[string]string)
// deep copy // deep copy
for k, v := range tags { for k, v := range tags {
@ -524,18 +524,18 @@ func versionLowerThan(current string, major, minor int) bool {
return true return true
} }
type CmmdsEntity struct { type cmmdsEntity struct {
UUID string `json:"uuid"` UUID string `json:"uuid"`
Owner string `json:"owner"` // ESXi UUID Owner string `json:"owner"` // ESXi UUID
Type string `json:"type"` Type string `json:"type"`
Content CmmdsContent `json:"content"` Content cmmdsContent `json:"content"`
} }
type Cmmds struct { type cmmds struct {
Res []CmmdsEntity `json:"result"` Res []cmmdsEntity `json:"result"`
} }
type CmmdsContent struct { type cmmdsContent struct {
Hostname string `json:"hostname"` Hostname string `json:"hostname"`
IsSsd float64 `json:"isSsd"` IsSsd float64 `json:"isSsd"`
SsdUUID string `json:"ssdUuid"` SsdUUID string `json:"ssdUuid"`

View File

@ -20,8 +20,6 @@ import (
//go:embed sample.conf //go:embed sample.conf
var sampleConfig string var sampleConfig string
// VSphere is the top level type for the vSphere input plugin. It contains all the configuration
// and a list of connected vSphere endpoints
type VSphere struct { type VSphere struct {
Vcenters []string `toml:"vcenters"` Vcenters []string `toml:"vcenters"`
Username config.Secret `toml:"username"` Username config.Secret `toml:"username"`
@ -81,7 +79,7 @@ type VSphere struct {
tls.ClientConfig // Mix in the TLS/SSL goodness from core tls.ClientConfig // Mix in the TLS/SSL goodness from core
proxy.HTTPProxy proxy.HTTPProxy
endpoints []*Endpoint endpoints []*endpoint
cancel context.CancelFunc cancel context.CancelFunc
} }
@ -89,21 +87,19 @@ func (*VSphere) SampleConfig() string {
return sampleConfig return sampleConfig
} }
// Start is called from telegraf core when a plugin is started and allows it to
// perform initialization tasks.
func (v *VSphere) Start(_ telegraf.Accumulator) error { func (v *VSphere) Start(_ telegraf.Accumulator) error {
v.Log.Info("Starting plugin") v.Log.Info("Starting plugin")
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
v.cancel = cancel v.cancel = cancel
// Create endpoints, one for each vCenter we're monitoring // Create endpoints, one for each vCenter we're monitoring
v.endpoints = make([]*Endpoint, 0, len(v.Vcenters)) v.endpoints = make([]*endpoint, 0, len(v.Vcenters))
for _, rawURL := range v.Vcenters { for _, rawURL := range v.Vcenters {
u, err := soap.ParseURL(rawURL) u, err := soap.ParseURL(rawURL)
if err != nil { if err != nil {
return err return err
} }
ep, err := NewEndpoint(ctx, v, u, v.Log) ep, err := newEndpoint(ctx, v, u, v.Log)
if err != nil { if err != nil {
return err return err
} }
@ -112,36 +108,13 @@ func (v *VSphere) Start(_ telegraf.Accumulator) error {
return nil return nil
} }
// Stop is called from telegraf core when a plugin is stopped and allows it to
// perform shutdown tasks.
func (v *VSphere) Stop() {
v.Log.Info("Stopping plugin")
v.cancel()
// Wait for all endpoints to finish. No need to wait for
// Gather() to finish here, since it Stop() will only be called
// after the last Gather() has finished. We do, however, need to
// wait for any discovery to complete by trying to grab the
// "busy" mutex.
for _, ep := range v.endpoints {
v.Log.Debugf("Waiting for endpoint %q to finish", ep.URL.Host)
func() {
ep.busy.Lock() // Wait until discovery is finished
defer ep.busy.Unlock()
ep.Close()
}()
}
}
// Gather is the main data collection function called by the Telegraf core. It performs all
// the data collection and writes all metrics into the Accumulator passed as an argument.
func (v *VSphere) Gather(acc telegraf.Accumulator) error { func (v *VSphere) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup var wg sync.WaitGroup
for _, ep := range v.endpoints { for _, ep := range v.endpoints {
wg.Add(1) wg.Add(1)
go func(endpoint *Endpoint) { go func(endpoint *endpoint) {
defer wg.Done() defer wg.Done()
err := endpoint.Collect(context.Background(), acc) err := endpoint.collect(context.Background(), acc)
if errors.Is(err, context.Canceled) { if errors.Is(err, context.Canceled) {
// No need to signal errors if we were merely canceled. // No need to signal errors if we were merely canceled.
err = nil err = nil
@ -156,6 +129,25 @@ func (v *VSphere) Gather(acc telegraf.Accumulator) error {
return nil return nil
} }
func (v *VSphere) Stop() {
v.Log.Info("Stopping plugin")
v.cancel()
// Wait for all endpoints to finish. No need to wait for
// Gather() to finish here, since it Stop() will only be called
// after the last Gather() has finished. We do, however, need to
// wait for any discovery to complete by trying to grab the
// "busy" mutex.
for _, ep := range v.endpoints {
v.Log.Debugf("Waiting for endpoint %q to finish", ep.url.Host)
func() {
ep.busy.Lock() // Wait until discovery is finished
defer ep.busy.Unlock()
ep.close()
}()
}
}
func init() { func init() {
inputs.Add("vsphere", func() telegraf.Input { inputs.Add("vsphere", func() telegraf.Input {
return &VSphere{ return &VSphere{

View File

@ -172,7 +172,7 @@ func testAlignUniform(t *testing.T, n int) {
}) })
values = append(values, 1) values = append(values, 1)
} }
e := Endpoint{log: testutil.Logger{}} e := endpoint{log: testutil.Logger{}}
newInfo, newValues := e.alignSamples(info, values, 60*time.Second) newInfo, newValues := e.alignSamples(info, values, 60*time.Second)
require.Len(t, newInfo, n/3, "Aligned infos have wrong size") require.Len(t, newInfo, n/3, "Aligned infos have wrong size")
require.Len(t, newValues, n/3, "Aligned values have wrong size") require.Len(t, newValues, n/3, "Aligned values have wrong size")
@ -198,7 +198,7 @@ func TestAlignMetrics(t *testing.T) {
}) })
values = append(values, int64(i%3+1)) values = append(values, int64(i%3+1))
} }
e := Endpoint{log: testutil.Logger{}} e := endpoint{log: testutil.Logger{}}
newInfo, newValues := e.alignSamples(info, values, 60*time.Second) newInfo, newValues := e.alignSamples(info, values, 60*time.Second)
require.Len(t, newInfo, n/3, "Aligned infos have wrong size") require.Len(t, newInfo, n/3, "Aligned infos have wrong size")
require.Len(t, newValues, n/3, "Aligned values have wrong size") require.Len(t, newValues, n/3, "Aligned values have wrong size")
@ -225,11 +225,11 @@ func TestMaxQuery(t *testing.T) {
v := defaultVSphere() v := defaultVSphere()
v.MaxQueryMetrics = 256 v.MaxQueryMetrics = 256
ctx := context.Background() ctx := context.Background()
c, err := NewClient(ctx, s.URL, v) c, err := newClient(ctx, s.URL, v)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, 256, v.MaxQueryMetrics) require.Equal(t, 256, v.MaxQueryMetrics)
om := object.NewOptionManager(c.Client.Client, *c.Client.Client.ServiceContent.Setting) om := object.NewOptionManager(c.client.Client, *c.client.Client.ServiceContent.Setting)
err = om.Update(ctx, []types.BaseOptionValue{&types.OptionValue{ err = om.Update(ctx, []types.BaseOptionValue{&types.OptionValue{
Key: "config.vpxd.stats.maxQueryMetrics", Key: "config.vpxd.stats.maxQueryMetrics",
Value: "42", Value: "42",
@ -238,17 +238,17 @@ func TestMaxQuery(t *testing.T) {
v.MaxQueryMetrics = 256 v.MaxQueryMetrics = 256
ctx = context.Background() ctx = context.Background()
c2, err := NewClient(ctx, s.URL, v) c2, err := newClient(ctx, s.URL, v)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, 42, v.MaxQueryMetrics) require.Equal(t, 42, v.MaxQueryMetrics)
c.close() c.close()
c2.close() c2.close()
} }
func testLookupVM(ctx context.Context, t *testing.T, f *Finder, path string, expected int, expectedName string) { func testLookupVM(ctx context.Context, t *testing.T, f *finder, path string, expected int, expectedName string) {
poweredOn := types.VirtualMachinePowerState("poweredOn") poweredOn := types.VirtualMachinePowerState("poweredOn")
var vm []mo.VirtualMachine var vm []mo.VirtualMachine
err := f.Find(ctx, "VirtualMachine", path, &vm) err := f.find(ctx, "VirtualMachine", path, &vm)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, vm, expected) require.Len(t, vm, expected)
if expectedName != "" { if expectedName != "" {
@ -273,37 +273,37 @@ func TestFinder(t *testing.T) {
v := defaultVSphere() v := defaultVSphere()
ctx := context.Background() ctx := context.Background()
c, err := NewClient(ctx, s.URL, v) c, err := newClient(ctx, s.URL, v)
require.NoError(t, err) require.NoError(t, err)
f := Finder{c} f := finder{c}
var dc []mo.Datacenter var dc []mo.Datacenter
err = f.Find(ctx, "Datacenter", "/DC0", &dc) err = f.find(ctx, "Datacenter", "/DC0", &dc)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, dc, 1) require.Len(t, dc, 1)
require.Equal(t, "DC0", dc[0].Name) require.Equal(t, "DC0", dc[0].Name)
var host []mo.HostSystem var host []mo.HostSystem
err = f.Find(ctx, "HostSystem", "/DC0/host/DC0_H0/DC0_H0", &host) err = f.find(ctx, "HostSystem", "/DC0/host/DC0_H0/DC0_H0", &host)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, host, 1) require.Len(t, host, 1)
require.Equal(t, "DC0_H0", host[0].Name) require.Equal(t, "DC0_H0", host[0].Name)
host = make([]mo.HostSystem, 0) host = make([]mo.HostSystem, 0)
err = f.Find(ctx, "HostSystem", "/DC0/host/DC0_C0/DC0_C0_H0", &host) err = f.find(ctx, "HostSystem", "/DC0/host/DC0_C0/DC0_C0_H0", &host)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, host, 1) require.Len(t, host, 1)
require.Equal(t, "DC0_C0_H0", host[0].Name) require.Equal(t, "DC0_C0_H0", host[0].Name)
resourcepool := make([]mo.ResourcePool, 0) resourcepool := make([]mo.ResourcePool, 0)
err = f.Find(ctx, "ResourcePool", "/DC0/host/DC0_C0/Resources/DC0_C0_RP0", &resourcepool) err = f.find(ctx, "ResourcePool", "/DC0/host/DC0_C0/Resources/DC0_C0_RP0", &resourcepool)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, host, 1) require.Len(t, host, 1)
require.Equal(t, "DC0_C0_H0", host[0].Name) require.Equal(t, "DC0_C0_H0", host[0].Name)
host = make([]mo.HostSystem, 0) host = make([]mo.HostSystem, 0)
err = f.Find(ctx, "HostSystem", "/DC0/host/DC0_C0/*", &host) err = f.find(ctx, "HostSystem", "/DC0/host/DC0_C0/*", &host)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, host, 3) require.Len(t, host, 3)
@ -322,58 +322,58 @@ func TestFinder(t *testing.T) {
testLookupVM(ctx, t, &f, "/*/host/**/*DC*/*/*DC*", 4, "") testLookupVM(ctx, t, &f, "/*/host/**/*DC*/*/*DC*", 4, "")
vm = make([]mo.VirtualMachine, 0) vm = make([]mo.VirtualMachine, 0)
err = f.FindAll(ctx, "VirtualMachine", []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"}, nil, &vm) err = f.findAll(ctx, "VirtualMachine", []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"}, nil, &vm)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, vm, 4) require.Len(t, vm, 4)
rf := ResourceFilter{ rf := resourceFilter{
finder: &f, finder: &f,
paths: []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"}, paths: []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"},
excludePaths: []string{"/DC0/vm/DC0_H0_VM0"}, excludePaths: []string{"/DC0/vm/DC0_H0_VM0"},
resType: "VirtualMachine", resType: "VirtualMachine",
} }
vm = make([]mo.VirtualMachine, 0) vm = make([]mo.VirtualMachine, 0)
require.NoError(t, rf.FindAll(ctx, &vm)) require.NoError(t, rf.findAll(ctx, &vm))
require.Len(t, vm, 3) require.Len(t, vm, 3)
rf = ResourceFilter{ rf = resourceFilter{
finder: &f, finder: &f,
paths: []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"}, paths: []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"},
excludePaths: []string{"/**"}, excludePaths: []string{"/**"},
resType: "VirtualMachine", resType: "VirtualMachine",
} }
vm = make([]mo.VirtualMachine, 0) vm = make([]mo.VirtualMachine, 0)
require.NoError(t, rf.FindAll(ctx, &vm)) require.NoError(t, rf.findAll(ctx, &vm))
require.Empty(t, vm) require.Empty(t, vm)
rf = ResourceFilter{ rf = resourceFilter{
finder: &f, finder: &f,
paths: []string{"/**"}, paths: []string{"/**"},
excludePaths: []string{"/**"}, excludePaths: []string{"/**"},
resType: "VirtualMachine", resType: "VirtualMachine",
} }
vm = make([]mo.VirtualMachine, 0) vm = make([]mo.VirtualMachine, 0)
require.NoError(t, rf.FindAll(ctx, &vm)) require.NoError(t, rf.findAll(ctx, &vm))
require.Empty(t, vm) require.Empty(t, vm)
rf = ResourceFilter{ rf = resourceFilter{
finder: &f, finder: &f,
paths: []string{"/**"}, paths: []string{"/**"},
excludePaths: []string{"/this won't match anything"}, excludePaths: []string{"/this won't match anything"},
resType: "VirtualMachine", resType: "VirtualMachine",
} }
vm = make([]mo.VirtualMachine, 0) vm = make([]mo.VirtualMachine, 0)
require.NoError(t, rf.FindAll(ctx, &vm)) require.NoError(t, rf.findAll(ctx, &vm))
require.Len(t, vm, 8) require.Len(t, vm, 8)
rf = ResourceFilter{ rf = resourceFilter{
finder: &f, finder: &f,
paths: []string{"/**"}, paths: []string{"/**"},
excludePaths: []string{"/**/*VM0"}, excludePaths: []string{"/**/*VM0"},
resType: "VirtualMachine", resType: "VirtualMachine",
} }
vm = make([]mo.VirtualMachine, 0) vm = make([]mo.VirtualMachine, 0)
require.NoError(t, rf.FindAll(ctx, &vm)) require.NoError(t, rf.findAll(ctx, &vm))
require.Len(t, vm, 4) require.Len(t, vm, 4)
} }
@ -391,19 +391,19 @@ func TestFolders(t *testing.T) {
v := defaultVSphere() v := defaultVSphere()
c, err := NewClient(ctx, s.URL, v) c, err := newClient(ctx, s.URL, v)
require.NoError(t, err) require.NoError(t, err)
f := Finder{c} f := finder{c}
var folder []mo.Folder var folder []mo.Folder
err = f.Find(ctx, "Folder", "/F0", &folder) err = f.find(ctx, "Folder", "/F0", &folder)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, folder, 1) require.Len(t, folder, 1)
require.Equal(t, "F0", folder[0].Name) require.Equal(t, "F0", folder[0].Name)
var dc []mo.Datacenter var dc []mo.Datacenter
err = f.Find(ctx, "Datacenter", "/F0/DC1", &dc) err = f.find(ctx, "Datacenter", "/F0/DC1", &dc)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, dc, 1) require.Len(t, dc, 1)
require.Equal(t, "DC1", dc[0].Name) require.Equal(t, "DC1", dc[0].Name)
@ -422,16 +422,16 @@ func TestVsanCmmds(t *testing.T) {
v := defaultVSphere() v := defaultVSphere()
ctx := context.Background() ctx := context.Background()
c, err := NewClient(ctx, s.URL, v) c, err := newClient(ctx, s.URL, v)
require.NoError(t, err) require.NoError(t, err)
f := Finder{c} f := finder{c}
var clusters []mo.ClusterComputeResource var clusters []mo.ClusterComputeResource
err = f.FindAll(ctx, "ClusterComputeResource", []string{"/**"}, nil, &clusters) err = f.findAll(ctx, "ClusterComputeResource", []string{"/**"}, nil, &clusters)
require.NoError(t, err) require.NoError(t, err)
clusterObj := object.NewClusterComputeResource(c.Client.Client, clusters[0].Reference()) clusterObj := object.NewClusterComputeResource(c.client.Client, clusters[0].Reference())
_, err = getCmmdsMap(ctx, c.Client.Client, clusterObj) _, err = getCmmdsMap(ctx, c.client.Client, clusterObj)
require.Error(t, err) require.Error(t, err)
} }
@ -443,11 +443,11 @@ func TestVsanTags(t *testing.T) {
ssd := "52173131-3384-bb63-4ef8-c00b0ce7e3e7" ssd := "52173131-3384-bb63-4ef8-c00b0ce7e3e7"
hostname := "sc2-hs1-b2801.eng.vmware.com" hostname := "sc2-hs1-b2801.eng.vmware.com"
devName := "naa.55cd2e414d82c815:2" devName := "naa.55cd2e414d82c815:2"
var cmmds = map[string]CmmdsEntity{ var cmmds = map[string]cmmdsEntity{
nvmeDisk: {UUID: nvmeDisk, Type: "DISK_CAPACITY_TIER", Owner: host, Content: CmmdsContent{DevName: devName}}, nvmeDisk: {UUID: nvmeDisk, Type: "DISK_CAPACITY_TIER", Owner: host, Content: cmmdsContent{DevName: devName}},
disk: {UUID: disk, Type: "DISK", Owner: host, Content: CmmdsContent{DevName: devName, IsSsd: 1.}}, disk: {UUID: disk, Type: "DISK", Owner: host, Content: cmmdsContent{DevName: devName, IsSsd: 1.}},
ssdDisk: {UUID: ssdDisk, Type: "DISK", Owner: host, Content: CmmdsContent{DevName: devName, IsSsd: 0., SsdUUID: ssd}}, ssdDisk: {UUID: ssdDisk, Type: "DISK", Owner: host, Content: cmmdsContent{DevName: devName, IsSsd: 0., SsdUUID: ssd}},
host: {UUID: host, Type: "HOSTNAME", Owner: host, Content: CmmdsContent{Hostname: hostname}}, host: {UUID: host, Type: "HOSTNAME", Owner: host, Content: cmmdsContent{Hostname: hostname}},
} }
tags := populateCMMDSTags(make(map[string]string), "capacity-disk", disk, cmmds) tags := populateCMMDSTags(make(map[string]string), "capacity-disk", disk, cmmds)
require.Len(t, tags, 2) require.Len(t, tags, 2)
@ -472,13 +472,13 @@ func TestDisconnectedServerBehavior(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
v := defaultVSphere() v := defaultVSphere()
v.DisconnectedServersBehavior = "error" v.DisconnectedServersBehavior = "error"
_, err = NewEndpoint(context.Background(), v, u, v.Log) _, err = newEndpoint(context.Background(), v, u, v.Log)
require.Error(t, err) require.Error(t, err)
v.DisconnectedServersBehavior = "ignore" v.DisconnectedServersBehavior = "ignore"
_, err = NewEndpoint(context.Background(), v, u, v.Log) _, err = newEndpoint(context.Background(), v, u, v.Log)
require.NoError(t, err) require.NoError(t, err)
v.DisconnectedServersBehavior = "something else" v.DisconnectedServersBehavior = "something else"
_, err = NewEndpoint(context.Background(), v, u, v.Log) _, err = newEndpoint(context.Background(), v, u, v.Log)
require.Error(t, err) require.Error(t, err)
require.Equal(t, `"something else" is not a valid value for disconnected_servers_behavior`, err.Error()) require.Equal(t, `"something else" is not a valid value for disconnected_servers_behavior`, err.Error())
} }
@ -520,7 +520,7 @@ func testCollection(t *testing.T, excludeClusters bool) {
require.Emptyf(t, acc.Errors, "Errors found: %s", acc.Errors) require.Emptyf(t, acc.Errors, "Errors found: %s", acc.Errors)
require.NotEmpty(t, acc.Metrics, "No metrics were collected") require.NotEmpty(t, acc.Metrics, "No metrics were collected")
cache := make(map[string]string) cache := make(map[string]string)
client, err := v.endpoints[0].clientFactory.GetClient(context.Background()) client, err := v.endpoints[0].clientFactory.getClient(context.Background())
require.NoError(t, err) require.NoError(t, err)
hostCache := make(map[string]string) hostCache := make(map[string]string)
for _, m := range acc.Metrics { for _, m := range acc.Metrics {
@ -532,9 +532,9 @@ func testCollection(t *testing.T, excludeClusters bool) {
hostMoid, ok := hostCache[hostName] hostMoid, ok := hostCache[hostName]
if !ok { if !ok {
// We have to follow the host parent path to locate a cluster. Look up the host! // We have to follow the host parent path to locate a cluster. Look up the host!
finder := Finder{client} finder := finder{client}
var hosts []mo.HostSystem var hosts []mo.HostSystem
err := finder.Find(context.Background(), "HostSystem", "/**/"+hostName, &hosts) err := finder.find(context.Background(), "HostSystem", "/**/"+hostName, &hosts)
require.NoError(t, err) require.NoError(t, err)
require.NotEmpty(t, hosts) require.NotEmpty(t, hosts)
hostMoid = hosts[0].Reference().Value hostMoid = hosts[0].Reference().Value
@ -558,7 +558,7 @@ func testCollection(t *testing.T, excludeClusters bool) {
require.Empty(t, mustHaveMetrics, "Some metrics were not found") require.Empty(t, mustHaveMetrics, "Some metrics were not found")
} }
func isInCluster(v *VSphere, client *Client, cache map[string]string, resourceKind, moid string) bool { func isInCluster(v *VSphere, client *client, cache map[string]string, resourceKind, moid string) bool {
ctx := context.Background() ctx := context.Background()
ref := types.ManagedObjectReference{ ref := types.ManagedObjectReference{
Type: resourceKind, Type: resourceKind,