chore: Fix linter findings for `revive:exported` in `plugins/inputs/[t-v]*` (#16408)
parent 7a65a4d355
commit e57f48f608
@@ -19,6 +19,9 @@ import (
    "github.com/influxdata/telegraf/plugins/inputs"
)

//go:embed sample.conf
var sampleConfig string

type Tacacs struct {
    Servers  []string      `toml:"servers"`
    Username config.Secret `toml:"username"`

@@ -31,9 +34,6 @@ type Tacacs struct {
    authStart tacplus.AuthenStart
}

//go:embed sample.conf
var sampleConfig string

func (*Tacacs) SampleConfig() string {
    return sampleConfig
}

@@ -74,7 +74,22 @@ func (t *Tacacs) Init() error {
    return nil
}

func AuthenReplyToString(code uint8) string {
func (t *Tacacs) Gather(acc telegraf.Accumulator) error {
    var wg sync.WaitGroup

    for idx := range t.clients {
        wg.Add(1)
        go func(client *tacplus.Client) {
            defer wg.Done()
            acc.AddError(t.pollServer(acc, client))
        }(&t.clients[idx])
    }

    wg.Wait()
    return nil
}

func authenReplyToString(code uint8) string {
    switch code {
    case tacplus.AuthenStatusPass:
        return `AuthenStatusPass`

@@ -96,21 +111,6 @@ func AuthenReplyToString(code uint8) string {
    return "AuthenStatusUnknown(" + strconv.FormatUint(uint64(code), 10) + ")"
}

func (t *Tacacs) Gather(acc telegraf.Accumulator) error {
    var wg sync.WaitGroup

    for idx := range t.clients {
        wg.Add(1)
        go func(client *tacplus.Client) {
            defer wg.Done()
            acc.AddError(t.pollServer(acc, client))
        }(&t.clients[idx])
    }

    wg.Wait()
    return nil
}

func (t *Tacacs) pollServer(acc telegraf.Accumulator, client *tacplus.Client) error {
    // Create the fields for this metric
    tags := map[string]string{"source": client.Addr}

@@ -157,7 +157,7 @@ func (t *Tacacs) pollServer(acc telegraf.Accumulator, client *tacplus.Client) er
    defer session.Close()
    if reply.Status != tacplus.AuthenStatusGetUser {
        fields["responsetime_ms"] = time.Since(startTime).Milliseconds()
        fields["response_status"] = AuthenReplyToString(reply.Status)
        fields["response_status"] = authenReplyToString(reply.Status)
        acc.AddFields("tacacs", fields, tags)
        return nil
    }

@@ -174,7 +174,7 @@ func (t *Tacacs) pollServer(acc telegraf.Accumulator, client *tacplus.Client) er
    }
    if reply.Status != tacplus.AuthenStatusGetPass {
        fields["responsetime_ms"] = time.Since(startTime).Milliseconds()
        fields["response_status"] = AuthenReplyToString(reply.Status)
        fields["response_status"] = authenReplyToString(reply.Status)
        acc.AddFields("tacacs", fields, tags)
        return nil
    }

@@ -191,13 +191,13 @@ func (t *Tacacs) pollServer(acc telegraf.Accumulator, client *tacplus.Client) er
    }
    if reply.Status != tacplus.AuthenStatusPass {
        fields["responsetime_ms"] = time.Since(startTime).Milliseconds()
        fields["response_status"] = AuthenReplyToString(reply.Status)
        fields["response_status"] = authenReplyToString(reply.Status)
        acc.AddFields("tacacs", fields, tags)
        return nil
    }

    fields["responsetime_ms"] = time.Since(startTime).Milliseconds()
    fields["response_status"] = AuthenReplyToString(reply.Status)
    fields["response_status"] = authenReplyToString(reply.Status)
    acc.AddFields("tacacs", fields, tags)
    return nil
}

@@ -10,34 +10,103 @@ import (
    "github.com/influxdata/telegraf/config"
)

// Indicates relation to the multiline event: previous or next
type MultilineMatchWhichLine int
const (
    // previous => Append current line to previous line
    previous multilineMatchWhichLine = iota
    // next => next line will be appended to current line
    next
)

type Multiline struct {
    config *MultilineConfig
// Indicates relation to the multiline event: previous or next
type multilineMatchWhichLine int

type multiline struct {
    config        *multilineConfig
    enabled       bool
    patternRegexp *regexp.Regexp
    quote         byte
    inQuote       bool
}

type MultilineConfig struct {
type multilineConfig struct {
    Pattern         string                  `toml:"pattern"`
    MatchWhichLine  MultilineMatchWhichLine `toml:"match_which_line"`
    MatchWhichLine  multilineMatchWhichLine `toml:"match_which_line"`
    InvertMatch     bool                    `toml:"invert_match"`
    PreserveNewline bool                    `toml:"preserve_newline"`
    Quotation       string                  `toml:"quotation"`
    Timeout         *config.Duration        `toml:"timeout"`
}

const (
    // Previous => Append current line to previous line
    Previous MultilineMatchWhichLine = iota
    // Next => Next line will be appended to current line
    Next
)
func (m *multiline) isEnabled() bool {
    return m.enabled
}

func (m *MultilineConfig) NewMultiline() (*Multiline, error) {
func (m *multiline) processLine(text string, buffer *bytes.Buffer) string {
    if m.matchQuotation(text) || m.matchString(text) {
        // Restore the newline removed by tail's scanner
        if buffer.Len() > 0 && m.config.PreserveNewline {
            buffer.WriteString("\n")
        }
        buffer.WriteString(text)
        return ""
    }

    if m.config.MatchWhichLine == previous {
        previousText := buffer.String()
        buffer.Reset()
        buffer.WriteString(text)
        text = previousText
    } else {
        // next
        if buffer.Len() > 0 {
            if m.config.PreserveNewline {
                buffer.WriteString("\n")
            }
            buffer.WriteString(text)
            text = buffer.String()
            buffer.Reset()
        }
    }

    return text
}

func (m *multiline) matchQuotation(text string) bool {
    if m.config.Quotation == "ignore" {
        return false
    }
    escaped := 0
    count := 0
    for i := 0; i < len(text); i++ {
        if text[i] == '\\' {
            escaped++
            continue
        }

        // If we do encounter a backslash-quote combination, we interpret this
        // as an escaped-quoted and should not count the quote. However,
        // backslash-backslash combinations (or any even number of backslashes)
        // are interpreted as a literal backslash not escaping the quote.
        if text[i] == m.quote && escaped%2 == 0 {
            count++
        }
        // If we encounter any non-quote, non-backslash character we can
        // safely reset the escape state.
        escaped = 0
    }
    even := count%2 == 0
    m.inQuote = (m.inQuote && even) || (!m.inQuote && !even)
    return m.inQuote
}

func (m *multiline) matchString(text string) bool {
    if m.patternRegexp != nil {
        return m.patternRegexp.MatchString(text) != m.config.InvertMatch
    }
    return false
}

func (m *multilineConfig) newMultiline() (*multiline, error) {
    var r *regexp.Regexp

    if m.Pattern != "" {

@@ -67,7 +136,7 @@ func (m *MultilineConfig) NewMultiline() (*Multiline, error) {
        m.Timeout = &d
    }

    return &Multiline{
    return &multiline{
        config:        m,
        enabled:       enabled,
        patternRegexp: r,

@@ -75,41 +144,7 @@ func (m *MultilineConfig) NewMultiline() (*Multiline, error) {
    }, nil
}

func (m *Multiline) IsEnabled() bool {
    return m.enabled
}

func (m *Multiline) ProcessLine(text string, buffer *bytes.Buffer) string {
    if m.matchQuotation(text) || m.matchString(text) {
        // Restore the newline removed by tail's scanner
        if buffer.Len() > 0 && m.config.PreserveNewline {
            buffer.WriteString("\n")
        }
        buffer.WriteString(text)
        return ""
    }

    if m.config.MatchWhichLine == Previous {
        previousText := buffer.String()
        buffer.Reset()
        buffer.WriteString(text)
        text = previousText
    } else {
        // Next
        if buffer.Len() > 0 {
            if m.config.PreserveNewline {
                buffer.WriteString("\n")
            }
            buffer.WriteString(text)
            text = buffer.String()
            buffer.Reset()
        }
    }

    return text
}

func Flush(buffer *bytes.Buffer) string {
func flush(buffer *bytes.Buffer) string {
    if buffer.Len() == 0 {
        return ""
    }

@@ -118,66 +153,31 @@ func Flush(buffer *bytes.Buffer) string {
    return text
}

func (m *Multiline) matchQuotation(text string) bool {
    if m.config.Quotation == "ignore" {
        return false
    }
    escaped := 0
    count := 0
    for i := 0; i < len(text); i++ {
        if text[i] == '\\' {
            escaped++
            continue
        }

        // If we do encounter a backslash-quote combination, we interpret this
        // as an escaped-quoted and should not count the quote. However,
        // backslash-backslash combinations (or any even number of backslashes)
        // are interpreted as a literal backslash not escaping the quote.
        if text[i] == m.quote && escaped%2 == 0 {
            count++
        }
        // If we encounter any non-quote, non-backslash character we can
        // safely reset the escape state.
        escaped = 0
    }
    even := count%2 == 0
    m.inQuote = (m.inQuote && even) || (!m.inQuote && !even)
    return m.inQuote
}

func (m *Multiline) matchString(text string) bool {
    if m.patternRegexp != nil {
        return m.patternRegexp.MatchString(text) != m.config.InvertMatch
    }
    return false
}

func (w MultilineMatchWhichLine) String() string {
func (w multilineMatchWhichLine) String() string {
    switch w {
    case Previous:
    case previous:
        return "previous"
    case Next:
    case next:
        return "next"
    }
    return ""
}

// UnmarshalTOML implements ability to unmarshal MultilineMatchWhichLine from TOML files.
func (w *MultilineMatchWhichLine) UnmarshalTOML(data []byte) (err error) {
// UnmarshalTOML implements ability to unmarshal multilineMatchWhichLine from TOML files.
func (w *multilineMatchWhichLine) UnmarshalTOML(data []byte) (err error) {
    return w.UnmarshalText(data)
}

// UnmarshalText implements encoding.TextUnmarshaler
func (w *MultilineMatchWhichLine) UnmarshalText(data []byte) (err error) {
func (w *multilineMatchWhichLine) UnmarshalText(data []byte) (err error) {
    s := string(data)
    switch strings.ToUpper(s) {
    case `PREVIOUS`, `"PREVIOUS"`, `'PREVIOUS'`:
        *w = Previous
        *w = previous
        return nil

    case `NEXT`, `"NEXT"`, `'NEXT'`:
        *w = Next
        *w = next
        return nil
    }
    *w = -1

@@ -185,7 +185,7 @@ func (w *MultilineMatchWhichLine) UnmarshalText(data []byte) (err error) {
}

// MarshalText implements encoding.TextMarshaler
func (w MultilineMatchWhichLine) MarshalText() ([]byte, error) {
func (w multilineMatchWhichLine) MarshalText() ([]byte, error) {
    s := w.String()
    if s != "" {
        return []byte(s), nil

@@ -15,35 +15,35 @@ import (
)

func TestMultilineConfigOK(t *testing.T) {
    c := &MultilineConfig{
    c := &multilineConfig{
        Pattern:        ".*",
        MatchWhichLine: Previous,
        MatchWhichLine: previous,
    }

    _, err := c.NewMultiline()
    _, err := c.newMultiline()

    require.NoError(t, err, "Configuration was OK.")
}

func TestMultilineConfigError(t *testing.T) {
    c := &MultilineConfig{
    c := &multilineConfig{
        Pattern:        "\xA0",
        MatchWhichLine: Previous,
        MatchWhichLine: previous,
    }

    _, err := c.NewMultiline()
    _, err := c.newMultiline()

    require.Error(t, err, "The pattern was invalid")
}

func TestMultilineConfigTimeoutSpecified(t *testing.T) {
    duration := config.Duration(10 * time.Second)
    c := &MultilineConfig{
    c := &multilineConfig{
        Pattern:        ".*",
        MatchWhichLine: Previous,
        MatchWhichLine: previous,
        Timeout:        &duration,
    }
    m, err := c.NewMultiline()
    m, err := c.newMultiline()
    require.NoError(t, err, "Configuration was OK.")

    require.Equal(t, duration, *m.config.Timeout)

@@ -51,44 +51,44 @@ func TestMultilineConfigTimeoutSpecified(t *testing.T) {

func TestMultilineConfigDefaultTimeout(t *testing.T) {
    duration := config.Duration(5 * time.Second)
    c := &MultilineConfig{
    c := &multilineConfig{
        Pattern:        ".*",
        MatchWhichLine: Previous,
        MatchWhichLine: previous,
    }
    m, err := c.NewMultiline()
    m, err := c.newMultiline()
    require.NoError(t, err, "Configuration was OK.")

    require.Equal(t, duration, *m.config.Timeout)
}

func TestMultilineIsEnabled(t *testing.T) {
    c := &MultilineConfig{
    c := &multilineConfig{
        Pattern:        ".*",
        MatchWhichLine: Previous,
        MatchWhichLine: previous,
    }
    m, err := c.NewMultiline()
    m, err := c.newMultiline()
    require.NoError(t, err, "Configuration was OK.")

    isEnabled := m.IsEnabled()
    isEnabled := m.isEnabled()

    require.True(t, isEnabled, "Should have been enabled")
}

func TestMultilineIsDisabled(t *testing.T) {
    c := &MultilineConfig{
        MatchWhichLine: Previous,
    c := &multilineConfig{
        MatchWhichLine: previous,
    }
    m, err := c.NewMultiline()
    m, err := c.newMultiline()
    require.NoError(t, err, "Configuration was OK.")

    isEnabled := m.IsEnabled()
    isEnabled := m.isEnabled()

    require.False(t, isEnabled, "Should have been disabled")
}

func TestMultilineFlushEmpty(t *testing.T) {
    var buffer bytes.Buffer
    text := Flush(&buffer)
    text := flush(&buffer)

    require.Empty(t, text)
}

@@ -97,78 +97,78 @@ func TestMultilineFlush(t *testing.T) {
    var buffer bytes.Buffer
    buffer.WriteString("foo")

    text := Flush(&buffer)
    text := flush(&buffer)
    require.Equal(t, "foo", text)
    require.Zero(t, buffer.Len())
}

func TestMultiLineProcessLinePrevious(t *testing.T) {
    c := &MultilineConfig{
    c := &multilineConfig{
        Pattern:        "^=>",
        MatchWhichLine: Previous,
        MatchWhichLine: previous,
    }
    m, err := c.NewMultiline()
    m, err := c.newMultiline()
    require.NoError(t, err, "Configuration was OK.")
    var buffer bytes.Buffer

    text := m.ProcessLine("1", &buffer)
    text := m.processLine("1", &buffer)
    require.Empty(t, text)
    require.NotZero(t, buffer.Len())

    text = m.ProcessLine("=>2", &buffer)
    text = m.processLine("=>2", &buffer)
    require.Empty(t, text)
    require.NotZero(t, buffer.Len())

    text = m.ProcessLine("=>3", &buffer)
    text = m.processLine("=>3", &buffer)
    require.Empty(t, text)
    require.NotZero(t, buffer.Len())

    text = m.ProcessLine("4", &buffer)
    text = m.processLine("4", &buffer)
    require.Equal(t, "1=>2=>3", text)
    require.NotZero(t, buffer.Len())

    text = m.ProcessLine("5", &buffer)
    text = m.processLine("5", &buffer)
    require.Equal(t, "4", text)
    require.Equal(t, "5", buffer.String())
}

func TestMultiLineProcessLineNext(t *testing.T) {
    c := &MultilineConfig{
    c := &multilineConfig{
        Pattern:        "=>$",
        MatchWhichLine: Next,
        MatchWhichLine: next,
    }
    m, err := c.NewMultiline()
    m, err := c.newMultiline()
    require.NoError(t, err, "Configuration was OK.")
    var buffer bytes.Buffer

    text := m.ProcessLine("1=>", &buffer)
    text := m.processLine("1=>", &buffer)
    require.Empty(t, text)
    require.NotZero(t, buffer.Len())

    text = m.ProcessLine("2=>", &buffer)
    text = m.processLine("2=>", &buffer)
    require.Empty(t, text)
    require.NotZero(t, buffer.Len())

    text = m.ProcessLine("3=>", &buffer)
    text = m.processLine("3=>", &buffer)
    require.Empty(t, text)
    require.NotZero(t, buffer.Len())

    text = m.ProcessLine("4", &buffer)
    text = m.processLine("4", &buffer)
    require.Equal(t, "1=>2=>3=>4", text)
    require.Zero(t, buffer.Len())

    text = m.ProcessLine("5", &buffer)
    text = m.processLine("5", &buffer)
    require.Equal(t, "5", text)
    require.Zero(t, buffer.Len())
}

func TestMultiLineMatchStringWithInvertMatchFalse(t *testing.T) {
    c := &MultilineConfig{
    c := &multilineConfig{
        Pattern:        "=>$",
        MatchWhichLine: Next,
        MatchWhichLine: next,
        InvertMatch:    false,
    }
    m, err := c.NewMultiline()
    m, err := c.newMultiline()
    require.NoError(t, err, "Configuration was OK.")

    matches1 := m.matchString("t=>")

@@ -179,12 +179,12 @@ func TestMultiLineMatchStringWithInvertMatchFalse(t *testing.T) {
}

func TestMultiLineMatchStringWithInvertTrue(t *testing.T) {
    c := &MultilineConfig{
    c := &multilineConfig{
        Pattern:        "=>$",
        MatchWhichLine: Next,
        MatchWhichLine: next,
        InvertMatch:    true,
    }
    m, err := c.NewMultiline()
    m, err := c.newMultiline()
    require.NoError(t, err, "Configuration was OK.")

    matches1 := m.matchString("t=>")

@@ -195,33 +195,33 @@ func TestMultiLineMatchStringWithInvertTrue(t *testing.T) {
}

func TestMultilineWhat(t *testing.T) {
    var w1 MultilineMatchWhichLine
    var w1 multilineMatchWhichLine
    require.NoError(t, w1.UnmarshalTOML([]byte(`"previous"`)))
    require.Equal(t, Previous, w1)
    require.Equal(t, previous, w1)

    var w2 MultilineMatchWhichLine
    var w2 multilineMatchWhichLine
    require.NoError(t, w2.UnmarshalTOML([]byte(`previous`)))
    require.Equal(t, Previous, w2)
    require.Equal(t, previous, w2)

    var w3 MultilineMatchWhichLine
    var w3 multilineMatchWhichLine
    require.NoError(t, w3.UnmarshalTOML([]byte(`'previous'`)))
    require.Equal(t, Previous, w3)
    require.Equal(t, previous, w3)

    var w4 MultilineMatchWhichLine
    var w4 multilineMatchWhichLine
    require.NoError(t, w4.UnmarshalTOML([]byte(`"next"`)))
    require.Equal(t, Next, w4)
    require.Equal(t, next, w4)

    var w5 MultilineMatchWhichLine
    var w5 multilineMatchWhichLine
    require.NoError(t, w5.UnmarshalTOML([]byte(`next`)))
    require.Equal(t, Next, w5)
    require.Equal(t, next, w5)

    var w6 MultilineMatchWhichLine
    var w6 multilineMatchWhichLine
    require.NoError(t, w6.UnmarshalTOML([]byte(`'next'`)))
    require.Equal(t, Next, w6)
    require.Equal(t, next, w6)

    var w7 MultilineMatchWhichLine
    var w7 multilineMatchWhichLine
    require.Error(t, w7.UnmarshalTOML([]byte(`nope`)))
    require.Equal(t, MultilineMatchWhichLine(-1), w7)
    require.Equal(t, multilineMatchWhichLine(-1), w7)
}

func TestMultilineQuoted(t *testing.T) {

@@ -265,12 +265,12 @@ func TestMultilineQuoted(t *testing.T) {
                fmt.Sprintf("1660819827450,5,all of %sthis%s should %sbasically%s work...,E", tt.quote, tt.quote, tt.quote, tt.quote),
            }

            c := &MultilineConfig{
                MatchWhichLine: Next,
            c := &multilineConfig{
                MatchWhichLine:  next,
                Quotation:       tt.quotation,
                PreserveNewline: true,
            }
            m, err := c.NewMultiline()
            m, err := c.newMultiline()
            require.NoError(t, err)

            f, err := os.Open(filepath.Join("testdata", tt.filename))

@@ -283,13 +283,13 @@ func TestMultilineQuoted(t *testing.T) {
            for scanner.Scan() {
                line := scanner.Text()

                text := m.ProcessLine(line, &buffer)
                text := m.processLine(line, &buffer)
                if text == "" {
                    continue
                }
                result = append(result, text)
            }
            if text := Flush(&buffer); text != "" {
            if text := flush(&buffer); text != "" {
                result = append(result, text)
            }

@@ -327,12 +327,12 @@ func TestMultilineQuotedError(t *testing.T) {

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            c := &MultilineConfig{
                MatchWhichLine: Next,
            c := &multilineConfig{
                MatchWhichLine:  next,
                Quotation:       tt.quotation,
                PreserveNewline: true,
            }
            m, err := c.NewMultiline()
            m, err := c.newMultiline()
            require.NoError(t, err)

            f, err := os.Open(filepath.Join("testdata", tt.filename))

@@ -345,13 +345,13 @@ func TestMultilineQuotedError(t *testing.T) {
            for scanner.Scan() {
                line := scanner.Text()

                text := m.ProcessLine(line, &buffer)
                text := m.processLine(line, &buffer)
                if text == "" {
                    continue
                }
                result = append(result, text)
            }
            if text := Flush(&buffer); text != "" {
            if text := flush(&buffer); text != "" {
                result = append(result, text)
            }

@@ -364,12 +364,12 @@ func TestMultilineNewline(t *testing.T) {
    tests := []struct {
        name     string
        filename string
        cfg      *MultilineConfig
        cfg      *multilineConfig
        expected []string
    }{
        {
            name: "do not preserve newline",
            cfg: &MultilineConfig{
            cfg: &multilineConfig{
                Pattern:     `\[[0-9]{2}/[A-Za-z]{3}/[0-9]{4}:[0-9]{2}:[0-9]{2}:[0-9]{2} \+[0-9]{4}\]`,
                InvertMatch: true,
            },

@@ -386,7 +386,7 @@ func TestMultilineNewline(t *testing.T) {
        },
        {
            name: "preserve newline",
            cfg: &MultilineConfig{
            cfg: &multilineConfig{
                Pattern:         `\[[0-9]{2}/[A-Za-z]{3}/[0-9]{4}:[0-9]{2}:[0-9]{2}:[0-9]{2} \+[0-9]{4}\]`,
                InvertMatch:     true,
                PreserveNewline: true,

@@ -406,7 +406,7 @@ java.lang.ArithmeticException: / by zero

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            m, err := tt.cfg.NewMultiline()
            m, err := tt.cfg.newMultiline()
            require.NoError(t, err)

            f, err := os.Open(filepath.Join("testdata", tt.filename))

@@ -419,13 +419,13 @@ java.lang.ArithmeticException: / by zero
            for scanner.Scan() {
                line := scanner.Text()

                text := m.ProcessLine(line, &buffer)
                text := m.processLine(line, &buffer)
                if text == "" {
                    continue
                }
                result = append(result, text)
            }
            if text := Flush(&buffer); text != "" {
            if text := flush(&buffer); text != "" {
                result = append(result, text)
            }

@@ -435,41 +435,41 @@ java.lang.ArithmeticException: / by zero
}

func TestMultiLineQuotedAndPattern(t *testing.T) {
    c := &MultilineConfig{
    c := &multilineConfig{
        Pattern:         "=>$",
        MatchWhichLine:  Next,
        MatchWhichLine:  next,
        Quotation:       "double-quotes",
        PreserveNewline: true,
    }
    m, err := c.NewMultiline()
    m, err := c.newMultiline()
    require.NoError(t, err, "Configuration was OK.")
    var buffer bytes.Buffer

    text := m.ProcessLine("1=>", &buffer)
    text := m.processLine("1=>", &buffer)
    require.Empty(t, text)
    require.NotZero(t, buffer.Len())

    text = m.ProcessLine("2=>", &buffer)
    text = m.processLine("2=>", &buffer)
    require.Empty(t, text)
    require.NotZero(t, buffer.Len())

    text = m.ProcessLine(`"a quoted`, &buffer)
    text = m.processLine(`"a quoted`, &buffer)
    require.Empty(t, text)
    require.NotZero(t, buffer.Len())

    text = m.ProcessLine(`multiline string"=>`, &buffer)
    text = m.processLine(`multiline string"=>`, &buffer)
    require.Empty(t, text)
    require.NotZero(t, buffer.Len())

    text = m.ProcessLine("3=>", &buffer)
    text = m.processLine("3=>", &buffer)
    require.Empty(t, text)
    require.NotZero(t, buffer.Len())

    text = m.ProcessLine("4", &buffer)
    text = m.processLine("4", &buffer)
    require.Equal(t, "1=>\n2=>\n\"a quoted\nmultiline string\"=>\n3=>\n4", text)
    require.Zero(t, buffer.Len())

    text = m.ProcessLine("5", &buffer)
    text = m.processLine("5", &buffer)
    require.Equal(t, "5", text)
    require.Zero(t, buffer.Len())
}

@@ -28,16 +28,13 @@ import (
//go:embed sample.conf
var sampleConfig string

var once sync.Once

var (
    once sync.Once

    offsets      = make(map[string]int64)
    offsetsMutex = new(sync.Mutex)
)

type empty struct{}
type semaphore chan empty

type Tail struct {
    Files         []string `toml:"files"`
    FromBeginning bool     `toml:"from_beginning"`

@@ -58,8 +55,8 @@ type Tail struct {

    acc telegraf.TrackingAccumulator

    MultilineConfig MultilineConfig `toml:"multiline"`
    multiline       *Multiline
    MultilineConfig multilineConfig `toml:"multiline"`
    multiline       *multiline

    ctx    context.Context
    cancel context.CancelFunc

@@ -67,26 +64,17 @@ type Tail struct {
    decoder *encoding.Decoder
}

func NewTail() *Tail {
    offsetsMutex.Lock()
    offsetsCopy := make(map[string]int64, len(offsets))
    for k, v := range offsets {
        offsetsCopy[k] = v
    }
    offsetsMutex.Unlock()

    return &Tail{
        FromBeginning:       false,
        MaxUndeliveredLines: 1000,
        offsets:             offsetsCopy,
        PathTag:             "path",
    }
}
type empty struct{}
type semaphore chan empty

func (*Tail) SampleConfig() string {
    return sampleConfig
}

func (t *Tail) SetParserFunc(fn telegraf.ParserFunc) {
    t.parserFunc = fn
}

func (t *Tail) Init() error {
    if t.MaxUndeliveredLines == 0 {
        return errors.New("max_undelivered_lines must be positive")

@@ -106,6 +94,43 @@ func (t *Tail) Init() error {
    return err
}

func (t *Tail) Start(acc telegraf.Accumulator) error {
    t.acc = acc.WithTracking(t.MaxUndeliveredLines)

    t.ctx, t.cancel = context.WithCancel(context.Background())

    t.wg.Add(1)
    go func() {
        defer t.wg.Done()
        for {
            select {
            case <-t.ctx.Done():
                return
            case <-t.acc.Delivered():
                <-t.sem
            }
        }
    }()

    var err error
    t.multiline, err = t.MultilineConfig.newMultiline()

    if err != nil {
        return err
    }

    t.tailers = make(map[string]*tail.Tail)

    err = t.tailNewFiles(t.FromBeginning)

    // assumption that once Start is called, all parallel plugins have already been initialized
    offsetsMutex.Lock()
    offsets = make(map[string]int64)
    offsetsMutex.Unlock()

    return err
}

func (t *Tail) GetState() interface{} {
    return t.offsets
}

@@ -125,41 +150,33 @@ func (t *Tail) Gather(_ telegraf.Accumulator) error {
    return t.tailNewFiles(true)
}

func (t *Tail) Start(acc telegraf.Accumulator) error {
    t.acc = acc.WithTracking(t.MaxUndeliveredLines)

    t.ctx, t.cancel = context.WithCancel(context.Background())

    t.wg.Add(1)
    go func() {
        defer t.wg.Done()
        for {
            select {
            case <-t.ctx.Done():
                return
            case <-t.acc.Delivered():
                <-t.sem
func (t *Tail) Stop() {
    for _, tailer := range t.tailers {
        if !t.Pipe && !t.FromBeginning {
            // store offset for resume
            offset, err := tailer.Tell()
            if err == nil {
                t.Log.Debugf("Recording offset %d for %q", offset, tailer.Filename)
                t.offsets[tailer.Filename] = offset
            } else {
                t.Log.Errorf("Recording offset for %q: %s", tailer.Filename, err.Error())
            }
        }
    }()

    var err error
    t.multiline, err = t.MultilineConfig.NewMultiline()

    if err != nil {
        return err
        err := tailer.Stop()
        if err != nil {
            t.Log.Errorf("Stopping tail on %q: %s", tailer.Filename, err.Error())
        }
    }

    t.tailers = make(map[string]*tail.Tail)
    t.cancel()
    t.wg.Wait()

    err = t.tailNewFiles(t.FromBeginning)

    // assumption that once Start is called, all parallel plugins have already been initialized
    // persist offsets
    offsetsMutex.Lock()
    offsets = make(map[string]int64)
    for k, v := range t.offsets {
        offsets[k] = v
    }
    offsetsMutex.Unlock()

    return err
}

func (t *Tail) tailNewFiles(fromBeginning bool) error {

@@ -249,7 +266,6 @@ func (t *Tail) tailNewFiles(fromBeginning bool) error {
    return nil
}

// ParseLine parses a line of text.
func parseLine(parser telegraf.Parser, line string) ([]telegraf.Metric, error) {
    m, err := parser.Parse([]byte(line))
    if err != nil {

@@ -261,8 +277,8 @@ func parseLine(parser telegraf.Parser, line string) ([]telegraf.Metric, error) {
    return m, err
}

// Receiver is launched as a goroutine to continuously watch a tailed logfile
// for changes, parse any incoming msgs, and add to the accumulator.
// receiver is launched as a goroutine to continuously watch a tailed logfile
// for changes, parse any incoming messages, and add to the accumulator.
func (t *Tail) receiver(parser telegraf.Parser, tailer *tail.Tail) {
    // holds the individual lines of multi-line log entries.
    var buffer bytes.Buffer

@@ -272,7 +288,7 @@ func (t *Tail) receiver(parser telegraf.Parser, tailer *tail.Tail) {

    // The multiline mode requires a timer in order to flush the multiline buffer
    // if no new lines are incoming.
    if t.multiline.IsEnabled() {
    if t.multiline.isEnabled() {
        timer = time.NewTimer(time.Duration(*t.MultilineConfig.Timeout))
        timeout = timer.C
    }

@@ -304,14 +320,14 @@ func (t *Tail) receiver(parser telegraf.Parser, tailer *tail.Tail) {
            // Fix up files with Windows line endings.
            text = strings.TrimRight(line.Text, "\r")

            if t.multiline.IsEnabled() {
                if text = t.multiline.ProcessLine(text, &buffer); text == "" {
            if t.multiline.isEnabled() {
                if text = t.multiline.processLine(text, &buffer); text == "" {
                    continue
                }
            }
        }
        if line == nil || !channelOpen || !tailerOpen {
            if text += Flush(&buffer); text == "" {
            if text += flush(&buffer); text == "" {
                if !channelOpen {
                    return
                }

@@ -377,41 +393,24 @@ func (t *Tail) receiver(parser telegraf.Parser, tailer *tail.Tail) {
    }
}

func (t *Tail) Stop() {
    for _, tailer := range t.tailers {
        if !t.Pipe && !t.FromBeginning {
            // store offset for resume
            offset, err := tailer.Tell()
            if err == nil {
                t.Log.Debugf("Recording offset %d for %q", offset, tailer.Filename)
                t.offsets[tailer.Filename] = offset
            } else {
                t.Log.Errorf("Recording offset for %q: %s", tailer.Filename, err.Error())
            }
        }
        err := tailer.Stop()
        if err != nil {
            t.Log.Errorf("Stopping tail on %q: %s", tailer.Filename, err.Error())
        }
    }

    t.cancel()
    t.wg.Wait()

    // persist offsets
func newTail() *Tail {
    offsetsMutex.Lock()
    for k, v := range t.offsets {
        offsets[k] = v
    offsetsCopy := make(map[string]int64, len(offsets))
    for k, v := range offsets {
        offsetsCopy[k] = v
    }
    offsetsMutex.Unlock()
}

func (t *Tail) SetParserFunc(fn telegraf.ParserFunc) {
    t.parserFunc = fn
    return &Tail{
        FromBeginning:       false,
        MaxUndeliveredLines: 1000,
        offsets:             offsetsCopy,
        PathTag:             "path",
    }
}

func init() {
    inputs.Add("tail", func() telegraf.Input {
        return NewTail()
        return newTail()
    })
}

@@ -3,3 +3,34 @@
//go:build solaris

package tail

import (
    _ "embed"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
)

//go:embed sample.conf
var sampleConfig string

type Tail struct {
    Log telegraf.Logger `toml:"-"`
}

func (*Tail) SampleConfig() string {
    return sampleConfig
}

func (h *Tail) Init() error {
    h.Log.Warn("Current platform is not supported")
    return nil
}

func (*Tail) Gather(telegraf.Accumulator) error { return nil }

func init() {
    inputs.Add("tail", func() telegraf.Input {
        return &Tail{}
    })
}

@@ -29,7 +29,7 @@ func newInfluxParser() (telegraf.Parser, error) {
    return parser, nil
}

func NewTestTail() *Tail {
func newTestTail() *Tail {
    offsetsMutex.Lock()
    offsetsCopy := make(map[string]int64, len(offsets))
    for k, v := range offsets {

@@ -62,7 +62,7 @@ cpu usage_idle=100

    logger := &testutil.CaptureLogger{}

    tt := NewTestTail()
    tt := newTestTail()
    tt.Log = logger
    tt.FromBeginning = true
    tt.Files = []string{tmpfile}

@@ -86,7 +86,7 @@ func TestColoredLine(t *testing.T) {
    tmpfile := filepath.Join(t.TempDir(), "input.csv")
    require.NoError(t, os.WriteFile(tmpfile, []byte(content), 0600))

    tt := NewTestTail()
    tt := newTestTail()
    tt.Log = testutil.Logger{}
    tt.FromBeginning = true
    tt.Filters = []string{"ansi_color"}

@@ -116,7 +116,7 @@ func TestTailDosLineEndings(t *testing.T) {
    tmpfile := filepath.Join(t.TempDir(), "input.csv")
    require.NoError(t, os.WriteFile(tmpfile, []byte(content), 0600))

    tt := NewTestTail()
    tt := newTestTail()
    tt.Log = testutil.Logger{}
    tt.FromBeginning = true
    tt.Files = []string{tmpfile}

@@ -144,13 +144,13 @@ func TestGrokParseLogFilesWithMultiline(t *testing.T) {
    d, err := time.ParseDuration("100s")
    require.NoError(t, err)
    duration := config.Duration(d)
    tt := NewTail()
    tt := newTail()
    tt.Log = testutil.Logger{}
    tt.FromBeginning = true
    tt.Files = []string{filepath.Join("testdata", "test_multiline.log")}
    tt.MultilineConfig = MultilineConfig{
    tt.MultilineConfig = multilineConfig{
        Pattern:        `^[^\[]`,
        MatchWhichLine: Previous,
        MatchWhichLine: previous,
        InvertMatch:    false,
        Timeout:        &duration,
    }

@@ -207,14 +207,14 @@ func TestGrokParseLogFilesWithMultilineTimeout(t *testing.T) {
    // set tight timeout for tests
    d := 10 * time.Millisecond
    duration := config.Duration(d)
    tt := NewTail()
    tt := newTail()

    tt.Log = testutil.Logger{}
    tt.FromBeginning = true
    tt.Files = []string{tmpfile.Name()}
    tt.MultilineConfig = MultilineConfig{
    tt.MultilineConfig = multilineConfig{
        Pattern:        `^[^\[]`,
        MatchWhichLine: Previous,
        MatchWhichLine: previous,
        InvertMatch:    false,
        Timeout:        &duration,
    }

@@ -259,13 +259,13 @@ func TestGrokParseLogFilesWithMultilineTailerCloseFlushesMultilineBuffer(t *test
    // we make sure the timeout won't kick in
    duration := config.Duration(100 * time.Second)

    tt := NewTestTail()
    tt := newTestTail()
    tt.Log = testutil.Logger{}
    tt.FromBeginning = true
    tt.Files = []string{filepath.Join("testdata", "test_multiline.log")}
    tt.MultilineConfig = MultilineConfig{
    tt.MultilineConfig = multilineConfig{
        Pattern:        `^[^\[]`,
        MatchWhichLine: Previous,
        MatchWhichLine: previous,
        InvertMatch:    false,
        Timeout:        &duration,
    }

@@ -312,7 +312,7 @@ cpu,42
    tmpfile := filepath.Join(t.TempDir(), "input.csv")
    require.NoError(t, os.WriteFile(tmpfile, []byte(content), 0600))

    plugin := NewTestTail()
    plugin := newTestTail()
    plugin.Log = testutil.Logger{}
    plugin.FromBeginning = true
    plugin.Files = []string{tmpfile}

@@ -386,7 +386,7 @@ skip2,mem,100
            time.Unix(0, 0)),
    }

    plugin := NewTestTail()
    plugin := newTestTail()
    plugin.Log = testutil.Logger{}
    plugin.FromBeginning = true
    plugin.Files = []string{tmpfile}

@@ -444,7 +444,7 @@ func TestMultipleMetricsOnFirstLine(t *testing.T) {
            time.Unix(0, 0)),
    }

    plugin := NewTestTail()
    plugin := newTestTail()
    plugin.Log = testutil.Logger{}
    plugin.FromBeginning = true
    plugin.Files = []string{tmpfile}

@@ -613,7 +613,7 @@ func TestTailEOF(t *testing.T) {
    require.NoError(t, err)
    require.NoError(t, tmpfile.Sync())

    tt := NewTestTail()
    tt := newTestTail()
    tt.Log = testutil.Logger{}
    tt.FromBeginning = true
    tt.Files = []string{tmpfile.Name()}

@@ -15,45 +15,16 @@ import (
var sampleConfig string

type Teamspeak struct {
    Server         string
    Username       string
    Password       string
    Nickname       string
    VirtualServers []int `toml:"virtual_servers"`
    Server         string `toml:"server"`
    Username       string `toml:"username"`
    Password       string `toml:"password"`
    Nickname       string `toml:"nickname"`
    VirtualServers []int  `toml:"virtual_servers"`

    client    *ts3.Client
    connected bool
}

func (ts *Teamspeak) connect() error {
    var err error

    ts.client, err = ts3.NewClient(ts.Server)
    if err != nil {
        return err
    }

    err = ts.client.Login(ts.Username, ts.Password)
    if err != nil {
        return err
    }

    if len(ts.Nickname) > 0 {
        for _, vserver := range ts.VirtualServers {
            if err := ts.client.Use(vserver); err != nil {
                return err
            }
            if err := ts.client.SetNick(ts.Nickname); err != nil {
                return err
            }
        }
    }

    ts.connected = true

    return nil
}

func (*Teamspeak) SampleConfig() string {
    return sampleConfig
}

@@ -109,6 +80,35 @@ func (ts *Teamspeak) Gather(acc telegraf.Accumulator) error {
    return nil
}

func (ts *Teamspeak) connect() error {
    var err error

    ts.client, err = ts3.NewClient(ts.Server)
    if err != nil {
        return err
    }

    err = ts.client.Login(ts.Username, ts.Password)
    if err != nil {
        return err
    }

    if len(ts.Nickname) > 0 {
        for _, vserver := range ts.VirtualServers {
            if err := ts.client.Use(vserver); err != nil {
                return err
            }
            if err := ts.client.SetNick(ts.Nickname); err != nil {
                return err
            }
        }
    }

    ts.connected = true

    return nil
}

func init() {
    inputs.Add("teamspeak", func() telegraf.Input {
        return &Teamspeak{

@@ -16,12 +16,12 @@ import (

const scalingFactor = float64(1000.0)

type TemperatureStat struct {
    Name        string
    Label       string
    Device      string
    Temperature float64
    Additional  map[string]interface{}
type temperatureStat struct {
    name        string
    label       string
    device      string
    temperature float64
    additional  map[string]interface{}
}

func (t *Temperature) Init() error {

@@ -64,48 +64,48 @@ func (t *Temperature) Gather(acc telegraf.Accumulator) error {
    return nil
}

func (t *Temperature) createMetricsV1(acc telegraf.Accumulator, temperatures []TemperatureStat) {
func (t *Temperature) createMetricsV1(acc telegraf.Accumulator, temperatures []temperatureStat) {
    for _, temp := range temperatures {
        sensor := temp.Name
        if temp.Label != "" {
            sensor += "_" + strings.ReplaceAll(temp.Label, " ", "")
        sensor := temp.name
        if temp.label != "" {
            sensor += "_" + strings.ReplaceAll(temp.label, " ", "")
        }

        // Mandatory measurement value
        tags := map[string]string{"sensor": sensor + "_input"}
        if t.DeviceTag {
            tags["device"] = temp.Device
            tags["device"] = temp.device
        }
        acc.AddFields("temp", map[string]interface{}{"temp": temp.Temperature}, tags)
        acc.AddFields("temp", map[string]interface{}{"temp": temp.temperature}, tags)

        // Optional values values
        for measurement, value := range temp.Additional {
        for measurement, value := range temp.additional {
            tags := map[string]string{"sensor": sensor + "_" + measurement}
            if t.DeviceTag {
                tags["device"] = temp.Device
                tags["device"] = temp.device
            }
            acc.AddFields("temp", map[string]interface{}{"temp": value}, tags)
        }
    }
}

func (t *Temperature) createMetricsV2(acc telegraf.Accumulator, temperatures []TemperatureStat) {
func (t *Temperature) createMetricsV2(acc telegraf.Accumulator, temperatures []temperatureStat) {
    for _, temp := range temperatures {
        sensor := temp.Name
        if temp.Label != "" {
            sensor += "_" + strings.ReplaceAll(temp.Label, " ", "_")
        sensor := temp.name
        if temp.label != "" {
            sensor += "_" + strings.ReplaceAll(temp.label, " ", "_")
        }

        // Mandatory measurement value
        tags := map[string]string{"sensor": sensor}
        if t.DeviceTag {
            tags["device"] = temp.Device
            tags["device"] = temp.device
        }
        acc.AddFields("temp", map[string]interface{}{"temp": temp.Temperature}, tags)
        acc.AddFields("temp", map[string]interface{}{"temp": temp.temperature}, tags)
    }
}

func (t *Temperature) gatherHwmon(syspath string) ([]TemperatureStat, error) {
func (t *Temperature) gatherHwmon(syspath string) ([]temperatureStat, error) {
    // Get all hwmon devices
    sensors, err := filepath.Glob(filepath.Join(syspath, "class", "hwmon", "hwmon*", "temp*_input"))
    if err != nil {

@@ -127,7 +127,7 @@ func (t *Temperature) gatherHwmon(syspath string) ([]TemperatureStat, error) {
    }

    // Collect the sensor information
    stats := make([]TemperatureStat, 0, len(sensors))
    stats := make([]temperatureStat, 0, len(sensors))
    for _, s := range sensors {
        // Get the sensor directory and the temperature prefix from the path
        path := filepath.Dir(s)

@@ -153,11 +153,11 @@ func (t *Temperature) gatherHwmon(syspath string) ([]TemperatureStat, error) {
        }

        // Do the actual sensor readings
        temp := TemperatureStat{
            Name:       name,
            Label:      strings.ToLower(label),
            Device:     deviceName,
            Additional: make(map[string]interface{}),
        temp := temperatureStat{
            name:       name,
            label:      strings.ToLower(label),
            device:     deviceName,
            additional: make(map[string]interface{}),
        }

        // Temperature (mandatory)

@@ -168,7 +168,7 @@ func (t *Temperature) gatherHwmon(syspath string) ([]TemperatureStat, error) {
            continue
        }
        if v, err := strconv.ParseFloat(strings.TrimSpace(string(buf)), 64); err == nil {
            temp.Temperature = v / scalingFactor
            temp.temperature = v / scalingFactor
        }

        // Read all possible values of the sensor

@@ -198,7 +198,7 @@ func (t *Temperature) gatherHwmon(syspath string) ([]TemperatureStat, error) {
            if err != nil {
                continue
            }
            temp.Additional[measurement] = v / scalingFactor
            temp.additional[measurement] = v / scalingFactor
        }

        stats = append(stats, temp)

@@ -207,7 +207,7 @@ func (t *Temperature) gatherHwmon(syspath string) ([]TemperatureStat, error) {
    return stats, nil
}

func (t *Temperature) gatherThermalZone(syspath string) ([]TemperatureStat, error) {
func (t *Temperature) gatherThermalZone(syspath string) ([]temperatureStat, error) {
    // For file layout see https://www.kernel.org/doc/Documentation/thermal/sysfs-api.txt
    zones, err := filepath.Glob(filepath.Join(syspath, "class", "thermal", "thermal_zone*"))
    if err != nil {

@@ -220,7 +220,7 @@ func (t *Temperature) gatherThermalZone(syspath string) ([]TemperatureStat, erro
    }

    // Collect the sensor information
    stats := make([]TemperatureStat, 0, len(zones))
    stats := make([]temperatureStat, 0, len(zones))
    for _, path := range zones {
        // Type of the zone corresponding to the sensor name in our nomenclature
        buf, err := os.ReadFile(filepath.Join(path, "type"))

@@ -241,7 +241,7 @@ func (t *Temperature) gatherThermalZone(syspath string) ([]TemperatureStat, erro
            continue
        }

        temp := TemperatureStat{Name: name, Temperature: v / scalingFactor}
        temp := temperatureStat{name: name, temperature: v / scalingFactor}
        stats = append(stats, temp)
    }

@ -25,13 +25,46 @@ import (
|
|||
var sampleConfig string
|
||||
|
||||
type Tengine struct {
|
||||
Urls []string
|
||||
ResponseTimeout config.Duration
|
||||
Urls []string `toml:"urls"`
|
||||
ResponseTimeout config.Duration `toml:"response_timeout"`
|
||||
tls.ClientConfig
|
||||
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
type tengineStatus struct {
|
||||
host string
|
||||
bytesIn uint64
|
||||
bytesOut uint64
|
||||
connTotal uint64
|
||||
reqTotal uint64
|
||||
http2xx uint64
|
||||
http3xx uint64
|
||||
http4xx uint64
|
||||
http5xx uint64
|
||||
httpOtherStatus uint64
|
||||
rt uint64
|
||||
upsReq uint64
|
||||
upsRt uint64
|
||||
upsTries uint64
|
||||
http200 uint64
|
||||
http206 uint64
|
||||
http302 uint64
|
||||
http304 uint64
|
||||
http403 uint64
|
||||
http404 uint64
|
||||
http416 uint64
|
||||
http499 uint64
|
||||
http500 uint64
|
||||
http502 uint64
|
||||
http503 uint64
|
||||
http504 uint64
|
||||
http508 uint64
|
||||
httpOtherDetailStatus uint64
|
||||
httpUps4xx uint64
|
||||
httpUps5xx uint64
|
||||
}
|
||||
|
||||
func (*Tengine) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
|
@ -87,41 +120,8 @@ func (n *Tengine) createHTTPClient() (*http.Client, error) {
|
|||
return client, nil
|
||||
}
|
||||
|
||||
type TengineStatus struct {
|
||||
host string
|
||||
bytesIn uint64
|
||||
bytesOut uint64
|
||||
connTotal uint64
|
||||
reqTotal uint64
|
||||
http2xx uint64
|
||||
http3xx uint64
|
||||
http4xx uint64
|
||||
http5xx uint64
|
||||
httpOtherStatus uint64
|
||||
rt uint64
|
||||
upsReq uint64
|
||||
upsRt uint64
|
||||
upsTries uint64
|
||||
http200 uint64
|
||||
http206 uint64
|
||||
http302 uint64
|
||||
http304 uint64
|
||||
http403 uint64
|
||||
http404 uint64
|
||||
http416 uint64
|
||||
http499 uint64
|
||||
http500 uint64
|
||||
http502 uint64
|
||||
http503 uint64
|
||||
http504 uint64
|
||||
http508 uint64
|
||||
httpOtherDetailStatus uint64
|
||||
httpUps4xx uint64
|
||||
httpUps5xx uint64
|
||||
}
|
||||
|
||||
func (n *Tengine) gatherURL(addr *url.URL, acc telegraf.Accumulator) error {
|
||||
var tengineStatus TengineStatus
|
||||
var tStatus tengineStatus
|
||||
resp, err := n.client.Get(addr.String())
|
||||
if err != nil {
|
||||
return fmt.Errorf("error making HTTP request to %q: %w", addr.String(), err)
|
||||
|
|
@ -142,157 +142,157 @@ func (n *Tengine) gatherURL(addr *url.URL, acc telegraf.Accumulator) error {
|
|||
if len(lineSplit) != 30 {
|
||||
continue
|
||||
}
|
||||
tengineStatus.host = lineSplit[0]
|
||||
tStatus.host = lineSplit[0]
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tengineStatus.bytesIn, err = strconv.ParseUint(lineSplit[1], 10, 64)
|
||||
tStatus.bytesIn, err = strconv.ParseUint(lineSplit[1], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tengineStatus.bytesOut, err = strconv.ParseUint(lineSplit[2], 10, 64)
|
||||
tStatus.bytesOut, err = strconv.ParseUint(lineSplit[2], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tengineStatus.connTotal, err = strconv.ParseUint(lineSplit[3], 10, 64)
|
||||
tStatus.connTotal, err = strconv.ParseUint(lineSplit[3], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tengineStatus.reqTotal, err = strconv.ParseUint(lineSplit[4], 10, 64)
|
||||
tStatus.reqTotal, err = strconv.ParseUint(lineSplit[4], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tengineStatus.http2xx, err = strconv.ParseUint(lineSplit[5], 10, 64)
|
||||
tStatus.http2xx, err = strconv.ParseUint(lineSplit[5], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tengineStatus.http3xx, err = strconv.ParseUint(lineSplit[6], 10, 64)
|
||||
tStatus.http3xx, err = strconv.ParseUint(lineSplit[6], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tengineStatus.http4xx, err = strconv.ParseUint(lineSplit[7], 10, 64)
|
||||
tStatus.http4xx, err = strconv.ParseUint(lineSplit[7], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tengineStatus.http5xx, err = strconv.ParseUint(lineSplit[8], 10, 64)
|
||||
tStatus.http5xx, err = strconv.ParseUint(lineSplit[8], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tengineStatus.httpOtherStatus, err = strconv.ParseUint(lineSplit[9], 10, 64)
|
||||
tStatus.httpOtherStatus, err = strconv.ParseUint(lineSplit[9], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tengineStatus.rt, err = strconv.ParseUint(lineSplit[10], 10, 64)
|
||||
tStatus.rt, err = strconv.ParseUint(lineSplit[10], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tengineStatus.upsReq, err = strconv.ParseUint(lineSplit[11], 10, 64)
|
||||
tStatus.upsReq, err = strconv.ParseUint(lineSplit[11], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tengineStatus.upsRt, err = strconv.ParseUint(lineSplit[12], 10, 64)
|
||||
tStatus.upsRt, err = strconv.ParseUint(lineSplit[12], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tengineStatus.upsTries, err = strconv.ParseUint(lineSplit[13], 10, 64)
|
||||
tStatus.upsTries, err = strconv.ParseUint(lineSplit[13], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tengineStatus.http200, err = strconv.ParseUint(lineSplit[14], 10, 64)
|
||||
tStatus.http200, err = strconv.ParseUint(lineSplit[14], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tengineStatus.http206, err = strconv.ParseUint(lineSplit[15], 10, 64)
|
||||
tStatus.http206, err = strconv.ParseUint(lineSplit[15], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tengineStatus.http302, err = strconv.ParseUint(lineSplit[16], 10, 64)
|
||||
tStatus.http302, err = strconv.ParseUint(lineSplit[16], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tengineStatus.http304, err = strconv.ParseUint(lineSplit[17], 10, 64)
|
||||
tStatus.http304, err = strconv.ParseUint(lineSplit[17], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tengineStatus.http403, err = strconv.ParseUint(lineSplit[18], 10, 64)
|
||||
tStatus.http403, err = strconv.ParseUint(lineSplit[18], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
-    tengineStatus.http404, err = strconv.ParseUint(lineSplit[19], 10, 64)
+    tStatus.http404, err = strconv.ParseUint(lineSplit[19], 10, 64)
     if err != nil {
         return err
     }
-    tengineStatus.http416, err = strconv.ParseUint(lineSplit[20], 10, 64)
+    tStatus.http416, err = strconv.ParseUint(lineSplit[20], 10, 64)
     if err != nil {
         return err
     }
-    tengineStatus.http499, err = strconv.ParseUint(lineSplit[21], 10, 64)
+    tStatus.http499, err = strconv.ParseUint(lineSplit[21], 10, 64)
     if err != nil {
         return err
     }
-    tengineStatus.http500, err = strconv.ParseUint(lineSplit[22], 10, 64)
+    tStatus.http500, err = strconv.ParseUint(lineSplit[22], 10, 64)
     if err != nil {
         return err
     }
-    tengineStatus.http502, err = strconv.ParseUint(lineSplit[23], 10, 64)
+    tStatus.http502, err = strconv.ParseUint(lineSplit[23], 10, 64)
     if err != nil {
         return err
     }
-    tengineStatus.http503, err = strconv.ParseUint(lineSplit[24], 10, 64)
+    tStatus.http503, err = strconv.ParseUint(lineSplit[24], 10, 64)
     if err != nil {
         return err
     }
-    tengineStatus.http504, err = strconv.ParseUint(lineSplit[25], 10, 64)
+    tStatus.http504, err = strconv.ParseUint(lineSplit[25], 10, 64)
     if err != nil {
         return err
     }
-    tengineStatus.http508, err = strconv.ParseUint(lineSplit[26], 10, 64)
+    tStatus.http508, err = strconv.ParseUint(lineSplit[26], 10, 64)
     if err != nil {
         return err
     }
-    tengineStatus.httpOtherDetailStatus, err = strconv.ParseUint(lineSplit[27], 10, 64)
+    tStatus.httpOtherDetailStatus, err = strconv.ParseUint(lineSplit[27], 10, 64)
     if err != nil {
         return err
     }
-    tengineStatus.httpUps4xx, err = strconv.ParseUint(lineSplit[28], 10, 64)
+    tStatus.httpUps4xx, err = strconv.ParseUint(lineSplit[28], 10, 64)
     if err != nil {
         return err
     }
-    tengineStatus.httpUps5xx, err = strconv.ParseUint(lineSplit[29], 10, 64)
+    tStatus.httpUps5xx, err = strconv.ParseUint(lineSplit[29], 10, 64)
     if err != nil {
         return err
     }
-    tags := getTags(addr, tengineStatus.host)
+    tags := getTags(addr, tStatus.host)
     fields := map[string]interface{}{
-        "bytes_in": tengineStatus.bytesIn,
-        "bytes_out": tengineStatus.bytesOut,
-        "conn_total": tengineStatus.connTotal,
-        "req_total": tengineStatus.reqTotal,
-        "http_2xx": tengineStatus.http2xx,
-        "http_3xx": tengineStatus.http3xx,
-        "http_4xx": tengineStatus.http4xx,
-        "http_5xx": tengineStatus.http5xx,
-        "http_other_status": tengineStatus.httpOtherStatus,
-        "rt": tengineStatus.rt,
-        "ups_req": tengineStatus.upsReq,
-        "ups_rt": tengineStatus.upsRt,
-        "ups_tries": tengineStatus.upsTries,
-        "http_200": tengineStatus.http200,
-        "http_206": tengineStatus.http206,
-        "http_302": tengineStatus.http302,
-        "http_304": tengineStatus.http304,
-        "http_403": tengineStatus.http403,
-        "http_404": tengineStatus.http404,
-        "http_416": tengineStatus.http416,
-        "http_499": tengineStatus.http499,
-        "http_500": tengineStatus.http500,
-        "http_502": tengineStatus.http502,
-        "http_503": tengineStatus.http503,
-        "http_504": tengineStatus.http504,
-        "http_508": tengineStatus.http508,
-        "http_other_detail_status": tengineStatus.httpOtherDetailStatus,
-        "http_ups_4xx": tengineStatus.httpUps4xx,
-        "http_ups_5xx": tengineStatus.httpUps5xx,
+        "bytes_in": tStatus.bytesIn,
+        "bytes_out": tStatus.bytesOut,
+        "conn_total": tStatus.connTotal,
+        "req_total": tStatus.reqTotal,
+        "http_2xx": tStatus.http2xx,
+        "http_3xx": tStatus.http3xx,
+        "http_4xx": tStatus.http4xx,
+        "http_5xx": tStatus.http5xx,
+        "http_other_status": tStatus.httpOtherStatus,
+        "rt": tStatus.rt,
+        "ups_req": tStatus.upsReq,
+        "ups_rt": tStatus.upsRt,
+        "ups_tries": tStatus.upsTries,
+        "http_200": tStatus.http200,
+        "http_206": tStatus.http206,
+        "http_302": tStatus.http302,
+        "http_304": tStatus.http304,
+        "http_403": tStatus.http403,
+        "http_404": tStatus.http404,
+        "http_416": tStatus.http416,
+        "http_499": tStatus.http499,
+        "http_500": tStatus.http500,
+        "http_502": tStatus.http502,
+        "http_503": tStatus.http503,
+        "http_504": tStatus.http504,
+        "http_508": tStatus.http508,
+        "http_other_detail_status": tStatus.httpOtherDetailStatus,
+        "http_ups_4xx": tStatus.httpUps4xx,
+        "http_ups_5xx": tStatus.httpUps5xx,
     }
     acc.AddFields("tengine", fields, tags)
 }
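All of the hunks in this commit follow the same pattern: identifiers flagged by revive's `exported` rule are either unexported or given compliant doc comments. A minimal sketch of what the rule checks; the names below are illustrative and not from the commit:

package demo

// Flagged by revive:exported: a doc comment on an exported function
// must start with the function's name ("Sum returns ...").
// Returns the sum of a and b.
func Sum(a, b int) int { return a + b }

// For helpers that are only used inside the package, the simpler fix is
// to unexport them, which removes them from the rule's scope entirely.
// That is the fix applied throughout plugins/inputs/[t-v]* here.
func sum(a, b int) int { return a + b }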
@@ -19,6 +19,17 @@ import (
 //go:embed sample.conf
 var sampleConfig string
 
+type Tomcat struct {
+    URL string `toml:"url"`
+    Username string `toml:"username"`
+    Password string `toml:"password"`
+    Timeout config.Duration `toml:"timeout"`
+    tls.ClientConfig
+
+    client *http.Client
+    request *http.Request
+}
+
 type tomcatStatus struct {
     TomcatJvm tomcatJvm `xml:"jvm"`
     TomcatConnectors []tomcatConnector `xml:"connector"`
@@ -55,6 +66,7 @@ type threadInfo struct {
     CurrentThreadCount int64 `xml:"currentThreadCount,attr"`
     CurrentThreadsBusy int64 `xml:"currentThreadsBusy,attr"`
 }
+
 type requestInfo struct {
     MaxTime int `xml:"maxTime,attr"`
     ProcessingTime int `xml:"processingTime,attr"`
@@ -64,17 +76,6 @@ type requestInfo struct {
     BytesSent int64 `xml:"bytesSent,attr"`
 }
 
-type Tomcat struct {
-    URL string
-    Username string
-    Password string
-    Timeout config.Duration
-    tls.ClientConfig
-
-    client *http.Client
-    request *http.Request
-}
-
 func (*Tomcat) SampleConfig() string {
     return sampleConfig
 }
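Besides moving the Tomcat struct above its methods, the hunk above adds explicit `toml` tags; the tags, not the Go field names, decide which config keys bind to which fields. A rough standalone sketch of tag-driven decoding, shown here with the BurntSushi decoder for illustration only (Telegraf wires this up through its own config loader):

package main

import (
    "fmt"

    "github.com/BurntSushi/toml" // illustrative decoder, not Telegraf's
)

// Config mirrors the plugin struct style: lowercase toml keys map onto
// exported Go fields through the struct tags.
type Config struct {
    URL      string `toml:"url"`
    Username string `toml:"username"`
}

func main() {
    var c Config
    data := "url = \"http://127.0.0.1:8080\"\nusername = \"tomcat\""
    if _, err := toml.Decode(data, &c); err != nil {
        panic(err)
    }
    fmt.Println(c.URL, c.Username)
}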
@@ -13,8 +13,8 @@ import (
 var sampleConfig string
 
 type Trig struct {
+    Amplitude float64 `toml:"amplitude"`
     x float64
-    Amplitude float64
 }
 
 func (*Trig) SampleConfig() string {

@@ -17,15 +17,14 @@ import (
 var sampleConfig string
 
 type Twemproxy struct {
-    Addr string
-    Pools []string
+    Addr string `toml:"addr"`
+    Pools []string `toml:"pools"`
 }
 
 func (*Twemproxy) SampleConfig() string {
     return sampleConfig
 }
 
 // Gather data from all Twemproxy instances
 func (t *Twemproxy) Gather(acc telegraf.Accumulator) error {
     conn, err := net.DialTimeout("tcp", t.Addr, 1*time.Second)
     if err != nil {
@@ -49,11 +48,7 @@ func (t *Twemproxy) Gather(acc telegraf.Accumulator) error {
 }
 
 // Process Twemproxy server stats
-func (t *Twemproxy) processStat(
-    acc telegraf.Accumulator,
-    tags map[string]string,
-    data map[string]interface{},
-) {
+func (t *Twemproxy) processStat(acc telegraf.Accumulator, tags map[string]string, data map[string]interface{}) {
     if source, ok := data["source"]; ok {
         if val, ok := source.(string); ok {
             tags["source"] = val

@@ -23,9 +23,11 @@ import (
 //go:embed sample.conf
 var sampleConfig string
 
+type runner func(unbound Unbound) (*bytes.Buffer, error)
+var (
+    defaultBinary = "/usr/sbin/unbound-control"
+    defaultTimeout = config.Duration(time.Second)
+)
+
 // Unbound is used to store configuration values
 type Unbound struct {
     Binary string `toml:"binary"`
     Timeout config.Duration `toml:"timeout"`
@@ -37,61 +39,8 @@ type Unbound struct {
     run runner
 }
 
-var defaultBinary = "/usr/sbin/unbound-control"
-var defaultTimeout = config.Duration(time.Second)
-type runner func(unbound Unbound) (*bytes.Buffer, error)
-
-// Shell out to unbound_stat and return the output
-func unboundRunner(unbound Unbound) (*bytes.Buffer, error) {
-    cmdArgs := []string{"stats_noreset"}
-
-    if unbound.Server != "" {
-        host, port, err := net.SplitHostPort(unbound.Server)
-        if err != nil { // No port was specified
-            host = unbound.Server
-            port = ""
-        }
-
-        // Unbound control requires an IP address, and we want to be nice to the user
-        resolver := net.Resolver{}
-        ctx, lookUpCancel := context.WithTimeout(context.Background(), time.Duration(unbound.Timeout))
-        defer lookUpCancel()
-        serverIps, err := resolver.LookupIPAddr(ctx, host)
-        if err != nil {
-            return nil, fmt.Errorf("error looking up ip for server %q: %w", unbound.Server, err)
-        }
-        if len(serverIps) == 0 {
-            return nil, fmt.Errorf("error no ip for server %q: %w", unbound.Server, err)
-        }
-        server := serverIps[0].IP.String()
-        if port != "" {
-            server = server + "@" + port
-        }
-
-        cmdArgs = append([]string{"-s", server}, cmdArgs...)
-    }
-
-    if unbound.ConfigFile != "" {
-        cmdArgs = append([]string{"-c", unbound.ConfigFile}, cmdArgs...)
-    }
-
-    cmd := exec.Command(unbound.Binary, cmdArgs...)
-
-    if unbound.UseSudo {
-        cmdArgs = append([]string{unbound.Binary}, cmdArgs...)
-        cmd = exec.Command("sudo", cmdArgs...)
-    }
-
-    var out bytes.Buffer
-    cmd.Stdout = &out
-    err := internal.RunTimeout(cmd, time.Duration(unbound.Timeout))
-    if err != nil {
-        return &out, fmt.Errorf("error running unbound-control %q %q: %w", unbound.Binary, cmdArgs, err)
-    }
-
-    return &out, nil
-}
-
-// Gather collects stats from unbound-control and adds them to the Accumulator
 func (*Unbound) SampleConfig() string {
     return sampleConfig
 }
@@ -175,6 +124,57 @@ func (s *Unbound) Gather(acc telegraf.Accumulator) error {
     return nil
 }
 
+// Shell out to unbound_stat and return the output
+func unboundRunner(unbound Unbound) (*bytes.Buffer, error) {
+    cmdArgs := []string{"stats_noreset"}
+
+    if unbound.Server != "" {
+        host, port, err := net.SplitHostPort(unbound.Server)
+        if err != nil { // No port was specified
+            host = unbound.Server
+            port = ""
+        }
+
+        // Unbound control requires an IP address, and we want to be nice to the user
+        resolver := net.Resolver{}
+        ctx, lookUpCancel := context.WithTimeout(context.Background(), time.Duration(unbound.Timeout))
+        defer lookUpCancel()
+        serverIps, err := resolver.LookupIPAddr(ctx, host)
+        if err != nil {
+            return nil, fmt.Errorf("error looking up ip for server %q: %w", unbound.Server, err)
+        }
+        if len(serverIps) == 0 {
+            return nil, fmt.Errorf("error no ip for server %q: %w", unbound.Server, err)
+        }
+        server := serverIps[0].IP.String()
+        if port != "" {
+            server = server + "@" + port
+        }
+
+        cmdArgs = append([]string{"-s", server}, cmdArgs...)
+    }
+
+    if unbound.ConfigFile != "" {
+        cmdArgs = append([]string{"-c", unbound.ConfigFile}, cmdArgs...)
+    }
+
+    cmd := exec.Command(unbound.Binary, cmdArgs...)
+
+    if unbound.UseSudo {
+        cmdArgs = append([]string{unbound.Binary}, cmdArgs...)
+        cmd = exec.Command("sudo", cmdArgs...)
+    }
+
+    var out bytes.Buffer
+    cmd.Stdout = &out
+    err := internal.RunTimeout(cmd, time.Duration(unbound.Timeout))
+    if err != nil {
+        return &out, fmt.Errorf("error running unbound-control %q %q: %w", unbound.Binary, cmdArgs, err)
+    }
+
+    return &out, nil
+}
+
 func init() {
     inputs.Add("unbound", func() telegraf.Input {
         return &Unbound{

@@ -9,7 +9,7 @@ import (
     "github.com/influxdata/telegraf/testutil"
 )
 
-func UnboundControl(output string) func(Unbound) (*bytes.Buffer, error) {
+func unboundControl(output string) func(Unbound) (*bytes.Buffer, error) {
     return func(Unbound) (*bytes.Buffer, error) {
         return bytes.NewBufferString(output), nil
     }
@@ -18,7 +18,7 @@ func UnboundControl(output string) func(Unbound) (*bytes.Buffer, error)
 func TestParseFullOutput(t *testing.T) {
     acc := &testutil.Accumulator{}
     v := &Unbound{
-        run: UnboundControl(fullOutput),
+        run: unboundControl(fullOutput),
     }
     err := v.Gather(acc)
@@ -35,7 +35,7 @@ func TestParseFullOutput(t *testing.T) {
 func TestParseFullOutputThreadAsTag(t *testing.T) {
     acc := &testutil.Accumulator{}
     v := &Unbound{
-        run: UnboundControl(fullOutput),
+        run: unboundControl(fullOutput),
         ThreadAsTag: true,
     }
     err := v.Gather(acc)
@@ -133,6 +133,7 @@ var parsedFullOutputThreadAsTagMeasurementUnboundThreads = map[string]interface{
     "recursion_time_avg": float64(0.015020),
     "recursion_time_median": float64(0.00292343),
 }
+
 var parsedFullOutputThreadAsTagMeasurementUnbound = map[string]interface{}{
     "total_num_queries": float64(11907596),
    "total_num_cachehits": float64(11489288),
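The unbound hunks keep the unexported `run runner` field as the seam that lets tests substitute canned output for the real `unbound-control` shell-out, exactly as `unboundControl` does above. A self-contained sketch of that injection pattern, with illustrative names:

package main

import (
    "bytes"
    "fmt"
)

// runner abstracts "produce stats output" so production code can shell
// out while tests return a fixed buffer.
type runner func() (*bytes.Buffer, error)

type collector struct {
    run runner // unexported, like Unbound.run: injectable but not part of the API
}

// fakeRunner plays the role of unboundControl in the tests above.
func fakeRunner(output string) runner {
    return func() (*bytes.Buffer, error) {
        return bytes.NewBufferString(output), nil
    }
}

func main() {
    c := collector{run: fakeRunner("total.num.queries=42\n")}
    out, err := c.run()
    if err != nil {
        panic(err)
    }
    fmt.Print(out.String())
}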
@@ -18,40 +18,42 @@ import (
 //go:embed sample.conf
 var sampleConfig string
 
 // see: https://networkupstools.org/docs/developer-guide.chunked/index.html
-const defaultAddress = "127.0.0.1"
-const defaultPort = 3493
+var (
+    // Define the set of variables _always_ included in a metric
+    mandatoryVariableSet = map[string]bool{
+        "battery.date": true,
+        "battery.mfr.date": true,
+        "battery.runtime": true,
+        "device.model": true,
+        "device.serial": true,
+        "ups.firmware": true,
+        "ups.status": true,
+    }
+    // Define the default field set to add if existing
+    defaultFieldSet = map[string]string{
+        "battery.charge": "battery_charge_percent",
+        "battery.runtime.low": "battery_runtime_low",
+        "battery.voltage": "battery_voltage",
+        "input.frequency": "input_frequency",
+        "input.transfer.high": "input_transfer_high",
+        "input.transfer.low": "input_transfer_low",
+        "input.voltage": "input_voltage",
+        "ups.temperature": "internal_temp",
+        "ups.load": "load_percent",
+        "battery.voltage.nominal": "nominal_battery_voltage",
+        "input.voltage.nominal": "nominal_input_voltage",
+        "ups.realpower.nominal": "nominal_power",
+        "output.voltage": "output_voltage",
+        "ups.realpower": "real_power",
+        "ups.delay.shutdown": "ups_delay_shutdown",
+        "ups.delay.start": "ups_delay_start",
+    }
+)
 
-// Define the set of variables _always_ included in a metric
-var mandatoryVariableSet = map[string]bool{
-    "battery.date": true,
-    "battery.mfr.date": true,
-    "battery.runtime": true,
-    "device.model": true,
-    "device.serial": true,
-    "ups.firmware": true,
-    "ups.status": true,
-}
-
-// Define the default field set to add if existing
-var defaultFieldSet = map[string]string{
-    "battery.charge": "battery_charge_percent",
-    "battery.runtime.low": "battery_runtime_low",
-    "battery.voltage": "battery_voltage",
-    "input.frequency": "input_frequency",
-    "input.transfer.high": "input_transfer_high",
-    "input.transfer.low": "input_transfer_low",
-    "input.voltage": "input_voltage",
-    "ups.temperature": "internal_temp",
-    "ups.load": "load_percent",
-    "battery.voltage.nominal": "nominal_battery_voltage",
-    "input.voltage.nominal": "nominal_input_voltage",
-    "ups.realpower.nominal": "nominal_power",
-    "output.voltage": "output_voltage",
-    "ups.realpower": "real_power",
-    "ups.delay.shutdown": "ups_delay_shutdown",
-    "ups.delay.start": "ups_delay_start",
-}
+const (
+    defaultAddress = "127.0.0.1"
+    defaultPort = 3493
+)
 
 type Upsd struct {
     Server string `toml:"server"`
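Grouping `mandatoryVariableSet` and `defaultFieldSet` into a single var block is organizational only; the maps still translate NUT variable names into Telegraf field names. A rough sketch of how such a lookup table is typically applied; the surrounding code is assumed, not taken from the plugin:

package main

import "fmt"

// Two entries from the table above; the real map is larger.
var defaultFieldSet = map[string]string{
    "battery.charge": "battery_charge_percent",
    "ups.load":       "load_percent",
}

func main() {
    // vars would come from the upsd LIST VAR response.
    vars := map[string]string{"battery.charge": "98", "ups.load": "23"}

    fields := make(map[string]interface{})
    for nutName, fieldName := range defaultFieldSet {
        if v, ok := vars[nutName]; ok {
            fields[fieldName] = v // the real plugin also converts value types
        }
    }
    fmt.Println(fields)
}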
@@ -106,13 +106,13 @@ func TestCases(t *testing.T) {
 }
 
 type interaction struct {
-    Expected string
-    Response string
+    expected string
+    response string
 }
 
 type variable struct {
-    Name string
-    Value string
+    name string
+    value string
 }
 
 type mockServer struct {
@@ -122,32 +122,32 @@ type mockServer struct {
 func (s *mockServer) init() {
     s.protocol = []interaction{
         {
-            Expected: "VER\n",
-            Response: "1\n",
+            expected: "VER\n",
+            response: "1\n",
         },
         {
-            Expected: "NETVER\n",
-            Response: "1\n",
+            expected: "NETVER\n",
+            response: "1\n",
         },
         {
-            Expected: "LIST UPS\n",
-            Response: "BEGIN LIST UPS\nUPS fake \"fake UPS\"\nEND LIST UPS\n",
+            expected: "LIST UPS\n",
+            response: "BEGIN LIST UPS\nUPS fake \"fake UPS\"\nEND LIST UPS\n",
         },
         {
-            Expected: "LIST CLIENT fake\n",
-            Response: "BEGIN LIST CLIENT fake\nCLIENT fake 127.0.0.1\nEND LIST CLIENT fake\n",
+            expected: "LIST CLIENT fake\n",
+            response: "BEGIN LIST CLIENT fake\nCLIENT fake 127.0.0.1\nEND LIST CLIENT fake\n",
         },
         {
-            Expected: "LIST CMD fake\n",
-            Response: "BEGIN LIST CMD fake\nEND LIST CMD fake\n",
+            expected: "LIST CMD fake\n",
+            response: "BEGIN LIST CMD fake\nEND LIST CMD fake\n",
         },
         {
-            Expected: "GET UPSDESC fake\n",
-            Response: "UPSDESC fake \"stub-ups-description\"\n",
+            expected: "GET UPSDESC fake\n",
+            response: "UPSDESC fake \"stub-ups-description\"\n",
         },
         {
-            Expected: "GET NUMLOGINS fake\n",
-            Response: "NUMLOGINS fake 1\n",
+            expected: "GET NUMLOGINS fake\n",
+            response: "NUMLOGINS fake 1\n",
         },
     }
 }
@@ -156,29 +156,29 @@ func (s *mockServer) addVariables(variables []variable, types map[string]string)
     // Add a VAR entries for the variables
     values := make([]string, 0, len(variables))
     for _, v := range variables {
-        values = append(values, fmt.Sprintf("VAR fake %s %q", v.Name, v.Value))
+        values = append(values, fmt.Sprintf("VAR fake %s %q", v.name, v.value))
     }
 
     s.protocol = append(s.protocol, interaction{
-        Expected: "LIST VAR fake\n",
-        Response: "BEGIN LIST VAR fake\n" + strings.Join(values, "\n") + "\nEND LIST VAR fake\n",
+        expected: "LIST VAR fake\n",
+        response: "BEGIN LIST VAR fake\n" + strings.Join(values, "\n") + "\nEND LIST VAR fake\n",
     })
 
     // Add a description and type interaction for the variable
     for _, v := range variables {
-        variableType, found := types[v.Name]
+        variableType, found := types[v.name]
         if !found {
-            return fmt.Errorf("type for variable %q not found", v.Name)
+            return fmt.Errorf("type for variable %q not found", v.name)
         }
 
         s.protocol = append(s.protocol,
             interaction{
-                Expected: "GET DESC fake " + v.Name + "\n",
-                Response: "DESC fake" + v.Name + " \"No description here\"\n",
+                expected: "GET DESC fake " + v.name + "\n",
+                response: "DESC fake" + v.name + " \"No description here\"\n",
             },
             interaction{
-                Expected: "GET TYPE fake " + v.Name + "\n",
-                Response: "TYPE fake " + v.Name + " " + variableType + "\n",
+                expected: "GET TYPE fake " + v.name + "\n",
+                response: "TYPE fake " + v.name + " " + variableType + "\n",
             },
         )
     }
@@ -217,12 +217,12 @@ func (s *mockServer) listen(ctx context.Context) (*net.TCPAddr, error) {
             }
 
             request := in[:n]
-            if !bytes.Equal([]byte(interaction.Expected), request) {
-                fmt.Printf("Unexpected request %q, expected %q\n", string(request), interaction.Expected)
+            if !bytes.Equal([]byte(interaction.expected), request) {
+                fmt.Printf("Unexpected request %q, expected %q\n", string(request), interaction.expected)
                 return
             }
 
-            if _, err := conn.Write([]byte(interaction.Response)); err != nil {
+            if _, err := conn.Write([]byte(interaction.response)); err != nil {
                 fmt.Printf("Cannot write answer for request %q: %v\n", string(request), err)
                 return
             }
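Beyond the field renames, the test file shows the mock's design: a scripted list of expected-request/canned-response pairs replayed over a socket. A compact standalone sketch of the same idea using an in-memory pipe; this is not the actual test helper:

package main

import (
    "bufio"
    "fmt"
    "net"
)

type interaction struct {
    expected string
    response string
}

// serve replays the script: verify each incoming line, answer with the
// canned response, and bail out on the first mismatch.
func serve(conn net.Conn, script []interaction) {
    defer conn.Close()
    r := bufio.NewReader(conn)
    for _, ia := range script {
        line, err := r.ReadString('\n')
        if err != nil || line != ia.expected {
            fmt.Printf("unexpected request %q, expected %q\n", line, ia.expected)
            return
        }
        conn.Write([]byte(ia.response))
    }
}

func main() {
    client, server := net.Pipe()
    go serve(server, []interaction{{expected: "VER\n", response: "1\n"}})

    fmt.Fprint(client, "VER\n")
    reply, _ := bufio.NewReader(client).ReadString('\n')
    fmt.Printf("got %q\n", reply)
    client.Close()
}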
@@ -25,7 +25,6 @@ import (
 //go:embed sample.conf
 var sampleConfig string
 
-// Uwsgi server struct
 type Uwsgi struct {
     Servers []string `toml:"servers"`
     Timeout config.Duration `toml:"timeout"`
@@ -33,11 +32,82 @@ type Uwsgi struct {
     client *http.Client
 }
 
+// statsServer defines the stats server structure.
+type statsServer struct {
+    // Tags
+    source string
+    PID int `json:"pid"`
+    UID int `json:"uid"`
+    GID int `json:"gid"`
+    Version string `json:"version"`
+
+    // Fields
+    ListenQueue int `json:"listen_queue"`
+    ListenQueueErrors int `json:"listen_queue_errors"`
+    SignalQueue int `json:"signal_queue"`
+    Load int `json:"load"`
+
+    Workers []*worker `json:"workers"`
+}
+
+// worker defines the worker metric structure.
+type worker struct {
+    // Tags
+    WorkerID int `json:"id"`
+    PID int `json:"pid"`
+
+    // Fields
+    Accepting int `json:"accepting"`
+    Requests int `json:"requests"`
+    DeltaRequests int `json:"delta_requests"`
+    Exceptions int `json:"exceptions"`
+    HarakiriCount int `json:"harakiri_count"`
+    Signals int `json:"signals"`
+    SignalQueue int `json:"signal_queue"`
+    Status string `json:"status"`
+    Rss int `json:"rss"`
+    Vsz int `json:"vsz"`
+    RunningTime int `json:"running_time"`
+    LastSpawn int `json:"last_spawn"`
+    RespawnCount int `json:"respawn_count"`
+    Tx int `json:"tx"`
+    AvgRt int `json:"avg_rt"`
+
+    Apps []*app `json:"apps"`
+    Cores []*core `json:"cores"`
+}
+
+// app defines the app metric structure.
+type app struct {
+    // Tags
+    AppID int `json:"id"`
+
+    // Fields
+    Modifier1 int `json:"modifier1"`
+    Requests int `json:"requests"`
+    StartupTime int `json:"startup_time"`
+    Exceptions int `json:"exceptions"`
+}
+
+// core defines the core metric structure.
+type core struct {
+    // Tags
+    CoreID int `json:"id"`
+
+    // Fields
+    Requests int `json:"requests"`
+    StaticRequests int `json:"static_requests"`
+    RoutedRequests int `json:"routed_requests"`
+    OffloadedRequests int `json:"offloaded_requests"`
+    WriteErrors int `json:"write_errors"`
+    ReadErrors int `json:"read_errors"`
+    InRequest int `json:"in_request"`
+}
+
 func (*Uwsgi) SampleConfig() string {
     return sampleConfig
 }
 
 // Gather collect data from uWSGI Server
 func (u *Uwsgi) Gather(acc telegraf.Accumulator) error {
     if u.client == nil {
         u.client = &http.Client{
@@ -71,7 +141,7 @@ func (u *Uwsgi) Gather(acc telegraf.Accumulator) error {
 func (u *Uwsgi) gatherServer(acc telegraf.Accumulator, address *url.URL) error {
     var err error
     var r io.ReadCloser
-    var s StatsServer
+    var s statsServer
 
     switch address.Scheme {
     case "tcp":
@@ -111,7 +181,7 @@ func (u *Uwsgi) gatherServer(acc telegraf.Accumulator, address *url.URL) error {
     return err
 }
 
-func gatherStatServer(acc telegraf.Accumulator, s *StatsServer) {
+func gatherStatServer(acc telegraf.Accumulator, s *statsServer) {
     fields := map[string]interface{}{
         "listen_queue": s.ListenQueue,
         "listen_queue_errors": s.ListenQueueErrors,
@@ -133,7 +203,7 @@ func gatherStatServer(acc telegraf.Accumulator, s *statsServer) {
     gatherCores(acc, s)
 }
 
-func gatherWorkers(acc telegraf.Accumulator, s *StatsServer) {
+func gatherWorkers(acc telegraf.Accumulator, s *statsServer) {
     for _, w := range s.Workers {
         fields := map[string]interface{}{
             "requests": w.Requests,
@@ -162,7 +232,7 @@ func gatherWorkers(acc telegraf.Accumulator, s *statsServer) {
     }
 }
 
-func gatherApps(acc telegraf.Accumulator, s *StatsServer) {
+func gatherApps(acc telegraf.Accumulator, s *statsServer) {
     for _, w := range s.Workers {
         for _, a := range w.Apps {
             fields := map[string]interface{}{
@@ -181,7 +251,7 @@ func gatherApps(acc telegraf.Accumulator, s *statsServer) {
     }
 }
 
-func gatherCores(acc telegraf.Accumulator, s *StatsServer) {
+func gatherCores(acc telegraf.Accumulator, s *statsServer) {
     for _, w := range s.Workers {
         for _, c := range w.Cores {
             fields := map[string]interface{}{
@@ -210,75 +280,3 @@ func init() {
         }
     })
 }
-
-// StatsServer defines the stats server structure.
-type StatsServer struct {
-    // Tags
-    source string
-    PID int `json:"pid"`
-    UID int `json:"uid"`
-    GID int `json:"gid"`
-    Version string `json:"version"`
-
-    // Fields
-    ListenQueue int `json:"listen_queue"`
-    ListenQueueErrors int `json:"listen_queue_errors"`
-    SignalQueue int `json:"signal_queue"`
-    Load int `json:"load"`
-
-    Workers []*Worker `json:"workers"`
-}
-
-// Worker defines the worker metric structure.
-type Worker struct {
-    // Tags
-    WorkerID int `json:"id"`
-    PID int `json:"pid"`
-
-    // Fields
-    Accepting int `json:"accepting"`
-    Requests int `json:"requests"`
-    DeltaRequests int `json:"delta_requests"`
-    Exceptions int `json:"exceptions"`
-    HarakiriCount int `json:"harakiri_count"`
-    Signals int `json:"signals"`
-    SignalQueue int `json:"signal_queue"`
-    Status string `json:"status"`
-    Rss int `json:"rss"`
-    Vsz int `json:"vsz"`
-    RunningTime int `json:"running_time"`
-    LastSpawn int `json:"last_spawn"`
-    RespawnCount int `json:"respawn_count"`
-    Tx int `json:"tx"`
-    AvgRt int `json:"avg_rt"`
-
-    Apps []*App `json:"apps"`
-    Cores []*Core `json:"cores"`
-}
-
-// App defines the app metric structure.
-type App struct {
-    // Tags
-    AppID int `json:"id"`
-
-    // Fields
-    Modifier1 int `json:"modifier1"`
-    Requests int `json:"requests"`
-    StartupTime int `json:"startup_time"`
-    Exceptions int `json:"exceptions"`
-}
-
-// Core defines the core metric structure.
-type Core struct {
-    // Tags
-    CoreID int `json:"id"`
-
-    // Fields
-    Requests int `json:"requests"`
-    StaticRequests int `json:"static_requests"`
-    RoutedRequests int `json:"routed_requests"`
-    OffloadedRequests int `json:"offloaded_requests"`
-    WriteErrors int `json:"write_errors"`
-    ReadErrors int `json:"read_errors"`
-    InRequest int `json:"in_request"`
-}
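Unexporting `statsServer` and its helper types is safe for decoding because encoding/json only needs exported fields and `json` tags, not an exported type name. A minimal sketch with the payload trimmed to two fields:

package main

import (
    "encoding/json"
    "fmt"
)

// statsServer stays unexported; only its fields must be exported for
// encoding/json to populate them.
type statsServer struct {
    ListenQueue int `json:"listen_queue"`
    Load        int `json:"load"`
}

func main() {
    payload := []byte(`{"listen_queue": 3, "load": 1}`)

    var s statsServer
    if err := json.Unmarshal(payload, &s); err != nil {
        panic(err)
    }
    fmt.Println(s.ListenQueue, s.Load)
}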
@@ -22,6 +22,8 @@ import (
 //go:embed sample.conf
 var sampleConfig string
 
+const timeLayout = "2006-01-02 15:04:05 -0700 MST"
+
 // Vault configuration object
 type Vault struct {
     URL string `toml:"url"`
@@ -33,8 +35,6 @@ type Vault struct {
     client *http.Client
 }
 
-const timeLayout = "2006-01-02 15:04:05 -0700 MST"
-
 func (*Vault) SampleConfig() string {
     return sampleConfig
 }
@@ -74,7 +74,7 @@ func (*Vault) Start(telegraf.Accumulator) error {
     return nil
 }
 
-// Gather, collects metrics from Vault endpoint
+// Gather collects metrics from Vault endpoint
 func (n *Vault) Gather(acc telegraf.Accumulator) error {
     sysMetrics, err := n.loadJSON(n.URL + "/v1/sys/metrics")
     if err != nil {
@@ -90,7 +90,7 @@ func (n *Vault) Stop() {
     }
 }
 
-func (n *Vault) loadJSON(url string) (*SysMetrics, error) {
+func (n *Vault) loadJSON(url string) (*sysMetrics, error) {
     req, err := http.NewRequest("GET", url, nil)
     if err != nil {
         return nil, err
@@ -109,7 +109,7 @@ func (n *Vault) loadJSON(url string) (*sysMetrics, error) {
         return nil, fmt.Errorf("%s returned HTTP status %s", url, resp.Status)
     }
 
-    var metrics SysMetrics
+    var metrics sysMetrics
     err = json.NewDecoder(resp.Body).Decode(&metrics)
     if err != nil {
         return nil, fmt.Errorf("error parsing json response: %w", err)
@@ -119,7 +119,7 @@ func (n *Vault) loadJSON(url string) (*sysMetrics, error) {
 }
 
 // buildVaultMetrics, it builds all the metrics and adds them to the accumulator
-func buildVaultMetrics(acc telegraf.Accumulator, sysMetrics *SysMetrics) error {
+func buildVaultMetrics(acc telegraf.Accumulator, sysMetrics *sysMetrics) error {
     t, err := internal.ParseTimestamp(timeLayout, sysMetrics.Timestamp, nil)
     if err != nil {
         return fmt.Errorf("error parsing time: %w", err)

@@ -1,6 +1,6 @@
 package vault
 
-type SysMetrics struct {
+type sysMetrics struct {
     Timestamp string `json:"timestamp"`
     Gauges []gauge `json:"Gauges"`
     Counters []counter `json:"Counters"`
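`timeLayout` moves above the struct unchanged; it is Go's reference-time notation for timestamps of the form `2006-01-02 15:04:05 -0700 MST`. A quick sketch of parsing with that layout; the concrete timestamp below is made up:

package main

import (
    "fmt"
    "time"
)

const timeLayout = "2006-01-02 15:04:05 -0700 MST"

func main() {
    // A value in the shape Vault's /v1/sys/metrics endpoint reports.
    t, err := time.Parse(timeLayout, "2024-05-01 10:30:00 +0000 UTC")
    if err != nil {
        panic(err)
    }
    fmt.Println(t.UTC())
}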
@@ -27,47 +27,47 @@ import (
 // and server say.
 const absoluteMaxMetrics = 10000
 
-// ClientFactory is used to obtain Clients to be used throughout the plugin. Typically,
-// a single Client is reused across all functions and goroutines, but the client
+// clientFactory is used to obtain Clients to be used throughout the plugin. Typically,
+// a single client is reused across all functions and goroutines, but the client
 // is periodically recycled to avoid authentication expiration issues.
-type ClientFactory struct {
-    client *Client
+type clientFactory struct {
+    client *client
     mux sync.Mutex
     vSphereURL *url.URL
     parent *VSphere
 }
 
-// Client represents a connection to vSphere and is backed by a govmomi connection
-type Client struct {
-    Client *govmomi.Client
-    Views *view.Manager
-    Root *view.ContainerView
-    Perf *performance.Manager
-    Valid bool
-    Timeout time.Duration
+// client represents a connection to vSphere and is backed by a govmomi connection
+type client struct {
+    client *govmomi.Client
+    views *view.Manager
+    root *view.ContainerView
+    perf *performance.Manager
+    valid bool
+    timeout time.Duration
     closeGate sync.Once
     log telegraf.Logger
 }
 
-// NewClientFactory creates a new ClientFactory and prepares it for use.
-func NewClientFactory(vSphereURL *url.URL, parent *VSphere) *ClientFactory {
-    return &ClientFactory{
+// newClientFactory creates a new clientFactory and prepares it for use.
+func newClientFactory(vSphereURL *url.URL, parent *VSphere) *clientFactory {
+    return &clientFactory{
         client: nil,
         parent: parent,
         vSphereURL: vSphereURL,
     }
 }
 
-// GetClient returns a client. The caller is responsible for calling Release()
+// getClient returns a client. The caller is responsible for calling Release()
 // on the client once it's done using it.
-func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) {
+func (cf *clientFactory) getClient(ctx context.Context) (*client, error) {
     cf.mux.Lock()
     defer cf.mux.Unlock()
     retrying := false
     for {
         if cf.client == nil {
             var err error
-            if cf.client, err = NewClient(ctx, cf.vSphereURL, cf.parent); err != nil {
+            if cf.client, err = newClient(ctx, cf.vSphereURL, cf.parent); err != nil {
                 return nil, err
             }
         }
@@ -89,13 +89,13 @@ func (cf *clientFactory) getClient(ctx context.Context) (*client, error) {
     }
 }
 
-func (cf *ClientFactory) testClient(ctx context.Context) error {
+func (cf *clientFactory) testClient(ctx context.Context) error {
     // Execute a dummy call against the server to make sure the client is
     // still functional. If not, try to log back in. If that doesn't work,
     // we give up.
     ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(cf.parent.Timeout))
     defer cancel1()
-    if _, err := methods.GetCurrentTime(ctx1, cf.client.Client); err != nil {
+    if _, err := methods.GetCurrentTime(ctx1, cf.client.client); err != nil {
         cf.parent.Log.Info("Client session seems to have time out. Reauthenticating!")
         ctx2, cancel2 := context.WithTimeout(ctx, time.Duration(cf.parent.Timeout))
         defer cancel2()
@@ -113,7 +113,7 @@ func (cf *clientFactory) testClient(ctx context.Context) error {
         defer password.Destroy()
         auth := url.UserPassword(username.String(), password.String())
 
-        if err := cf.client.Client.SessionManager.Login(ctx2, auth); err != nil {
+        if err := cf.client.client.SessionManager.Login(ctx2, auth); err != nil {
             return fmt.Errorf("renewing authentication failed: %w", err)
         }
     }
@@ -121,10 +121,10 @@ func (cf *clientFactory) testClient(ctx context.Context) error {
     return nil
 }
 
-// NewClient creates a new vSphere client based on the url and setting passed as parameters.
-func NewClient(ctx context.Context, vSphereURL *url.URL, vs *VSphere) (*Client, error) {
-    sw := NewStopwatch("connect", vSphereURL.Host)
-    defer sw.Stop()
+// newClient creates a new vSphere client based on the url and setting passed as parameters.
+func newClient(ctx context.Context, vSphereURL *url.URL, vs *VSphere) (*client, error) {
+    sw := newStopwatch("connect", vSphereURL.Host)
+    defer sw.stop()
 
     tlsCfg, err := vs.ClientConfig.TLSConfig()
     if err != nil {
@@ -215,19 +215,19 @@ func newClient(ctx context.Context, vSphereURL *url.URL, vs *VSphere) (*client,
 
     p := performance.NewManager(c.Client)
 
-    client := &Client{
+    client := &client{
         log: vs.Log,
-        Client: c,
-        Views: m,
-        Root: v,
-        Perf: p,
-        Valid: true,
-        Timeout: time.Duration(vs.Timeout),
+        client: c,
+        views: m,
+        root: v,
+        perf: p,
+        valid: true,
+        timeout: time.Duration(vs.Timeout),
     }
     // Adjust max query size if needed
     ctx3, cancel3 := context.WithTimeout(ctx, time.Duration(vs.Timeout))
     defer cancel3()
-    n, err := client.GetMaxQueryMetrics(ctx3)
+    n, err := client.getMaxQueryMetrics(ctx3)
     if err != nil {
         return nil, err
     }
@@ -239,8 +239,8 @@ func newClient(ctx context.Context, vSphereURL *url.URL, vs *VSphere) (*client,
     return client, nil
 }
 
-// Close shuts down a ClientFactory and releases any resources associated with it.
-func (cf *ClientFactory) Close() {
+// close shuts down a clientFactory and releases any resources associated with it.
+func (cf *clientFactory) close() {
     cf.mux.Lock()
     defer cf.mux.Unlock()
     if cf.client != nil {
@@ -248,37 +248,37 @@ func (cf *clientFactory) close() {
     }
 }
 
-func (c *Client) close() {
+func (c *client) close() {
     // Use a Once to prevent us from panics stemming from trying
     // to close it multiple times.
     c.closeGate.Do(func() {
-        ctx, cancel := context.WithTimeout(context.Background(), c.Timeout)
+        ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
         defer cancel()
-        if c.Client != nil {
-            if err := c.Client.Logout(ctx); err != nil {
+        if c.client != nil {
+            if err := c.client.Logout(ctx); err != nil {
                 c.log.Errorf("Logout: %s", err.Error())
             }
         }
     })
 }
 
-// GetServerTime returns the time at the vCenter server
-func (c *Client) GetServerTime(ctx context.Context) (time.Time, error) {
-    ctx, cancel := context.WithTimeout(ctx, c.Timeout)
+// getServerTime returns the time at the vCenter server
+func (c *client) getServerTime(ctx context.Context) (time.Time, error) {
+    ctx, cancel := context.WithTimeout(ctx, c.timeout)
     defer cancel()
-    t, err := methods.GetCurrentTime(ctx, c.Client)
+    t, err := methods.GetCurrentTime(ctx, c.client)
     if err != nil {
         return time.Time{}, err
     }
     return *t, nil
 }
 
-// GetMaxQueryMetrics returns the max_query_metrics setting as configured in vCenter
-func (c *Client) GetMaxQueryMetrics(ctx context.Context) (int, error) {
-    ctx, cancel := context.WithTimeout(ctx, c.Timeout)
+// getMaxQueryMetrics returns the max_query_metrics setting as configured in vCenter
+func (c *client) getMaxQueryMetrics(ctx context.Context) (int, error) {
+    ctx, cancel := context.WithTimeout(ctx, c.timeout)
     defer cancel()
 
-    om := object.NewOptionManager(c.Client.Client, *c.Client.Client.ServiceContent.Setting)
+    om := object.NewOptionManager(c.client.Client, *c.client.Client.ServiceContent.Setting)
     res, err := om.Query(ctx, "config.vpxd.stats.maxQueryMetrics")
     if err == nil {
         if len(res) > 0 {
@@ -300,7 +300,7 @@ func (c *client) getMaxQueryMetrics(ctx context.Context) (int, error) {
     }
 
     // No usable maxQueryMetrics setting. Infer based on version
-    ver := c.Client.Client.ServiceContent.About.Version
+    ver := c.client.Client.ServiceContent.About.Version
     parts := strings.Split(ver, ".")
     if len(parts) < 2 {
         c.log.Warnf("vCenter returned an invalid version string: %s. Using default query size=64", ver)
@@ -317,45 +317,38 @@ func (c *client) getMaxQueryMetrics(ctx context.Context) (int, error) {
     return 256, nil
 }
 
-// QueryMetrics wraps performance.Query to give it proper timeouts
-func (c *Client) QueryMetrics(ctx context.Context, pqs []types.PerfQuerySpec) ([]performance.EntityMetric, error) {
-    ctx1, cancel1 := context.WithTimeout(ctx, c.Timeout)
+// queryMetrics wraps performance.Query to give it proper timeouts
+func (c *client) queryMetrics(ctx context.Context, pqs []types.PerfQuerySpec) ([]performance.EntityMetric, error) {
+    ctx1, cancel1 := context.WithTimeout(ctx, c.timeout)
     defer cancel1()
-    metrics, err := c.Perf.Query(ctx1, pqs)
+    metrics, err := c.perf.Query(ctx1, pqs)
     if err != nil {
         return nil, err
     }
 
-    ctx2, cancel2 := context.WithTimeout(ctx, c.Timeout)
+    ctx2, cancel2 := context.WithTimeout(ctx, c.timeout)
     defer cancel2()
-    return c.Perf.ToMetricSeries(ctx2, metrics)
+    return c.perf.ToMetricSeries(ctx2, metrics)
 }
 
-// CounterInfoByName wraps performance.CounterInfoByName to give it proper timeouts
-func (c *Client) CounterInfoByName(ctx context.Context) (map[string]*types.PerfCounterInfo, error) {
-    ctx1, cancel1 := context.WithTimeout(ctx, c.Timeout)
+// counterInfoByName wraps performance.counterInfoByName to give it proper timeouts
+func (c *client) counterInfoByName(ctx context.Context) (map[string]*types.PerfCounterInfo, error) {
+    ctx1, cancel1 := context.WithTimeout(ctx, c.timeout)
     defer cancel1()
-    return c.Perf.CounterInfoByName(ctx1)
+    return c.perf.CounterInfoByName(ctx1)
 }
 
-// CounterInfoByKey wraps performance.CounterInfoByKey to give it proper timeouts
-func (c *Client) CounterInfoByKey(ctx context.Context) (map[int32]*types.PerfCounterInfo, error) {
-    ctx1, cancel1 := context.WithTimeout(ctx, c.Timeout)
+// counterInfoByKey wraps performance.counterInfoByKey to give it proper timeouts
+func (c *client) counterInfoByKey(ctx context.Context) (map[int32]*types.PerfCounterInfo, error) {
+    ctx1, cancel1 := context.WithTimeout(ctx, c.timeout)
     defer cancel1()
-    return c.Perf.CounterInfoByKey(ctx1)
+    return c.perf.CounterInfoByKey(ctx1)
 }
 
-// ListResources wraps property.Collector.Retrieve to give it proper timeouts
-func (c *Client) ListResources(ctx context.Context, root *view.ContainerView, kind, ps []string, dst interface{}) error {
-    ctx1, cancel1 := context.WithTimeout(ctx, c.Timeout)
-    defer cancel1()
-    return root.Retrieve(ctx1, kind, ps, dst)
-}
-
-func (c *Client) GetCustomFields(ctx context.Context) (map[int32]string, error) {
-    ctx1, cancel1 := context.WithTimeout(ctx, c.Timeout)
+func (c *client) getCustomFields(ctx context.Context) (map[int32]string, error) {
+    ctx1, cancel1 := context.WithTimeout(ctx, c.timeout)
     defer cancel1()
-    cfm := object.NewCustomFieldsManager(c.Client.Client)
+    cfm := object.NewCustomFieldsManager(c.client.Client)
     fields, err := cfm.Field(ctx1)
     if err != nil {
         return nil, err
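The renames leave the factory's behavior intact: one mutex-guarded, lazily created client that is validated on checkout and rebuilt once before giving up. A stripped-down sketch of that pattern with the vSphere specifics elided (assume `newClient` and the `healthy` check stand in for the real connection and the dummy-call test):

package main

import (
    "errors"
    "fmt"
    "sync"
)

type client struct{ healthy bool }

func newClient() (*client, error) { return &client{healthy: true}, nil }

type clientFactory struct {
    mu     sync.Mutex
    client *client
}

// getClient mirrors clientFactory.getClient: create on demand, test the
// cached client, and rebuild it once before giving up.
func (cf *clientFactory) getClient() (*client, error) {
    cf.mu.Lock()
    defer cf.mu.Unlock()
    retrying := false
    for {
        if cf.client == nil {
            c, err := newClient()
            if err != nil {
                return nil, err
            }
            cf.client = c
        }
        if cf.client.healthy {
            return cf.client, nil
        }
        if retrying {
            return nil, errors.New("client unusable after retry")
        }
        cf.client = nil // drop the stale client and retry once
        retrying = true
    }
}

func main() {
    cf := &clientFactory{}
    c, err := cf.getClient()
    fmt.Println(c != nil, err)
}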
@@ -41,18 +41,17 @@ type queryChunk []types.PerfQuerySpec
 
 type queryJob func(queryChunk)
 
-// Endpoint is a high-level representation of a connected vCenter endpoint. It is backed by the lower
-// level Client type.
-type Endpoint struct {
-    Parent *VSphere
-    URL *url.URL
+// endpoint is a high-level representation of a connected vCenter endpoint. It is backed by the lower level client type.
+type endpoint struct {
+    parent *VSphere
+    url *url.URL
     resourceKinds map[string]*resourceKind
-    hwMarks *TSCache
+    hwMarks *tsCache
     lun2ds map[string]string
     discoveryTicker *time.Ticker
     collectMux sync.RWMutex
     initialized bool
-    clientFactory *ClientFactory
+    clientFactory *clientFactory
     busy sync.Mutex
     customFields map[int32]string
     customAttrFilter filter.Filter
@@ -76,7 +75,7 @@ type resourceKind struct {
     paths []string
     excludePaths []string
     collectInstances bool
-    getObjects func(context.Context, *Endpoint, *ResourceFilter) (objectMap, error)
+    getObjects func(context.Context, *endpoint, *resourceFilter) (objectMap, error)
     include []string
     simple bool
     metrics performance.MetricList
@@ -108,7 +107,7 @@ type objectRef struct {
     lookup map[string]string
 }
 
-func (e *Endpoint) getParent(obj *objectRef, res *resourceKind) (*objectRef, bool) {
+func (e *endpoint) getParent(obj *objectRef, res *resourceKind) (*objectRef, bool) {
     if pKind, ok := e.resourceKinds[res.parent]; ok {
         if p, ok := pKind.objects[obj.parentRef.Value]; ok {
             return p, true
@@ -117,16 +116,16 @@ func (e *endpoint) getParent(obj *objectRef, res *resourceKind) (*objectRef, boo
     return nil, false
 }
 
-// NewEndpoint returns a new connection to a vCenter based on the URL and configuration passed
+// newEndpoint returns a new connection to a vCenter based on the URL and configuration passed
 // as parameters.
-func NewEndpoint(ctx context.Context, parent *VSphere, address *url.URL, log telegraf.Logger) (*Endpoint, error) {
-    e := Endpoint{
-        URL: address,
-        Parent: parent,
-        hwMarks: NewTSCache(hwMarkTTL, log),
+func newEndpoint(ctx context.Context, parent *VSphere, address *url.URL, log telegraf.Logger) (*endpoint, error) {
+    e := endpoint{
+        url: address,
+        parent: parent,
+        hwMarks: newTSCache(hwMarkTTL, log),
         lun2ds: make(map[string]string),
         initialized: false,
-        clientFactory: NewClientFactory(address, parent),
+        clientFactory: newClientFactory(address, parent),
         customAttrFilter: newFilterOrPanic(parent.CustomAttributeInclude, parent.CustomAttributeExclude),
         customAttrEnabled: anythingEnabled(parent.CustomAttributeExclude),
         log: log,
@@ -294,18 +293,18 @@ func isSimple(include, exclude []string) bool {
     return true
 }
 
-func (e *Endpoint) startDiscovery(ctx context.Context) {
-    e.discoveryTicker = time.NewTicker(time.Duration(e.Parent.ObjectDiscoveryInterval))
+func (e *endpoint) startDiscovery(ctx context.Context) {
+    e.discoveryTicker = time.NewTicker(time.Duration(e.parent.ObjectDiscoveryInterval))
     go func() {
         for {
             select {
             case <-e.discoveryTicker.C:
                 err := e.discover(ctx)
                 if err != nil && !errors.Is(err, context.Canceled) {
-                    e.log.Errorf("Discovery for %s: %s", e.URL.Host, err.Error())
+                    e.log.Errorf("Discovery for %s: %s", e.url.Host, err.Error())
                 }
             case <-ctx.Done():
-                e.log.Debugf("Exiting discovery goroutine for %s", e.URL.Host)
+                e.log.Debugf("Exiting discovery goroutine for %s", e.url.Host)
                 e.discoveryTicker.Stop()
                 return
             }
@@ -313,18 +312,18 @@ func (e *endpoint) startDiscovery(ctx context.Context) {
     }()
 }
 
-func (e *Endpoint) initialDiscovery(ctx context.Context) {
+func (e *endpoint) initialDiscovery(ctx context.Context) {
     err := e.discover(ctx)
     if err != nil && !errors.Is(err, context.Canceled) {
-        e.log.Errorf("Discovery for %s: %s", e.URL.Host, err.Error())
+        e.log.Errorf("Discovery for %s: %s", e.url.Host, err.Error())
     }
     e.startDiscovery(ctx)
 }
 
-func (e *Endpoint) init(ctx context.Context) error {
-    client, err := e.clientFactory.GetClient(ctx)
+func (e *endpoint) init(ctx context.Context) error {
+    client, err := e.clientFactory.getClient(ctx)
     if err != nil {
-        switch e.Parent.DisconnectedServersBehavior {
+        switch e.parent.DisconnectedServersBehavior {
         case "error":
             return err
         case "ignore":
@@ -333,13 +332,13 @@ func (e *endpoint) init(ctx context.Context) error {
             return nil
         default:
             return fmt.Errorf("%q is not a valid value for disconnected_servers_behavior",
-                e.Parent.DisconnectedServersBehavior)
+                e.parent.DisconnectedServersBehavior)
         }
     }
 
     // Initial load of custom field metadata
     if e.customAttrEnabled {
-        fields, err := client.GetCustomFields(ctx)
+        fields, err := client.getCustomFields(ctx)
         if err != nil {
             e.log.Warn("Could not load custom field metadata")
         } else {
@@ -347,29 +346,29 @@ func (e *endpoint) init(ctx context.Context) error {
         }
     }
 
-    if time.Duration(e.Parent.ObjectDiscoveryInterval) > 0 {
-        e.Parent.Log.Debug("Running initial discovery")
+    if time.Duration(e.parent.ObjectDiscoveryInterval) > 0 {
+        e.parent.Log.Debug("Running initial discovery")
         e.initialDiscovery(ctx)
     }
     e.initialized = true
     return nil
 }
 
-func (e *Endpoint) getMetricNameForID(id int32) string {
+func (e *endpoint) getMetricNameForID(id int32) string {
     e.metricNameMux.RLock()
     defer e.metricNameMux.RUnlock()
     return e.metricNameLookup[id]
 }
 
-func (e *Endpoint) reloadMetricNameMap(ctx context.Context) error {
+func (e *endpoint) reloadMetricNameMap(ctx context.Context) error {
     e.metricNameMux.Lock()
     defer e.metricNameMux.Unlock()
-    client, err := e.clientFactory.GetClient(ctx)
+    client, err := e.clientFactory.getClient(ctx)
     if err != nil {
         return err
     }
 
-    mn, err := client.CounterInfoByKey(ctx)
+    mn, err := client.counterInfoByKey(ctx)
     if err != nil {
         return err
     }
@@ -380,28 +379,28 @@ func (e *endpoint) reloadMetricNameMap(ctx context.Context) error {
     return nil
 }
 
-func (e *Endpoint) getMetadata(ctx context.Context, obj *objectRef, sampling int32) (performance.MetricList, error) {
-    client, err := e.clientFactory.GetClient(ctx)
+func (e *endpoint) getMetadata(ctx context.Context, obj *objectRef, sampling int32) (performance.MetricList, error) {
+    client, err := e.clientFactory.getClient(ctx)
     if err != nil {
         return nil, err
     }
 
-    ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout))
+    ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.parent.Timeout))
     defer cancel1()
-    metrics, err := client.Perf.AvailableMetric(ctx1, obj.ref.Reference(), sampling)
+    metrics, err := client.perf.AvailableMetric(ctx1, obj.ref.Reference(), sampling)
     if err != nil {
         return nil, err
     }
     return metrics, nil
 }
 
-func (e *Endpoint) getDatacenterName(ctx context.Context, client *Client, cache map[string]string, r types.ManagedObjectReference) (string, bool) {
+func (e *endpoint) getDatacenterName(ctx context.Context, client *client, cache map[string]string, r types.ManagedObjectReference) (string, bool) {
    return e.getAncestorName(ctx, client, "Datacenter", cache, r)
 }
 
-func (e *Endpoint) getAncestorName(
+func (e *endpoint) getAncestorName(
     ctx context.Context,
-    client *Client,
+    client *client,
     resourceType string,
     cache map[string]string,
     r types.ManagedObjectReference,
@@ -418,13 +417,13 @@ func (e *endpoint) getAncestorName(
             return true
         }
         path = append(path, here.Reference().String())
-        o := object.NewCommon(client.Client.Client, r)
+        o := object.NewCommon(client.client.Client, r)
         var result mo.ManagedEntity
-        ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout))
+        ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.parent.Timeout))
         defer cancel1()
         err := o.Properties(ctx1, here, []string{"parent", "name"}, &result)
         if err != nil {
-            e.Parent.Log.Warnf("Error while resolving parent. Assuming no parent exists. Error: %s", err.Error())
+            e.parent.Log.Warnf("Error while resolving parent. Assuming no parent exists. Error: %s", err.Error())
             return true
         }
         if result.Reference().Type == resourceType {
@@ -433,7 +432,7 @@ func (e *endpoint) getAncestorName(
             return true
         }
         if result.Parent == nil {
-            e.Parent.Log.Debugf("No parent found for %s (ascending from %s)", here.Reference(), r.Reference())
+            e.parent.Log.Debugf("No parent found for %s (ascending from %s)", here.Reference(), r.Reference())
             return true
         }
         here = result.Parent.Reference()
@@ -446,7 +445,7 @@ func (e *endpoint) getAncestorName(
     return returnVal, returnVal != ""
 }
 
-func (e *Endpoint) discover(ctx context.Context) error {
+func (e *endpoint) discover(ctx context.Context) error {
     e.busy.Lock()
     defer e.busy.Unlock()
     if ctx.Err() != nil {
@@ -458,17 +457,17 @@ func (e *endpoint) discover(ctx context.Context) error {
         return err
     }
 
-    sw := NewStopwatch("discover", e.URL.Host)
+    sw := newStopwatch("discover", e.url.Host)
 
-    client, err := e.clientFactory.GetClient(ctx)
+    client, err := e.clientFactory.getClient(ctx)
     if err != nil {
         return err
     }
 
     // get the vSphere API version
-    e.apiVersion = client.Client.ServiceContent.About.ApiVersion
+    e.apiVersion = client.client.ServiceContent.About.ApiVersion
 
-    e.Parent.Log.Debugf("Discover new objects for %s", e.URL.Host)
+    e.parent.Log.Debugf("Discover new objects for %s", e.url.Host)
     dcNameCache := make(map[string]string)
 
     numRes := int64(0)
@@ -479,13 +478,13 @@ func (e *endpoint) discover(ctx context.Context) error {
         e.log.Debugf("Discovering resources for %s", res.name)
         // Need to do this for all resource types even if they are not enabled
         if res.enabled || (k != "vm" && k != "vsan") {
-            rf := ResourceFilter{
-                finder: &Finder{client},
+            rf := resourceFilter{
+                finder: &finder{client},
                 resType: res.vcName,
                 paths: res.paths,
                 excludePaths: res.excludePaths}
 
-            ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout))
+            ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.parent.Timeout))
             objects, err := res.getObjects(ctx1, e, &rf)
             cancel1()
             if err != nil {
@@ -513,7 +512,7 @@ func (e *endpoint) discover(ctx context.Context) error {
             }
             newObjects[k] = objects
 
-            SendInternalCounterWithTags("discovered_objects", e.URL.Host, map[string]string{"type": res.name}, int64(len(objects)))
+            sendInternalCounterWithTags("discovered_objects", e.url.Host, map[string]string{"type": res.name}, int64(len(objects)))
             numRes += int64(len(objects))
         }
     }
@@ -532,7 +531,7 @@ func (e *endpoint) discover(ctx context.Context) error {
     // Load custom field metadata
     var fields map[int32]string
     if e.customAttrEnabled {
-        fields, err = client.GetCustomFields(ctx)
+        fields, err = client.getCustomFields(ctx)
         if err != nil {
             e.log.Warn("Could not load custom field metadata")
             fields = nil
@@ -552,14 +551,14 @@ func (e *endpoint) discover(ctx context.Context) error {
         e.customFields = fields
     }
 
-    sw.Stop()
-    SendInternalCounterWithTags("discovered_objects", e.URL.Host, map[string]string{"type": "instance-total"}, numRes)
+    sw.stop()
+    sendInternalCounterWithTags("discovered_objects", e.url.Host, map[string]string{"type": "instance-total"}, numRes)
     return nil
 }
 
-func (e *Endpoint) simpleMetadataSelect(ctx context.Context, client *Client, res *resourceKind) {
+func (e *endpoint) simpleMetadataSelect(ctx context.Context, client *client, res *resourceKind) {
     e.log.Debugf("Using fast metric metadata selection for %s", res.name)
-    m, err := client.CounterInfoByName(ctx)
+    m, err := client.counterInfoByName(ctx)
     if err != nil {
         e.log.Errorf("Getting metric metadata. Discovery will be incomplete. Error: %s", err.Error())
         return
@@ -582,7 +581,7 @@ func (e *endpoint) simpleMetadataSelect(ctx context.Context, client *client, res
     }
 }
 
-func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, objects objectMap) {
+func (e *endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, objects objectMap) {
     // We're only going to get metadata from maxMetadataSamples resources. If we have
     // more resources than that, we pick maxMetadataSamples samples at random.
     sampledObjects := make([]*objectRef, 0, len(objects))
@@ -602,10 +601,10 @@ func (e *endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind,
     }
 
     instInfoMux := sync.Mutex{}
-    te := NewThrottledExecutor(e.Parent.DiscoverConcurrency)
+    te := newThrottledExecutor(e.parent.DiscoverConcurrency)
     for _, obj := range sampledObjects {
         func(obj *objectRef) {
-            te.Run(ctx, func() {
+            te.run(ctx, func() {
                 metrics, err := e.getMetadata(ctx, obj, res.sampling)
                 if err != nil {
                     e.log.Errorf("Getting metric metadata. Discovery will be incomplete. Error: %s", err.Error())
@@ -635,14 +634,14 @@ func (e *endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind,
             })
         }(obj)
     }
-    te.Wait()
+    te.wait()
 }
 
-func getDatacenters(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (objectMap, error) {
+func getDatacenters(ctx context.Context, e *endpoint, resourceFilter *resourceFilter) (objectMap, error) {
     var resources []mo.Datacenter
-    ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout))
+    ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.parent.Timeout))
     defer cancel1()
-    err := resourceFilter.FindAll(ctx1, &resources)
+    err := resourceFilter.findAll(ctx1, &resources)
     if err != nil {
         return nil, err
     }
@@ -661,11 +660,11 @@ func getDatacenters(ctx context.Context, e *endpoint, resourceFilter *resourceFi
     return m, nil
 }
 
-func getClusters(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (objectMap, error) {
+func getClusters(ctx context.Context, e *endpoint, resourceFilter *resourceFilter) (objectMap, error) {
     var resources []mo.ClusterComputeResource
-    ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout))
+    ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.parent.Timeout))
     defer cancel1()
-    err := resourceFilter.FindAll(ctx1, &resources)
+    err := resourceFilter.findAll(ctx1, &resources)
     if err != nil {
         return nil, err
     }
@@ -679,19 +678,19 @@ func getClusters(ctx context.Context, e *endpoint, resourceFilter *resourceFilte
         // We're not interested in the immediate parent (a folder), but the data center.
         p, ok := cache[r.Parent.Value]
         if !ok {
-            ctx2, cancel2 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout))
+            ctx2, cancel2 := context.WithTimeout(ctx, time.Duration(e.parent.Timeout))
             defer cancel2()
-            client, err := e.clientFactory.GetClient(ctx2)
+            client, err := e.clientFactory.getClient(ctx2)
             if err != nil {
                 return err
             }
-            o := object.NewFolder(client.Client.Client, *r.Parent)
+            o := object.NewFolder(client.client.Client, *r.Parent)
             var folder mo.Folder
-            ctx3, cancel3 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout))
+            ctx3, cancel3 := context.WithTimeout(ctx, time.Duration(e.parent.Timeout))
             defer cancel3()
             err = o.Properties(ctx3, *r.Parent, []string{"parent"}, &folder)
             if err != nil {
-                e.Parent.Log.Warnf("Error while getting folder parent: %s", err.Error())
+                e.parent.Log.Warnf("Error while getting folder parent: %s", err.Error())
                 p = nil
             } else {
                 pp := folder.Parent.Reference()
@@ -715,9 +714,9 @@ func getClusters(ctx context.Context, e *endpoint, resourceFilter *resourceFilte
 }
 
 // noinspection GoUnusedParameter
-func getResourcePools(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (objectMap, error) {
+func getResourcePools(ctx context.Context, e *endpoint, resourceFilter *resourceFilter) (objectMap, error) {
     var resources []mo.ResourcePool
-    err := resourceFilter.FindAll(ctx, &resources)
+    err := resourceFilter.findAll(ctx, &resources)
     if err != nil {
         return nil, err
     }
@@ -746,9 +745,9 @@ func getResourcePoolName(rp types.ManagedObjectReference, rps objectMap) string
 }
 
 // noinspection GoUnusedParameter
-func getHosts(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (objectMap, error) {
+func getHosts(ctx context.Context, e *endpoint, resourceFilter *resourceFilter) (objectMap, error) {
     var resources []mo.HostSystem
-    err := resourceFilter.FindAll(ctx, &resources)
+    err := resourceFilter.findAll(ctx, &resources)
     if err != nil {
         return nil, err
     }
@@ -766,22 +765,22 @@ func getHosts(ctx context.Context, e *endpoint, resourceFilter *resourceFilter)
     return m, nil
 }
 
-func getVMs(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (objectMap, error) {
+func getVMs(ctx context.Context, e *endpoint, rf *resourceFilter) (objectMap, error) {
     var resources []mo.VirtualMachine
-    ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout))
+    ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.parent.Timeout))
     defer cancel1()
-    err := resourceFilter.FindAll(ctx1, &resources)
+    err := rf.findAll(ctx1, &resources)
     if err != nil {
         return nil, err
     }
     m := make(objectMap)
-    client, err := e.clientFactory.GetClient(ctx)
+    client, err := e.clientFactory.getClient(ctx)
     if err != nil {
         return nil, err
     }
     // Create a ResourcePool Filter and get the list of Resource Pools
-    rprf := ResourceFilter{
-        finder: &Finder{client},
+    rprf := resourceFilter{
+        finder: &finder{client},
         resType: "ResourcePool",
         paths: []string{"/*/host/**"},
         excludePaths: nil}
@@ -798,7 +797,7 @@ func getVMs(ctx context.Context, e *endpoint, rf *resourceFilter) (objectMap, er
         guest := "unknown"
         uuid := ""
         lookup := make(map[string]string)
-        // Get the name of the VM resource pool
+        // get the name of the VM resource pool
         rpname := getResourcePoolName(*r.ResourcePool, resourcePools)
 
         // Extract host name
@@ -817,7 +816,7 @@ func getVMs(ctx context.Context, e *endpoint, rf *resourceFilter) (objectMap, er
             ips := make(map[string][]string)
             for _, ip := range net.IpConfig.IpAddress {
                 addr := ip.IpAddress
-                for _, ipType := range e.Parent.IPAddresses {
+                for _, ipType := range e.parent.IPAddresses {
                     if !(ipType == "ipv4" && isIPv4.MatchString(addr) ||
                         ipType == "ipv6" && isIPv6.MatchString(addr)) {
                         continue
@@ -881,11 +880,11 @@ func getVMs(ctx context.Context, e *endpoint, rf *resourceFilter) (objectMap, er
     return m, nil
 }
 
-func getDatastores(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (objectMap, error) {
+func getDatastores(ctx context.Context, e *endpoint, resourceFilter *resourceFilter) (objectMap, error) {
     var resources []mo.Datastore
-    ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout))
+    ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.parent.Timeout))
     defer cancel1()
-    err := resourceFilter.FindAll(ctx1, &resources)
+    err := resourceFilter.findAll(ctx1, &resources)
     if err != nil {
         return nil, err
     }
@@ -911,7 +910,7 @@ func getDatastores(ctx context.Context, e *endpoint, resourceFilter *resourceFil
     return m, nil
 }
 
-func (e *Endpoint) loadCustomAttributes(entity mo.ManagedEntity) map[string]string {
+func (e *endpoint) loadCustomAttributes(entity mo.ManagedEntity) map[string]string {
     if !e.customAttrEnabled {
         return make(map[string]string)
     }
@@ -919,12 +918,12 @@ func (e *endpoint) loadCustomAttributes(entity mo.ManagedEntity) map[string]stri
     for _, v := range entity.CustomValue {
         cv, ok := v.(*types.CustomFieldStringValue)
         if !ok {
-            e.Parent.Log.Warnf("Metadata for custom field %d not of string type. Skipping", cv.Key)
+            e.parent.Log.Warnf("Metadata for custom field %d not of string type. Skipping", cv.Key)
|
||||
e.parent.Log.Warnf("Metadata for custom field %d not of string type. Skipping", cv.Key)
|
||||
continue
|
||||
}
|
||||
key, ok := e.customFields[cv.Key]
|
||||
if !ok {
|
||||
e.Parent.Log.Warnf("Metadata for custom field %d not found. Skipping", cv.Key)
|
||||
e.parent.Log.Warnf("Metadata for custom field %d not found. Skipping", cv.Key)
|
||||
continue
|
||||
}
|
||||
if e.customAttrFilter.Match(key) {
|
||||
|
|
@ -934,13 +933,13 @@ func (e *Endpoint) loadCustomAttributes(entity mo.ManagedEntity) map[string]stri
|
|||
return cvs
|
||||
}
|
||||
|
||||
// Close shuts down an Endpoint and releases any resources associated with it.
|
||||
func (e *Endpoint) Close() {
|
||||
e.clientFactory.Close()
|
||||
// close shuts down an endpoint and releases any resources associated with it.
|
||||
func (e *endpoint) close() {
|
||||
e.clientFactory.close()
|
||||
}
|
||||
|
||||
// Collect runs a round of data collections as specified in the configuration.
|
||||
func (e *Endpoint) Collect(ctx context.Context, acc telegraf.Accumulator) error {
|
||||
// collect runs a round of data collections as specified in the configuration.
|
||||
func (e *endpoint) collect(ctx context.Context, acc telegraf.Accumulator) error {
|
||||
// Connection could have failed on init, so we need to check for a deferred
|
||||
// init request.
|
||||
if !e.initialized {
|
||||
|
|
@ -953,7 +952,7 @@ func (e *Endpoint) Collect(ctx context.Context, acc telegraf.Accumulator) error
|
|||
// If we never managed to do a discovery, collection will be a no-op. Therefore,
|
||||
// we need to check that a connection is available, or the collection will
|
||||
// silently fail.
|
||||
if _, err := e.clientFactory.GetClient(ctx); err != nil {
|
||||
if _, err := e.clientFactory.getClient(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
@ -965,7 +964,7 @@ func (e *Endpoint) Collect(ctx context.Context, acc telegraf.Accumulator) error
|
|||
}
|
||||
|
||||
// If discovery interval is disabled (0), discover on each collection cycle
|
||||
if time.Duration(e.Parent.ObjectDiscoveryInterval) == 0 {
|
||||
if time.Duration(e.parent.ObjectDiscoveryInterval) == 0 {
|
||||
err := e.discover(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -991,21 +990,21 @@ func (e *Endpoint) Collect(ctx context.Context, acc telegraf.Accumulator) error
|
|||
}
|
||||
wg.Wait()
|
||||
|
||||
// Purge old timestamps from the cache
|
||||
e.hwMarks.Purge()
|
||||
// purge old timestamps from the cache
|
||||
e.hwMarks.purge()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Workaround to make sure pqs is a copy of the loop variable and won't change.
func submitChunkJob(ctx context.Context, te *ThrottledExecutor, job queryJob, pqs queryChunk) {
	te.Run(ctx, func() {
func submitChunkJob(ctx context.Context, te *throttledExecutor, job queryJob, pqs queryChunk) {
	te.run(ctx, func() {
		job(pqs)
	})
}
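The workaround comment above predates the Go 1.22 loop-variable change: before 1.22, a `for` loop reused a single loop variable across iterations, so closures started inside the loop could all observe the final value. Passing the chunk through a function parameter forces a per-iteration copy. A minimal, self-contained sketch of the same pattern outside the plugin (all names here are illustrative, not from this code):

package main

import (
	"fmt"
	"sync"
)

func main() {
	items := []int{1, 2, 3}
	var wg sync.WaitGroup

	// submit copies `item` into the parameter `v`, so each goroutine sees
	// the value from its own iteration even when the loop variable is
	// shared across iterations (Go < 1.22 semantics).
	submit := func(v int) {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(v)
		}()
	}

	for _, item := range items {
		submit(item)
	}
	wg.Wait()
}
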
func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now, latest time.Time, job queryJob) {
	te := NewThrottledExecutor(e.Parent.CollectConcurrency)
	maxMetrics := e.Parent.MaxQueryMetrics
func (e *endpoint) chunkify(ctx context.Context, res *resourceKind, now, latest time.Time, job queryJob) {
	te := newThrottledExecutor(e.parent.CollectConcurrency)
	maxMetrics := e.parent.MaxQueryMetrics
	if maxMetrics < 1 {
		maxMetrics = 1
	}

@ -1017,7 +1016,7 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now, latest time.Time, job queryJob) {
		maxMetrics = 10
	}

	pqs := make(queryChunk, 0, e.Parent.MaxQueryObjects)
	pqs := make(queryChunk, 0, e.parent.MaxQueryObjects)
	numQs := 0

	for _, obj := range res.objects {

@ -1029,9 +1028,9 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now, latest time.Time, job queryJob) {
				e.log.Debugf("Unable to find metric name for id %d. Skipping!", metric.CounterId)
				continue
			}
			start, ok := e.hwMarks.Get(obj.ref.Value, metricName)
			start, ok := e.hwMarks.get(obj.ref.Value, metricName)
			if !ok {
				start = latest.Add(time.Duration(-res.sampling) * time.Second * (time.Duration(e.Parent.MetricLookback) - 1))
				start = latest.Add(time.Duration(-res.sampling) * time.Second * (time.Duration(e.parent.MetricLookback) - 1))
			}

			if !start.Truncate(time.Second).Before(now.Truncate(time.Second)) {

@ -1064,14 +1063,14 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now, latest time.Time, job queryJob) {
			// OR if we're past the absolute maximum limit
			if (!res.realTime && len(bucket.MetricId) >= maxMetrics) || len(bucket.MetricId) > maxRealtimeMetrics {
				e.log.Debugf("Submitting partial query: %d metrics (%d remaining) of type %s for %s. Total objects %d",
					len(bucket.MetricId), len(res.metrics)-metricIdx, res.name, e.URL.Host, len(res.objects))
					len(bucket.MetricId), len(res.metrics)-metricIdx, res.name, e.url.Host, len(res.objects))

				// Don't send work items if the context has been cancelled.
				if errors.Is(ctx.Err(), context.Canceled) {
					return
				}

				// Run collection job
				// run collection job
				delete(timeBuckets, start.Unix())
				submitChunkJob(ctx, te, job, queryChunk{*bucket})
			}

@ -1080,10 +1079,10 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now, latest time.Time, job queryJob) {
	for _, bucket := range timeBuckets {
		pqs = append(pqs, *bucket)
		numQs += len(bucket.MetricId)
		if (!res.realTime && numQs > e.Parent.MaxQueryObjects) || numQs > maxRealtimeMetrics {
		if (!res.realTime && numQs > e.parent.MaxQueryObjects) || numQs > maxRealtimeMetrics {
			e.log.Debugf("Submitting final bucket job for %s: %d metrics", res.name, numQs)
			submitChunkJob(ctx, te, job, pqs)
			pqs = make(queryChunk, 0, e.Parent.MaxQueryObjects)
			pqs = make(queryChunk, 0, e.parent.MaxQueryObjects)
			numQs = 0
		}
	}

@ -1095,16 +1094,16 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now, latest time.Time, job queryJob) {
	}

	// Wait for background collection to finish
	te.Wait()
	te.wait()
}

func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc telegraf.Accumulator) error {
func (e *endpoint) collectResource(ctx context.Context, resourceType string, acc telegraf.Accumulator) error {
	res := e.resourceKinds[resourceType]
	client, err := e.clientFactory.GetClient(ctx)
	client, err := e.clientFactory.getClient(ctx)
	if err != nil {
		return err
	}
	now, err := client.GetServerTime(ctx)
	now, err := client.getServerTime(ctx)
	if err != nil {
		return err
	}

@ -1133,7 +1132,7 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc telegraf.Accumulator) error {
		if !res.realTime && elapsed < float64(res.sampling) {
			// No new data would be available. We're outta here!
			e.log.Debugf("Sampling period for %s of %d has not elapsed on %s",
				resourceType, res.sampling, e.URL.Host)
				resourceType, res.sampling, e.url.Host)
			return nil
		}
	} else {

@ -1141,10 +1140,10 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc telegraf.Accumulator) error {
	}

	internalTags := map[string]string{"resourcetype": resourceType}
	sw := NewStopwatchWithTags("gather_duration", e.URL.Host, internalTags)
	sw := newStopwatchWithTags("gather_duration", e.url.Host, internalTags)

	e.log.Debugf("Collecting metrics for %d objects of type %s for %s",
		len(res.objects), resourceType, e.URL.Host)
		len(res.objects), resourceType, e.url.Host)

	count := int64(0)

@ -1160,7 +1159,7 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc telegraf.Accumulator) error {
			acc.AddError(errors.New("while collecting " + res.name + ": " + err.Error()))
			return
		}
		e.Parent.Log.Debugf("CollectChunk for %s returned %d metrics", resourceType, n)
		e.parent.Log.Debugf("CollectChunk for %s returned %d metrics", resourceType, n)
		atomic.AddInt64(&count, int64(n))
		tsMux.Lock()
		defer tsMux.Unlock()

@ -1173,12 +1172,12 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc telegraf.Accumulator) error {
	if !latestSample.IsZero() {
		res.latestSample = latestSample
	}
	sw.Stop()
	SendInternalCounterWithTags("gather_count", e.URL.Host, internalTags, count)
	sw.stop()
	sendInternalCounterWithTags("gather_count", e.url.Host, internalTags, count)
	return nil
}

func (e *Endpoint) alignSamples(info []types.PerfSampleInfo, values []int64, interval time.Duration) ([]types.PerfSampleInfo, []float64) {
func (e *endpoint) alignSamples(info []types.PerfSampleInfo, values []int64, interval time.Duration) ([]types.PerfSampleInfo, []float64) {
	rInfo := make([]types.PerfSampleInfo, 0, len(info))
	rValues := make([]float64, 0, len(values))
	bi := 1.0

@ -1216,7 +1215,7 @@ func (e *Endpoint) alignSamples(info []types.PerfSampleInfo, values []int64, interval time.Duration) ([]types.PerfSampleInfo, []float64) {
	return rInfo, rValues
}

func (e *Endpoint) collectChunk(
func (e *endpoint) collectChunk(
	ctx context.Context,
	pqs queryChunk,
	res *resourceKind,

@ -1227,19 +1226,19 @@ func (e *Endpoint) collectChunk(
	latestSample := time.Time{}
	count := 0
	resourceType := res.name
	prefix := "vsphere" + e.Parent.Separator + resourceType
	prefix := "vsphere" + e.parent.Separator + resourceType

	client, err := e.clientFactory.GetClient(ctx)
	client, err := e.clientFactory.getClient(ctx)
	if err != nil {
		return count, latestSample, err
	}

	metricInfo, err := client.CounterInfoByName(ctx)
	metricInfo, err := client.counterInfoByName(ctx)
	if err != nil {
		return count, latestSample, err
	}

	ems, err := client.QueryMetrics(ctx, pqs)
	ems, err := client.queryMetrics(ctx, pqs)
	if err != nil {
		return count, latestSample, err
	}

@ -1258,7 +1257,7 @@ func (e *Endpoint) collectChunk(
		for _, v := range em.Value {
			name := v.Name
			t := map[string]string{
				"vcenter": e.URL.Host,
				"vcenter": e.url.Host,
				"source":  instInfo.name,
				"moid":    moid,
			}

@ -1310,7 +1309,7 @@ func (e *Endpoint) collectChunk(
				if info.UnitInfo.GetElementDescription().Key == "percent" {
					bucket.fields[fn] = v / 100.0
				} else {
					if e.Parent.UseIntSamples {
					if e.parent.UseIntSamples {
						bucket.fields[fn] = int64(round(v))
					} else {
						bucket.fields[fn] = v

@ -1320,7 +1319,7 @@ func (e *Endpoint) collectChunk(

				// Update hiwater marks
				adjTs := ts.Add(interval).Truncate(interval).Add(-time.Second)
				e.hwMarks.Put(moid, name, adjTs)
				e.hwMarks.put(moid, name, adjTs)
			}
			if nValues == 0 {
				e.log.Debugf("Missing value for: %s, %s", name, objectRef.name)

@ -1336,7 +1335,7 @@ func (e *Endpoint) collectChunk(
	return count, latestSample, nil
}

func (e *Endpoint) populateTags(objectRef *objectRef, resourceType string, resource *resourceKind, t map[string]string, v performance.MetricSeries) {
func (e *endpoint) populateTags(objectRef *objectRef, resourceType string, resource *resourceKind, t map[string]string, v performance.MetricSeries) {
	// Map name of object.
	if resource.pKey != "" {
		t[resource.pKey] = objectRef.name

@ -1424,7 +1423,7 @@ func (e *Endpoint) populateTags(objectRef *objectRef, resourceType string, resource *resourceKind, t map[string]string, v performance.MetricSeries) {
	}
}

func (e *Endpoint) populateGlobalFields(objectRef *objectRef, resourceType, prefix string) map[string]interface{} {
func (e *endpoint) populateGlobalFields(objectRef *objectRef, resourceType, prefix string) map[string]interface{} {
	globalFields := make(map[string]interface{})
	if resourceType == "vm" && objectRef.memorySizeMB != 0 {
		_, fieldName := e.makeMetricIdentifier(prefix, "memorySizeMB")

@ -1437,12 +1436,12 @@ func (e *Endpoint) populateGlobalFields(objectRef *objectRef, resourceType, prefix string) map[string]interface{} {
	return globalFields
}

func (e *Endpoint) makeMetricIdentifier(prefix, metric string) (metricName, fieldName string) {
func (e *endpoint) makeMetricIdentifier(prefix, metric string) (metricName, fieldName string) {
	parts := strings.Split(metric, ".")
	if len(parts) == 1 {
		return prefix, parts[0]
	}
	return prefix + e.Parent.Separator + parts[0], strings.Join(parts[1:], e.Parent.Separator)
	return prefix + e.parent.Separator + parts[0], strings.Join(parts[1:], e.parent.Separator)
}

func cleanGuestID(id string) string {

@ -17,22 +17,22 @@ var addFields map[string][]string

var containers map[string]interface{}

// Finder allows callers to find resources in vCenter given a query string.
type Finder struct {
	client *Client
// finder allows callers to find resources in vCenter given a query string.
type finder struct {
	client *client
}

// ResourceFilter is a convenience class holding a finder and a set of paths. It is useful when you need a
// resourceFilter is a convenience class holding a finder and a set of paths. It is useful when you need a
// self contained object capable of returning a certain set of resources.
type ResourceFilter struct {
	finder       *Finder
type resourceFilter struct {
	finder       *finder
	resType      string
	paths        []string
	excludePaths []string
}

// FindAll returns the union of resources found given the supplied resource type and paths.
func (f *Finder) FindAll(ctx context.Context, resType string, paths, excludePaths []string, dst interface{}) error {
// findAll returns the union of resources found given the supplied resource type and paths.
func (f *finder) findAll(ctx context.Context, resType string, paths, excludePaths []string, dst interface{}) error {
	objs := make(map[string]types.ObjectContent)
	for _, p := range paths {
		if err := f.findResources(ctx, resType, p, objs); err != nil {

@ -53,8 +53,8 @@ func (f *Finder) FindAll(ctx context.Context, resType string, paths, excludePaths []string, dst interface{}) error {
	return objectContentToTypedArray(objs, dst)
}

// Find returns the resources matching the specified path.
func (f *Finder) Find(ctx context.Context, resType, path string, dst interface{}) error {
// find returns the resources matching the specified path.
func (f *finder) find(ctx context.Context, resType, path string, dst interface{}) error {
	objs := make(map[string]types.ObjectContent)
	err := f.findResources(ctx, resType, path, objs)
	if err != nil {

@ -63,13 +63,13 @@ func (f *Finder) Find(ctx context.Context, resType, path string, dst interface{}) error {
	return objectContentToTypedArray(objs, dst)
}

func (f *Finder) findResources(ctx context.Context, resType, path string, objs map[string]types.ObjectContent) error {
func (f *finder) findResources(ctx context.Context, resType, path string, objs map[string]types.ObjectContent) error {
	p := strings.Split(path, "/")
	flt := make([]property.Match, len(p)-1)
	for i := 1; i < len(p); i++ {
		flt[i-1] = property.Match{"name": p[i]}
	}
	err := f.descend(ctx, f.client.Client.ServiceContent.RootFolder, resType, flt, 0, objs)
	err := f.descend(ctx, f.client.client.ServiceContent.RootFolder, resType, flt, 0, objs)
	if err != nil {
		return err
	}

@ -77,7 +77,7 @@ func (f *Finder) findResources(ctx context.Context, resType, path string, objs map[string]types.ObjectContent) error {
	return nil
}

func (f *Finder) descend(ctx context.Context, root types.ManagedObjectReference, resType string,
func (f *finder) descend(ctx context.Context, root types.ManagedObjectReference, resType string,
	tokens []property.Match, pos int, objs map[string]types.ObjectContent) error {
	isLeaf := pos == len(tokens)-1

@ -94,7 +94,7 @@ func (f *Finder) descend(ctx context.Context, root types.ManagedObjectReference, resType string,
		return nil
	}

	m := view.NewManager(f.client.Client.Client)
	m := view.NewManager(f.client.client.Client)
	v, err := m.CreateContainerView(ctx, root, ct, false)
	if err != nil {
		return err

@ -222,10 +222,9 @@ func objectContentToTypedArray(objs map[string]types.ObjectContent, dst interface{}) error {
	return nil
}

// FindAll finds all resources matching the paths that were specified upon creation of
// the ResourceFilter.
func (r *ResourceFilter) FindAll(ctx context.Context, dst interface{}) error {
	return r.finder.FindAll(ctx, r.resType, r.paths, r.excludePaths, dst)
// findAll finds all resources matching the paths that were specified upon creation of the resourceFilter.
func (r *resourceFilter) findAll(ctx context.Context, dst interface{}) error {
	return r.finder.findAll(ctx, r.resType, r.paths, r.excludePaths, dst)
}

func matchName(f property.Match, props []types.DynamicProperty) bool {

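After this rename, in-package callers build the filter with the unexported names. A short sketch of the call pattern as it would appear inside this package, mirroring the tests further down (the client setup is elided; `ctx` and `c *client` are assumed to exist already, and the exclude path is illustrative):

	f := finder{c}
	rf := resourceFilter{
		finder:       &f,
		resType:      "VirtualMachine",
		paths:        []string{"/DC0/vm/**"},        // glob-style inventory paths
		excludePaths: []string{"/DC0/vm/excluded*"}, // hypothetical exclusion
	}
	var vms []mo.VirtualMachine
	if err := rf.findAll(ctx, &vms); err != nil {
		// handle lookup failure
	}
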
@ -6,47 +6,36 @@ import (
	"github.com/influxdata/telegraf/selfstat"
)

// Stopwatch is a simple helper for recording timing information,
// such as gather times and discovery times.
type Stopwatch struct {
// stopwatch is a simple helper for recording timing information, such as gather times and discovery times.
type stopwatch struct {
	stat  selfstat.Stat
	start time.Time
}

// NewStopwatch creates a new StopWatch and starts measuring time
// its creation.
func NewStopwatch(name, vCenter string) *Stopwatch {
	return &Stopwatch{
// newStopwatch creates a new stopwatch and starts measuring time at its creation.
func newStopwatch(name, vCenter string) *stopwatch {
	return &stopwatch{
		stat:  selfstat.RegisterTiming("vsphere", name+"_ns", map[string]string{"vcenter": vCenter}),
		start: time.Now(),
	}
}

// NewStopwatchWithTags creates a new StopWatch and starts measuring time
// its creation. Allows additional tags.
func NewStopwatchWithTags(name, vCenter string, tags map[string]string) *Stopwatch {
// newStopwatchWithTags creates a new stopwatch and starts measuring time at its creation. Allows additional tags.
func newStopwatchWithTags(name, vCenter string, tags map[string]string) *stopwatch {
	tags["vcenter"] = vCenter
	return &Stopwatch{
	return &stopwatch{
		stat:  selfstat.RegisterTiming("vsphere", name+"_ns", tags),
		start: time.Now(),
	}
}

// Stop stops a Stopwatch and records the time.
func (s *Stopwatch) Stop() {
// stop stops a stopwatch and records the time.
func (s *stopwatch) stop() {
	s.stat.Set(time.Since(s.start).Nanoseconds())
}

// SendInternalCounter is a convenience method for sending
// non-timing internal metrics.
func SendInternalCounter(name, vCenter string, value int64) {
	s := selfstat.Register("vsphere", name, map[string]string{"vcenter": vCenter})
	s.Set(value)
}

// SendInternalCounterWithTags is a convenience method for sending
// non-timing internal metrics. Allows additional tags
func SendInternalCounterWithTags(name, vCenter string, tags map[string]string, value int64) {
// sendInternalCounterWithTags is a convenience method for sending non-timing internal metrics. Allows additional tags.
func sendInternalCounterWithTags(name, vCenter string, tags map[string]string, value int64) {
	tags["vcenter"] = vCenter
	s := selfstat.Register("vsphere", name, tags)
	s.Set(value)

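The renamed helpers compose exactly as at the gather call sites in collectResource above. A brief sketch of the pattern as it would appear inside this package (vCenter host and tag values are illustrative only):

	// Time a gather round and report how many metrics it produced.
	internalTags := map[string]string{"resourcetype": "vm"}
	sw := newStopwatchWithTags("gather_duration", "vc1.example.com", internalTags) // starts timing
	count := int64(0)
	// ... collect metrics, incrementing count ...
	sw.stop() // records the elapsed nanoseconds via selfstat
	sendInternalCounterWithTags("gather_count", "vc1.example.com", internalTags, count)
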
@ -5,25 +5,24 @@ import (
	"sync"
)

// ThrottledExecutor provides a simple mechanism for running jobs in separate
// throttledExecutor provides a simple mechanism for running jobs in separate
// goroutines while limiting the number of concurrent jobs running at any given time.
type ThrottledExecutor struct {
type throttledExecutor struct {
	limiter chan struct{}
	wg      sync.WaitGroup
}

// NewThrottledExecutor creates a new ThrottlesExecutor with a specified maximum
// newThrottledExecutor creates a new throttledExecutor with a specified maximum
// number of concurrent jobs
func NewThrottledExecutor(limit int) *ThrottledExecutor {
func newThrottledExecutor(limit int) *throttledExecutor {
	if limit == 0 {
		panic("Limit must be > 0")
	}
	return &ThrottledExecutor{limiter: make(chan struct{}, limit)}
	return &throttledExecutor{limiter: make(chan struct{}, limit)}
}

// Run schedules a job for execution as soon as possible while respecting the
// maximum concurrency limit.
func (t *ThrottledExecutor) Run(ctx context.Context, job func()) {
// run schedules a job for execution as soon as possible while respecting the maximum concurrency limit.
func (t *throttledExecutor) run(ctx context.Context, job func()) {
	t.wg.Add(1)
	go func() {
		defer t.wg.Done()

@ -39,7 +38,7 @@ func (t *ThrottledExecutor) Run(ctx context.Context, job func()) {
	}()
}

// Wait blocks until all scheduled jobs have finished
// wait blocks until all scheduled jobs have finished
func (t *ThrottledExecutor) Wait() {
func (t *throttledExecutor) wait() {
	t.wg.Wait()
}

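The executor is a buffered-channel semaphore: a slot must be acquired before a job runs, and the context lets a waiting job give up. A minimal, runnable sketch of the same idea outside the plugin (names are illustrative):

package main

import (
	"context"
	"fmt"
	"sync"
)

func main() {
	ctx := context.Background()
	limiter := make(chan struct{}, 2) // at most 2 jobs in flight
	var wg sync.WaitGroup

	for i := 0; i < 5; i++ {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			select {
			case limiter <- struct{}{}: // acquire a slot
				defer func() { <-limiter }() // release it when done
				fmt.Println("job", i)
			case <-ctx.Done(): // give up if cancelled while waiting
			}
		}()
	}
	wg.Wait() // the wait() equivalent
}
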
@ -7,25 +7,25 @@ import (
	"github.com/influxdata/telegraf"
)

// TSCache is a cache of timestamps used to determine the validity of datapoints
type TSCache struct {
// tsCache is a cache of timestamps used to determine the validity of datapoints
type tsCache struct {
	ttl   time.Duration
	table map[string]time.Time
	mux   sync.RWMutex
	log   telegraf.Logger
}

// NewTSCache creates a new TSCache with a specified time-to-live after which timestamps are discarded.
func NewTSCache(ttl time.Duration, log telegraf.Logger) *TSCache {
	return &TSCache{
// newTSCache creates a new tsCache with a specified time-to-live after which timestamps are discarded.
func newTSCache(ttl time.Duration, log telegraf.Logger) *tsCache {
	return &tsCache{
		ttl:   ttl,
		table: make(map[string]time.Time),
		log:   log,
	}
}

// Purge removes timestamps that are older than the time-to-live
func (t *TSCache) Purge() {
// purge removes timestamps that are older than the time-to-live
func (t *tsCache) purge() {
	t.mux.Lock()
	defer t.mux.Unlock()
	n := 0

@ -38,28 +38,16 @@ func (t *TSCache) Purge() {
	t.log.Debugf("purged timestamp cache. %d deleted with %d remaining", n, len(t.table))
}

// IsNew returns true if the supplied timestamp for the supplied key is more recent than the
// timestamp we have on record.
func (t *TSCache) IsNew(key, metricName string, tm time.Time) bool {
	t.mux.RLock()
	defer t.mux.RUnlock()
	v, ok := t.table[makeKey(key, metricName)]
	if !ok {
		return true // We've never seen this before, so consider everything a new sample
	}
	return !tm.Before(v)
}

// Get returns a timestamp (if present)
func (t *TSCache) Get(key, metricName string) (time.Time, bool) {
// get returns a timestamp (if present)
func (t *tsCache) get(key, metricName string) (time.Time, bool) {
	t.mux.RLock()
	defer t.mux.RUnlock()
	ts, ok := t.table[makeKey(key, metricName)]
	return ts, ok
}

// Put updates the latest timestamp for the supplied key.
func (t *TSCache) Put(key, metricName string, timestamp time.Time) {
// put updates the latest timestamp for the supplied key.
func (t *tsCache) put(key, metricName string, timestamp time.Time) {
	t.mux.Lock()
	defer t.mux.Unlock()
	k := makeKey(key, metricName)

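This cache backs the endpoint's hwMarks high-water marks seen in the collection code above: put records the newest sample timestamp per key, get reads it back so the next query resumes where the last one stopped, and purge evicts stale entries. A minimal sketch of that pattern with a plain map, assuming single-goroutine use (the real type adds the RWMutex and logger):

package main

import (
	"fmt"
	"time"
)

func main() {
	marks := make(map[string]time.Time)
	key := "vm-42 cpu.usage.average" // stand-in for makeKey(key, metricName)

	// put: remember the newest sample timestamp for this metric
	marks[key] = time.Now().Add(-time.Minute)

	// get: resume collection from the stored high-water mark, if any
	if start, ok := marks[key]; ok {
		fmt.Println("query samples newer than", start)
	}

	// purge: drop entries older than the time-to-live
	ttl := 30 * time.Second
	for k, ts := range marks {
		if time.Since(ts) > ttl {
			delete(marks, k)
		}
	}
}
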
@ -39,36 +39,36 @@ var (
)

// collectVsan is the entry point for vsan metrics collection
func (e *Endpoint) collectVsan(ctx context.Context, acc telegraf.Accumulator) error {
func (e *endpoint) collectVsan(ctx context.Context, acc telegraf.Accumulator) error {
	lower := versionLowerThan(e.apiVersion, 5, 5)
	if lower {
		return fmt.Errorf("a minimum API version of 5.5 is required for vSAN. Found: %s. Skipping vCenter: %s", e.apiVersion, e.URL.Host)
		return fmt.Errorf("a minimum API version of 5.5 is required for vSAN. Found: %s. Skipping vCenter: %s", e.apiVersion, e.url.Host)
	}
	vsanPerfMetricsName = strings.Join([]string{"vsphere", "vsan", "performance"}, e.Parent.Separator)
	vsanSummaryMetricsName = strings.Join([]string{"vsphere", "vsan", "summary"}, e.Parent.Separator)
	vsanPerfMetricsName = strings.Join([]string{"vsphere", "vsan", "performance"}, e.parent.Separator)
	vsanSummaryMetricsName = strings.Join([]string{"vsphere", "vsan", "summary"}, e.parent.Separator)
	res := e.resourceKinds["vsan"]
	client, err := e.clientFactory.GetClient(ctx)
	client, err := e.clientFactory.getClient(ctx)
	if err != nil {
		return fmt.Errorf("fail to get client when collect vsan: %w", err)
	}
	// Create vSAN client
	vimClient := client.Client.Client
	vimClient := client.client.Client
	vsanClient := vimClient.NewServiceClient(vsanPath, vsanNamespace)
	// vSAN Metrics to collect
	metrics := e.getVsanMetadata(ctx, vsanClient, res)
	// Iterate over all clusters, run a goroutine for each cluster
	te := NewThrottledExecutor(e.Parent.CollectConcurrency)
	te := newThrottledExecutor(e.parent.CollectConcurrency)
	for _, obj := range res.objects {
		te.Run(ctx, func() {
		te.run(ctx, func() {
			e.collectVsanPerCluster(ctx, obj, vimClient, vsanClient, metrics, acc)
		})
	}
	te.Wait()
	te.wait()
	return nil
}

// collectVsanPerCluster is called by goroutines in collectVsan function.
func (e *Endpoint) collectVsanPerCluster(ctx context.Context, clusterRef *objectRef, vimClient *vim25.Client, vsanClient *soap.Client,
func (e *endpoint) collectVsanPerCluster(ctx context.Context, clusterRef *objectRef, vimClient *vim25.Client, vsanClient *soap.Client,
	metrics map[string]string, acc telegraf.Accumulator) {
	// Construct a map for cmmds
	cluster := object.NewClusterComputeResource(vimClient, clusterRef.ref)

@ -94,8 +94,8 @@ func (e *Endpoint) collectVsanPerCluster(ctx context.Context, clusterRef *objectRef, vimClient *vim25.Client, vsanClient *soap.Client,
	}
	cmmds, err := getCmmdsMap(ctx, vimClient, cluster)
	if err != nil {
		e.Parent.Log.Errorf("[vSAN] Error while query cmmds data. Error: %s. Skipping", err)
		cmmds = make(map[string]CmmdsEntity)
		e.parent.Log.Errorf("[vSAN] Error while query cmmds data. Error: %s. Skipping", err)
		cmmds = make(map[string]cmmdsEntity)
	}
	if err := e.queryPerformance(ctx, vsanClient, clusterRef, metrics, cmmds, acc); err != nil {
		acc.AddError(fmt.Errorf("error querying performance metrics for cluster %s: %w", clusterRef.name, err))

@ -114,12 +113,12 @@ func vsanEnabled(ctx context.Context, clusterObj *object.ClusterComputeResource) bool {

// getVsanMetadata returns a string list of the entity types that will be queried.
// e.g ["summary.health", "summary.disk-usage", "summary.resync", "performance.cluster-domclient", "performance.host-domclient"]
func (e *Endpoint) getVsanMetadata(ctx context.Context, vsanClient *soap.Client, res *resourceKind) map[string]string {
func (e *endpoint) getVsanMetadata(ctx context.Context, vsanClient *soap.Client, res *resourceKind) map[string]string {
	metrics := make(map[string]string)
	if res.simple { // Skip getting supported Entity types from vCenter. Using user defined metrics without verifying.
		for _, entity := range res.include {
			if strings.Contains(entity, "*") {
				e.Parent.Log.Infof("[vSAN] Won't use wildcard match \"*\" when vsan_metric_skip_verify = true. Skipping")
				e.parent.Log.Infof("[vSAN] Won't use wildcard match \"*\" when vsan_metric_skip_verify = true. Skipping")
				continue
			}
			metrics[entity] = ""

@ -137,7 +136,7 @@ func (e *Endpoint) getVsanMetadata(ctx context.Context, vsanClient *soap.Client, res *resourceKind) map[string]string {
		This: perfManagerRef,
	})
	if err != nil {
		e.Parent.Log.Errorf("[vSAN] Fail to get supported entities: %v. Skipping vsan performance data.", err)
		e.parent.Log.Errorf("[vSAN] Fail to get supported entities: %v. Skipping vsan performance data.", err)
		return metrics
	}
	// Use the include & exclude configuration to filter all supported performance metrics

@ -149,15 +148,15 @@ func (e *Endpoint) getVsanMetadata(ctx context.Context, vsanClient *soap.Client, res *resourceKind) map[string]string {
	return metrics
}

// getCmmdsMap returns a map which maps a uuid to a CmmdsEntity
func getCmmdsMap(ctx context.Context, client *vim25.Client, clusterObj *object.ClusterComputeResource) (map[string]CmmdsEntity, error) {
// getCmmdsMap returns a map which maps a uuid to a cmmdsEntity
func getCmmdsMap(ctx context.Context, client *vim25.Client, clusterObj *object.ClusterComputeResource) (map[string]cmmdsEntity, error) {
	hosts, err := clusterObj.Hosts(ctx)
	if err != nil {
		return nil, fmt.Errorf("fail to get host: %w", err)
	}

	if len(hosts) == 0 {
		return make(map[string]CmmdsEntity), nil
		return make(map[string]cmmdsEntity), nil
	}

	queries := []types.HostVsanInternalSystemCmmdsQuery{

@ -186,12 +185,12 @@ func getCmmdsMap(ctx context.Context, client *vim25.Client, clusterObj *object.ClusterComputeResource) (map[string]CmmdsEntity, error) {
	if resp == nil {
		return nil, errors.New("all hosts fail to query cmmds")
	}
	var clusterCmmds Cmmds
	var clusterCmmds cmmds
	if err := json.Unmarshal([]byte(resp.Returnval), &clusterCmmds); err != nil {
		return nil, fmt.Errorf("fail to convert cmmds to json: %w", err)
	}

	cmmdsMap := make(map[string]CmmdsEntity)
	cmmdsMap := make(map[string]cmmdsEntity)
	for _, entity := range clusterCmmds.Res {
		cmmdsMap[entity.UUID] = entity
	}

@ -199,17 +198,17 @@ func getCmmdsMap(ctx context.Context, client *vim25.Client, clusterObj *object.ClusterComputeResource) (map[string]CmmdsEntity, error) {
}

// queryPerformance adds performance metrics to telegraf accumulator
func (e *Endpoint) queryPerformance(ctx context.Context, vsanClient *soap.Client, clusterRef *objectRef, metrics map[string]string,
	cmmds map[string]CmmdsEntity, acc telegraf.Accumulator) error {
func (e *endpoint) queryPerformance(ctx context.Context, vsanClient *soap.Client, clusterRef *objectRef, metrics map[string]string,
	cmmds map[string]cmmdsEntity, acc telegraf.Accumulator) error {
	end := time.Now().UTC()

	// We're using a fake metric key, since we only store one highwater mark per resource
	start, ok := e.hwMarks.Get(hwMarksKeyPrefix+clusterRef.ref.Value, "generic")
	start, ok := e.hwMarks.get(hwMarksKeyPrefix+clusterRef.ref.Value, "generic")
	if !ok {
		// Look back 3 sampling periods by default
		start = end.Add(time.Duration(e.Parent.MetricLookback) * time.Duration(-e.resourceKinds["vsan"].sampling) * time.Second)
		start = end.Add(time.Duration(e.parent.MetricLookback) * time.Duration(-e.resourceKinds["vsan"].sampling) * time.Second)
	}
	e.Parent.Log.Debugf("[vSAN] Query vsan performance for time interval: %s ~ %s", start, end)
	e.parent.Log.Debugf("[vSAN] Query vsan performance for time interval: %s ~ %s", start, end)
	latest := start

	var commonError error

@ -235,14 +234,14 @@ func (e *Endpoint) queryPerformance(ctx context.Context, vsanClient *soap.Client, clusterRef *objectRef, metrics map[string]string,
		resp, err := vsanmethods.VsanPerfQueryPerf(ctx, vsanClient, &perfRequest)
		if err != nil {
			if err.Error() == "ServerFaultCode: NotFound" {
				e.Parent.Log.Errorf("[vSAN] Is vSAN performance service enabled for %s? Skipping ...", clusterRef.name)
				e.parent.Log.Errorf("[vSAN] Is vSAN performance service enabled for %s? Skipping ...", clusterRef.name)
				commonError = err
				break
			}
			e.Parent.Log.Errorf("[vSAN] Error querying performance data for %s: %s: %s.", clusterRef.name, entityRefID, err)
			e.parent.Log.Errorf("[vSAN] Error querying performance data for %s: %s: %s.", clusterRef.name, entityRefID, err)
			continue
		}
		tags := populateClusterTags(make(map[string]string), clusterRef, e.URL.Host)
		tags := populateClusterTags(make(map[string]string), clusterRef, e.url.Host)

		count := 0
		for _, em := range resp.Returnval {

@ -263,7 +262,7 @@ func (e *Endpoint) queryPerformance(ctx context.Context, vsanClient *soap.Client, clusterRef *objectRef, metrics map[string]string,
				// Parse the input string to a time.Time object
				utcTimeStamp, err := time.Parse("2006-01-02 15:04:05", t)
				if err != nil {
					e.Parent.Log.Errorf("[vSAN] Failed to parse a timestamp: %s. Skipping", utcTimeStamp)
					e.parent.Log.Errorf("[vSAN] Failed to parse a timestamp: %s. Skipping", utcTimeStamp)
					timeStamps = append(timeStamps, time.Time{})
					continue
				}

@ -282,7 +281,7 @@ func (e *Endpoint) queryPerformance(ctx context.Context, vsanClient *soap.Client, clusterRef *objectRef, metrics map[string]string,
					bKey := em.EntityRefId + " " + strconv.FormatInt(ts.UnixNano(), 10)
					bucket, found := buckets[bKey]
					if !found {
						mn := vsanPerfMetricsName + e.Parent.Separator + formattedEntityName
						mn := vsanPerfMetricsName + e.parent.Separator + formattedEntityName
						bucket = metricEntry{name: mn, ts: ts, fields: make(map[string]interface{}), tags: tags}
						buckets[bKey] = bucket
					}

@ -304,12 +303,12 @@ func (e *Endpoint) queryPerformance(ctx context.Context, vsanClient *soap.Client, clusterRef *objectRef, metrics map[string]string,
			count += len(buckets)
		}
	}
	e.hwMarks.Put(hwMarksKeyPrefix+clusterRef.ref.Value, "generic", latest)
	e.hwMarks.put(hwMarksKeyPrefix+clusterRef.ref.Value, "generic", latest)
	return commonError
}

// queryDiskUsage adds 'FreeCapacityB' and 'TotalCapacityB' metrics to telegraf accumulator
func (e *Endpoint) queryDiskUsage(ctx context.Context, vsanClient *soap.Client, clusterRef *objectRef, acc telegraf.Accumulator) error {
func (e *endpoint) queryDiskUsage(ctx context.Context, vsanClient *soap.Client, clusterRef *objectRef, acc telegraf.Accumulator) error {
	spaceManagerRef := types.ManagedObjectReference{
		Type:  "VsanSpaceReportSystem",
		Value: "vsan-cluster-space-report-system",

@ -326,13 +325,13 @@ func (e *Endpoint) queryDiskUsage(ctx context.Context, vsanClient *soap.Client, clusterRef *objectRef, acc telegraf.Accumulator) error {
		"free_capacity_byte":  resp.Returnval.FreeCapacityB,
		"total_capacity_byte": resp.Returnval.TotalCapacityB,
	}
	tags := populateClusterTags(make(map[string]string), clusterRef, e.URL.Host)
	tags := populateClusterTags(make(map[string]string), clusterRef, e.url.Host)
	acc.AddFields(vsanSummaryMetricsName, fields, tags)
	return nil
}

// queryDiskUsage adds 'OverallHealth' metric to telegraf accumulator
func (e *Endpoint) queryHealthSummary(ctx context.Context, vsanClient *soap.Client, clusterRef *objectRef, acc telegraf.Accumulator) error {
// queryHealthSummary adds 'OverallHealth' metric to telegraf accumulator
func (e *endpoint) queryHealthSummary(ctx context.Context, vsanClient *soap.Client, clusterRef *objectRef, acc telegraf.Accumulator) error {
	healthSystemRef := types.ManagedObjectReference{
		Type:  "VsanVcClusterHealthSystem",
		Value: "vsan-cluster-health-system",

@ -354,17 +353,17 @@ func (e *Endpoint) queryHealthSummary(ctx context.Context, vsanClient *soap.Client, clusterRef *objectRef, acc telegraf.Accumulator) error {
	if val, ok := healthMap[healthStr]; ok {
		fields["overall_health"] = val
	}
	tags := populateClusterTags(make(map[string]string), clusterRef, e.URL.Host)
	tags := populateClusterTags(make(map[string]string), clusterRef, e.url.Host)
	acc.AddFields(vsanSummaryMetricsName, fields, tags)
	return nil
}

// queryResyncSummary adds resync information to accumulator
func (e *Endpoint) queryResyncSummary(ctx context.Context, vsanClient *soap.Client, clusterObj *object.ClusterComputeResource,
func (e *endpoint) queryResyncSummary(ctx context.Context, vsanClient *soap.Client, clusterObj *object.ClusterComputeResource,
	clusterRef *objectRef, acc telegraf.Accumulator) error {
	if lower := versionLowerThan(e.apiVersion, 6, 7); lower {
		e.Parent.Log.Infof("I! [inputs.vsphere][vSAN] Minimum API Version 6.7 required for resync summary. Found: %s. Skipping VCenter: %s",
			e.apiVersion, e.URL.Host)
		e.parent.Log.Infof("I! [inputs.vsphere][vSAN] Minimum API Version 6.7 required for resync summary. Found: %s. Skipping VCenter: %s",
			e.apiVersion, e.url.Host)
		return nil
	}
	hosts, err := clusterObj.Hosts(ctx)

@ -377,7 +376,7 @@ func (e *Endpoint) queryResyncSummary(ctx context.Context, vsanClient *soap.Client, clusterObj *object.ClusterComputeResource,
	hostRefValue := hosts[0].Reference().Value
	hostRefValueParts := strings.Split(hostRefValue, "-")
	if len(hostRefValueParts) != 2 {
		e.Parent.Log.Errorf("[vSAN] Host reference value does not match expected pattern: host-<num>. Actual Value %s", hostRefValue)
		e.parent.Log.Errorf("[vSAN] Host reference value does not match expected pattern: host-<num>. Actual Value %s", hostRefValue)
		return err
	}
	vsanSystemEx := types.ManagedObjectReference{

@ -401,7 +400,7 @@ func (e *Endpoint) queryResyncSummary(ctx context.Context, vsanClient *soap.Client, clusterObj *object.ClusterComputeResource,
	fields["total_bytes_to_sync"] = resp.Returnval.TotalBytesToSync
	fields["total_objects_to_sync"] = resp.Returnval.TotalObjectsToSync
	fields["total_recovery_eta"] = resp.Returnval.TotalRecoveryETA
	tags := populateClusterTags(make(map[string]string), clusterRef, e.URL.Host)
	tags := populateClusterTags(make(map[string]string), clusterRef, e.url.Host)
	acc.AddFields(vsanSummaryMetricsName, fields, tags)
	return nil
}

@ -422,7 +421,7 @@ func populateClusterTags(tags map[string]string, clusterRef *objectRef, vcenter string) map[string]string {
}

// populateCMMDSTags takes in a tag map, makes a copy, adds more tags using a cmmds map and returns the copy.
func populateCMMDSTags(tags map[string]string, entityName, uuid string, cmmds map[string]CmmdsEntity) map[string]string {
func populateCMMDSTags(tags map[string]string, entityName, uuid string, cmmds map[string]cmmdsEntity) map[string]string {
	newTags := make(map[string]string)
	// deep copy
	for k, v := range tags {

@ -524,18 +523,18 @@ func versionLowerThan(current string, major, minor int) bool {
	return true
}

type CmmdsEntity struct {
type cmmdsEntity struct {
	UUID    string       `json:"uuid"`
	Owner   string       `json:"owner"` // ESXi UUID
	Type    string       `json:"type"`
	Content CmmdsContent `json:"content"`
	Content cmmdsContent `json:"content"`
}

type Cmmds struct {
	Res []CmmdsEntity `json:"result"`
type cmmds struct {
	Res []cmmdsEntity `json:"result"`
}

type CmmdsContent struct {
type cmmdsContent struct {
	Hostname string  `json:"hostname"`
	IsSsd    float64 `json:"isSsd"`
	SsdUUID  string  `json:"ssdUuid"`

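These cmmds types only change visibility here; their job is to decode the JSON blob returned by the host cmmds query and index it by UUID, as getCmmdsMap does. A standalone sketch of that decoding with the same shape (the sample JSON is abbreviated and illustrative; the real struct carries more fields, e.g. a devName):

package main

import (
	"encoding/json"
	"fmt"
)

type cmmdsContent struct {
	Hostname string  `json:"hostname"`
	IsSsd    float64 `json:"isSsd"`
	SsdUUID  string  `json:"ssdUuid"`
}

type cmmdsEntity struct {
	UUID    string       `json:"uuid"`
	Owner   string       `json:"owner"` // ESXi UUID
	Type    string       `json:"type"`
	Content cmmdsContent `json:"content"`
}

type cmmds struct {
	Res []cmmdsEntity `json:"result"`
}

func main() {
	raw := `{"result":[{"uuid":"u1","owner":"h1","type":"HOSTNAME","content":{"hostname":"esx1"}}]}`
	var c cmmds
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	// Index entities by UUID, as getCmmdsMap does.
	m := make(map[string]cmmdsEntity)
	for _, e := range c.Res {
		m[e.UUID] = e
	}
	fmt.Println(m["u1"].Content.Hostname)
}
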
@ -20,8 +20,6 @@ import (
//go:embed sample.conf
var sampleConfig string

// VSphere is the top level type for the vSphere input plugin. It contains all the configuration
// and a list of connected vSphere endpoints
type VSphere struct {
	Vcenters []string      `toml:"vcenters"`
	Username config.Secret `toml:"username"`

@ -81,7 +79,7 @@ type VSphere struct {
	tls.ClientConfig // Mix in the TLS/SSL goodness from core
	proxy.HTTPProxy

	endpoints []*Endpoint
	endpoints []*endpoint
	cancel    context.CancelFunc
}

@ -89,21 +87,19 @@ func (*VSphere) SampleConfig() string {
	return sampleConfig
}

// Start is called from telegraf core when a plugin is started and allows it to
// perform initialization tasks.
func (v *VSphere) Start(_ telegraf.Accumulator) error {
	v.Log.Info("Starting plugin")
	ctx, cancel := context.WithCancel(context.Background())
	v.cancel = cancel

	// Create endpoints, one for each vCenter we're monitoring
	v.endpoints = make([]*Endpoint, 0, len(v.Vcenters))
	v.endpoints = make([]*endpoint, 0, len(v.Vcenters))
	for _, rawURL := range v.Vcenters {
		u, err := soap.ParseURL(rawURL)
		if err != nil {
			return err
		}
		ep, err := NewEndpoint(ctx, v, u, v.Log)
		ep, err := newEndpoint(ctx, v, u, v.Log)
		if err != nil {
			return err
		}

@ -112,36 +108,13 @@ func (v *VSphere) Start(_ telegraf.Accumulator) error {
	return nil
}

// Stop is called from telegraf core when a plugin is stopped and allows it to
// perform shutdown tasks.
func (v *VSphere) Stop() {
	v.Log.Info("Stopping plugin")
	v.cancel()

	// Wait for all endpoints to finish. No need to wait for
	// Gather() to finish here, since it Stop() will only be called
	// after the last Gather() has finished. We do, however, need to
	// wait for any discovery to complete by trying to grab the
	// "busy" mutex.
	for _, ep := range v.endpoints {
		v.Log.Debugf("Waiting for endpoint %q to finish", ep.URL.Host)
		func() {
			ep.busy.Lock() // Wait until discovery is finished
			defer ep.busy.Unlock()
			ep.Close()
		}()
	}
}

// Gather is the main data collection function called by the Telegraf core. It performs all
// the data collection and writes all metrics into the Accumulator passed as an argument.
func (v *VSphere) Gather(acc telegraf.Accumulator) error {
	var wg sync.WaitGroup
	for _, ep := range v.endpoints {
		wg.Add(1)
		go func(endpoint *Endpoint) {
		go func(endpoint *endpoint) {
			defer wg.Done()
			err := endpoint.Collect(context.Background(), acc)
			err := endpoint.collect(context.Background(), acc)
			if errors.Is(err, context.Canceled) {
				// No need to signal errors if we were merely canceled.
				err = nil

@ -156,6 +129,25 @@ func (v *VSphere) Gather(acc telegraf.Accumulator) error {
	return nil
}

func (v *VSphere) Stop() {
	v.Log.Info("Stopping plugin")
	v.cancel()

	// Wait for all endpoints to finish. No need to wait for
	// Gather() to finish here, since Stop() will only be called
	// after the last Gather() has finished. We do, however, need to
	// wait for any discovery to complete by trying to grab the
	// "busy" mutex.
	for _, ep := range v.endpoints {
		v.Log.Debugf("Waiting for endpoint %q to finish", ep.url.Host)
		func() {
			ep.busy.Lock() // Wait until discovery is finished
			defer ep.busy.Unlock()
			ep.close()
		}()
	}
}

func init() {
	inputs.Add("vsphere", func() telegraf.Input {
		return &VSphere{

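Gather fans out one goroutine per endpoint and treats context.Canceled as a clean shutdown rather than an error. A minimal sketch of that fan-out and error-filter shape (the collect function here is a stand-in, not the plugin's):

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

func main() {
	collect := func(ctx context.Context) error { return ctx.Err() }

	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate Stop() having been called

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ { // one goroutine per endpoint
		wg.Add(1)
		go func() {
			defer wg.Done()
			err := collect(ctx)
			if errors.Is(err, context.Canceled) {
				// No need to signal errors if we were merely canceled.
				err = nil
			}
			if err != nil {
				fmt.Println("error:", err)
			}
		}()
	}
	wg.Wait()
}
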
@ -172,7 +172,7 @@ func testAlignUniform(t *testing.T, n int) {
|
|||
})
|
||||
values = append(values, 1)
|
||||
}
|
||||
e := Endpoint{log: testutil.Logger{}}
|
||||
e := endpoint{log: testutil.Logger{}}
|
||||
newInfo, newValues := e.alignSamples(info, values, 60*time.Second)
|
||||
require.Len(t, newInfo, n/3, "Aligned infos have wrong size")
|
||||
require.Len(t, newValues, n/3, "Aligned values have wrong size")
|
||||
|
|
@ -198,7 +198,7 @@ func TestAlignMetrics(t *testing.T) {
|
|||
})
|
||||
values = append(values, int64(i%3+1))
|
||||
}
|
||||
e := Endpoint{log: testutil.Logger{}}
|
||||
e := endpoint{log: testutil.Logger{}}
|
||||
newInfo, newValues := e.alignSamples(info, values, 60*time.Second)
|
||||
require.Len(t, newInfo, n/3, "Aligned infos have wrong size")
|
||||
require.Len(t, newValues, n/3, "Aligned values have wrong size")
|
||||
|
|
@ -225,11 +225,11 @@ func TestMaxQuery(t *testing.T) {
|
|||
v := defaultVSphere()
|
||||
v.MaxQueryMetrics = 256
|
||||
ctx := context.Background()
|
||||
c, err := NewClient(ctx, s.URL, v)
|
||||
c, err := newClient(ctx, s.URL, v)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 256, v.MaxQueryMetrics)
|
||||
|
||||
om := object.NewOptionManager(c.Client.Client, *c.Client.Client.ServiceContent.Setting)
|
||||
om := object.NewOptionManager(c.client.Client, *c.client.Client.ServiceContent.Setting)
|
||||
err = om.Update(ctx, []types.BaseOptionValue{&types.OptionValue{
|
||||
Key: "config.vpxd.stats.maxQueryMetrics",
|
||||
Value: "42",
|
||||
|
|
@ -238,17 +238,17 @@ func TestMaxQuery(t *testing.T) {
|
|||
|
||||
v.MaxQueryMetrics = 256
|
||||
ctx = context.Background()
|
||||
c2, err := NewClient(ctx, s.URL, v)
|
||||
c2, err := newClient(ctx, s.URL, v)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 42, v.MaxQueryMetrics)
|
||||
c.close()
|
||||
c2.close()
|
||||
}
|
||||
|
||||
func testLookupVM(ctx context.Context, t *testing.T, f *Finder, path string, expected int, expectedName string) {
|
||||
func testLookupVM(ctx context.Context, t *testing.T, f *finder, path string, expected int, expectedName string) {
|
||||
poweredOn := types.VirtualMachinePowerState("poweredOn")
|
||||
var vm []mo.VirtualMachine
|
||||
err := f.Find(ctx, "VirtualMachine", path, &vm)
|
||||
err := f.find(ctx, "VirtualMachine", path, &vm)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, vm, expected)
|
||||
if expectedName != "" {
|
||||
|
|
@ -273,37 +273,37 @@ func TestFinder(t *testing.T) {
|
|||
v := defaultVSphere()
|
||||
ctx := context.Background()
|
||||
|
||||
c, err := NewClient(ctx, s.URL, v)
|
||||
c, err := newClient(ctx, s.URL, v)
|
||||
require.NoError(t, err)
|
||||
|
||||
f := Finder{c}
|
||||
f := finder{c}
|
||||
|
||||
var dc []mo.Datacenter
|
||||
err = f.Find(ctx, "Datacenter", "/DC0", &dc)
|
||||
err = f.find(ctx, "Datacenter", "/DC0", &dc)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, dc, 1)
|
||||
require.Equal(t, "DC0", dc[0].Name)
|
||||
|
||||
var host []mo.HostSystem
|
||||
err = f.Find(ctx, "HostSystem", "/DC0/host/DC0_H0/DC0_H0", &host)
|
||||
err = f.find(ctx, "HostSystem", "/DC0/host/DC0_H0/DC0_H0", &host)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, host, 1)
|
||||
require.Equal(t, "DC0_H0", host[0].Name)
|
||||
|
||||
host = make([]mo.HostSystem, 0)
|
||||
err = f.Find(ctx, "HostSystem", "/DC0/host/DC0_C0/DC0_C0_H0", &host)
|
||||
err = f.find(ctx, "HostSystem", "/DC0/host/DC0_C0/DC0_C0_H0", &host)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, host, 1)
|
||||
require.Equal(t, "DC0_C0_H0", host[0].Name)
|
||||
|
||||
resourcepool := make([]mo.ResourcePool, 0)
|
||||
err = f.Find(ctx, "ResourcePool", "/DC0/host/DC0_C0/Resources/DC0_C0_RP0", &resourcepool)
|
||||
err = f.find(ctx, "ResourcePool", "/DC0/host/DC0_C0/Resources/DC0_C0_RP0", &resourcepool)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, host, 1)
|
||||
require.Equal(t, "DC0_C0_H0", host[0].Name)
|
||||
|
||||
host = make([]mo.HostSystem, 0)
|
||||
err = f.Find(ctx, "HostSystem", "/DC0/host/DC0_C0/*", &host)
|
||||
err = f.find(ctx, "HostSystem", "/DC0/host/DC0_C0/*", &host)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, host, 3)
|
||||
|
||||
|
|
@ -322,58 +322,58 @@ func TestFinder(t *testing.T) {
|
|||
testLookupVM(ctx, t, &f, "/*/host/**/*DC*/*/*DC*", 4, "")
|
||||
|
||||
vm = make([]mo.VirtualMachine, 0)
|
||||
err = f.FindAll(ctx, "VirtualMachine", []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"}, nil, &vm)
|
||||
err = f.findAll(ctx, "VirtualMachine", []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"}, nil, &vm)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, vm, 4)
|
||||
|
||||
rf := ResourceFilter{
|
||||
rf := resourceFilter{
|
||||
finder: &f,
|
||||
paths: []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"},
|
||||
excludePaths: []string{"/DC0/vm/DC0_H0_VM0"},
|
||||
resType: "VirtualMachine",
|
||||
}
|
||||
vm = make([]mo.VirtualMachine, 0)
|
||||
require.NoError(t, rf.FindAll(ctx, &vm))
|
||||
require.NoError(t, rf.findAll(ctx, &vm))
|
||||
require.Len(t, vm, 3)
|
||||
|
||||
rf = ResourceFilter{
|
||||
rf = resourceFilter{
|
||||
finder: &f,
|
||||
paths: []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"},
|
||||
excludePaths: []string{"/**"},
|
||||
resType: "VirtualMachine",
|
||||
}
|
||||
vm = make([]mo.VirtualMachine, 0)
|
||||
require.NoError(t, rf.FindAll(ctx, &vm))
|
||||
require.NoError(t, rf.findAll(ctx, &vm))
|
||||
require.Empty(t, vm)
|
||||
|
||||
rf = ResourceFilter{
|
||||
rf = resourceFilter{
|
||||
finder: &f,
|
||||
paths: []string{"/**"},
|
||||
excludePaths: []string{"/**"},
|
||||
resType: "VirtualMachine",
|
||||
}
|
||||
vm = make([]mo.VirtualMachine, 0)
|
||||
require.NoError(t, rf.FindAll(ctx, &vm))
|
||||
require.NoError(t, rf.findAll(ctx, &vm))
|
||||
require.Empty(t, vm)
|
||||
|
||||
rf = ResourceFilter{
|
||||
rf = resourceFilter{
|
||||
finder: &f,
|
||||
paths: []string{"/**"},
|
||||
excludePaths: []string{"/this won't match anything"},
|
||||
resType: "VirtualMachine",
|
||||
}
|
||||
vm = make([]mo.VirtualMachine, 0)
|
||||
require.NoError(t, rf.FindAll(ctx, &vm))
|
||||
require.NoError(t, rf.findAll(ctx, &vm))
|
||||
require.Len(t, vm, 8)
|
||||
|
||||
rf = ResourceFilter{
|
||||
rf = resourceFilter{
|
||||
finder: &f,
|
||||
paths: []string{"/**"},
|
||||
excludePaths: []string{"/**/*VM0"},
|
||||
resType: "VirtualMachine",
|
||||
}
|
||||
vm = make([]mo.VirtualMachine, 0)
|
||||
require.NoError(t, rf.FindAll(ctx, &vm))
|
||||
require.NoError(t, rf.findAll(ctx, &vm))
|
||||
require.Len(t, vm, 4)
|
||||
}
|
||||
|
||||
|
|
@ -391,19 +391,19 @@ func TestFolders(t *testing.T) {
|
|||
|
||||
v := defaultVSphere()
|
||||
|
||||
c, err := NewClient(ctx, s.URL, v)
|
||||
c, err := newClient(ctx, s.URL, v)
|
||||
require.NoError(t, err)
|
||||
|
||||
f := Finder{c}
|
||||
f := finder{c}
|
||||
|
||||
var folder []mo.Folder
|
||||
err = f.Find(ctx, "Folder", "/F0", &folder)
|
||||
err = f.find(ctx, "Folder", "/F0", &folder)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, folder, 1)
|
||||
require.Equal(t, "F0", folder[0].Name)
|
||||
|
||||
var dc []mo.Datacenter
|
||||
err = f.Find(ctx, "Datacenter", "/F0/DC1", &dc)
|
||||
err = f.find(ctx, "Datacenter", "/F0/DC1", &dc)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, dc, 1)
|
||||
require.Equal(t, "DC1", dc[0].Name)
|
||||
|
|
@@ -422,16 +422,16 @@ func TestVsanCmmds(t *testing.T) {
 	v := defaultVSphere()
 	ctx := context.Background()

-	c, err := NewClient(ctx, s.URL, v)
+	c, err := newClient(ctx, s.URL, v)
 	require.NoError(t, err)

-	f := Finder{c}
+	f := finder{c}
 	var clusters []mo.ClusterComputeResource
-	err = f.FindAll(ctx, "ClusterComputeResource", []string{"/**"}, nil, &clusters)
+	err = f.findAll(ctx, "ClusterComputeResource", []string{"/**"}, nil, &clusters)
 	require.NoError(t, err)

-	clusterObj := object.NewClusterComputeResource(c.Client.Client, clusters[0].Reference())
-	_, err = getCmmdsMap(ctx, c.Client.Client, clusterObj)
+	clusterObj := object.NewClusterComputeResource(c.client.Client, clusters[0].Reference())
+	_, err = getCmmdsMap(ctx, c.client.Client, clusterObj)
 	require.Error(t, err)
 }

@@ -443,11 +443,11 @@ func TestVsanTags(t *testing.T) {
 	ssd := "52173131-3384-bb63-4ef8-c00b0ce7e3e7"
 	hostname := "sc2-hs1-b2801.eng.vmware.com"
 	devName := "naa.55cd2e414d82c815:2"
-	var cmmds = map[string]CmmdsEntity{
-		nvmeDisk: {UUID: nvmeDisk, Type: "DISK_CAPACITY_TIER", Owner: host, Content: CmmdsContent{DevName: devName}},
-		disk:     {UUID: disk, Type: "DISK", Owner: host, Content: CmmdsContent{DevName: devName, IsSsd: 1.}},
-		ssdDisk:  {UUID: ssdDisk, Type: "DISK", Owner: host, Content: CmmdsContent{DevName: devName, IsSsd: 0., SsdUUID: ssd}},
-		host:     {UUID: host, Type: "HOSTNAME", Owner: host, Content: CmmdsContent{Hostname: hostname}},
+	var cmmds = map[string]cmmdsEntity{
+		nvmeDisk: {UUID: nvmeDisk, Type: "DISK_CAPACITY_TIER", Owner: host, Content: cmmdsContent{DevName: devName}},
+		disk:     {UUID: disk, Type: "DISK", Owner: host, Content: cmmdsContent{DevName: devName, IsSsd: 1.}},
+		ssdDisk:  {UUID: ssdDisk, Type: "DISK", Owner: host, Content: cmmdsContent{DevName: devName, IsSsd: 0., SsdUUID: ssd}},
+		host:     {UUID: host, Type: "HOSTNAME", Owner: host, Content: cmmdsContent{Hostname: hostname}},
 	}
 	tags := populateCMMDSTags(make(map[string]string), "capacity-disk", disk, cmmds)
 	require.Len(t, tags, 2)

@@ -472,13 +472,13 @@ func TestDisconnectedServerBehavior(t *testing.T) {
 	require.NoError(t, err)
 	v := defaultVSphere()
 	v.DisconnectedServersBehavior = "error"
-	_, err = NewEndpoint(context.Background(), v, u, v.Log)
+	_, err = newEndpoint(context.Background(), v, u, v.Log)
 	require.Error(t, err)
 	v.DisconnectedServersBehavior = "ignore"
-	_, err = NewEndpoint(context.Background(), v, u, v.Log)
+	_, err = newEndpoint(context.Background(), v, u, v.Log)
 	require.NoError(t, err)
 	v.DisconnectedServersBehavior = "something else"
-	_, err = NewEndpoint(context.Background(), v, u, v.Log)
+	_, err = newEndpoint(context.Background(), v, u, v.Log)
 	require.Error(t, err)
 	require.Equal(t, `"something else" is not a valid value for disconnected_servers_behavior`, err.Error())
 }

@@ -520,7 +520,7 @@ func testCollection(t *testing.T, excludeClusters bool) {
 	require.Emptyf(t, acc.Errors, "Errors found: %s", acc.Errors)
 	require.NotEmpty(t, acc.Metrics, "No metrics were collected")
 	cache := make(map[string]string)
-	client, err := v.endpoints[0].clientFactory.GetClient(context.Background())
+	client, err := v.endpoints[0].clientFactory.getClient(context.Background())
 	require.NoError(t, err)
 	hostCache := make(map[string]string)
 	for _, m := range acc.Metrics {

@@ -532,9 +532,9 @@ func testCollection(t *testing.T, excludeClusters bool) {
 		hostMoid, ok := hostCache[hostName]
 		if !ok {
 			// We have to follow the host parent path to locate a cluster. Look up the host!
-			finder := Finder{client}
+			finder := finder{client}
 			var hosts []mo.HostSystem
-			err := finder.Find(context.Background(), "HostSystem", "/**/"+hostName, &hosts)
+			err := finder.find(context.Background(), "HostSystem", "/**/"+hostName, &hosts)
 			require.NoError(t, err)
 			require.NotEmpty(t, hosts)
 			hostMoid = hosts[0].Reference().Value

@@ -558,7 +558,7 @@ func testCollection(t *testing.T, excludeClusters bool) {
 	require.Empty(t, mustHaveMetrics, "Some metrics were not found")
 }

-func isInCluster(v *VSphere, client *Client, cache map[string]string, resourceKind, moid string) bool {
+func isInCluster(v *VSphere, client *client, cache map[string]string, resourceKind, moid string) bool {
 	ctx := context.Background()
 	ref := types.ManagedObjectReference{
 		Type: resourceKind,
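
Note: every rename in these hunks follows the same pattern driven by revive's `exported` rule: identifiers that are only referenced inside the package give up their exported names, so the linter no longer demands doc comments or a public-API contract for them. A minimal sketch of the before/after shape (illustrative only; the field list is taken from the test diff above, not from the plugin's actual definitions):

package vsphere

// Before the fix this type was exported as ResourceFilter even though
// nothing outside the package used it, so revive:exported flagged it.
// After the fix it is unexported, and call sites such as rf.findAll(...)
// in the tests are updated mechanically, which is why the diff is
// rename-only with no behavioral change.
type resourceFilter struct {
	paths        []string // inventory paths to include, e.g. "/DC0/vm/DC0_H0*"
	excludePaths []string // inventory paths to drop again, e.g. "/**/*VM0"
	resType      string   // managed object type, e.g. "VirtualMachine"
}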