Revive fixes - part 3 (#8872)
* Revive fixes regarding the following set of rules: [rule.var-naming]
This commit is contained in: parent 4584d691a7, commit 8a6907a186
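The code hunks below all apply the same rename pattern required by revive's var-naming rule: underscores are dropped from Go identifiers and common initialisms (ID, URL, HTTP, JSON, UUID, DNS, CPU) are written in a single case. A minimal before/after sketch of that pattern, trimmed from the ActiveMQ subscriber type changed in this commit (illustrative only, not the complete type):

```go
package activemq

import "encoding/xml"

// Before (flagged by revive's var-naming rule):
//
//	ClientId     string `xml:"clientId,attr"`
//	ConnectionId string `xml:"connectionId,attr"`
//
//	func (a *ActiveMQ) createHttpClient() (*http.Client, error)

// After: only the Go identifiers change; the xml struct tags, and
// therefore the wire format, stay exactly the same.
type Subscriber struct {
	XMLName      xml.Name `xml:"subscriber"`
	ClientID     string   `xml:"clientId,attr"`
	ConnectionID string   `xml:"connectionId,attr"`
}
```

Serialized names are untouched throughout the commit: struct tags such as `xml:"clientId,attr"` and metric tag keys such as "client_id" keep their existing spelling, so no output changes.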
@@ -31,14 +31,38 @@ linters-settings:
      - name: redefines-builtin-id

run:
  # which dirs to skip: issues from them won't be reported;
  # can use regexp here: generated.*, regexp is applied on full path;
  # default value is empty list, but default dirs are skipped independently
  # from this option's value (see skip-dirs-use-default).
  # "/" will be replaced by current OS file path separator to properly work
  # on Windows.
  skip-dirs:
    - scripts
    - docs
    - etc

  # which files to skip: they will be analyzed, but issues from them
  # won't be reported. Default value is empty list, but there is
  # no need to include all autogenerated files, we confidently recognize
  # autogenerated files. If it's not please let us know.
  # "/" will be replaced by current OS file path separator to properly work
  # on Windows.
  skip-files:
    - plugins/parsers/influx/machine.go*

issues:
  # List of regexps of issue texts to exclude, empty list by default.
  # But independently from this option we use default exclude patterns,
  # it can be disabled by `exclude-use-default: false`. To list all
  # excluded by default patterns execute `golangci-lint run --help`
  exclude:
    - don't use an underscore in package name
    - exported.*should have comment.*or be unexported
    - comment on exported.*should be of the form

  # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
  max-issues-per-linter: 0

  # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
  max-same-issues: 0

@@ -25,9 +25,9 @@ type configuredStats struct {
stdev bool
sum bool
diff bool
non_negative_diff bool
nonNegativeDiff bool
rate bool
non_negative_rate bool
nonNegativeRate bool
interval bool
}

@@ -197,13 +197,13 @@ func (b *BasicStats) Push(acc telegraf.Accumulator) {
if b.statsConfig.diff {
fields[k+"_diff"] = v.diff
}
if b.statsConfig.non_negative_diff && v.diff >= 0 {
if b.statsConfig.nonNegativeDiff && v.diff >= 0 {
fields[k+"_non_negative_diff"] = v.diff
}
if b.statsConfig.rate {
fields[k+"_rate"] = v.rate
}
if b.statsConfig.non_negative_rate && v.diff >= 0 {
if b.statsConfig.nonNegativeRate && v.diff >= 0 {
fields[k+"_non_negative_rate"] = v.rate
}
if b.statsConfig.interval {

@@ -242,11 +242,11 @@ func (b *BasicStats) parseStats() *configuredStats {
case "diff":
parsed.diff = true
case "non_negative_diff":
parsed.non_negative_diff = true
parsed.nonNegativeDiff = true
case "rate":
parsed.rate = true
case "non_negative_rate":
parsed.non_negative_rate = true
parsed.nonNegativeRate = true
case "interval":
parsed.interval = true
default:

@@ -267,9 +267,9 @@ func (b *BasicStats) getConfiguredStats() {
variance: true,
stdev: true,
sum: false,
non_negative_diff: false,
nonNegativeDiff: false,
rate: false,
non_negative_rate: false,
nonNegativeRate: false,
}
} else {
b.statsConfig = b.parseStats()

@@ -49,9 +49,9 @@ type Subscribers struct {

type Subscriber struct {
XMLName xml.Name `xml:"subscriber"`
ClientId string `xml:"clientId,attr"`
ClientID string `xml:"clientId,attr"`
SubscriptionName string `xml:"subscriptionName,attr"`
ConnectionId string `xml:"connectionId,attr"`
ConnectionID string `xml:"connectionId,attr"`
DestinationName string `xml:"destinationName,attr"`
Selector string `xml:"selector,attr"`
Active string `xml:"active,attr"`

@@ -117,7 +117,7 @@ func (a *ActiveMQ) SampleConfig() string {
return sampleConfig
}

func (a *ActiveMQ) createHttpClient() (*http.Client, error) {
func (a *ActiveMQ) createHTTPClient() (*http.Client, error) {
tlsCfg, err := a.ClientConfig.TLSConfig()
if err != nil {
return nil, err

@@ -157,7 +157,7 @@ func (a *ActiveMQ) Init() error {

a.baseURL = u

a.client, err = a.createHttpClient()
a.client, err = a.createHTTPClient()
if err != nil {
return err
}

@@ -228,9 +228,9 @@ func (a *ActiveMQ) GatherSubscribersMetrics(acc telegraf.Accumulator, subscriber
records := make(map[string]interface{})
tags := make(map[string]string)

tags["client_id"] = subscriber.ClientId
tags["client_id"] = subscriber.ClientID
tags["subscription_name"] = subscriber.SubscriptionName
tags["connection_id"] = subscriber.ConnectionId
tags["connection_id"] = subscriber.ConnectionID
tags["destination_name"] = subscriber.DestinationName
tags["selector"] = subscriber.Selector
tags["active"] = subscriber.Active

@@ -67,7 +67,7 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
}

if n.client == nil {
client, err := n.createHttpClient()
client, err := n.createHTTPClient()
if err != nil {
return err
}

@@ -84,7 +84,7 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
wg.Add(1)
go func(addr *url.URL) {
defer wg.Done()
acc.AddError(n.gatherUrl(addr, acc))
acc.AddError(n.gatherURL(addr, acc))
}(addr)
}

@@ -92,7 +92,7 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
return nil
}

func (n *Apache) createHttpClient() (*http.Client, error) {
func (n *Apache) createHTTPClient() (*http.Client, error) {
tlsCfg, err := n.ClientConfig.TLSConfig()
if err != nil {
return nil, err

@@ -108,7 +108,7 @@ func (n *Apache) createHttpClient() (*http.Client, error) {
return client, nil
}

func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
func (n *Apache) gatherURL(addr *url.URL, acc telegraf.Accumulator) error {
req, err := http.NewRequest("GET", addr.String(), nil)
if err != nil {
return fmt.Errorf("error on new request to %s : %s", addr.String(), err)

@@ -12,26 +12,26 @@ import (
)

const (
dirty_data = "1.5G"
dirtyData = "1.5G"
bypassed = "4.7T"
cache_bypass_hits = "146155333"
cache_bypass_misses = "0"
cache_hit_ratio = "90"
cache_hits = "511469583"
cache_miss_collisions = "157567"
cache_misses = "50616331"
cache_readaheads = "2"
cacheBypassHits = "146155333"
cacheBypassMisses = "0"
cacheHitRatio = "90"
cacheHits = "511469583"
cacheMissCollisions = "157567"
cacheMisses = "50616331"
cacheReadaheads = "2"
)

var (
testBcachePath = os.TempDir() + "/telegraf/sys/fs/bcache"
testBcacheUuidPath = testBcachePath + "/663955a3-765a-4737-a9fd-8250a7a78411"
testBcacheUUIDPath = testBcachePath + "/663955a3-765a-4737-a9fd-8250a7a78411"
testBcacheDevPath = os.TempDir() + "/telegraf/sys/devices/virtual/block/bcache0"
testBcacheBackingDevPath = os.TempDir() + "/telegraf/sys/devices/virtual/block/md10"
)

func TestBcacheGeneratesMetrics(t *testing.T) {
err := os.MkdirAll(testBcacheUuidPath, 0755)
err := os.MkdirAll(testBcacheUUIDPath, 0755)
require.NoError(t, err)

err = os.MkdirAll(testBcacheDevPath, 0755)

@@ -40,49 +40,49 @@ func TestBcacheGeneratesMetrics(t *testing.T) {
err = os.MkdirAll(testBcacheBackingDevPath+"/bcache", 0755)
require.NoError(t, err)

err = os.Symlink(testBcacheBackingDevPath+"/bcache", testBcacheUuidPath+"/bdev0")
err = os.Symlink(testBcacheBackingDevPath+"/bcache", testBcacheUUIDPath+"/bdev0")
require.NoError(t, err)

err = os.Symlink(testBcacheDevPath, testBcacheUuidPath+"/bdev0/dev")
err = os.Symlink(testBcacheDevPath, testBcacheUUIDPath+"/bdev0/dev")
require.NoError(t, err)

err = os.MkdirAll(testBcacheUuidPath+"/bdev0/stats_total", 0755)
err = os.MkdirAll(testBcacheUUIDPath+"/bdev0/stats_total", 0755)
require.NoError(t, err)

err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/dirty_data",
[]byte(dirty_data), 0644)
err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/dirty_data",
[]byte(dirtyData), 0644)
require.NoError(t, err)

err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/bypassed",
err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/bypassed",
[]byte(bypassed), 0644)
require.NoError(t, err)

err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_hits",
[]byte(cache_bypass_hits), 0644)
err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_bypass_hits",
[]byte(cacheBypassHits), 0644)
require.NoError(t, err)

err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_misses",
[]byte(cache_bypass_misses), 0644)
err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_bypass_misses",
[]byte(cacheBypassMisses), 0644)
require.NoError(t, err)

err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hit_ratio",
[]byte(cache_hit_ratio), 0644)
err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_hit_ratio",
[]byte(cacheHitRatio), 0644)
require.NoError(t, err)

err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hits",
[]byte(cache_hits), 0644)
err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_hits",
[]byte(cacheHits), 0644)
require.NoError(t, err)

err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_miss_collisions",
[]byte(cache_miss_collisions), 0644)
err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_miss_collisions",
[]byte(cacheMissCollisions), 0644)
require.NoError(t, err)

err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_misses",
[]byte(cache_misses), 0644)
err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_misses",
[]byte(cacheMisses), 0644)
require.NoError(t, err)

err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_readaheads",
[]byte(cache_readaheads), 0644)
err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_readaheads",
[]byte(cacheReadaheads), 0644)
require.NoError(t, err)

fields := map[string]interface{}{

@@ -128,7 +128,7 @@ func (b *Beanstalkd) gatherServerStats(connection *textproto.Conn, acc telegraf.
},
map[string]string{
"hostname": stats.Hostname,
"id": stats.Id,
"id": stats.ID,
"server": b.Server,
"version": stats.Version,
},

@@ -169,13 +169,13 @@ func (b *Beanstalkd) gatherTubeStats(connection *textproto.Conn, tube string, ac
}

func runQuery(connection *textproto.Conn, cmd string, result interface{}) error {
requestId, err := connection.Cmd(cmd)
requestID, err := connection.Cmd(cmd)
if err != nil {
return err
}

connection.StartResponse(requestId)
defer connection.EndResponse(requestId)
connection.StartResponse(requestID)
defer connection.EndResponse(requestID)

status, err := connection.ReadLine()
if err != nil {

@@ -240,7 +240,7 @@ type statsResponse struct {
CurrentWaiting int `yaml:"current-waiting"`
CurrentWorkers int `yaml:"current-workers"`
Hostname string `yaml:"hostname"`
Id string `yaml:"id"`
ID string `yaml:"id"`
JobTimeouts int `yaml:"job-timeouts"`
MaxJobSize int `yaml:"max-job-size"`
Pid int `yaml:"pid"`

@@ -170,16 +170,16 @@ func (beat *Beat) Gather(accumulator telegraf.Accumulator) error {
beatStats := &BeatStats{}
beatInfo := &BeatInfo{}

infoUrl, err := url.Parse(beat.URL + suffixInfo)
infoURL, err := url.Parse(beat.URL + suffixInfo)
if err != nil {
return err
}
statsUrl, err := url.Parse(beat.URL + suffixStats)
statsURL, err := url.Parse(beat.URL + suffixStats)
if err != nil {
return err
}

err = beat.gatherJSONData(infoUrl.String(), beatInfo)
err = beat.gatherJSONData(infoURL.String(), beatInfo)
if err != nil {
return err
}

@@ -191,7 +191,7 @@ func (beat *Beat) Gather(accumulator telegraf.Accumulator) error {
"beat_version": beatInfo.Version,
}

err = beat.gatherJSONData(statsUrl.String(), beatStats)
err = beat.gatherJSONData(statsURL.String(), beatStats)
if err != nil {
return err
}

@@ -65,7 +65,7 @@ func (b *Bind) Gather(acc telegraf.Accumulator) error {
wg.Add(1)
go func(addr *url.URL) {
defer wg.Done()
acc.AddError(b.gatherUrl(addr, acc))
acc.AddError(b.gatherURL(addr, acc))
}(addr)
}

@@ -73,7 +73,7 @@ func (b *Bind) Gather(acc telegraf.Accumulator) error {
return nil
}

func (b *Bind) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
func (b *Bind) gatherURL(addr *url.URL, acc telegraf.Accumulator) error {
switch addr.Path {
case "":
// BIND 9.6 - 9.8

@@ -31,7 +31,7 @@ type jsonMemory struct {
ContextSize int64
Lost int64
Contexts []struct {
Id string
ID string
Name string
Total int64
InUse int64

@@ -113,7 +113,7 @@ func (b *Bind) addStatsJSON(stats jsonStats, acc telegraf.Accumulator, urlTag st
// Detailed, per-context memory stats
if b.GatherMemoryContexts {
for _, c := range stats.Memory.Contexts {
tags := map[string]string{"url": urlTag, "id": c.Id, "name": c.Name, "source": host, "port": port}
tags := map[string]string{"url": urlTag, "id": c.ID, "name": c.Name, "source": host, "port": port}
fields := map[string]interface{}{"total": c.Total, "in_use": c.InUse}

acc.AddGauge("bind_memory_context", fields, tags)

@@ -153,9 +153,9 @@ func (b *Bind) readStatsJSON(addr *url.URL, acc telegraf.Accumulator) error {

// Progressively build up full jsonStats struct by parsing the individual HTTP responses
for _, suffix := range [...]string{"/server", "/net", "/mem"} {
scrapeUrl := addr.String() + suffix
scrapeURL := addr.String() + suffix

resp, err := b.client.Get(scrapeUrl)
resp, err := b.client.Get(scrapeURL)
if err != nil {
return err
}

@@ -163,7 +163,7 @@ func (b *Bind) readStatsJSON(addr *url.URL, acc telegraf.Accumulator) error {
defer resp.Body.Close()

if resp.StatusCode != http.StatusOK {
return fmt.Errorf("%s returned HTTP status: %s", scrapeUrl, resp.Status)
return fmt.Errorf("%s returned HTTP status: %s", scrapeURL, resp.Status)
}

if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {

@@ -42,7 +42,7 @@ type v2Statistics struct {
Memory struct {
Contexts []struct {
// Omitted nodes: references, maxinuse, blocksize, pools, hiwater, lowater
Id string `xml:"id"`
ID string `xml:"id"`
Name string `xml:"name"`
Total int64 `xml:"total"`
InUse int64 `xml:"inuse"`

@@ -142,7 +142,7 @@ func (b *Bind) readStatsXMLv2(addr *url.URL, acc telegraf.Accumulator) error {
// Detailed, per-context memory stats
if b.GatherMemoryContexts {
for _, c := range stats.Statistics.Memory.Contexts {
tags := map[string]string{"url": addr.Host, "id": c.Id, "name": c.Name, "source": host, "port": port}
tags := map[string]string{"url": addr.Host, "id": c.ID, "name": c.Name, "source": host, "port": port}
fields := map[string]interface{}{"total": c.Total, "in_use": c.InUse}

acc.AddGauge("bind_memory_context", fields, tags)

@@ -25,7 +25,7 @@ type v3Stats struct {
type v3Memory struct {
Contexts []struct {
// Omitted nodes: references, maxinuse, blocksize, pools, hiwater, lowater
Id string `xml:"id"`
ID string `xml:"id"`
Name string `xml:"name"`
Total int64 `xml:"total"`
InUse int64 `xml:"inuse"`

@@ -98,7 +98,7 @@ func (b *Bind) addStatsXMLv3(stats v3Stats, acc telegraf.Accumulator, hostPort s
// Detailed, per-context memory stats
if b.GatherMemoryContexts {
for _, c := range stats.Memory.Contexts {
tags := map[string]string{"url": hostPort, "source": host, "port": port, "id": c.Id, "name": c.Name}
tags := map[string]string{"url": hostPort, "source": host, "port": port, "id": c.ID, "name": c.Name}
fields := map[string]interface{}{"total": c.Total, "in_use": c.InUse}

acc.AddGauge("bind_memory_context", fields, tags)

@@ -138,9 +138,9 @@ func (b *Bind) readStatsXMLv3(addr *url.URL, acc telegraf.Accumulator) error {

// Progressively build up full v3Stats struct by parsing the individual HTTP responses
for _, suffix := range [...]string{"/server", "/net", "/mem"} {
scrapeUrl := addr.String() + suffix
scrapeURL := addr.String() + suffix

resp, err := b.client.Get(scrapeUrl)
resp, err := b.client.Get(scrapeURL)
if err != nil {
return err
}

@@ -148,7 +148,7 @@ func (b *Bind) readStatsXMLv3(addr *url.URL, acc telegraf.Accumulator) error {
defer resp.Body.Close()

if resp.StatusCode != http.StatusOK {
return fmt.Errorf("%s returned HTTP status: %s", scrapeUrl, resp.Status)
return fmt.Errorf("%s returned HTTP status: %s", scrapeURL, resp.Status)
}

if err := xml.NewDecoder(resp.Body).Decode(&stats); err != nil {

@ -197,9 +197,9 @@ func (c *Cassandra) Description() string {
|
|||
return "Read Cassandra metrics through Jolokia"
|
||||
}
|
||||
|
||||
func (c *Cassandra) getAttr(requestUrl *url.URL) (map[string]interface{}, error) {
|
||||
func (c *Cassandra) getAttr(requestURL *url.URL) (map[string]interface{}, error) {
|
||||
// Create + send request
|
||||
req, err := http.NewRequest("GET", requestUrl.String(), nil)
|
||||
req, err := http.NewRequest("GET", requestURL.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -213,7 +213,7 @@ func (c *Cassandra) getAttr(requestUrl *url.URL) (map[string]interface{}, error)
|
|||
// Process response
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
err = fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)",
|
||||
requestUrl,
|
||||
requestURL,
|
||||
resp.StatusCode,
|
||||
http.StatusText(resp.StatusCode),
|
||||
http.StatusOK,
|
||||
|
|
@ -292,24 +292,24 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
|
|||
}
|
||||
|
||||
// Prepare URL
|
||||
requestUrl, err := url.Parse("http://" + serverTokens["host"] + ":" +
|
||||
requestURL, err := url.Parse("http://" + serverTokens["host"] + ":" +
|
||||
serverTokens["port"] + context + metric)
|
||||
if err != nil {
|
||||
acc.AddError(err)
|
||||
continue
|
||||
}
|
||||
if serverTokens["user"] != "" && serverTokens["passwd"] != "" {
|
||||
requestUrl.User = url.UserPassword(serverTokens["user"],
|
||||
requestURL.User = url.UserPassword(serverTokens["user"],
|
||||
serverTokens["passwd"])
|
||||
}
|
||||
|
||||
out, err := c.getAttr(requestUrl)
|
||||
out, err := c.getAttr(requestURL)
|
||||
if err != nil {
|
||||
acc.AddError(err)
|
||||
continue
|
||||
}
|
||||
if out["status"] != 200.0 {
|
||||
acc.AddError(fmt.Errorf("provided URL returned with status %v - %s", out["status"], requestUrl))
|
||||
acc.AddError(fmt.Errorf("provided URL returned with status %v - %s", out["status"], requestURL))
|
||||
continue
|
||||
}
|
||||
m.addTagsFields(out)
|
||||
|
|
|
|||
|
|
@ -120,7 +120,7 @@ func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error {
|
|||
for tag, metrics := range data {
|
||||
acc.AddFields(measurement,
|
||||
map[string]interface{}(metrics),
|
||||
map[string]string{"type": s.sockType, "id": s.sockId, "collection": tag})
|
||||
map[string]string{"type": s.sockType, "id": s.sockID, "collection": tag})
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
|
@ -226,13 +226,13 @@ var findSockets = func(c *Ceph) ([]*socket, error) {
|
|||
|
||||
if sockType == typeOsd || sockType == typeMon || sockType == typeMds || sockType == typeRgw {
|
||||
path := filepath.Join(c.SocketDir, f)
|
||||
sockets = append(sockets, &socket{parseSockId(f, sockPrefix, c.SocketSuffix), sockType, path})
|
||||
sockets = append(sockets, &socket{parseSockID(f, sockPrefix, c.SocketSuffix), sockType, path})
|
||||
}
|
||||
}
|
||||
return sockets, nil
|
||||
}
|
||||
|
||||
func parseSockId(fname, prefix, suffix string) string {
|
||||
func parseSockID(fname, prefix, suffix string) string {
|
||||
s := fname
|
||||
s = strings.TrimPrefix(s, prefix)
|
||||
s = strings.TrimSuffix(s, suffix)
|
||||
|
|
@ -241,7 +241,7 @@ func parseSockId(fname, prefix, suffix string) string {
|
|||
}
|
||||
|
||||
type socket struct {
|
||||
sockId string
|
||||
sockID string
|
||||
sockType string
|
||||
socket string
|
||||
}
|
||||
|
|
|
|||
|
|
@ -24,7 +24,7 @@ type expectedResult struct {
|
|||
}
|
||||
|
||||
func TestParseSockId(t *testing.T) {
|
||||
s := parseSockId(sockFile(osdPrefix, 1), osdPrefix, sockSuffix)
|
||||
s := parseSockID(sockFile(osdPrefix, 1), osdPrefix, sockSuffix)
|
||||
assert.Equal(t, s, "1")
|
||||
}
|
||||
|
||||
|
|
@ -170,7 +170,7 @@ func assertFoundSocket(t *testing.T, dir, sockType string, i int, sockets []*soc
|
|||
if s.socket == expected {
|
||||
found = true
|
||||
assert.Equal(t, s.sockType, sockType, "Unexpected socket type for '%s'", s)
|
||||
assert.Equal(t, s.sockId, strconv.Itoa(i))
|
||||
assert.Equal(t, s.sockID, strconv.Itoa(i))
|
||||
}
|
||||
}
|
||||
assert.True(t, found, "Did not find socket: %s", expected)
|
||||
|
|
|
|||
|
|
@ -269,12 +269,12 @@ func (ps *PubSub) getPubSubClient() (*pubsub.Client, error) {
|
|||
return client, nil
|
||||
}
|
||||
|
||||
func (ps *PubSub) getGCPSubscription(subId string) (subscription, error) {
|
||||
func (ps *PubSub) getGCPSubscription(subID string) (subscription, error) {
|
||||
client, err := ps.getPubSubClient()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s := client.Subscription(subId)
|
||||
s := client.Subscription(subID)
|
||||
s.ReceiveSettings = pubsub.ReceiveSettings{
|
||||
NumGoroutines: ps.MaxReceiverGoRoutines,
|
||||
MaxExtension: ps.MaxExtension.Duration,
|
||||
|
|
|
|||
|
|
@ -16,12 +16,12 @@ const (
|
|||
|
||||
// Test ingesting InfluxDB-format PubSub message
|
||||
func TestRunParse(t *testing.T) {
|
||||
subId := "sub-run-parse"
|
||||
subID := "sub-run-parse"
|
||||
|
||||
testParser, _ := parsers.NewInfluxParser()
|
||||
|
||||
sub := &stubSub{
|
||||
id: subId,
|
||||
id: subID,
|
||||
messages: make(chan *testMsg, 100),
|
||||
}
|
||||
sub.receiver = testMessagesReceive(sub)
|
||||
|
|
@ -31,7 +31,7 @@ func TestRunParse(t *testing.T) {
|
|||
parser: testParser,
|
||||
stubSub: func() subscription { return sub },
|
||||
Project: "projectIDontMatterForTests",
|
||||
Subscription: subId,
|
||||
Subscription: subID,
|
||||
MaxUndeliveredMessages: defaultMaxUndeliveredMessages,
|
||||
}
|
||||
|
||||
|
|
@ -60,12 +60,12 @@ func TestRunParse(t *testing.T) {
|
|||
|
||||
// Test ingesting InfluxDB-format PubSub message
|
||||
func TestRunBase64(t *testing.T) {
|
||||
subId := "sub-run-base64"
|
||||
subID := "sub-run-base64"
|
||||
|
||||
testParser, _ := parsers.NewInfluxParser()
|
||||
|
||||
sub := &stubSub{
|
||||
id: subId,
|
||||
id: subID,
|
||||
messages: make(chan *testMsg, 100),
|
||||
}
|
||||
sub.receiver = testMessagesReceive(sub)
|
||||
|
|
@ -75,7 +75,7 @@ func TestRunBase64(t *testing.T) {
|
|||
parser: testParser,
|
||||
stubSub: func() subscription { return sub },
|
||||
Project: "projectIDontMatterForTests",
|
||||
Subscription: subId,
|
||||
Subscription: subID,
|
||||
MaxUndeliveredMessages: defaultMaxUndeliveredMessages,
|
||||
Base64Data: true,
|
||||
}
|
||||
|
|
@ -104,12 +104,12 @@ func TestRunBase64(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRunInvalidMessages(t *testing.T) {
|
||||
subId := "sub-invalid-messages"
|
||||
subID := "sub-invalid-messages"
|
||||
|
||||
testParser, _ := parsers.NewInfluxParser()
|
||||
|
||||
sub := &stubSub{
|
||||
id: subId,
|
||||
id: subID,
|
||||
messages: make(chan *testMsg, 100),
|
||||
}
|
||||
sub.receiver = testMessagesReceive(sub)
|
||||
|
|
@ -119,7 +119,7 @@ func TestRunInvalidMessages(t *testing.T) {
|
|||
parser: testParser,
|
||||
stubSub: func() subscription { return sub },
|
||||
Project: "projectIDontMatterForTests",
|
||||
Subscription: subId,
|
||||
Subscription: subID,
|
||||
MaxUndeliveredMessages: defaultMaxUndeliveredMessages,
|
||||
}
|
||||
|
||||
|
|
@ -149,14 +149,14 @@ func TestRunInvalidMessages(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRunOverlongMessages(t *testing.T) {
|
||||
subId := "sub-message-too-long"
|
||||
subID := "sub-message-too-long"
|
||||
|
||||
acc := &testutil.Accumulator{}
|
||||
|
||||
testParser, _ := parsers.NewInfluxParser()
|
||||
|
||||
sub := &stubSub{
|
||||
id: subId,
|
||||
id: subID,
|
||||
messages: make(chan *testMsg, 100),
|
||||
}
|
||||
sub.receiver = testMessagesReceive(sub)
|
||||
|
|
@ -166,7 +166,7 @@ func TestRunOverlongMessages(t *testing.T) {
|
|||
parser: testParser,
|
||||
stubSub: func() subscription { return sub },
|
||||
Project: "projectIDontMatterForTests",
|
||||
Subscription: subId,
|
||||
Subscription: subID,
|
||||
MaxUndeliveredMessages: defaultMaxUndeliveredMessages,
|
||||
// Add MaxMessageLen Param
|
||||
MaxMessageLen: 1,
|
||||
|
|
@ -196,14 +196,14 @@ func TestRunOverlongMessages(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRunErrorInSubscriber(t *testing.T) {
|
||||
subId := "sub-unexpected-error"
|
||||
subID := "sub-unexpected-error"
|
||||
|
||||
acc := &testutil.Accumulator{}
|
||||
|
||||
testParser, _ := parsers.NewInfluxParser()
|
||||
|
||||
sub := &stubSub{
|
||||
id: subId,
|
||||
id: subID,
|
||||
messages: make(chan *testMsg, 100),
|
||||
}
|
||||
fakeErrStr := "a fake error"
|
||||
|
|
@ -214,7 +214,7 @@ func TestRunErrorInSubscriber(t *testing.T) {
|
|||
parser: testParser,
|
||||
stubSub: func() subscription { return sub },
|
||||
Project: "projectIDontMatterForTests",
|
||||
Subscription: subId,
|
||||
Subscription: subID,
|
||||
MaxUndeliveredMessages: defaultMaxUndeliveredMessages,
|
||||
RetryReceiveDelaySeconds: 1,
|
||||
}
|
||||
|
|
|
|||
|
|
@ -59,8 +59,8 @@ func (c *CPUStats) Gather(acc telegraf.Accumulator) error {
|
|||
"cpu": cts.CPU,
|
||||
}
|
||||
|
||||
total := totalCpuTime(cts)
|
||||
active := activeCpuTime(cts)
|
||||
total := totalCPUTime(cts)
|
||||
active := activeCPUTime(cts)
|
||||
|
||||
if c.CollectCPUTime {
|
||||
// Add cpu time metrics
|
||||
|
|
@ -77,7 +77,7 @@ func (c *CPUStats) Gather(acc telegraf.Accumulator) error {
|
|||
"time_guest_nice": cts.GuestNice,
|
||||
}
|
||||
if c.ReportActive {
|
||||
fieldsC["time_active"] = activeCpuTime(cts)
|
||||
fieldsC["time_active"] = activeCPUTime(cts)
|
||||
}
|
||||
acc.AddCounter("cpu", fieldsC, tags, now)
|
||||
}
|
||||
|
|
@ -92,8 +92,8 @@ func (c *CPUStats) Gather(acc telegraf.Accumulator) error {
|
|||
if !ok {
|
||||
continue
|
||||
}
|
||||
lastTotal := totalCpuTime(lastCts)
|
||||
lastActive := activeCpuTime(lastCts)
|
||||
lastTotal := totalCPUTime(lastCts)
|
||||
lastActive := activeCPUTime(lastCts)
|
||||
totalDelta := total - lastTotal
|
||||
|
||||
if totalDelta < 0 {
|
||||
|
|
@ -131,14 +131,13 @@ func (c *CPUStats) Gather(acc telegraf.Accumulator) error {
|
|||
return err
|
||||
}
|
||||
|
||||
func totalCpuTime(t cpu.TimesStat) float64 {
|
||||
total := t.User + t.System + t.Nice + t.Iowait + t.Irq + t.Softirq + t.Steal +
|
||||
t.Idle
|
||||
func totalCPUTime(t cpu.TimesStat) float64 {
|
||||
total := t.User + t.System + t.Nice + t.Iowait + t.Irq + t.Softirq + t.Steal + t.Idle
|
||||
return total
|
||||
}
|
||||
|
||||
func activeCpuTime(t cpu.TimesStat) float64 {
|
||||
active := totalCpuTime(t) - t.Idle
|
||||
func activeCPUTime(t cpu.TimesStat) float64 {
|
||||
active := totalCPUTime(t) - t.Idle
|
||||
return active
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ const (
|
|||
Error = 2
|
||||
)
|
||||
|
||||
type DnsQuery struct {
|
||||
type DNSQuery struct {
|
||||
// Domains or subdomains to query
|
||||
Domains []string
|
||||
|
||||
|
|
@ -62,14 +62,14 @@ var sampleConfig = `
|
|||
# timeout = 2
|
||||
`
|
||||
|
||||
func (d *DnsQuery) SampleConfig() string {
|
||||
func (d *DNSQuery) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (d *DnsQuery) Description() string {
|
||||
func (d *DNSQuery) Description() string {
|
||||
return "Query given DNS server and gives statistics"
|
||||
}
|
||||
func (d *DnsQuery) Gather(acc telegraf.Accumulator) error {
|
||||
func (d *DNSQuery) Gather(acc telegraf.Accumulator) error {
|
||||
var wg sync.WaitGroup
|
||||
d.setDefaultValues()
|
||||
|
||||
|
|
@ -84,7 +84,7 @@ func (d *DnsQuery) Gather(acc telegraf.Accumulator) error {
|
|||
"record_type": d.RecordType,
|
||||
}
|
||||
|
||||
dnsQueryTime, rcode, err := d.getDnsQueryTime(domain, server)
|
||||
dnsQueryTime, rcode, err := d.getDNSQueryTime(domain, server)
|
||||
if rcode >= 0 {
|
||||
tags["rcode"] = dns.RcodeToString[rcode]
|
||||
fields["rcode_value"] = rcode
|
||||
|
|
@ -110,7 +110,7 @@ func (d *DnsQuery) Gather(acc telegraf.Accumulator) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (d *DnsQuery) setDefaultValues() {
|
||||
func (d *DNSQuery) setDefaultValues() {
|
||||
if d.Network == "" {
|
||||
d.Network = "udp"
|
||||
}
|
||||
|
|
@ -133,7 +133,7 @@ func (d *DnsQuery) setDefaultValues() {
|
|||
}
|
||||
}
|
||||
|
||||
func (d *DnsQuery) getDnsQueryTime(domain string, server string) (float64, int, error) {
|
||||
func (d *DNSQuery) getDNSQueryTime(domain string, server string) (float64, int, error) {
|
||||
dnsQueryTime := float64(0)
|
||||
|
||||
c := new(dns.Client)
|
||||
|
|
@ -159,7 +159,7 @@ func (d *DnsQuery) getDnsQueryTime(domain string, server string) (float64, int,
|
|||
return dnsQueryTime, r.Rcode, nil
|
||||
}
|
||||
|
||||
func (d *DnsQuery) parseRecordType() (uint16, error) {
|
||||
func (d *DNSQuery) parseRecordType() (uint16, error) {
|
||||
var recordType uint16
|
||||
var err error
|
||||
|
||||
|
|
@ -210,6 +210,6 @@ func setResult(result ResultType, fields map[string]interface{}, tags map[string
|
|||
|
||||
func init() {
|
||||
inputs.Add("dns_query", func() telegraf.Input {
|
||||
return &DnsQuery{}
|
||||
return &DNSQuery{}
|
||||
})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ func TestGathering(t *testing.T) {
|
|||
if testing.Short() {
|
||||
t.Skip("Skipping network-dependent test in short mode.")
|
||||
}
|
||||
var dnsConfig = DnsQuery{
|
||||
var dnsConfig = DNSQuery{
|
||||
Servers: servers,
|
||||
Domains: domains,
|
||||
}
|
||||
|
|
@ -37,7 +37,7 @@ func TestGatheringMxRecord(t *testing.T) {
|
|||
if testing.Short() {
|
||||
t.Skip("Skipping network-dependent test in short mode.")
|
||||
}
|
||||
var dnsConfig = DnsQuery{
|
||||
var dnsConfig = DNSQuery{
|
||||
Servers: servers,
|
||||
Domains: domains,
|
||||
}
|
||||
|
|
@ -57,7 +57,7 @@ func TestGatheringRootDomain(t *testing.T) {
|
|||
if testing.Short() {
|
||||
t.Skip("Skipping network-dependent test in short mode.")
|
||||
}
|
||||
var dnsConfig = DnsQuery{
|
||||
var dnsConfig = DNSQuery{
|
||||
Servers: servers,
|
||||
Domains: []string{"."},
|
||||
RecordType: "MX",
|
||||
|
|
@ -89,7 +89,7 @@ func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) {
|
|||
if testing.Short() {
|
||||
t.Skip("Skipping network-dependent test in short mode.")
|
||||
}
|
||||
var dnsConfig = DnsQuery{
|
||||
var dnsConfig = DNSQuery{
|
||||
Servers: servers,
|
||||
Domains: domains,
|
||||
}
|
||||
|
|
@ -120,7 +120,7 @@ func TestGatheringTimeout(t *testing.T) {
|
|||
if testing.Short() {
|
||||
t.Skip("Skipping network-dependent test in short mode.")
|
||||
}
|
||||
var dnsConfig = DnsQuery{
|
||||
var dnsConfig = DNSQuery{
|
||||
Servers: servers,
|
||||
Domains: domains,
|
||||
}
|
||||
|
|
@ -141,7 +141,7 @@ func TestGatheringTimeout(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestSettingDefaultValues(t *testing.T) {
|
||||
dnsConfig := DnsQuery{}
|
||||
dnsConfig := DNSQuery{}
|
||||
|
||||
dnsConfig.setDefaultValues()
|
||||
|
||||
|
|
@ -150,7 +150,7 @@ func TestSettingDefaultValues(t *testing.T) {
|
|||
assert.Equal(t, 53, dnsConfig.Port, "Default port number not equal 53")
|
||||
assert.Equal(t, 2, dnsConfig.Timeout, "Default timeout not equal 2")
|
||||
|
||||
dnsConfig = DnsQuery{Domains: []string{"."}}
|
||||
dnsConfig = DNSQuery{Domains: []string{"."}}
|
||||
|
||||
dnsConfig.setDefaultValues()
|
||||
|
||||
|
|
@ -158,7 +158,7 @@ func TestSettingDefaultValues(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRecordTypeParser(t *testing.T) {
|
||||
var dnsConfig = DnsQuery{}
|
||||
var dnsConfig = DNSQuery{}
|
||||
var recordType uint16
|
||||
|
||||
dnsConfig.RecordType = "A"
|
||||
|
|
@ -207,7 +207,7 @@ func TestRecordTypeParser(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRecordTypeParserError(t *testing.T) {
|
||||
var dnsConfig = DnsQuery{}
|
||||
var dnsConfig = DNSQuery{}
|
||||
var err error
|
||||
|
||||
dnsConfig.RecordType = "nil"
|
||||
|
|
|
|||
|
|
@ -63,7 +63,7 @@ func TestDovecotIntegration(t *testing.T) {
|
|||
|
||||
// Test type=ip
|
||||
tags = map[string]string{"server": "dovecot.test", "type": "ip", "ip": "192.168.0.100"}
|
||||
buf = bytes.NewBufferString(sampleIp)
|
||||
buf = bytes.NewBufferString(sampleIP)
|
||||
|
||||
err = gatherStats(buf, &acc, "dovecot.test", "ip")
|
||||
require.NoError(t, err)
|
||||
|
|
@ -112,7 +112,7 @@ const sampleGlobal = `reset_timestamp last_update num_logins num_cmds num_connec
|
|||
const sampleDomain = `domain reset_timestamp last_update num_logins num_cmds num_connected_sessions user_cpu sys_cpu clock_time min_faults maj_faults vol_cs invol_cs disk_input disk_output read_count read_bytes write_count write_bytes mail_lookup_path mail_lookup_attr mail_read_count mail_read_bytes mail_cache_hits
|
||||
domain.test 1453969886 1454603963.039864 7503897 52595715 1204 100831175.372000 83849071.112000 4326001931528183.495762 763950011 1112443 4120386897 3685239306 41679480946688 1819070669176832 2368906465 2957928122981169 3545389615 1666822498251286 24396105 302845 20155768 669946617705 1557255080`
|
||||
|
||||
const sampleIp = `ip reset_timestamp last_update num_logins num_cmds num_connected_sessions user_cpu sys_cpu clock_time min_faults maj_faults vol_cs invol_cs disk_input disk_output read_count read_bytes write_count write_bytes mail_lookup_path mail_lookup_attr mail_read_count mail_read_bytes mail_cache_hits
|
||||
const sampleIP = `ip reset_timestamp last_update num_logins num_cmds num_connected_sessions user_cpu sys_cpu clock_time min_faults maj_faults vol_cs invol_cs disk_input disk_output read_count read_bytes write_count write_bytes mail_lookup_path mail_lookup_attr mail_read_count mail_read_bytes mail_cache_hits
|
||||
192.168.0.100 1453969886 1454603963.039864 7503897 52595715 1204 100831175.372000 83849071.112000 4326001931528183.495762 763950011 1112443 4120386897 3685239306 41679480946688 1819070669176832 2368906465 2957928122981169 3545389615 1666822498251286 24396105 302845 20155768 669946617705 1557255080`
|
||||
|
||||
const sampleUser = `user reset_timestamp last_update num_logins num_cmds user_cpu sys_cpu clock_time min_faults maj_faults vol_cs invol_cs disk_input disk_output read_count read_bytes write_count write_bytes mail_lookup_path mail_lookup_attr mail_read_count mail_read_bytes mail_cache_hits
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ import (
|
|||
// Midnight 9/22/2015
|
||||
const baseTimeSeconds = 1442905200
|
||||
|
||||
const validJson = `
|
||||
const validJSON = `
|
||||
{
|
||||
"status": "green",
|
||||
"num_processes": 82,
|
||||
|
|
@ -35,7 +35,7 @@ const validJson = `
|
|||
"users": [0, 1, 2, 3]
|
||||
}`
|
||||
|
||||
const malformedJson = `
|
||||
const malformedJSON = `
|
||||
{
|
||||
"status": "green",
|
||||
`
|
||||
|
|
@ -102,7 +102,7 @@ func TestExec(t *testing.T) {
|
|||
})
|
||||
e := &Exec{
|
||||
Log: testutil.Logger{},
|
||||
runner: newRunnerMock([]byte(validJson), nil, nil),
|
||||
runner: newRunnerMock([]byte(validJSON), nil, nil),
|
||||
Commands: []string{"testcommand arg1"},
|
||||
parser: parser,
|
||||
}
|
||||
|
|
@ -132,7 +132,7 @@ func TestExecMalformed(t *testing.T) {
|
|||
})
|
||||
e := &Exec{
|
||||
Log: testutil.Logger{},
|
||||
runner: newRunnerMock([]byte(malformedJson), nil, nil),
|
||||
runner: newRunnerMock([]byte(malformedJSON), nil, nil),
|
||||
Commands: []string{"badcommand arg1"},
|
||||
parser: parser,
|
||||
}
|
||||
|
|
|
|||
|
|
@ -178,16 +178,16 @@ func (h *GrayLog) gatherServer(
|
|||
if err := json.Unmarshal([]byte(resp), &dat); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, m_item := range dat.Metrics {
|
||||
for _, mItem := range dat.Metrics {
|
||||
fields := make(map[string]interface{})
|
||||
tags := map[string]string{
|
||||
"server": host,
|
||||
"port": port,
|
||||
"name": m_item.Name,
|
||||
"type": m_item.Type,
|
||||
"name": mItem.Name,
|
||||
"type": mItem.Type,
|
||||
}
|
||||
h.flatten(m_item.Fields, fields, "")
|
||||
acc.AddFields(m_item.FullName, fields, tags)
|
||||
h.flatten(mItem.Fields, fields, "")
|
||||
acc.AddFields(mItem.FullName, fields, tags)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -241,12 +241,12 @@ func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) {
|
|||
|
||||
if strings.Contains(requestURL.String(), "multiple") {
|
||||
m := &Messagebody{Metrics: h.Metrics}
|
||||
http_body, err := json.Marshal(m)
|
||||
httpBody, err := json.Marshal(m)
|
||||
if err != nil {
|
||||
return "", -1, fmt.Errorf("Invalid list of Metrics %s", h.Metrics)
|
||||
}
|
||||
method = "POST"
|
||||
content = bytes.NewBuffer(http_body)
|
||||
content = bytes.NewBuffer(httpBody)
|
||||
}
|
||||
req, err := http.NewRequest(method, requestURL.String(), content)
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -146,11 +146,11 @@ func (h *HTTPResponse) SampleConfig() string {
|
|||
var ErrRedirectAttempted = errors.New("redirect")
|
||||
|
||||
// Set the proxy. A configured proxy overwrites the system wide proxy.
|
||||
func getProxyFunc(http_proxy string) func(*http.Request) (*url.URL, error) {
|
||||
if http_proxy == "" {
|
||||
func getProxyFunc(httpProxy string) func(*http.Request) (*url.URL, error) {
|
||||
if httpProxy == "" {
|
||||
return http.ProxyFromEnvironment
|
||||
}
|
||||
proxyURL, err := url.Parse(http_proxy)
|
||||
proxyURL, err := url.Parse(httpProxy)
|
||||
if err != nil {
|
||||
return func(_ *http.Request) (*url.URL, error) {
|
||||
return nil, errors.New("bad proxy: " + err.Error())
|
||||
|
|
@ -161,9 +161,9 @@ func getProxyFunc(http_proxy string) func(*http.Request) (*url.URL, error) {
|
|||
}
|
||||
}
|
||||
|
||||
// createHttpClient creates an http client which will timeout at the specified
|
||||
// createHTTPClient creates an http client which will timeout at the specified
|
||||
// timeout period and can follow redirects if specified
|
||||
func (h *HTTPResponse) createHttpClient() (*http.Client, error) {
|
||||
func (h *HTTPResponse) createHTTPClient() (*http.Client, error) {
|
||||
tlsCfg, err := h.ClientConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -217,8 +217,8 @@ func localAddress(interfaceName string) (net.Addr, error) {
|
|||
return nil, fmt.Errorf("cannot create local address for interface %q", interfaceName)
|
||||
}
|
||||
|
||||
func setResult(result_string string, fields map[string]interface{}, tags map[string]string) {
|
||||
result_codes := map[string]int{
|
||||
func setResult(resultString string, fields map[string]interface{}, tags map[string]string) {
|
||||
resultCodes := map[string]int{
|
||||
"success": 0,
|
||||
"response_string_mismatch": 1,
|
||||
"body_read_error": 2,
|
||||
|
|
@ -228,9 +228,9 @@ func setResult(result_string string, fields map[string]interface{}, tags map[str
|
|||
"response_status_code_mismatch": 6,
|
||||
}
|
||||
|
||||
tags["result"] = result_string
|
||||
fields["result_type"] = result_string
|
||||
fields["result_code"] = result_codes[result_string]
|
||||
tags["result"] = resultString
|
||||
fields["result_type"] = resultString
|
||||
fields["result_code"] = resultCodes[resultString]
|
||||
}
|
||||
|
||||
func setError(err error, fields map[string]interface{}, tags map[string]string) error {
|
||||
|
|
@ -239,8 +239,8 @@ func setError(err error, fields map[string]interface{}, tags map[string]string)
|
|||
return timeoutError
|
||||
}
|
||||
|
||||
urlErr, isUrlErr := err.(*url.Error)
|
||||
if !isUrlErr {
|
||||
urlErr, isURLErr := err.(*url.Error)
|
||||
if !isURLErr {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -299,7 +299,7 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string]
|
|||
// Start Timer
|
||||
start := time.Now()
|
||||
resp, err := h.client.Do(request)
|
||||
response_time := time.Since(start).Seconds()
|
||||
responseTime := time.Since(start).Seconds()
|
||||
|
||||
// If an error in returned, it means we are dealing with a network error, as
|
||||
// HTTP error codes do not generate errors in the net/http library
|
||||
|
|
@ -321,7 +321,7 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string]
|
|||
}
|
||||
|
||||
if _, ok := fields["response_time"]; !ok {
|
||||
fields["response_time"] = response_time
|
||||
fields["response_time"] = responseTime
|
||||
}
|
||||
|
||||
// This function closes the response body, as
|
||||
|
|
@ -396,8 +396,8 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string]
|
|||
}
|
||||
|
||||
// Set result in case of a body read error
|
||||
func (h *HTTPResponse) setBodyReadError(error_msg string, bodyBytes []byte, fields map[string]interface{}, tags map[string]string) {
|
||||
h.Log.Debugf(error_msg)
|
||||
func (h *HTTPResponse) setBodyReadError(errorMsg string, bodyBytes []byte, fields map[string]interface{}, tags map[string]string) {
|
||||
h.Log.Debugf(errorMsg)
|
||||
setResult("body_read_error", fields, tags)
|
||||
fields["content_length"] = len(bodyBytes)
|
||||
if h.ResponseStringMatch != "" {
|
||||
|
|
@ -435,7 +435,7 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
|
|||
}
|
||||
|
||||
if h.client == nil {
|
||||
client, err := h.createHttpClient()
|
||||
client, err := h.createHTTPClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -21,8 +21,8 @@ var (
|
|||
utf8BOM = []byte("\xef\xbb\xbf")
|
||||
)
|
||||
|
||||
// HttpJson struct
|
||||
type HttpJson struct {
|
||||
// HTTPJSON struct
|
||||
type HTTPJSON struct {
|
||||
Name string
|
||||
Servers []string
|
||||
Method string
|
||||
|
|
@ -113,16 +113,16 @@ var sampleConfig = `
|
|||
# apiVersion = "v1"
|
||||
`
|
||||
|
||||
func (h *HttpJson) SampleConfig() string {
|
||||
func (h *HTTPJSON) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (h *HttpJson) Description() string {
|
||||
func (h *HTTPJSON) Description() string {
|
||||
return "Read flattened metrics from one or more JSON HTTP endpoints"
|
||||
}
|
||||
|
||||
// Gathers data for all servers.
|
||||
func (h *HttpJson) Gather(acc telegraf.Accumulator) error {
|
||||
func (h *HTTPJSON) Gather(acc telegraf.Accumulator) error {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
if h.client.HTTPClient() == nil {
|
||||
|
|
@ -162,7 +162,7 @@ func (h *HttpJson) Gather(acc telegraf.Accumulator) error {
|
|||
//
|
||||
// Returns:
|
||||
// error: Any error that may have occurred
|
||||
func (h *HttpJson) gatherServer(
|
||||
func (h *HTTPJSON) gatherServer(
|
||||
acc telegraf.Accumulator,
|
||||
serverURL string,
|
||||
) error {
|
||||
|
|
@ -171,11 +171,11 @@ func (h *HttpJson) gatherServer(
|
|||
return err
|
||||
}
|
||||
|
||||
var msrmnt_name string
|
||||
var msrmntName string
|
||||
if h.Name == "" {
|
||||
msrmnt_name = "httpjson"
|
||||
msrmntName = "httpjson"
|
||||
} else {
|
||||
msrmnt_name = "httpjson_" + h.Name
|
||||
msrmntName = "httpjson_" + h.Name
|
||||
}
|
||||
tags := map[string]string{
|
||||
"server": serverURL,
|
||||
|
|
@ -183,7 +183,7 @@ func (h *HttpJson) gatherServer(
|
|||
|
||||
parser, err := parsers.NewParser(&parsers.Config{
|
||||
DataFormat: "json",
|
||||
MetricName: msrmnt_name,
|
||||
MetricName: msrmntName,
|
||||
TagKeys: h.TagKeys,
|
||||
DefaultTags: tags,
|
||||
})
|
||||
|
|
@ -207,7 +207,7 @@ func (h *HttpJson) gatherServer(
|
|||
return nil
|
||||
}
|
||||
|
||||
// Sends an HTTP request to the server using the HttpJson object's HTTPClient.
|
||||
// Sends an HTTP request to the server using the HTTPJSON object's HTTPClient.
|
||||
// This request can be either a GET or a POST.
|
||||
// Parameters:
|
||||
// serverURL: endpoint to send request to
|
||||
|
|
@ -215,7 +215,7 @@ func (h *HttpJson) gatherServer(
|
|||
// Returns:
|
||||
// string: body of the response
|
||||
// error : Any error that may have occurred
|
||||
func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) {
|
||||
func (h *HTTPJSON) sendRequest(serverURL string) (string, float64, error) {
|
||||
// Prepare URL
|
||||
requestURL, err := url.Parse(serverURL)
|
||||
if err != nil {
|
||||
|
|
@ -285,7 +285,7 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) {
|
|||
|
||||
func init() {
|
||||
inputs.Add("httpjson", func() telegraf.Input {
|
||||
return &HttpJson{
|
||||
return &HTTPJSON{
|
||||
client: &RealHTTPClient{},
|
||||
ResponseTimeout: internal.Duration{
|
||||
Duration: 5 * time.Second,
|
||||
|
|
|
|||
|
|
@ -154,15 +154,15 @@ func (c *mockHTTPClient) HTTPClient() *http.Client {
|
|||
return nil
|
||||
}
|
||||
|
||||
// Generates a pointer to an HttpJson object that uses a mock HTTP client.
|
||||
// Generates a pointer to an HTTPJSON object that uses a mock HTTP client.
|
||||
// Parameters:
|
||||
// response : Body of the response that the mock HTTP client should return
|
||||
// statusCode: HTTP status code the mock HTTP client should return
|
||||
//
|
||||
// Returns:
|
||||
// *HttpJson: Pointer to an HttpJson object that uses the generated mock HTTP client
|
||||
func genMockHttpJson(response string, statusCode int) []*HttpJson {
|
||||
return []*HttpJson{
|
||||
// *HTTPJSON: Pointer to an HTTPJSON object that uses the generated mock HTTP client
|
||||
func genMockHTTPJSON(response string, statusCode int) []*HTTPJSON {
|
||||
return []*HTTPJSON{
|
||||
{
|
||||
client: &mockHTTPClient{responseBody: response, statusCode: statusCode},
|
||||
Servers: []string{
|
||||
|
|
@ -206,7 +206,7 @@ func genMockHttpJson(response string, statusCode int) []*HttpJson {
|
|||
|
||||
// Test that the proper values are ignored or collected
|
||||
func TestHttpJson200(t *testing.T) {
|
||||
httpjson := genMockHttpJson(validJSON, 200)
|
||||
httpjson := genMockHTTPJSON(validJSON, 200)
|
||||
|
||||
for _, service := range httpjson {
|
||||
var acc testutil.Accumulator
|
||||
|
|
@ -237,7 +237,7 @@ func TestHttpJsonGET_URL(t *testing.T) {
|
|||
}))
|
||||
defer ts.Close()
|
||||
|
||||
a := HttpJson{
|
||||
a := HTTPJSON{
|
||||
Servers: []string{ts.URL + "?api_key=mykey"},
|
||||
Name: "",
|
||||
Method: "GET",
|
||||
|
|
@ -309,7 +309,7 @@ func TestHttpJsonGET(t *testing.T) {
|
|||
}))
|
||||
defer ts.Close()
|
||||
|
||||
a := HttpJson{
|
||||
a := HTTPJSON{
|
||||
Servers: []string{ts.URL},
|
||||
Name: "",
|
||||
Method: "GET",
|
||||
|
|
@ -383,7 +383,7 @@ func TestHttpJsonPOST(t *testing.T) {
|
|||
}))
|
||||
defer ts.Close()
|
||||
|
||||
a := HttpJson{
|
||||
a := HTTPJSON{
|
||||
Servers: []string{ts.URL},
|
||||
Name: "",
|
||||
Method: "POST",
|
||||
|
|
@ -445,7 +445,7 @@ func TestHttpJsonPOST(t *testing.T) {
|
|||
|
||||
// Test response to HTTP 500
|
||||
func TestHttpJson500(t *testing.T) {
|
||||
httpjson := genMockHttpJson(validJSON, 500)
|
||||
httpjson := genMockHTTPJSON(validJSON, 500)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := acc.GatherError(httpjson[0].Gather)
|
||||
|
|
@ -456,7 +456,7 @@ func TestHttpJson500(t *testing.T) {
|
|||
|
||||
// Test response to HTTP 405
|
||||
func TestHttpJsonBadMethod(t *testing.T) {
|
||||
httpjson := genMockHttpJson(validJSON, 200)
|
||||
httpjson := genMockHTTPJSON(validJSON, 200)
|
||||
httpjson[0].Method = "NOT_A_REAL_METHOD"
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
|
@ -468,7 +468,7 @@ func TestHttpJsonBadMethod(t *testing.T) {
|
|||
|
||||
// Test response to malformed JSON
|
||||
func TestHttpJsonBadJson(t *testing.T) {
|
||||
httpjson := genMockHttpJson(invalidJSON, 200)
|
||||
httpjson := genMockHTTPJSON(invalidJSON, 200)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := acc.GatherError(httpjson[0].Gather)
|
||||
|
|
@ -479,7 +479,7 @@ func TestHttpJsonBadJson(t *testing.T) {
|
|||
|
||||
// Test response to empty string as response object
|
||||
func TestHttpJsonEmptyResponse(t *testing.T) {
|
||||
httpjson := genMockHttpJson(empty, 200)
|
||||
httpjson := genMockHTTPJSON(empty, 200)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := acc.GatherError(httpjson[0].Gather)
|
||||
|
|
@ -488,7 +488,7 @@ func TestHttpJsonEmptyResponse(t *testing.T) {
|
|||
|
||||
// Test that the proper values are ignored or collected
|
||||
func TestHttpJson200Tags(t *testing.T) {
|
||||
httpjson := genMockHttpJson(validJSONTags, 200)
|
||||
httpjson := genMockHTTPJSON(validJSONTags, 200)
|
||||
|
||||
for _, service := range httpjson {
|
||||
if service.Name == "other_webapp" {
|
||||
|
|
@ -526,7 +526,7 @@ const validJSONArrayTags = `
|
|||
|
||||
// Test that array data is collected correctly
|
||||
func TestHttpJsonArray200Tags(t *testing.T) {
|
||||
httpjson := genMockHttpJson(validJSONArrayTags, 200)
|
||||
httpjson := genMockHTTPJSON(validJSONArrayTags, 200)
|
||||
|
||||
for _, service := range httpjson {
|
||||
if service.Name == "other_webapp" {
|
||||
|
|
@ -563,7 +563,7 @@ var jsonBOM = []byte("\xef\xbb\xbf[{\"value\":17}]")
|
|||
|
||||
// TestHttpJsonBOM tests that UTF-8 JSON with a BOM can be parsed
|
||||
func TestHttpJsonBOM(t *testing.T) {
|
||||
httpjson := genMockHttpJson(string(jsonBOM), 200)
|
||||
httpjson := genMockHTTPJSON(string(jsonBOM), 200)
|
||||
|
||||
for _, service := range httpjson {
|
||||
if service.Name == "other_webapp" {
|
||||
|
|
|
|||
|
|
@ -115,7 +115,7 @@ func (i *Icinga2) GatherStatus(acc telegraf.Accumulator, checks []Object) {
|
|||
}
|
||||
}
|
||||
|
||||
func (i *Icinga2) createHttpClient() (*http.Client, error) {
|
||||
func (i *Icinga2) createHTTPClient() (*http.Client, error) {
|
||||
tlsCfg, err := i.ClientConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -137,22 +137,22 @@ func (i *Icinga2) Gather(acc telegraf.Accumulator) error {
|
|||
}
|
||||
|
||||
if i.client == nil {
|
||||
client, err := i.createHttpClient()
|
||||
client, err := i.createHTTPClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
i.client = client
|
||||
}
|
||||
|
||||
requestUrl := "%s/v1/objects/%s?attrs=name&attrs=display_name&attrs=state&attrs=check_command"
|
||||
requestURL := "%s/v1/objects/%s?attrs=name&attrs=display_name&attrs=state&attrs=check_command"
|
||||
|
||||
// Note: attrs=host_name is only valid for 'services' requests, using check.Attrs.HostName for the host
|
||||
// 'hosts' requests will need to use attrs=name only, using check.Attrs.Name for the host
|
||||
if i.ObjectType == "services" {
|
||||
requestUrl += "&attrs=host_name"
|
||||
requestURL += "&attrs=host_name"
|
||||
}
|
||||
|
||||
url := fmt.Sprintf(requestUrl, i.Server, i.ObjectType)
|
||||
url := fmt.Sprintf(requestURL, i.Server, i.ObjectType)
|
||||
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -38,7 +38,7 @@ func TestInfiniband(t *testing.T) {
|
|||
"port": "1",
|
||||
}
|
||||
|
||||
sample_rdmastats_entries := []rdmamap.RdmaStatEntry{
|
||||
sampleRdmastatsEntries := []rdmamap.RdmaStatEntry{
|
||||
{
|
||||
Name: "excessive_buffer_overrun_errors",
|
||||
Value: uint64(0),
|
||||
|
|
@ -127,7 +127,7 @@ func TestInfiniband(t *testing.T) {
|
|||
|
||||
var acc testutil.Accumulator
|
||||
|
||||
addStats("m1x5_0", "1", sample_rdmastats_entries, &acc)
|
||||
addStats("m1x5_0", "1", sampleRdmastatsEntries, &acc)
|
||||
|
||||
acc.AssertContainsTaggedFields(t, "infiniband", fields, tags)
|
||||
|
||||
|
|
|
|||
|
|
@ -23,8 +23,8 @@ func convertMicroWattToWatt(mW float64) float64 {
|
|||
return mW * microWattToWatt
|
||||
}
|
||||
|
||||
func convertKiloHertzToMegaHertz(kHz float64) float64 {
|
||||
return kHz * kiloHertzToMegaHertz
|
||||
func convertKiloHertzToMegaHertz(kiloHertz float64) float64 {
|
||||
return kiloHertz * kiloHertzToMegaHertz
|
||||
}
|
||||
|
||||
func convertNanoSecondsToSeconds(ns int64) float64 {
|
||||
|
|
|
|||
|
|
@ -13,7 +13,7 @@ import (
|
|||
)
|
||||
|
||||
type Interrupts struct {
|
||||
CpuAsTag bool `toml:"cpu_as_tag"`
|
||||
CPUAsTag bool `toml:"cpu_as_tag"`
|
||||
}
|
||||
|
||||
type IRQ struct {
|
||||
|
|
@ -121,7 +121,7 @@ func (s *Interrupts) Gather(acc telegraf.Accumulator) error {
|
|||
acc.AddError(fmt.Errorf("Parsing %s: %s", file, err))
|
||||
continue
|
||||
}
|
||||
reportMetrics(measurement, irqs, acc, s.CpuAsTag)
|
||||
reportMetrics(measurement, irqs, acc, s.CPUAsTag)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -13,13 +13,13 @@ import (
|
|||
// Setup and helper functions
|
||||
// =====================================================================================
|
||||
|
||||
func expectCpuAsTags(m *testutil.Accumulator, t *testing.T, measurement string, irq IRQ) {
|
||||
func expectCPUAsTags(m *testutil.Accumulator, t *testing.T, measurement string, irq IRQ) {
|
||||
for idx, value := range irq.Cpus {
|
||||
m.AssertContainsTaggedFields(t, measurement, map[string]interface{}{"count": value}, map[string]string{"irq": irq.ID, "type": irq.Type, "device": irq.Device, "cpu": fmt.Sprintf("cpu%d", idx)})
|
||||
}
|
||||
}
|
||||
|
||||
func expectCpuAsFields(m *testutil.Accumulator, t *testing.T, measurement string, irq IRQ) {
|
||||
func expectCPUAsFields(m *testutil.Accumulator, t *testing.T, measurement string, irq IRQ) {
|
||||
fields := map[string]interface{}{}
|
||||
total := int64(0)
|
||||
for idx, count := range irq.Cpus {
|
||||
|
|
@ -70,7 +70,7 @@ func TestCpuAsTagsSoftIrqs(t *testing.T) {
|
|||
reportMetrics("soft_interrupts", irqs, acc, true)
|
||||
|
||||
for _, irq := range softIrqsExpectedArgs {
|
||||
expectCpuAsTags(acc, t, "soft_interrupts", irq)
|
||||
expectCPUAsTags(acc, t, "soft_interrupts", irq)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -79,7 +79,7 @@ func TestCpuAsFieldsSoftIrqs(t *testing.T) {
|
|||
reportMetrics("soft_interrupts", irqs, acc, false)
|
||||
|
||||
for _, irq := range softIrqsExpectedArgs {
|
||||
expectCpuAsFields(acc, t, "soft_interrupts", irq)
|
||||
expectCPUAsFields(acc, t, "soft_interrupts", irq)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -142,7 +142,7 @@ func TestCpuAsTagsHwIrqs(t *testing.T) {
|
|||
reportMetrics("interrupts", irqs, acc, true)
|
||||
|
||||
for _, irq := range hwIrqsExpectedArgs {
|
||||
expectCpuAsTags(acc, t, "interrupts", irq)
|
||||
expectCPUAsTags(acc, t, "interrupts", irq)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@@ -151,6 +151,6 @@ func TestCpuAsFieldsHwIrqs(t *testing.T) {
 	reportMetrics("interrupts", irqs, acc, false)

 	for _, irq := range hwIrqsExpectedArgs {
-		expectCpuAsFields(acc, t, "interrupts", irq)
+		expectCPUAsFields(acc, t, "interrupts", irq)
 	}
 }
@@ -21,10 +21,10 @@ import (

 var (
 	execCommand = exec.Command // execCommand is used to mock commands in tests.
-	re_v1_parse_line = regexp.MustCompile(`^(?P<name>[^|]*)\|(?P<description>[^|]*)\|(?P<status_code>.*)`)
-	re_v2_parse_line = regexp.MustCompile(`^(?P<name>[^|]*)\|[^|]+\|(?P<status_code>[^|]*)\|(?P<entity_id>[^|]*)\|(?:(?P<description>[^|]+))?`)
-	re_v2_parse_description = regexp.MustCompile(`^(?P<analogValue>-?[0-9.]+)\s(?P<analogUnit>.*)|(?P<status>.+)|^$`)
-	re_v2_parse_unit = regexp.MustCompile(`^(?P<realAnalogUnit>[^,]+)(?:,\s*(?P<statusDesc>.*))?`)
+	reV1ParseLine = regexp.MustCompile(`^(?P<name>[^|]*)\|(?P<description>[^|]*)\|(?P<status_code>.*)`)
+	reV2ParseLine = regexp.MustCompile(`^(?P<name>[^|]*)\|[^|]+\|(?P<status_code>[^|]*)\|(?P<entity_id>[^|]*)\|(?:(?P<description>[^|]+))?`)
+	reV2ParseDescription = regexp.MustCompile(`^(?P<analogValue>-?[0-9.]+)\s(?P<analogUnit>.*)|(?P<status>.+)|^$`)
+	reV2ParseUnit = regexp.MustCompile(`^(?P<realAnalogUnit>[^,]+)(?:,\s*(?P<statusDesc>.*))?`)
 )

 // Ipmi stores the configuration values for the ipmi_sensor input plugin
@@ -176,12 +176,12 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error {
 	return parseV1(acc, hostname, out, timestamp)
 }

-func parseV1(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_at time.Time) error {
+func parseV1(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredAt time.Time) error {
 	// each line will look something like
 	// Planar VBAT | 3.05 Volts | ok
 	scanner := bufio.NewScanner(bytes.NewReader(cmdOut))
 	for scanner.Scan() {
-		ipmiFields := extractFieldsFromRegex(re_v1_parse_line, scanner.Text())
+		ipmiFields := extractFieldsFromRegex(reV1ParseLine, scanner.Text())
 		if len(ipmiFields) != 3 {
 			continue
 		}
@@ -227,20 +227,20 @@ func parseV1(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_
 			fields["value"] = 0.0
 		}

-		acc.AddFields("ipmi_sensor", fields, tags, measured_at)
+		acc.AddFields("ipmi_sensor", fields, tags, measuredAt)
 	}

 	return scanner.Err()
 }

-func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_at time.Time) error {
+func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredAt time.Time) error {
 	// each line will look something like
 	// CMOS Battery | 65h | ok | 7.1 |
 	// Temp | 0Eh | ok | 3.1 | 55 degrees C
 	// Drive 0 | A0h | ok | 7.1 | Drive Present
 	scanner := bufio.NewScanner(bytes.NewReader(cmdOut))
 	for scanner.Scan() {
-		ipmiFields := extractFieldsFromRegex(re_v2_parse_line, scanner.Text())
+		ipmiFields := extractFieldsFromRegex(reV2ParseLine, scanner.Text())
 		if len(ipmiFields) < 3 || len(ipmiFields) > 4 {
 			continue
 		}
@@ -256,7 +256,7 @@ func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_
 		tags["entity_id"] = transform(ipmiFields["entity_id"])
 		tags["status_code"] = trim(ipmiFields["status_code"])
 		fields := make(map[string]interface{})
-		descriptionResults := extractFieldsFromRegex(re_v2_parse_description, trim(ipmiFields["description"]))
+		descriptionResults := extractFieldsFromRegex(reV2ParseDescription, trim(ipmiFields["description"]))
 		// This is an analog value with a unit
 		if descriptionResults["analogValue"] != "" && len(descriptionResults["analogUnit"]) >= 1 {
 			var err error
@@ -265,7 +265,7 @@ func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_
 				continue
 			}
 			// Some implementations add an extra status to their analog units
-			unitResults := extractFieldsFromRegex(re_v2_parse_unit, descriptionResults["analogUnit"])
+			unitResults := extractFieldsFromRegex(reV2ParseUnit, descriptionResults["analogUnit"])
 			tags["unit"] = transform(unitResults["realAnalogUnit"])
 			if unitResults["statusDesc"] != "" {
 				tags["status_desc"] = transform(unitResults["statusDesc"])
@@ -281,7 +281,7 @@ func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_
 			}
 		}

-		acc.AddFields("ipmi_sensor", fields, tags, measured_at)
+		acc.AddFields("ipmi_sensor", fields, tags, measuredAt)
 	}

 	return scanner.Err()
@@ -611,8 +611,8 @@ Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected

 	for i := range tests {
 		t.Logf("Checking v%d data...", i+1)
-		extractFieldsFromRegex(re_v1_parse_line, tests[i])
-		extractFieldsFromRegex(re_v2_parse_line, tests[i])
+		extractFieldsFromRegex(reV1ParseLine, tests[i])
+		extractFieldsFromRegex(reV2ParseLine, tests[i])
 	}
 }

@@ -168,7 +168,7 @@ func (j *Jolokia) doRequest(req *http.Request) ([]map[string]interface{}, error)
 }

 func (j *Jolokia) prepareRequest(server Server, metrics []Metric) (*http.Request, error) {
-	var jolokiaUrl *url.URL
+	var jolokiaURL *url.URL
 	context := j.Context // Usually "/jolokia/"

 	var bulkBodyContent []map[string]interface{}
@@ -188,11 +188,11 @@ func (j *Jolokia) prepareRequest(server Server, metrics []Metric) (*http.Request

 		// Add target, only in proxy mode
 		if j.Mode == "proxy" {
-			serviceUrl := fmt.Sprintf("service:jmx:rmi:///jndi/rmi://%s:%s/jmxrmi",
+			serviceURL := fmt.Sprintf("service:jmx:rmi:///jndi/rmi://%s:%s/jmxrmi",
 				server.Host, server.Port)

 			target := map[string]string{
-				"url": serviceUrl,
+				"url": serviceURL,
 			}

 			if server.Username != "" {
@@ -208,26 +208,26 @@ func (j *Jolokia) prepareRequest(server Server, metrics []Metric) (*http.Request
 			proxy := j.Proxy

 			// Prepare ProxyURL
-			proxyUrl, err := url.Parse("http://" + proxy.Host + ":" + proxy.Port + context)
+			proxyURL, err := url.Parse("http://" + proxy.Host + ":" + proxy.Port + context)
 			if err != nil {
 				return nil, err
 			}
 			if proxy.Username != "" || proxy.Password != "" {
-				proxyUrl.User = url.UserPassword(proxy.Username, proxy.Password)
+				proxyURL.User = url.UserPassword(proxy.Username, proxy.Password)
 			}

-			jolokiaUrl = proxyUrl
+			jolokiaURL = proxyURL

 		} else {
-			serverUrl, err := url.Parse("http://" + server.Host + ":" + server.Port + context)
+			serverURL, err := url.Parse("http://" + server.Host + ":" + server.Port + context)
 			if err != nil {
 				return nil, err
 			}
 			if server.Username != "" || server.Password != "" {
-				serverUrl.User = url.UserPassword(server.Username, server.Password)
+				serverURL.User = url.UserPassword(server.Username, server.Password)
 			}

-			jolokiaUrl = serverUrl
+			jolokiaURL = serverURL
 		}

 		bulkBodyContent = append(bulkBodyContent, bodyContent)
@@ -238,7 +238,7 @@ func (j *Jolokia) prepareRequest(server Server, metrics []Metric) (*http.Request
 		return nil, err
 	}

-	req, err := http.NewRequest("POST", jolokiaUrl.String(), bytes.NewBuffer(requestBody))
+	req, err := http.NewRequest("POST", jolokiaURL.String(), bytes.NewBuffer(requestBody))
 	if err != nil {
 		return nil, err
 	}
@@ -125,14 +125,14 @@ func (c *Client) read(requests []ReadRequest) ([]ReadResponse, error) {
 		return nil, err
 	}

-	requestUrl, err := formatReadUrl(c.URL, c.config.Username, c.config.Password)
+	requestURL, err := formatReadURL(c.URL, c.config.Username, c.config.Password)
 	if err != nil {
 		return nil, err
 	}

-	req, err := http.NewRequest("POST", requestUrl, bytes.NewBuffer(requestBody))
+	req, err := http.NewRequest("POST", requestURL, bytes.NewBuffer(requestBody))
 	if err != nil {
-		return nil, fmt.Errorf("unable to create new request '%s': %s", requestUrl, err)
+		return nil, fmt.Errorf("unable to create new request '%s': %s", requestURL, err)
 	}

 	req.Header.Add("Content-type", "application/json")
@@ -249,22 +249,22 @@ func makeReadResponses(jresponses []jolokiaResponse) []ReadResponse {
 	return rresponses
 }

-func formatReadUrl(configUrl, username, password string) (string, error) {
-	parsedUrl, err := url.Parse(configUrl)
+func formatReadURL(configURL, username, password string) (string, error) {
+	parsedURL, err := url.Parse(configURL)
 	if err != nil {
 		return "", err
 	}

-	readUrl := url.URL{
-		Host: parsedUrl.Host,
-		Scheme: parsedUrl.Scheme,
+	readURL := url.URL{
+		Host: parsedURL.Host,
+		Scheme: parsedURL.Scheme,
 	}

 	if username != "" || password != "" {
-		readUrl.User = url.UserPassword(username, password)
+		readURL.User = url.UserPassword(username, password)
 	}

-	readUrl.Path = path.Join(parsedUrl.Path, "read")
-	readUrl.Query().Add("ignoreErrors", "true")
-	return readUrl.String(), nil
+	readURL.Path = path.Join(parsedURL.Path, "read")
+	readURL.Query().Add("ignoreErrors", "true")
+	return readURL.String(), nil
 }
@@ -27,20 +27,20 @@ var data = &telemetry.OpenConfigData{
 	Kv: []*telemetry.KeyValue{{Key: "/sensor[tag='tagValue']/intKey", Value: &telemetry.KeyValue_IntValue{IntValue: 10}}},
 }

-var data_with_prefix = &telemetry.OpenConfigData{
+var dataWithPrefix = &telemetry.OpenConfigData{
 	Path: "/sensor_with_prefix",
 	Kv: []*telemetry.KeyValue{{Key: "__prefix__", Value: &telemetry.KeyValue_StrValue{StrValue: "/sensor/prefix/"}},
 		{Key: "intKey", Value: &telemetry.KeyValue_IntValue{IntValue: 10}}},
 }

-var data_with_multiple_tags = &telemetry.OpenConfigData{
+var dataWithMultipleTags = &telemetry.OpenConfigData{
 	Path: "/sensor_with_multiple_tags",
 	Kv: []*telemetry.KeyValue{{Key: "__prefix__", Value: &telemetry.KeyValue_StrValue{StrValue: "/sensor/prefix/"}},
 		{Key: "tagKey[tag='tagValue']/boolKey", Value: &telemetry.KeyValue_BoolValue{BoolValue: false}},
 		{Key: "intKey", Value: &telemetry.KeyValue_IntValue{IntValue: 10}}},
 }

-var data_with_string_values = &telemetry.OpenConfigData{
+var dataWithStringValues = &telemetry.OpenConfigData{
 	Path: "/sensor_with_string_values",
 	Kv: []*telemetry.KeyValue{{Key: "__prefix__", Value: &telemetry.KeyValue_StrValue{StrValue: "/sensor/prefix/"}},
 		{Key: "strKey[tag='tagValue']/strValue", Value: &telemetry.KeyValue_StrValue{StrValue: "10"}}},
@@ -54,11 +54,11 @@ func (s *openConfigTelemetryServer) TelemetrySubscribe(req *telemetry.Subscripti
 	if path == "/sensor" {
 		stream.Send(data)
 	} else if path == "/sensor_with_prefix" {
-		stream.Send(data_with_prefix)
+		stream.Send(dataWithPrefix)
 	} else if path == "/sensor_with_multiple_tags" {
-		stream.Send(data_with_multiple_tags)
+		stream.Send(dataWithMultipleTags)
 	} else if path == "/sensor_with_string_values" {
-		stream.Send(data_with_string_values)
+		stream.Send(dataWithStringValues)
 	}
 	return nil
 }
@@ -51,7 +51,7 @@ func (*Kapacitor) SampleConfig() string {

 func (k *Kapacitor) Gather(acc telegraf.Accumulator) error {
 	if k.client == nil {
-		client, err := k.createHttpClient()
+		client, err := k.createHTTPClient()
 		if err != nil {
 			return err
 		}
@@ -73,7 +73,7 @@ func (k *Kapacitor) Gather(acc telegraf.Accumulator) error {
 	return nil
 }

-func (k *Kapacitor) createHttpClient() (*http.Client, error) {
+func (k *Kapacitor) createHTTPClient() (*http.Client, error) {
 	tlsCfg, err := k.ClientConfig.TLSConfig()
 	if err != nil {
 		return nil, err
@@ -17,10 +17,10 @@ import (
 // /proc/stat file line prefixes to gather stats on:
 var (
 	interrupts = []byte("intr")
-	context_switches = []byte("ctxt")
-	processes_forked = []byte("processes")
-	disk_pages = []byte("page")
-	boot_time = []byte("btime")
+	contextSwitches = []byte("ctxt")
+	processesForked = []byte("processes")
+	diskPages = []byte("page")
+	bootTime = []byte("btime")
 )

 type Kernel struct {
@@ -65,25 +65,25 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error {
 				return err
 			}
 			fields["interrupts"] = int64(m)
-		case bytes.Equal(field, context_switches):
+		case bytes.Equal(field, contextSwitches):
 			m, err := strconv.ParseInt(string(dataFields[i+1]), 10, 64)
 			if err != nil {
 				return err
 			}
 			fields["context_switches"] = int64(m)
-		case bytes.Equal(field, processes_forked):
+		case bytes.Equal(field, processesForked):
 			m, err := strconv.ParseInt(string(dataFields[i+1]), 10, 64)
 			if err != nil {
 				return err
 			}
 			fields["processes_forked"] = int64(m)
-		case bytes.Equal(field, boot_time):
+		case bytes.Equal(field, bootTime):
 			m, err := strconv.ParseInt(string(dataFields[i+1]), 10, 64)
 			if err != nil {
 				return err
 			}
 			fields["boot_time"] = int64(m)
-		case bytes.Equal(field, disk_pages):
+		case bytes.Equal(field, diskPages):
 			in, err := strconv.ParseInt(string(dataFields[i+1]), 10, 64)
 			if err != nil {
 				return err
@@ -13,8 +13,8 @@ import (
 )

 func TestFullProcFile(t *testing.T) {
-	tmpfile := makeFakeStatFile([]byte(statFile_Full))
-	tmpfile2 := makeFakeStatFile([]byte(entropyStatFile_Full))
+	tmpfile := makeFakeStatFile([]byte(statFileFull))
+	tmpfile2 := makeFakeStatFile([]byte(entropyStatFileFull))
 	defer os.Remove(tmpfile)
 	defer os.Remove(tmpfile2)

@@ -40,8 +40,8 @@ func TestFullProcFile(t *testing.T) {
 }

 func TestPartialProcFile(t *testing.T) {
-	tmpfile := makeFakeStatFile([]byte(statFile_Partial))
-	tmpfile2 := makeFakeStatFile([]byte(entropyStatFile_Partial))
+	tmpfile := makeFakeStatFile([]byte(statFilePartial))
+	tmpfile2 := makeFakeStatFile([]byte(entropyStatFilePartial))
 	defer os.Remove(tmpfile)
 	defer os.Remove(tmpfile2)

@@ -66,8 +66,8 @@ func TestPartialProcFile(t *testing.T) {
 }

 func TestInvalidProcFile1(t *testing.T) {
-	tmpfile := makeFakeStatFile([]byte(statFile_Invalid))
-	tmpfile2 := makeFakeStatFile([]byte(entropyStatFile_Invalid))
+	tmpfile := makeFakeStatFile([]byte(statFileInvalid))
+	tmpfile2 := makeFakeStatFile([]byte(entropyStatFileInvalid))
 	defer os.Remove(tmpfile)
 	defer os.Remove(tmpfile2)

@@ -82,7 +82,7 @@ func TestInvalidProcFile1(t *testing.T) {
 }

 func TestInvalidProcFile2(t *testing.T) {
-	tmpfile := makeFakeStatFile([]byte(statFile_Invalid2))
+	tmpfile := makeFakeStatFile([]byte(statFileInvalid2))
 	defer os.Remove(tmpfile)

 	k := Kernel{
@@ -95,7 +95,7 @@ func TestInvalidProcFile2(t *testing.T) {
 }

 func TestNoProcFile(t *testing.T) {
-	tmpfile := makeFakeStatFile([]byte(statFile_Invalid2))
+	tmpfile := makeFakeStatFile([]byte(statFileInvalid2))
 	os.Remove(tmpfile)

 	k := Kernel{
@ -108,7 +108,7 @@ func TestNoProcFile(t *testing.T) {
|
|||
assert.Contains(t, err.Error(), "does not exist")
|
||||
}
|
||||
|
||||
const statFile_Full = `cpu 6796 252 5655 10444977 175 0 101 0 0 0
|
||||
const statFileFull = `cpu 6796 252 5655 10444977 175 0 101 0 0 0
|
||||
cpu0 6796 252 5655 10444977 175 0 101 0 0 0
|
||||
intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
ctxt 2626618
|
||||
|
|
@ -122,7 +122,7 @@ swap 1 0
|
|||
entropy_avail 1024
|
||||
`
|
||||
|
||||
const statFile_Partial = `cpu 6796 252 5655 10444977 175 0 101 0 0 0
|
||||
const statFilePartial = `cpu 6796 252 5655 10444977 175 0 101 0 0 0
|
||||
cpu0 6796 252 5655 10444977 175 0 101 0 0 0
|
||||
intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
ctxt 2626618
|
||||
|
|
@ -134,7 +134,7 @@ page 5741 1808
|
|||
`
|
||||
|
||||
// missing btime measurement
|
||||
const statFile_Invalid = `cpu 6796 252 5655 10444977 175 0 101 0 0 0
|
||||
const statFileInvalid = `cpu 6796 252 5655 10444977 175 0 101 0 0 0
|
||||
cpu0 6796 252 5655 10444977 175 0 101 0 0 0
|
||||
intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
ctxt 2626618
|
||||
|
|
@ -149,7 +149,7 @@ entropy_avail 1024
|
|||
`
|
||||
|
||||
// missing second page measurement
|
||||
const statFile_Invalid2 = `cpu 6796 252 5655 10444977 175 0 101 0 0 0
|
||||
const statFileInvalid2 = `cpu 6796 252 5655 10444977 175 0 101 0 0 0
|
||||
cpu0 6796 252 5655 10444977 175 0 101 0 0 0
|
||||
intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
ctxt 2626618
|
||||
|
|
@ -161,11 +161,11 @@ softirq 1031662 0 649485 20946 111071 11620 0 1 0 994 237545
|
|||
entropy_avail 1024 2048
|
||||
`
|
||||
|
||||
const entropyStatFile_Full = `1024`
|
||||
const entropyStatFileFull = `1024`
|
||||
|
||||
const entropyStatFile_Partial = `1024`
|
||||
const entropyStatFilePartial = `1024`
|
||||
|
||||
const entropyStatFile_Invalid = ``
|
||||
const entropyStatFileInvalid = ``
|
||||
|
||||
func makeFakeStatFile(content []byte) string {
|
||||
tmpfile, err := ioutil.TempFile("", "kernel_test")
|
||||
|
|
|
|||
|
|
@ -13,7 +13,7 @@ import (
|
|||
)
|
||||
|
||||
func TestFullVmStatProcFile(t *testing.T) {
|
||||
tmpfile := makeFakeVmStatFile([]byte(vmStatFile_Full))
|
||||
tmpfile := makeFakeVMStatFile([]byte(vmStatFileFull))
|
||||
defer os.Remove(tmpfile)
|
||||
|
||||
k := KernelVmstat{
|
||||
|
|
@ -121,7 +121,7 @@ func TestFullVmStatProcFile(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestPartialVmStatProcFile(t *testing.T) {
|
||||
tmpfile := makeFakeVmStatFile([]byte(vmStatFile_Partial))
|
||||
tmpfile := makeFakeVMStatFile([]byte(vmStatFilePartial))
|
||||
defer os.Remove(tmpfile)
|
||||
|
||||
k := KernelVmstat{
|
||||
|
|
@ -151,7 +151,7 @@ func TestPartialVmStatProcFile(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestInvalidVmStatProcFile1(t *testing.T) {
|
||||
tmpfile := makeFakeVmStatFile([]byte(vmStatFile_Invalid))
|
||||
tmpfile := makeFakeVMStatFile([]byte(vmStatFileInvalid))
|
||||
defer os.Remove(tmpfile)
|
||||
|
||||
k := KernelVmstat{
|
||||
|
|
@ -164,7 +164,7 @@ func TestInvalidVmStatProcFile1(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestNoVmStatProcFile(t *testing.T) {
|
||||
tmpfile := makeFakeVmStatFile([]byte(vmStatFile_Invalid))
|
||||
tmpfile := makeFakeVMStatFile([]byte(vmStatFileInvalid))
|
||||
os.Remove(tmpfile)
|
||||
|
||||
k := KernelVmstat{
|
||||
|
|
@ -177,7 +177,7 @@ func TestNoVmStatProcFile(t *testing.T) {
|
|||
assert.Contains(t, err.Error(), "does not exist")
|
||||
}
|
||||
|
||||
const vmStatFile_Full = `nr_free_pages 78730
|
||||
const vmStatFileFull = `nr_free_pages 78730
|
||||
nr_inactive_anon 426259
|
||||
nr_active_anon 2515657
|
||||
nr_inactive_file 2366791
|
||||
|
|
@ -269,7 +269,7 @@ thp_collapse_alloc 24857
|
|||
thp_collapse_alloc_failed 102214
|
||||
thp_split 9817`
|
||||
|
||||
const vmStatFile_Partial = `unevictable_pgs_culled 1531
|
||||
const vmStatFilePartial = `unevictable_pgs_culled 1531
|
||||
unevictable_pgs_scanned 0
|
||||
unevictable_pgs_rescued 5426
|
||||
unevictable_pgs_mlocked 6988
|
||||
|
|
@ -284,7 +284,7 @@ thp_collapse_alloc_failed 102214
|
|||
thp_split 9817`
|
||||
|
||||
// invalid thp_split measurement
|
||||
const vmStatFile_Invalid = `unevictable_pgs_culled 1531
|
||||
const vmStatFileInvalid = `unevictable_pgs_culled 1531
|
||||
unevictable_pgs_scanned 0
|
||||
unevictable_pgs_rescued 5426
|
||||
unevictable_pgs_mlocked 6988
|
||||
|
|
@ -298,7 +298,7 @@ thp_collapse_alloc 24857
|
|||
thp_collapse_alloc_failed 102214
|
||||
thp_split abcd`
|
||||
|
||||
func makeFakeVmStatFile(content []byte) string {
|
||||
func makeFakeVMStatFile(content []byte) string {
|
||||
tmpfile, err := ioutil.TempFile("", "kernel_vmstat_test")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
|
|
|||
|
|
@ -141,7 +141,7 @@ func (k *Kibana) Description() string {
|
|||
|
||||
func (k *Kibana) Gather(acc telegraf.Accumulator) error {
|
||||
if k.client == nil {
|
||||
client, err := k.createHttpClient()
|
||||
client, err := k.createHTTPClient()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -166,7 +166,7 @@ func (k *Kibana) Gather(acc telegraf.Accumulator) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (k *Kibana) createHttpClient() (*http.Client, error) {
|
||||
func (k *Kibana) createHTTPClient() (*http.Client, error) {
|
||||
tlsCfg, err := k.ClientConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -182,12 +182,12 @@ func (k *Kibana) createHttpClient() (*http.Client, error) {
|
|||
return client, nil
|
||||
}
|
||||
|
||||
func (k *Kibana) gatherKibanaStatus(baseUrl string, acc telegraf.Accumulator) error {
|
||||
func (k *Kibana) gatherKibanaStatus(baseURL string, acc telegraf.Accumulator) error {
|
||||
|
||||
kibanaStatus := &kibanaStatus{}
|
||||
url := baseUrl + statusPath
|
||||
url := baseURL + statusPath
|
||||
|
||||
host, err := k.gatherJsonData(url, kibanaStatus)
|
||||
host, err := k.gatherJSONData(url, kibanaStatus)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -237,7 +237,7 @@ func (k *Kibana) gatherKibanaStatus(baseUrl string, acc telegraf.Accumulator) er
|
|||
return nil
|
||||
}
|
||||
|
||||
func (k *Kibana) gatherJsonData(url string, v interface{}) (host string, err error) {
|
||||
func (k *Kibana) gatherJSONData(url string, v interface{}) (host string, err error) {
|
||||
request, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("unable to create new request '%s': %v", url, err)
|
||||
|
|
|
|||
|
|
@ -129,7 +129,7 @@ func buildURL(endpoint string, base string) (*url.URL, error) {
|
|||
|
||||
func (k *Kubernetes) gatherSummary(baseURL string, acc telegraf.Accumulator) error {
|
||||
summaryMetrics := &SummaryMetrics{}
|
||||
err := k.LoadJson(fmt.Sprintf("%s/stats/summary", baseURL), summaryMetrics)
|
||||
err := k.LoadJSON(fmt.Sprintf("%s/stats/summary", baseURL), summaryMetrics)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -193,19 +193,19 @@ func buildNodeMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator)
|
|||
}
|
||||
|
||||
func (k *Kubernetes) gatherPodInfo(baseURL string) ([]Metadata, error) {
|
||||
var podApi Pods
|
||||
err := k.LoadJson(fmt.Sprintf("%s/pods", baseURL), &podApi)
|
||||
var podAPI Pods
|
||||
err := k.LoadJSON(fmt.Sprintf("%s/pods", baseURL), &podAPI)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var podInfos []Metadata
|
||||
for _, podMetadata := range podApi.Items {
|
||||
for _, podMetadata := range podAPI.Items {
|
||||
podInfos = append(podInfos, podMetadata.Metadata)
|
||||
}
|
||||
return podInfos, nil
|
||||
}
|
||||
|
||||
func (k *Kubernetes) LoadJson(url string, v interface{}) error {
|
||||
func (k *Kubernetes) LoadJSON(url string, v interface{}) error {
|
||||
var req, err = http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
|
|||
|
|
@@ -2,7 +2,7 @@ package kubernetes

 type Pods struct {
 	Kind string `json:"kind"`
-	ApiVersion string `json:"apiVersion"`
+	APIVersion string `json:"apiVersion"`
 	Items []Item `json:"items"`
 }

@ -54,12 +54,12 @@ func (l *Lanz) Start(acc telegraf.Accumulator) error {
|
|||
}
|
||||
|
||||
for _, server := range l.Servers {
|
||||
deviceUrl, err := url.Parse(server)
|
||||
deviceURL, err := url.Parse(server)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
client := lanz.New(
|
||||
lanz.WithAddr(deviceUrl.Host),
|
||||
lanz.WithAddr(deviceURL.Host),
|
||||
lanz.WithBackoff(1*time.Second),
|
||||
lanz.WithTimeout(10*time.Second),
|
||||
)
|
||||
|
|
@ -72,7 +72,7 @@ func (l *Lanz) Start(acc telegraf.Accumulator) error {
|
|||
l.wg.Add(1)
|
||||
go func() {
|
||||
l.wg.Done()
|
||||
receive(acc, in, deviceUrl)
|
||||
receive(acc, in, deviceURL)
|
||||
}()
|
||||
}
|
||||
return nil
|
||||
|
|
@ -85,19 +85,19 @@ func (l *Lanz) Stop() {
|
|||
l.wg.Wait()
|
||||
}
|
||||
|
||||
func receive(acc telegraf.Accumulator, in <-chan *pb.LanzRecord, deviceUrl *url.URL) {
|
||||
func receive(acc telegraf.Accumulator, in <-chan *pb.LanzRecord, deviceURL *url.URL) {
|
||||
for {
|
||||
select {
|
||||
case msg, ok := <-in:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
msgToAccumulator(acc, msg, deviceUrl)
|
||||
msgToAccumulator(acc, msg, deviceURL)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func msgToAccumulator(acc telegraf.Accumulator, msg *pb.LanzRecord, deviceUrl *url.URL) {
|
||||
func msgToAccumulator(acc telegraf.Accumulator, msg *pb.LanzRecord, deviceURL *url.URL) {
|
||||
cr := msg.GetCongestionRecord()
|
||||
if cr != nil {
|
||||
vals := map[string]interface{}{
|
||||
|
|
@ -114,8 +114,8 @@ func msgToAccumulator(acc telegraf.Accumulator, msg *pb.LanzRecord, deviceUrl *u
|
|||
"entry_type": strconv.FormatInt(int64(cr.GetEntryType()), 10),
|
||||
"traffic_class": strconv.FormatInt(int64(cr.GetTrafficClass()), 10),
|
||||
"fabric_peer_intf_name": cr.GetFabricPeerIntfName(),
|
||||
"source": deviceUrl.Hostname(),
|
||||
"port": deviceUrl.Port(),
|
||||
"source": deviceURL.Hostname(),
|
||||
"port": deviceURL.Port(),
|
||||
}
|
||||
acc.AddFields("lanz_congestion_record", vals, tags)
|
||||
}
|
||||
|
|
@ -129,8 +129,8 @@ func msgToAccumulator(acc telegraf.Accumulator, msg *pb.LanzRecord, deviceUrl *u
|
|||
}
|
||||
tags := map[string]string{
|
||||
"entry_type": strconv.FormatInt(int64(gbur.GetEntryType()), 10),
|
||||
"source": deviceUrl.Hostname(),
|
||||
"port": deviceUrl.Port(),
|
||||
"source": deviceURL.Hostname(),
|
||||
"port": deviceURL.Port(),
|
||||
}
|
||||
acc.AddFields("lanz_global_buffer_usage_record", vals, tags)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -58,16 +58,16 @@ func TestLanzGeneratesMetrics(t *testing.T) {
|
|||
|
||||
l.Servers = append(l.Servers, "tcp://switch01.int.example.com:50001")
|
||||
l.Servers = append(l.Servers, "tcp://switch02.int.example.com:50001")
|
||||
deviceUrl1, err := url.Parse(l.Servers[0])
|
||||
deviceURL1, err := url.Parse(l.Servers[0])
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
deviceUrl2, err := url.Parse(l.Servers[1])
|
||||
deviceURL2, err := url.Parse(l.Servers[1])
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
msgToAccumulator(&acc, testProtoBufCongestionRecord1, deviceUrl1)
|
||||
msgToAccumulator(&acc, testProtoBufCongestionRecord1, deviceURL1)
|
||||
acc.Wait(1)
|
||||
|
||||
vals1 := map[string]interface{}{
|
||||
|
|
@ -92,7 +92,7 @@ func TestLanzGeneratesMetrics(t *testing.T) {
|
|||
acc.AssertContainsTaggedFields(t, "lanz_congestion_record", vals1, tags1)
|
||||
|
||||
acc.ClearMetrics()
|
||||
msgToAccumulator(&acc, testProtoBufCongestionRecord2, deviceUrl2)
|
||||
msgToAccumulator(&acc, testProtoBufCongestionRecord2, deviceURL2)
|
||||
acc.Wait(1)
|
||||
|
||||
vals2 := map[string]interface{}{
|
||||
|
|
@ -117,7 +117,7 @@ func TestLanzGeneratesMetrics(t *testing.T) {
|
|||
acc.AssertContainsTaggedFields(t, "lanz_congestion_record", vals2, tags2)
|
||||
|
||||
acc.ClearMetrics()
|
||||
msgToAccumulator(&acc, testProtoBufGlobalBufferUsageRecord, deviceUrl1)
|
||||
msgToAccumulator(&acc, testProtoBufGlobalBufferUsageRecord, deviceURL1)
|
||||
acc.Wait(1)
|
||||
|
||||
gburVals1 := map[string]interface{}{
|
||||
|
|
|
|||
|
|
@ -157,8 +157,8 @@ func (logstash *Logstash) Init() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// createHttpClient create a clients to access API
|
||||
func (logstash *Logstash) createHttpClient() (*http.Client, error) {
|
||||
// createHTTPClient create a clients to access API
|
||||
func (logstash *Logstash) createHTTPClient() (*http.Client, error) {
|
||||
tlsConfig, err := logstash.ClientConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -174,8 +174,8 @@ func (logstash *Logstash) createHttpClient() (*http.Client, error) {
|
|||
return client, nil
|
||||
}
|
||||
|
||||
// gatherJsonData query the data source and parse the response JSON
|
||||
func (logstash *Logstash) gatherJsonData(url string, value interface{}) error {
|
||||
// gatherJSONData query the data source and parse the response JSON
|
||||
func (logstash *Logstash) gatherJSONData(url string, value interface{}) error {
|
||||
request, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -217,7 +217,7 @@ func (logstash *Logstash) gatherJsonData(url string, value interface{}) error {
|
|||
func (logstash *Logstash) gatherJVMStats(url string, accumulator telegraf.Accumulator) error {
|
||||
jvmStats := &JVMStats{}
|
||||
|
||||
err := logstash.gatherJsonData(url, jvmStats)
|
||||
err := logstash.gatherJSONData(url, jvmStats)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -243,7 +243,7 @@ func (logstash *Logstash) gatherJVMStats(url string, accumulator telegraf.Accumu
|
|||
func (logstash *Logstash) gatherProcessStats(url string, accumulator telegraf.Accumulator) error {
|
||||
processStats := &ProcessStats{}
|
||||
|
||||
err := logstash.gatherJsonData(url, processStats)
|
||||
err := logstash.gatherJSONData(url, processStats)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -333,7 +333,7 @@ func (logstash *Logstash) gatherQueueStats(
|
|||
func (logstash *Logstash) gatherPipelineStats(url string, accumulator telegraf.Accumulator) error {
|
||||
pipelineStats := &PipelineStats{}
|
||||
|
||||
err := logstash.gatherJsonData(url, pipelineStats)
|
||||
err := logstash.gatherJSONData(url, pipelineStats)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -377,7 +377,7 @@ func (logstash *Logstash) gatherPipelineStats(url string, accumulator telegraf.A
|
|||
func (logstash *Logstash) gatherPipelinesStats(url string, accumulator telegraf.Accumulator) error {
|
||||
pipelinesStats := &PipelinesStats{}
|
||||
|
||||
err := logstash.gatherJsonData(url, pipelinesStats)
|
||||
err := logstash.gatherJSONData(url, pipelinesStats)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -423,7 +423,7 @@ func (logstash *Logstash) gatherPipelinesStats(url string, accumulator telegraf.
|
|||
// Gather ask this plugin to start gathering metrics
|
||||
func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error {
|
||||
if logstash.client == nil {
|
||||
client, err := logstash.createHttpClient()
|
||||
client, err := logstash.createHTTPClient()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -432,40 +432,40 @@ func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error {
|
|||
}
|
||||
|
||||
if choice.Contains("jvm", logstash.Collect) {
|
||||
jvmUrl, err := url.Parse(logstash.URL + jvmStats)
|
||||
jvmURL, err := url.Parse(logstash.URL + jvmStats)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := logstash.gatherJVMStats(jvmUrl.String(), accumulator); err != nil {
|
||||
if err := logstash.gatherJVMStats(jvmURL.String(), accumulator); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if choice.Contains("process", logstash.Collect) {
|
||||
processUrl, err := url.Parse(logstash.URL + processStats)
|
||||
processURL, err := url.Parse(logstash.URL + processStats)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := logstash.gatherProcessStats(processUrl.String(), accumulator); err != nil {
|
||||
if err := logstash.gatherProcessStats(processURL.String(), accumulator); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if choice.Contains("pipelines", logstash.Collect) {
|
||||
if logstash.SinglePipeline {
|
||||
pipelineUrl, err := url.Parse(logstash.URL + pipelineStats)
|
||||
pipelineURL, err := url.Parse(logstash.URL + pipelineStats)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := logstash.gatherPipelineStats(pipelineUrl.String(), accumulator); err != nil {
|
||||
if err := logstash.gatherPipelineStats(pipelineURL.String(), accumulator); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
pipelinesUrl, err := url.Parse(logstash.URL + pipelinesStats)
|
||||
pipelinesURL, err := url.Parse(logstash.URL + pipelinesStats)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := logstash.gatherPipelinesStats(pipelinesUrl.String(), accumulator); err != nil {
|
||||
if err := logstash.gatherPipelinesStats(pipelinesURL.String(), accumulator); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -36,10 +36,10 @@ func Test_Logstash5GatherProcessStats(test *testing.T) {
|
|||
defer fakeServer.Close()
|
||||
|
||||
if logstashTest.client == nil {
|
||||
client, err := logstashTest.createHttpClient()
|
||||
client, err := logstashTest.createHTTPClient()
|
||||
|
||||
if err != nil {
|
||||
test.Logf("Can't createHttpClient")
|
||||
test.Logf("Can't createHTTPClient")
|
||||
}
|
||||
logstashTest.client = client
|
||||
}
|
||||
|
|
@ -85,10 +85,10 @@ func Test_Logstash6GatherProcessStats(test *testing.T) {
|
|||
defer fakeServer.Close()
|
||||
|
||||
if logstashTest.client == nil {
|
||||
client, err := logstashTest.createHttpClient()
|
||||
client, err := logstashTest.createHTTPClient()
|
||||
|
||||
if err != nil {
|
||||
test.Logf("Can't createHttpClient")
|
||||
test.Logf("Can't createHTTPClient")
|
||||
}
|
||||
logstashTest.client = client
|
||||
}
|
||||
|
|
@ -135,10 +135,10 @@ func Test_Logstash5GatherPipelineStats(test *testing.T) {
|
|||
defer fakeServer.Close()
|
||||
|
||||
if logstashTest.client == nil {
|
||||
client, err := logstashTest.createHttpClient()
|
||||
client, err := logstashTest.createHTTPClient()
|
||||
|
||||
if err != nil {
|
||||
test.Logf("Can't createHttpClient")
|
||||
test.Logf("Can't createHTTPClient")
|
||||
}
|
||||
logstashTest.client = client
|
||||
}
|
||||
|
|
@ -237,10 +237,10 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) {
|
|||
defer fakeServer.Close()
|
||||
|
||||
if logstashTest.client == nil {
|
||||
client, err := logstashTest.createHttpClient()
|
||||
client, err := logstashTest.createHTTPClient()
|
||||
|
||||
if err != nil {
|
||||
test.Logf("Can't createHttpClient")
|
||||
test.Logf("Can't createHTTPClient")
|
||||
}
|
||||
logstashTest.client = client
|
||||
}
|
||||
|
|
@ -566,10 +566,10 @@ func Test_Logstash5GatherJVMStats(test *testing.T) {
|
|||
defer fakeServer.Close()
|
||||
|
||||
if logstashTest.client == nil {
|
||||
client, err := logstashTest.createHttpClient()
|
||||
client, err := logstashTest.createHTTPClient()
|
||||
|
||||
if err != nil {
|
||||
test.Logf("Can't createHttpClient")
|
||||
test.Logf("Can't createHTTPClient")
|
||||
}
|
||||
logstashTest.client = client
|
||||
}
|
||||
|
|
@ -635,10 +635,10 @@ func Test_Logstash6GatherJVMStats(test *testing.T) {
|
|||
defer fakeServer.Close()
|
||||
|
||||
if logstashTest.client == nil {
|
||||
client, err := logstashTest.createHttpClient()
|
||||
client, err := logstashTest.createHTTPClient()
|
||||
|
||||
if err != nil {
|
||||
test.Logf("Can't createHttpClient")
|
||||
test.Logf("Can't createHTTPClient")
|
||||
}
|
||||
logstashTest.client = client
|
||||
}
|
||||
|
|
|
|||
|
|
@ -15,11 +15,11 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
reports_endpoint string = "/3.0/reports"
|
||||
reports_endpoint_campaign string = "/3.0/reports/%s"
|
||||
reportsEndpoint string = "/3.0/reports"
|
||||
reportsEndpointCampaign string = "/3.0/reports/%s"
|
||||
)
|
||||
|
||||
var mailchimp_datacenter = regexp.MustCompile("[a-z]+[0-9]+$")
|
||||
var mailchimpDatacenter = regexp.MustCompile("[a-z]+[0-9]+$")
|
||||
|
||||
type ChimpAPI struct {
|
||||
Transport http.RoundTripper
|
||||
|
|
@ -57,7 +57,7 @@ func (p *ReportsParams) String() string {
|
|||
func NewChimpAPI(apiKey string) *ChimpAPI {
|
||||
u := &url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = fmt.Sprintf("%s.api.mailchimp.com", mailchimp_datacenter.FindString(apiKey))
|
||||
u.Host = fmt.Sprintf("%s.api.mailchimp.com", mailchimpDatacenter.FindString(apiKey))
|
||||
u.User = url.UserPassword("", apiKey)
|
||||
return &ChimpAPI{url: u}
|
||||
}
|
||||
|
|
@ -86,7 +86,7 @@ func chimpErrorCheck(body []byte) error {
|
|||
func (a *ChimpAPI) GetReports(params ReportsParams) (ReportsResponse, error) {
|
||||
a.Lock()
|
||||
defer a.Unlock()
|
||||
a.url.Path = reports_endpoint
|
||||
a.url.Path = reportsEndpoint
|
||||
|
||||
var response ReportsResponse
|
||||
rawjson, err := runChimp(a, params)
|
||||
|
|
@ -105,7 +105,7 @@ func (a *ChimpAPI) GetReports(params ReportsParams) (ReportsResponse, error) {
|
|||
func (a *ChimpAPI) GetReport(campaignID string) (Report, error) {
|
||||
a.Lock()
|
||||
defer a.Unlock()
|
||||
a.url.Path = fmt.Sprintf(reports_endpoint_campaign, campaignID)
|
||||
a.url.Path = fmt.Sprintf(reportsEndpointCampaign, campaignID)
|
||||
|
||||
var response Report
|
||||
rawjson, err := runChimp(a, ReportsParams{})
|
||||
|
|
|
|||
|
|
@@ -11,9 +11,9 @@ import (
 type MailChimp struct {
 	api *ChimpAPI

-	ApiKey string
-	DaysOld int
-	CampaignId string
+	APIKey string `toml:"api_key"`
+	DaysOld int `toml:"days_old"`
+	CampaignID string `toml:"campaign_id"`
 }

 var sampleConfig = `
@ -37,11 +37,11 @@ func (m *MailChimp) Description() string {
|
|||
|
||||
func (m *MailChimp) Gather(acc telegraf.Accumulator) error {
|
||||
if m.api == nil {
|
||||
m.api = NewChimpAPI(m.ApiKey)
|
||||
m.api = NewChimpAPI(m.APIKey)
|
||||
}
|
||||
m.api.Debug = false
|
||||
|
||||
if m.CampaignId == "" {
|
||||
if m.CampaignID == "" {
|
||||
since := ""
|
||||
if m.DaysOld > 0 {
|
||||
now := time.Now()
|
||||
|
|
@ -61,7 +61,7 @@ func (m *MailChimp) Gather(acc telegraf.Accumulator) error {
|
|||
gatherReport(acc, report, now)
|
||||
}
|
||||
} else {
|
||||
report, err := m.api.GetReport(m.CampaignId)
|
||||
report, err := m.api.GetReport(m.CampaignID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -94,7 +94,7 @@ func TestMailChimpGatherReport(t *testing.T) {
|
|||
}
|
||||
m := MailChimp{
|
||||
api: api,
|
||||
CampaignId: "test",
|
||||
CampaignID: "test",
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
|
@ -159,7 +159,7 @@ func TestMailChimpGatherError(t *testing.T) {
|
|||
}
|
||||
m := MailChimp{
|
||||
api: api,
|
||||
CampaignId: "test",
|
||||
CampaignID: "test",
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
|
|
|||
|
|
@ -159,7 +159,7 @@ func (m *Mesos) initialize() error {
|
|||
m.slaveURLs = append(m.slaveURLs, u)
|
||||
}
|
||||
|
||||
client, err := m.createHttpClient()
|
||||
client, err := m.createHTTPClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -203,7 +203,7 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (m *Mesos) createHttpClient() (*http.Client, error) {
|
||||
func (m *Mesos) createHTTPClient() (*http.Client, error) {
|
||||
tlsCfg, err := m.ClientConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
|||
|
|
@ -121,7 +121,7 @@ func (m *MongoDB) getMongoServer(url *url.URL) *Server {
|
|||
if _, ok := m.mongos[url.Host]; !ok {
|
||||
m.mongos[url.Host] = &Server{
|
||||
Log: m.Log,
|
||||
Url: url,
|
||||
URL: url,
|
||||
}
|
||||
}
|
||||
return m.mongos[url.Host]
|
||||
|
|
@ -130,10 +130,10 @@ func (m *MongoDB) getMongoServer(url *url.URL) *Server {
|
|||
func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error {
|
||||
if server.Session == nil {
|
||||
var dialAddrs []string
|
||||
if server.Url.User != nil {
|
||||
dialAddrs = []string{server.Url.String()}
|
||||
if server.URL.User != nil {
|
||||
dialAddrs = []string{server.URL.String()}
|
||||
} else {
|
||||
dialAddrs = []string{server.Url.Host}
|
||||
dialAddrs = []string{server.URL.Host}
|
||||
}
|
||||
dialInfo, err := mgo.ParseURL(dialAddrs[0])
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@@ -12,7 +12,7 @@ import (
 )

 type Server struct {
-	Url *url.URL
+	URL *url.URL
 	Session *mgo.Session
 	lastResult *MongoStatus

@ -21,7 +21,7 @@ type Server struct {
|
|||
|
||||
func (s *Server) getDefaultTags() map[string]string {
|
||||
tags := make(map[string]string)
|
||||
tags["hostname"] = s.Url.Host
|
||||
tags["hostname"] = s.URL.Host
|
||||
return tags
|
||||
}
|
||||
|
||||
|
|
@ -275,7 +275,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool,
|
|||
durationInSeconds = 1
|
||||
}
|
||||
data := NewMongodbData(
|
||||
NewStatLine(*s.lastResult, *result, s.Url.Host, true, durationInSeconds),
|
||||
NewStatLine(*s.lastResult, *result, s.URL.Host, true, durationInSeconds),
|
||||
s.getDefaultTags(),
|
||||
)
|
||||
data.AddDefaultStats()
|
||||
|
|
|
|||
|
|
@ -20,23 +20,23 @@ func init() {
|
|||
connect_url = os.Getenv("MONGODB_URL")
|
||||
if connect_url == "" {
|
||||
connect_url = "127.0.0.1:27017"
|
||||
server = &Server{Url: &url.URL{Host: connect_url}}
|
||||
server = &Server{URL: &url.URL{Host: connect_url}}
|
||||
} else {
|
||||
full_url, err := url.Parse(connect_url)
|
||||
if err != nil {
|
||||
log.Fatalf("Unable to parse URL (%s), %s\n", full_url, err.Error())
|
||||
}
|
||||
server = &Server{Url: full_url}
|
||||
server = &Server{URL: full_url}
|
||||
}
|
||||
}
|
||||
|
||||
func testSetup(m *testing.M) {
|
||||
var err error
|
||||
var dialAddrs []string
|
||||
if server.Url.User != nil {
|
||||
dialAddrs = []string{server.Url.String()}
|
||||
if server.URL.User != nil {
|
||||
dialAddrs = []string{server.URL.String()}
|
||||
} else {
|
||||
dialAddrs = []string{server.Url.Host}
|
||||
dialAddrs = []string{server.URL.Host}
|
||||
}
|
||||
dialInfo, err := mgo.ParseURL(dialAddrs[0])
|
||||
if err != nil {
|
||||
|
|
@ -49,7 +49,7 @@ func testSetup(m *testing.M) {
|
|||
log.Fatalf("Unable to connect to MongoDB, %s\n", err.Error())
|
||||
}
|
||||
server.Session = sess
|
||||
server.Session, _ = mgo.Dial(server.Url.Host)
|
||||
server.Session, _ = mgo.Dial(server.URL.Host)
|
||||
if err != nil {
|
||||
log.Fatalln(err.Error())
|
||||
}
|
||||
|
|
|
|||
|
|
@ -55,7 +55,7 @@ func (n *Nginx) Gather(acc telegraf.Accumulator) error {
|
|||
// Create an HTTP client that is re-used for each
|
||||
// collection interval
|
||||
if n.client == nil {
|
||||
client, err := n.createHttpClient()
|
||||
client, err := n.createHTTPClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -72,7 +72,7 @@ func (n *Nginx) Gather(acc telegraf.Accumulator) error {
|
|||
wg.Add(1)
|
||||
go func(addr *url.URL) {
|
||||
defer wg.Done()
|
||||
acc.AddError(n.gatherUrl(addr, acc))
|
||||
acc.AddError(n.gatherURL(addr, acc))
|
||||
}(addr)
|
||||
}
|
||||
|
||||
|
|
@ -80,7 +80,7 @@ func (n *Nginx) Gather(acc telegraf.Accumulator) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (n *Nginx) createHttpClient() (*http.Client, error) {
|
||||
func (n *Nginx) createHTTPClient() (*http.Client, error) {
|
||||
tlsCfg, err := n.ClientConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -100,7 +100,7 @@ func (n *Nginx) createHttpClient() (*http.Client, error) {
|
|||
return client, nil
|
||||
}
|
||||
|
||||
func (n *Nginx) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
|
||||
func (n *Nginx) gatherURL(addr *url.URL, acc telegraf.Accumulator) error {
|
||||
resp, err := n.client.Get(addr.String())
|
||||
if err != nil {
|
||||
return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
|
||||
|
|
|
|||
|
|
@ -61,16 +61,16 @@ func TestNginxGeneratesMetrics(t *testing.T) {
|
|||
Urls: []string{fmt.Sprintf("%s/tengine_status", ts.URL)},
|
||||
}
|
||||
|
||||
var acc_nginx testutil.Accumulator
|
||||
var acc_tengine testutil.Accumulator
|
||||
var accNginx testutil.Accumulator
|
||||
var accTengine testutil.Accumulator
|
||||
|
||||
err_nginx := acc_nginx.GatherError(n.Gather)
|
||||
err_tengine := acc_tengine.GatherError(nt.Gather)
|
||||
errNginx := accNginx.GatherError(n.Gather)
|
||||
errTengine := accTengine.GatherError(nt.Gather)
|
||||
|
||||
require.NoError(t, err_nginx)
|
||||
require.NoError(t, err_tengine)
|
||||
require.NoError(t, errNginx)
|
||||
require.NoError(t, errTengine)
|
||||
|
||||
fields_nginx := map[string]interface{}{
|
||||
fieldsNginx := map[string]interface{}{
|
||||
"active": uint64(585),
|
||||
"accepts": uint64(85340),
|
||||
"handled": uint64(85340),
|
||||
|
|
@ -80,7 +80,7 @@ func TestNginxGeneratesMetrics(t *testing.T) {
|
|||
"waiting": uint64(446),
|
||||
}
|
||||
|
||||
fields_tengine := map[string]interface{}{
|
||||
fieldsTengine := map[string]interface{}{
|
||||
"active": uint64(403),
|
||||
"accepts": uint64(853),
|
||||
"handled": uint64(8533),
|
||||
|
|
@ -108,6 +108,6 @@ func TestNginxGeneratesMetrics(t *testing.T) {
|
|||
}
|
||||
|
||||
tags := map[string]string{"server": host, "port": port}
|
||||
acc_nginx.AssertContainsTaggedFields(t, "nginx", fields_nginx, tags)
|
||||
acc_tengine.AssertContainsTaggedFields(t, "nginx", fields_tengine, tags)
|
||||
accNginx.AssertContainsTaggedFields(t, "nginx", fieldsNginx, tags)
|
||||
accTengine.AssertContainsTaggedFields(t, "nginx", fieldsTengine, tags)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -56,7 +56,7 @@ func (n *NginxPlus) Gather(acc telegraf.Accumulator) error {
|
|||
// collection interval
|
||||
|
||||
if n.client == nil {
|
||||
client, err := n.createHttpClient()
|
||||
client, err := n.createHTTPClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -73,7 +73,7 @@ func (n *NginxPlus) Gather(acc telegraf.Accumulator) error {
|
|||
wg.Add(1)
|
||||
go func(addr *url.URL) {
|
||||
defer wg.Done()
|
||||
acc.AddError(n.gatherUrl(addr, acc))
|
||||
acc.AddError(n.gatherURL(addr, acc))
|
||||
}(addr)
|
||||
}
|
||||
|
||||
|
|
@ -81,7 +81,7 @@ func (n *NginxPlus) Gather(acc telegraf.Accumulator) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (n *NginxPlus) createHttpClient() (*http.Client, error) {
|
||||
func (n *NginxPlus) createHTTPClient() (*http.Client, error) {
|
||||
if n.ResponseTimeout.Duration < time.Second {
|
||||
n.ResponseTimeout.Duration = time.Second * 5
|
||||
}
|
||||
|
|
@ -101,7 +101,7 @@ func (n *NginxPlus) createHttpClient() (*http.Client, error) {
|
|||
return client, nil
|
||||
}
|
||||
|
||||
func (n *NginxPlus) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
|
||||
func (n *NginxPlus) gatherURL(addr *url.URL, acc telegraf.Accumulator) error {
|
||||
resp, err := n.client.Get(addr.String())
|
||||
|
||||
if err != nil {
|
||||
|
|
@ -114,7 +114,7 @@ func (n *NginxPlus) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
|
|||
contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0]
|
||||
switch contentType {
|
||||
case "application/json":
|
||||
return gatherStatusUrl(bufio.NewReader(resp.Body), getTags(addr), acc)
|
||||
return gatherStatusURL(bufio.NewReader(resp.Body), getTags(addr), acc)
|
||||
default:
|
||||
return fmt.Errorf("%s returned unexpected content type %s", addr.String(), contentType)
|
||||
}
|
||||
|
|
@ -283,7 +283,7 @@ type Status struct {
|
|||
} `json:"stream"`
|
||||
}
|
||||
|
||||
func gatherStatusUrl(r *bufio.Reader, tags map[string]string, acc telegraf.Accumulator) error {
|
||||
func gatherStatusURL(r *bufio.Reader, tags map[string]string, acc telegraf.Accumulator) error {
|
||||
dec := json.NewDecoder(r)
|
||||
status := &Status{}
|
||||
if err := dec.Decode(status); err != nil {
|
||||
|
|
|
|||
|
|
@ -270,9 +270,9 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) {
|
|||
|
||||
var acc testutil.Accumulator
|
||||
|
||||
err_nginx := n.Gather(&acc)
|
||||
errNginx := n.Gather(&acc)
|
||||
|
||||
require.NoError(t, err_nginx)
|
||||
require.NoError(t, errNginx)
|
||||
|
||||
addr, err := url.Parse(ts.URL)
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@@ -13,9 +13,9 @@ import (
 	"github.com/influxdata/telegraf/plugins/inputs"
 )

-type NginxPlusApi struct {
+type NginxPlusAPI struct {
 	Urls []string `toml:"urls"`
-	ApiVersion int64 `toml:"api_version"`
+	APIVersion int64 `toml:"api_version"`
 	ResponseTimeout internal.Duration `toml:"response_timeout"`
 	tls.ClientConfig

@ -24,7 +24,7 @@ type NginxPlusApi struct {
|
|||
|
||||
const (
|
||||
// Default settings
|
||||
defaultApiVersion = 3
|
||||
defaultAPIVersion = 3
|
||||
|
||||
// Paths
|
||||
processesPath = "processes"
|
||||
|
|
@ -61,26 +61,26 @@ var sampleConfig = `
|
|||
# insecure_skip_verify = false
|
||||
`
|
||||
|
||||
func (n *NginxPlusApi) SampleConfig() string {
|
||||
func (n *NginxPlusAPI) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (n *NginxPlusApi) Description() string {
|
||||
func (n *NginxPlusAPI) Description() string {
|
||||
return "Read Nginx Plus Api documentation"
|
||||
}
|
||||
|
||||
func (n *NginxPlusApi) Gather(acc telegraf.Accumulator) error {
|
||||
func (n *NginxPlusAPI) Gather(acc telegraf.Accumulator) error {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Create an HTTP client that is re-used for each
|
||||
// collection interval
|
||||
|
||||
if n.ApiVersion == 0 {
|
||||
n.ApiVersion = defaultApiVersion
|
||||
if n.APIVersion == 0 {
|
||||
n.APIVersion = defaultAPIVersion
|
||||
}
|
||||
|
||||
if n.client == nil {
|
||||
client, err := n.createHttpClient()
|
||||
client, err := n.createHTTPClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -105,7 +105,7 @@ func (n *NginxPlusApi) Gather(acc telegraf.Accumulator) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (n *NginxPlusApi) createHttpClient() (*http.Client, error) {
|
||||
func (n *NginxPlusAPI) createHTTPClient() (*http.Client, error) {
|
||||
if n.ResponseTimeout.Duration < time.Second {
|
||||
n.ResponseTimeout.Duration = time.Second * 5
|
||||
}
|
||||
|
|
@ -127,6 +127,6 @@ func (n *NginxPlusApi) createHttpClient() (*http.Client, error) {
|
|||
|
||||
func init() {
|
||||
inputs.Add("nginx_plus_api", func() telegraf.Input {
|
||||
return &NginxPlusApi{}
|
||||
return &NginxPlusAPI{}
|
||||
})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -19,19 +19,19 @@ var (
|
|||
errNotFound = errors.New("not found")
|
||||
)
|
||||
|
||||
func (n *NginxPlusApi) gatherMetrics(addr *url.URL, acc telegraf.Accumulator) {
|
||||
func (n *NginxPlusAPI) gatherMetrics(addr *url.URL, acc telegraf.Accumulator) {
|
||||
addError(acc, n.gatherProcessesMetrics(addr, acc))
|
||||
addError(acc, n.gatherConnectionsMetrics(addr, acc))
|
||||
addError(acc, n.gatherSslMetrics(addr, acc))
|
||||
addError(acc, n.gatherHttpRequestsMetrics(addr, acc))
|
||||
addError(acc, n.gatherHttpServerZonesMetrics(addr, acc))
|
||||
addError(acc, n.gatherHttpUpstreamsMetrics(addr, acc))
|
||||
addError(acc, n.gatherHttpCachesMetrics(addr, acc))
|
||||
addError(acc, n.gatherHTTPRequestsMetrics(addr, acc))
|
||||
addError(acc, n.gatherHTTPServerZonesMetrics(addr, acc))
|
||||
addError(acc, n.gatherHTTPUpstreamsMetrics(addr, acc))
|
||||
addError(acc, n.gatherHTTPCachesMetrics(addr, acc))
|
||||
addError(acc, n.gatherStreamServerZonesMetrics(addr, acc))
|
||||
addError(acc, n.gatherStreamUpstreamsMetrics(addr, acc))
|
||||
|
||||
if n.ApiVersion >= 5 {
|
||||
addError(acc, n.gatherHttpLocationZonesMetrics(addr, acc))
|
||||
if n.APIVersion >= 5 {
|
||||
addError(acc, n.gatherHTTPLocationZonesMetrics(addr, acc))
|
||||
addError(acc, n.gatherResolverZonesMetrics(addr, acc))
|
||||
}
|
||||
}
|
||||
|
|
@ -48,8 +48,8 @@ func addError(acc telegraf.Accumulator, err error) {
|
|||
}
|
||||
}
|
||||
|
||||
func (n *NginxPlusApi) gatherUrl(addr *url.URL, path string) ([]byte, error) {
|
||||
url := fmt.Sprintf("%s/%d/%s", addr.String(), n.ApiVersion, path)
|
||||
func (n *NginxPlusAPI) gatherURL(addr *url.URL, path string) ([]byte, error) {
|
||||
url := fmt.Sprintf("%s/%d/%s", addr.String(), n.APIVersion, path)
|
||||
resp, err := n.client.Get(url)
|
||||
|
||||
if err != nil {
|
||||
|
|
@ -81,8 +81,8 @@ func (n *NginxPlusApi) gatherUrl(addr *url.URL, path string) ([]byte, error) {
|
|||
}
|
||||
}
|
||||
|
||||
func (n *NginxPlusApi) gatherProcessesMetrics(addr *url.URL, acc telegraf.Accumulator) error {
|
||||
body, err := n.gatherUrl(addr, processesPath)
|
||||
func (n *NginxPlusAPI) gatherProcessesMetrics(addr *url.URL, acc telegraf.Accumulator) error {
|
||||
body, err := n.gatherURL(addr, processesPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -104,8 +104,8 @@ func (n *NginxPlusApi) gatherProcessesMetrics(addr *url.URL, acc telegraf.Accumu
|
|||
return nil
|
||||
}
|
||||
|
||||
func (n *NginxPlusApi) gatherConnectionsMetrics(addr *url.URL, acc telegraf.Accumulator) error {
|
||||
body, err := n.gatherUrl(addr, connectionsPath)
|
||||
func (n *NginxPlusAPI) gatherConnectionsMetrics(addr *url.URL, acc telegraf.Accumulator) error {
|
||||
body, err := n.gatherURL(addr, connectionsPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -130,8 +130,8 @@ func (n *NginxPlusApi) gatherConnectionsMetrics(addr *url.URL, acc telegraf.Accu
|
|||
return nil
|
||||
}
|
||||
|
||||
func (n *NginxPlusApi) gatherSslMetrics(addr *url.URL, acc telegraf.Accumulator) error {
|
||||
body, err := n.gatherUrl(addr, sslPath)
|
||||
func (n *NginxPlusAPI) gatherSslMetrics(addr *url.URL, acc telegraf.Accumulator) error {
|
||||
body, err := n.gatherURL(addr, sslPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -155,13 +155,13 @@ func (n *NginxPlusApi) gatherSslMetrics(addr *url.URL, acc telegraf.Accumulator)
return nil
}
func (n *NginxPlusApi) gatherHttpRequestsMetrics(addr *url.URL, acc telegraf.Accumulator) error {
body, err := n.gatherUrl(addr, httpRequestsPath)
func (n *NginxPlusAPI) gatherHTTPRequestsMetrics(addr *url.URL, acc telegraf.Accumulator) error {
body, err := n.gatherURL(addr, httpRequestsPath)
if err != nil {
return err
}
var httpRequests = &HttpRequests{}
var httpRequests = &HTTPRequests{}
if err := json.Unmarshal(body, httpRequests); err != nil {
return err
@ -179,13 +179,13 @@ func (n *NginxPlusApi) gatherHttpRequestsMetrics(addr *url.URL, acc telegraf.Acc
return nil
}
func (n *NginxPlusApi) gatherHttpServerZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error {
body, err := n.gatherUrl(addr, httpServerZonesPath)
func (n *NginxPlusAPI) gatherHTTPServerZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error {
body, err := n.gatherURL(addr, httpServerZonesPath)
if err != nil {
return err
}
var httpServerZones HttpServerZones
var httpServerZones HTTPServerZones
if err := json.Unmarshal(body, &httpServerZones); err != nil {
return err
@ -227,13 +227,13 @@ func (n *NginxPlusApi) gatherHttpServerZonesMetrics(addr *url.URL, acc telegraf.
}
// Added in 5 API version
func (n *NginxPlusApi) gatherHttpLocationZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error {
body, err := n.gatherUrl(addr, httpLocationZonesPath)
func (n *NginxPlusAPI) gatherHTTPLocationZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error {
body, err := n.gatherURL(addr, httpLocationZonesPath)
if err != nil {
return err
}
var httpLocationZones HttpLocationZones
var httpLocationZones HTTPLocationZones
if err := json.Unmarshal(body, &httpLocationZones); err != nil {
return err
@ -273,13 +273,13 @@ func (n *NginxPlusApi) gatherHttpLocationZonesMetrics(addr *url.URL, acc telegra
return nil
}
func (n *NginxPlusApi) gatherHttpUpstreamsMetrics(addr *url.URL, acc telegraf.Accumulator) error {
body, err := n.gatherUrl(addr, httpUpstreamsPath)
func (n *NginxPlusAPI) gatherHTTPUpstreamsMetrics(addr *url.URL, acc telegraf.Accumulator) error {
body, err := n.gatherURL(addr, httpUpstreamsPath)
if err != nil {
return err
}
var httpUpstreams HttpUpstreams
var httpUpstreams HTTPUpstreams
if err := json.Unmarshal(body, &httpUpstreams); err != nil {
return err
@ -357,13 +357,13 @@ func (n *NginxPlusApi) gatherHttpUpstreamsMetrics(addr *url.URL, acc telegraf.Ac
return nil
}
func (n *NginxPlusApi) gatherHttpCachesMetrics(addr *url.URL, acc telegraf.Accumulator) error {
body, err := n.gatherUrl(addr, httpCachesPath)
func (n *NginxPlusAPI) gatherHTTPCachesMetrics(addr *url.URL, acc telegraf.Accumulator) error {
body, err := n.gatherURL(addr, httpCachesPath)
if err != nil {
return err
}
var httpCaches HttpCaches
var httpCaches HTTPCaches
if err := json.Unmarshal(body, &httpCaches); err != nil {
return err
@ -411,8 +411,8 @@ func (n *NginxPlusApi) gatherHttpCachesMetrics(addr *url.URL, acc telegraf.Accum
return nil
}
func (n *NginxPlusApi) gatherStreamServerZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error {
body, err := n.gatherUrl(addr, streamServerZonesPath)
func (n *NginxPlusAPI) gatherStreamServerZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error {
body, err := n.gatherURL(addr, streamServerZonesPath)
if err != nil {
return err
}
@ -447,8 +447,8 @@ func (n *NginxPlusApi) gatherStreamServerZonesMetrics(addr *url.URL, acc telegra
}
// Added in 5 API version
func (n *NginxPlusApi) gatherResolverZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error {
body, err := n.gatherUrl(addr, resolverZonesPath)
func (n *NginxPlusAPI) gatherResolverZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error {
body, err := n.gatherURL(addr, resolverZonesPath)
if err != nil {
return err
}
@ -490,8 +490,8 @@ func (n *NginxPlusApi) gatherResolverZonesMetrics(addr *url.URL, acc telegraf.Ac
return nil
}
func (n *NginxPlusApi) gatherStreamUpstreamsMetrics(addr *url.URL, acc telegraf.Accumulator) error {
body, err := n.gatherUrl(addr, streamUpstreamsPath)
func (n *NginxPlusAPI) gatherStreamUpstreamsMetrics(addr *url.URL, acc telegraf.Accumulator) error {
body, err := n.gatherURL(addr, streamUpstreamsPath)
if err != nil {
return err
}
@ -520,7 +520,7 @@ const streamServerZonesPayload = `
`
func TestGatherProcessesMetrics(t *testing.T) {
ts, n := prepareEndpoint(t, processesPath, defaultApiVersion, processesPayload)
ts, n := prepareEndpoint(t, processesPath, defaultAPIVersion, processesPayload)
defer ts.Close()
var acc testutil.Accumulator
@ -541,7 +541,7 @@ func TestGatherProcessesMetrics(t *testing.T) {
}
func TestGatherConnectionsMetrics(t *testing.T) {
ts, n := prepareEndpoint(t, connectionsPath, defaultApiVersion, connectionsPayload)
ts, n := prepareEndpoint(t, connectionsPath, defaultAPIVersion, connectionsPayload)
defer ts.Close()
var acc testutil.Accumulator
@ -565,7 +565,7 @@ func TestGatherConnectionsMetrics(t *testing.T) {
}
func TestGatherSslMetrics(t *testing.T) {
ts, n := prepareEndpoint(t, sslPath, defaultApiVersion, sslPayload)
ts, n := prepareEndpoint(t, sslPath, defaultAPIVersion, sslPayload)
defer ts.Close()
var acc testutil.Accumulator
@ -588,13 +588,13 @@ func TestGatherSslMetrics(t *testing.T) {
}
func TestGatherHttpRequestsMetrics(t *testing.T) {
ts, n := prepareEndpoint(t, httpRequestsPath, defaultApiVersion, httpRequestsPayload)
ts, n := prepareEndpoint(t, httpRequestsPath, defaultAPIVersion, httpRequestsPayload)
defer ts.Close()
var acc testutil.Accumulator
addr, host, port := prepareAddr(t, ts)
require.NoError(t, n.gatherHttpRequestsMetrics(addr, &acc))
require.NoError(t, n.gatherHTTPRequestsMetrics(addr, &acc))
acc.AssertContainsTaggedFields(
t,
@ -610,13 +610,13 @@ func TestGatherHttpRequestsMetrics(t *testing.T) {
}
func TestGatherHttpServerZonesMetrics(t *testing.T) {
ts, n := prepareEndpoint(t, httpServerZonesPath, defaultApiVersion, httpServerZonesPayload)
ts, n := prepareEndpoint(t, httpServerZonesPath, defaultAPIVersion, httpServerZonesPayload)
defer ts.Close()
var acc testutil.Accumulator
addr, host, port := prepareAddr(t, ts)
require.NoError(t, n.gatherHttpServerZonesMetrics(addr, &acc))
require.NoError(t, n.gatherHTTPServerZonesMetrics(addr, &acc))
acc.AssertContainsTaggedFields(
t,
@ -664,13 +664,13 @@ func TestGatherHttpServerZonesMetrics(t *testing.T) {
}
func TestGatherHttpLocationZonesMetrics(t *testing.T) {
ts, n := prepareEndpoint(t, httpLocationZonesPath, defaultApiVersion, httpLocationZonesPayload)
ts, n := prepareEndpoint(t, httpLocationZonesPath, defaultAPIVersion, httpLocationZonesPayload)
defer ts.Close()
var acc testutil.Accumulator
addr, host, port := prepareAddr(t, ts)
require.NoError(t, n.gatherHttpLocationZonesMetrics(addr, &acc))
require.NoError(t, n.gatherHTTPLocationZonesMetrics(addr, &acc))
acc.AssertContainsTaggedFields(
t,
@ -716,13 +716,13 @@ func TestGatherHttpLocationZonesMetrics(t *testing.T) {
}
func TestGatherHttpUpstreamsMetrics(t *testing.T) {
ts, n := prepareEndpoint(t, httpUpstreamsPath, defaultApiVersion, httpUpstreamsPayload)
ts, n := prepareEndpoint(t, httpUpstreamsPath, defaultAPIVersion, httpUpstreamsPayload)
defer ts.Close()
var acc testutil.Accumulator
addr, host, port := prepareAddr(t, ts)
require.NoError(t, n.gatherHttpUpstreamsMetrics(addr, &acc))
require.NoError(t, n.gatherHTTPUpstreamsMetrics(addr, &acc))
acc.AssertContainsTaggedFields(
t,
@ -888,13 +888,13 @@ func TestGatherHttpUpstreamsMetrics(t *testing.T) {
}
func TestGatherHttpCachesMetrics(t *testing.T) {
ts, n := prepareEndpoint(t, httpCachesPath, defaultApiVersion, httpCachesPayload)
ts, n := prepareEndpoint(t, httpCachesPath, defaultAPIVersion, httpCachesPayload)
defer ts.Close()
var acc testutil.Accumulator
addr, host, port := prepareAddr(t, ts)
require.NoError(t, n.gatherHttpCachesMetrics(addr, &acc))
require.NoError(t, n.gatherHTTPCachesMetrics(addr, &acc))
acc.AssertContainsTaggedFields(
t,
@ -966,7 +966,7 @@ func TestGatherHttpCachesMetrics(t *testing.T) {
}
func TestGatherResolverZonesMetrics(t *testing.T) {
ts, n := prepareEndpoint(t, resolverZonesPath, defaultApiVersion, resolverZonesPayload)
ts, n := prepareEndpoint(t, resolverZonesPath, defaultAPIVersion, resolverZonesPayload)
defer ts.Close()
var acc testutil.Accumulator
@ -1020,7 +1020,7 @@ func TestGatherResolverZonesMetrics(t *testing.T) {
}
func TestGatherStreamUpstreams(t *testing.T) {
ts, n := prepareEndpoint(t, streamUpstreamsPath, defaultApiVersion, streamUpstreamsPayload)
ts, n := prepareEndpoint(t, streamUpstreamsPath, defaultAPIVersion, streamUpstreamsPayload)
defer ts.Close()
var acc testutil.Accumulator
@ -1163,7 +1163,7 @@ func TestGatherStreamUpstreams(t *testing.T) {
}
func TestGatherStreamServerZonesMetrics(t *testing.T) {
ts, n := prepareEndpoint(t, streamServerZonesPath, defaultApiVersion, streamServerZonesPayload)
ts, n := prepareEndpoint(t, streamServerZonesPath, defaultAPIVersion, streamServerZonesPayload)
defer ts.Close()
var acc testutil.Accumulator
@ -1208,7 +1208,7 @@ func TestUnavailableEndpoints(t *testing.T) {
}))
defer ts.Close()
n := &NginxPlusApi{
n := &NginxPlusAPI{
client: ts.Client(),
}
@ -1228,7 +1228,7 @@ func TestServerError(t *testing.T) {
}))
defer ts.Close()
n := &NginxPlusApi{
n := &NginxPlusAPI{
client: ts.Client(),
}
@ -1249,7 +1249,7 @@ func TestMalformedJSON(t *testing.T) {
}))
defer ts.Close()
n := &NginxPlusApi{
n := &NginxPlusAPI{
client: ts.Client(),
}
@ -1269,7 +1269,7 @@ func TestUnknownContentType(t *testing.T) {
}))
defer ts.Close()
n := &NginxPlusApi{
n := &NginxPlusAPI{
client: ts.Client(),
}
@ -1306,7 +1306,7 @@ func prepareAddr(t *testing.T, ts *httptest.Server) (*url.URL, string, string) {
return addr, host, port
}
func prepareEndpoint(t *testing.T, path string, apiVersion int64, payload string) (*httptest.Server, *NginxPlusApi) {
func prepareEndpoint(t *testing.T, path string, apiVersion int64, payload string) (*httptest.Server, *NginxPlusAPI) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var rsp string
@ -1320,12 +1320,12 @@ func prepareEndpoint(t *testing.T, path string, apiVersion int64, payload string
fmt.Fprintln(w, rsp)
}))
n := &NginxPlusApi{
n := &NginxPlusAPI{
Urls: []string{fmt.Sprintf("%s/api", ts.URL)},
ApiVersion: apiVersion,
APIVersion: apiVersion,
}
client, err := n.createHttpClient()
client, err := n.createHTTPClient()
if err != nil {
t.Fatal(err)
}
@ -35,7 +35,7 @@ type ResolverZones map[string]struct {
} `json:"responses"`
}
type HttpRequests struct {
type HTTPRequests struct {
Total int64 `json:"total"`
Current int64 `json:"current"`
}
@ -49,7 +49,7 @@ type ResponseStats struct {
Total int64 `json:"total"`
}
type HttpServerZones map[string]struct {
type HTTPServerZones map[string]struct {
Processing int `json:"processing"`
Requests int64 `json:"requests"`
Responses ResponseStats `json:"responses"`
@ -58,7 +58,7 @@ type HttpServerZones map[string]struct {
Sent int64 `json:"sent"`
}
type HttpLocationZones map[string]struct {
type HTTPLocationZones map[string]struct {
Requests int64 `json:"requests"`
Responses ResponseStats `json:"responses"`
Discarded *int64 `json:"discarded"` // added in version 6
@ -73,7 +73,7 @@ type HealthCheckStats struct {
LastPassed *bool `json:"last_passed"`
}
type HttpUpstreams map[string]struct {
type HTTPUpstreams map[string]struct {
Peers []struct {
ID *int `json:"id"` // added in version 3
Server string `json:"server"`
@ -145,7 +145,7 @@ type ExtendedHitStats struct {
BytesWritten int64 `json:"bytes_written"`
}
type HttpCaches map[string]struct { // added in version 2
type HTTPCaches map[string]struct { // added in version 2
Size int64 `json:"size"`
MaxSize int64 `json:"max_size"`
Cold bool `json:"cold"`
@ -104,8 +104,8 @@ type NginxUpstreamCheckServer struct {
Port uint16 `json:"port"`
}
// createHttpClient create a clients to access API
func (check *NginxUpstreamCheck) createHttpClient() (*http.Client, error) {
// createHTTPClient create a clients to access API
func (check *NginxUpstreamCheck) createHTTPClient() (*http.Client, error) {
tlsConfig, err := check.ClientConfig.TLSConfig()
if err != nil {
return nil, err
@ -121,8 +121,8 @@ func (check *NginxUpstreamCheck) createHttpClient() (*http.Client, error) {
return client, nil
}
// gatherJsonData query the data source and parse the response JSON
func (check *NginxUpstreamCheck) gatherJsonData(url string, value interface{}) error {
// gatherJSONData query the data source and parse the response JSON
func (check *NginxUpstreamCheck) gatherJSONData(url string, value interface{}) error {
var method string
if check.Method != "" {
@ -168,7 +168,7 @@ func (check *NginxUpstreamCheck) gatherJsonData(url string, value interface{}) e
func (check *NginxUpstreamCheck) Gather(accumulator telegraf.Accumulator) error {
if check.client == nil {
client, err := check.createHttpClient()
client, err := check.createHTTPClient()
if err != nil {
return err
@ -193,7 +193,7 @@ func (check *NginxUpstreamCheck) Gather(accumulator telegraf.Accumulator) error
func (check *NginxUpstreamCheck) gatherStatusData(url string, accumulator telegraf.Accumulator) error {
checkData := &NginxUpstreamCheckData{}
err := check.gatherJsonData(url, checkData)
err := check.gatherJSONData(url, checkData)
if err != nil {
return err
}
@ -128,14 +128,14 @@ func (s *NSD) Gather(acc telegraf.Accumulator) error {
if strings.HasPrefix(stat, "server") {
statTokens := strings.Split(stat, ".")
if len(statTokens) > 1 {
serverId := strings.TrimPrefix(statTokens[0], "server")
if _, err := strconv.Atoi(serverId); err == nil {
serverID := strings.TrimPrefix(statTokens[0], "server")
if _, err := strconv.Atoi(serverID); err == nil {
serverTokens := statTokens[1:]
field := strings.Join(serverTokens[:], "_")
if fieldsServers[serverId] == nil {
fieldsServers[serverId] = make(map[string]interface{})
if fieldsServers[serverID] == nil {
fieldsServers[serverID] = make(map[string]interface{})
}
fieldsServers[serverId][field] = fieldValue
fieldsServers[serverID][field] = fieldValue
}
}
} else {
@ -145,8 +145,8 @@ func (s *NSD) Gather(acc telegraf.Accumulator) error {
}
acc.AddFields("nsd", fields, nil)
for thisServerId, thisServerFields := range fieldsServers {
thisServerTag := map[string]string{"server": thisServerId}
for thisServerID, thisServerFields := range fieldsServers {
thisServerTag := map[string]string{"server": thisServerID}
acc.AddFields("nsd_servers", thisServerFields, thisServerTag)
}
@ -82,7 +82,7 @@ func (n *NSQ) Gather(acc telegraf.Accumulator) error {
var err error
if n.httpClient == nil {
n.httpClient, err = n.getHttpClient()
n.httpClient, err = n.getHTTPClient()
if err != nil {
return err
}
@ -101,7 +101,7 @@ func (n *NSQ) Gather(acc telegraf.Accumulator) error {
return nil
}
func (n *NSQ) getHttpClient() (*http.Client, error) {
func (n *NSQ) getHTTPClient() (*http.Client, error) {
tlsConfig, err := n.ClientConfig.TLSConfig()
if err != nil {
return nil, err
@ -18,18 +18,18 @@ var (
// default file paths
const (
NET_NETSTAT = "/net/netstat"
NET_SNMP = "/net/snmp"
NET_SNMP6 = "/net/snmp6"
NET_PROC = "/proc"
NetNetstat = "/net/netstat"
NetSnmp = "/net/snmp"
NetSnmp6 = "/net/snmp6"
NetProc = "/proc"
)
// env variable names
const (
ENV_NETSTAT = "PROC_NET_NETSTAT"
ENV_SNMP = "PROC_NET_SNMP"
ENV_SNMP6 = "PROC_NET_SNMP6"
ENV_ROOT = "PROC_ROOT"
EnvNetstat = "PROC_NET_NETSTAT"
EnvSnmp = "PROC_NET_SNMP"
EnvSnmp6 = "PROC_NET_SNMP6"
EnvRoot = "PROC_ROOT"
)
type Nstat struct {
@ -136,13 +136,13 @@ func (ns *Nstat) gatherSNMP6(data []byte, acc telegraf.Accumulator) error {
// if it is empty then try read from env variables
func (ns *Nstat) loadPaths() {
if ns.ProcNetNetstat == "" {
ns.ProcNetNetstat = proc(ENV_NETSTAT, NET_NETSTAT)
ns.ProcNetNetstat = proc(EnvNetstat, NetNetstat)
}
if ns.ProcNetSNMP == "" {
ns.ProcNetSNMP = proc(ENV_SNMP, NET_SNMP)
ns.ProcNetSNMP = proc(EnvSnmp, NetSnmp)
}
if ns.ProcNetSNMP6 == "" {
ns.ProcNetSNMP6 = proc(ENV_SNMP6, NET_SNMP6)
ns.ProcNetSNMP6 = proc(EnvSnmp6, NetSnmp6)
}
}
@ -220,9 +220,9 @@ func proc(env, path string) string {
return p
}
// try to read root path, or use default root path
root := os.Getenv(ENV_ROOT)
root := os.Getenv(EnvRoot)
if root == "" {
root = NET_PROC
root = NetProc
}
return root + path
}
@ -77,8 +77,8 @@ func opensmtpdRunner(cmdName string, Timeout internal.Duration, UseSudo bool) (*
// All the dots in stat name will replaced by underscores. Histogram statistics will not be collected.
func (s *Opensmtpd) Gather(acc telegraf.Accumulator) error {
// Always exclude uptime.human statistics
stat_excluded := []string{"uptime.human"}
filter_excluded, err := filter.Compile(stat_excluded)
statExcluded := []string{"uptime.human"}
filterExcluded, err := filter.Compile(statExcluded)
if err != nil {
return err
}
@ -104,7 +104,7 @@ func (s *Opensmtpd) Gather(acc telegraf.Accumulator) error {
value := cols[1]
// Filter value
if filter_excluded.Match(stat) {
if filterExcluded.Match(stat) {
continue
}
@ -12,7 +12,7 @@ import (
var TestTimeout = internal.Duration{Duration: time.Second}
func SmtpCTL(output string, Timeout internal.Duration, useSudo bool) func(string, internal.Duration, bool) (*bytes.Buffer, error) {
func SMTPCTL(output string, Timeout internal.Duration, useSudo bool) func(string, internal.Duration, bool) (*bytes.Buffer, error) {
return func(string, internal.Duration, bool) (*bytes.Buffer, error) {
return bytes.NewBuffer([]byte(output)), nil
}
@ -21,7 +21,7 @@ func SmtpCTL(output string, Timeout internal.Duration, useSudo bool) func(string
func TestFilterSomeStats(t *testing.T) {
acc := &testutil.Accumulator{}
v := &Opensmtpd{
run: SmtpCTL(fullOutput, TestTimeout, false),
run: SMTPCTL(fullOutput, TestTimeout, false),
}
err := v.Gather(acc)
|
|
@ -21,25 +21,25 @@ const (
|
|||
// https://openweathermap.org/current#severalid
|
||||
// Call for several city IDs
|
||||
// The limit of locations is 20.
|
||||
owmRequestSeveralCityId int = 20
|
||||
owmRequestSeveralCityID int = 20
|
||||
|
||||
defaultBaseUrl = "https://api.openweathermap.org/"
|
||||
defaultBaseURL = "https://api.openweathermap.org/"
|
||||
defaultResponseTimeout time.Duration = time.Second * 5
|
||||
defaultUnits string = "metric"
|
||||
defaultLang string = "en"
|
||||
)
|
||||
|
||||
type OpenWeatherMap struct {
|
||||
AppId string `toml:"app_id"`
|
||||
CityId []string `toml:"city_id"`
|
||||
AppID string `toml:"app_id"`
|
||||
CityID []string `toml:"city_id"`
|
||||
Lang string `toml:"lang"`
|
||||
Fetch []string `toml:"fetch"`
|
||||
BaseUrl string `toml:"base_url"`
|
||||
BaseURL string `toml:"base_url"`
|
||||
ResponseTimeout internal.Duration `toml:"response_timeout"`
|
||||
Units string `toml:"units"`
|
||||
|
||||
client *http.Client
|
||||
baseUrl *url.URL
|
||||
baseURL *url.URL
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
|
|
@ -87,12 +87,12 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error {
|
|||
|
||||
for _, fetch := range n.Fetch {
|
||||
if fetch == "forecast" {
|
||||
for _, city := range n.CityId {
|
||||
for _, city := range n.CityID {
|
||||
addr := n.formatURL("/data/2.5/forecast", city)
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
status, err := n.gatherUrl(addr)
|
||||
status, err := n.gatherURL(addr)
|
||||
if err != nil {
|
||||
acc.AddError(err)
|
||||
return
|
||||
|
|
@ -103,10 +103,10 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error {
|
|||
}
|
||||
} else if fetch == "weather" {
|
||||
j := 0
|
||||
for j < len(n.CityId) {
|
||||
for j < len(n.CityID) {
|
||||
strs = make([]string, 0)
|
||||
for i := 0; j < len(n.CityId) && i < owmRequestSeveralCityId; i++ {
|
||||
strs = append(strs, n.CityId[j])
|
||||
for i := 0; j < len(n.CityID) && i < owmRequestSeveralCityID; i++ {
|
||||
strs = append(strs, n.CityID[j])
|
||||
j++
|
||||
}
|
||||
cities := strings.Join(strs, ",")
|
||||
|
|
@ -115,7 +115,7 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error {
|
|||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
status, err := n.gatherUrl(addr)
|
||||
status, err := n.gatherURL(addr)
|
||||
if err != nil {
|
||||
acc.AddError(err)
|
||||
return
|
||||
|
|
@ -132,7 +132,7 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (n *OpenWeatherMap) createHttpClient() (*http.Client, error) {
|
||||
func (n *OpenWeatherMap) createHTTPClient() (*http.Client, error) {
|
||||
if n.ResponseTimeout.Duration < time.Second {
|
||||
n.ResponseTimeout.Duration = defaultResponseTimeout
|
||||
}
|
||||
|
|
@ -145,7 +145,7 @@ func (n *OpenWeatherMap) createHttpClient() (*http.Client, error) {
|
|||
return client, nil
|
||||
}
|
||||
|
||||
func (n *OpenWeatherMap) gatherUrl(addr string) (*Status, error) {
|
||||
func (n *OpenWeatherMap) gatherURL(addr string) (*Status, error) {
|
||||
resp, err := n.client.Get(addr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error making HTTP request to %s: %s", addr, err)
|
||||
|
|
@ -165,7 +165,7 @@ func (n *OpenWeatherMap) gatherUrl(addr string) (*Status, error) {
|
|||
return nil, fmt.Errorf("%s returned unexpected content type %s", addr, mediaType)
|
||||
}
|
||||
|
||||
return gatherWeatherUrl(resp.Body)
|
||||
return gatherWeatherURL(resp.Body)
|
||||
}
|
||||
|
||||
type WeatherEntry struct {
|
||||
|
|
@ -191,7 +191,7 @@ type WeatherEntry struct {
|
|||
Deg float64 `json:"deg"`
|
||||
Speed float64 `json:"speed"`
|
||||
} `json:"wind"`
|
||||
Id int64 `json:"id"`
|
||||
ID int64 `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Coord struct {
|
||||
Lat float64 `json:"lat"`
|
||||
|
|
@ -213,13 +213,13 @@ type Status struct {
|
|||
Lon float64 `json:"lon"`
|
||||
} `json:"coord"`
|
||||
Country string `json:"country"`
|
||||
Id int64 `json:"id"`
|
||||
ID int64 `json:"id"`
|
||||
Name string `json:"name"`
|
||||
} `json:"city"`
|
||||
List []WeatherEntry `json:"list"`
|
||||
}
|
||||
|
||||
func gatherWeatherUrl(r io.Reader) (*Status, error) {
|
||||
func gatherWeatherURL(r io.Reader) (*Status, error) {
|
||||
dec := json.NewDecoder(r)
|
||||
status := &Status{}
|
||||
if err := dec.Decode(status); err != nil {
|
||||
|
|
@ -253,7 +253,7 @@ func gatherWeather(acc telegraf.Accumulator, status *Status) {
|
|||
}
|
||||
tags := map[string]string{
|
||||
"city": e.Name,
|
||||
"city_id": strconv.FormatInt(e.Id, 10),
|
||||
"city_id": strconv.FormatInt(e.ID, 10),
|
||||
"country": e.Sys.Country,
|
||||
"forecast": "*",
|
||||
}
|
||||
|
|
@ -271,7 +271,7 @@ func gatherWeather(acc telegraf.Accumulator, status *Status) {
|
|||
|
||||
func gatherForecast(acc telegraf.Accumulator, status *Status) {
|
||||
tags := map[string]string{
|
||||
"city_id": strconv.FormatInt(status.City.Id, 10),
|
||||
"city_id": strconv.FormatInt(status.City.ID, 10),
|
||||
"forecast": "*",
|
||||
"city": status.City.Name,
|
||||
"country": status.City.Country,
|
||||
|
|
@ -305,21 +305,21 @@ func init() {
|
|||
}
|
||||
return &OpenWeatherMap{
|
||||
ResponseTimeout: tmout,
|
||||
BaseUrl: defaultBaseUrl,
|
||||
BaseURL: defaultBaseURL,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (n *OpenWeatherMap) Init() error {
|
||||
var err error
|
||||
n.baseUrl, err = url.Parse(n.BaseUrl)
|
||||
n.baseURL, err = url.Parse(n.BaseURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create an HTTP client that is re-used for each
|
||||
// collection interval
|
||||
n.client, err = n.createHttpClient()
|
||||
n.client, err = n.createHTTPClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -349,7 +349,7 @@ func (n *OpenWeatherMap) Init() error {
|
|||
func (n *OpenWeatherMap) formatURL(path string, city string) string {
|
||||
v := url.Values{
|
||||
"id": []string{city},
|
||||
"APPID": []string{n.AppId},
|
||||
"APPID": []string{n.AppID},
|
||||
"lang": []string{n.Lang},
|
||||
"units": []string{n.Units},
|
||||
}
|
||||
|
|
@ -359,5 +359,5 @@ func (n *OpenWeatherMap) formatURL(path string, city string) string {
|
|||
RawQuery: v.Encode(),
|
||||
}
|
||||
|
||||
return n.baseUrl.ResolveReference(relative).String()
|
||||
return n.baseURL.ResolveReference(relative).String()
|
||||
}
|
|
@ -416,9 +416,9 @@ func TestForecastGeneratesMetrics(t *testing.T) {
|
|||
defer ts.Close()
|
||||
|
||||
n := &OpenWeatherMap{
|
||||
BaseUrl: ts.URL,
|
||||
AppId: "noappid",
|
||||
CityId: []string{"2988507"},
|
||||
BaseURL: ts.URL,
|
||||
AppID: "noappid",
|
||||
CityID: []string{"2988507"},
|
||||
Fetch: []string{"weather", "forecast"},
|
||||
Units: "metric",
|
||||
}
|
||||
|
|
@ -500,9 +500,9 @@ func TestWeatherGeneratesMetrics(t *testing.T) {
|
|||
defer ts.Close()
|
||||
|
||||
n := &OpenWeatherMap{
|
||||
BaseUrl: ts.URL,
|
||||
AppId: "noappid",
|
||||
CityId: []string{"2988507"},
|
||||
BaseURL: ts.URL,
|
||||
AppID: "noappid",
|
||||
CityID: []string{"2988507"},
|
||||
Fetch: []string{"weather"},
|
||||
Units: "metric",
|
||||
}
|
||||
|
|
@ -560,9 +560,9 @@ func TestRainMetrics(t *testing.T) {
|
|||
defer ts.Close()
|
||||
|
||||
n := &OpenWeatherMap{
|
||||
BaseUrl: ts.URL,
|
||||
AppId: "noappid",
|
||||
CityId: []string{"111", "222", "333", "444"},
|
||||
BaseURL: ts.URL,
|
||||
AppID: "noappid",
|
||||
CityID: []string{"111", "222", "333", "444"},
|
||||
Fetch: []string{"weather"},
|
||||
Units: "metric",
|
||||
}
|
||||
|
|
@ -703,9 +703,9 @@ func TestBatchWeatherGeneratesMetrics(t *testing.T) {
|
|||
defer ts.Close()
|
||||
|
||||
n := &OpenWeatherMap{
|
||||
BaseUrl: ts.URL,
|
||||
AppId: "noappid",
|
||||
CityId: []string{"524901", "703448", "2643743"},
|
||||
BaseURL: ts.URL,
|
||||
AppID: "noappid",
|
||||
CityID: []string{"524901", "703448", "2643743"},
|
||||
Fetch: []string{"weather"},
|
||||
Units: "metric",
|
||||
}
|
||||
|
|
@ -803,10 +803,10 @@ func TestBatchWeatherGeneratesMetrics(t *testing.T) {
|
|||
|
||||
func TestFormatURL(t *testing.T) {
|
||||
n := &OpenWeatherMap{
|
||||
AppId: "appid",
|
||||
AppID: "appid",
|
||||
Units: "units",
|
||||
Lang: "lang",
|
||||
BaseUrl: "http://foo.com",
|
||||
BaseURL: "http://foo.com",
|
||||
}
|
||||
n.Init()
|
|
@ -75,14 +75,14 @@ type process struct {
|
|||
LifeStatus string `xml:"life_status"`
|
||||
Enabled string `xml:"enabled"`
|
||||
HasMetrics bool `xml:"has_metrics"`
|
||||
Cpu int64 `xml:"cpu"`
|
||||
CPU int64 `xml:"cpu"`
|
||||
Rss int64 `xml:"rss"`
|
||||
Pss int64 `xml:"pss"`
|
||||
PrivateDirty int64 `xml:"private_dirty"`
|
||||
Swap int64 `xml:"swap"`
|
||||
RealMemory int64 `xml:"real_memory"`
|
||||
Vmsize int64 `xml:"vmsize"`
|
||||
ProcessGroupId string `xml:"process_group_id"`
|
||||
ProcessGroupID string `xml:"process_group_id"`
|
||||
}
|
||||
|
||||
func (p *process) getUptime() int64 {
|
||||
|
|
@ -211,7 +211,7 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error {
|
|||
"pid": fmt.Sprintf("%d", process.Pid),
|
||||
"code_revision": process.CodeRevision,
|
||||
"life_status": process.LifeStatus,
|
||||
"process_group_id": process.ProcessGroupId,
|
||||
"process_group_id": process.ProcessGroupID,
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"concurrency": process.Concurrency,
|
||||
|
|
@ -223,7 +223,7 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error {
|
|||
"spawn_end_time": process.SpawnEndTime,
|
||||
"last_used": process.LastUsed,
|
||||
"uptime": process.getUptime(),
|
||||
"cpu": process.Cpu,
|
||||
"cpu": process.CPU,
|
||||
"rss": process.Rss,
|
||||
"pss": process.Pss,
|
||||
"private_dirty": process.PrivateDirty,
|
|
@ -24,16 +24,16 @@ import (
|
|||
// it's converted to an http.Request.
|
||||
type request struct {
|
||||
pw *io.PipeWriter
|
||||
reqId uint16
|
||||
reqID uint16
|
||||
params map[string]string
|
||||
buf [1024]byte
|
||||
rawParams []byte
|
||||
keepConn bool
|
||||
}
|
||||
|
||||
func newRequest(reqId uint16, flags uint8) *request {
|
||||
func newRequest(reqID uint16, flags uint8) *request {
|
||||
r := &request{
|
||||
reqId: reqId,
|
||||
reqID: reqID,
|
||||
params: map[string]string{},
|
||||
keepConn: flags&flagKeepConn != 0,
|
||||
}
|
||||
|
|
@ -79,7 +79,7 @@ func newResponse(c *child, req *request) *response {
|
|||
return &response{
|
||||
req: req,
|
||||
header: http.Header{},
|
||||
w: newWriter(c.conn, typeStdout, req.reqId),
|
||||
w: newWriter(c.conn, typeStdout, req.reqID),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -173,7 +173,7 @@ var ErrConnClosed = errors.New("fcgi: connection to web server closed")
|
|||
|
||||
func (c *child) handleRecord(rec *record) error {
|
||||
c.mu.Lock()
|
||||
req, ok := c.requests[rec.h.Id]
|
||||
req, ok := c.requests[rec.h.ID]
|
||||
c.mu.Unlock()
|
||||
if !ok && rec.h.Type != typeBeginRequest && rec.h.Type != typeGetValues {
|
||||
// The spec says to ignore unknown request IDs.
|
||||
|
|
@ -193,12 +193,12 @@ func (c *child) handleRecord(rec *record) error {
|
|||
return err
|
||||
}
|
||||
if br.role != roleResponder {
|
||||
c.conn.writeEndRequest(rec.h.Id, 0, statusUnknownRole)
|
||||
c.conn.writeEndRequest(rec.h.ID, 0, statusUnknownRole)
|
||||
return nil
|
||||
}
|
||||
req = newRequest(rec.h.Id, br.flags)
|
||||
req = newRequest(rec.h.ID, br.flags)
|
||||
c.mu.Lock()
|
||||
c.requests[rec.h.Id] = req
|
||||
c.requests[rec.h.ID] = req
|
||||
c.mu.Unlock()
|
||||
return nil
|
||||
case typeParams:
|
||||
|
|
@ -240,9 +240,9 @@ func (c *child) handleRecord(rec *record) error {
|
|||
return nil
|
||||
case typeAbortRequest:
|
||||
c.mu.Lock()
|
||||
delete(c.requests, rec.h.Id)
|
||||
delete(c.requests, rec.h.ID)
|
||||
c.mu.Unlock()
|
||||
c.conn.writeEndRequest(rec.h.Id, 0, statusRequestComplete)
|
||||
c.conn.writeEndRequest(rec.h.ID, 0, statusRequestComplete)
|
||||
if req.pw != nil {
|
||||
req.pw.CloseWithError(ErrRequestAborted)
|
||||
}
|
||||
|
|
@ -265,16 +265,16 @@ func (c *child) serveRequest(req *request, body io.ReadCloser) {
|
|||
if err != nil {
|
||||
// there was an error reading the request
|
||||
r.WriteHeader(http.StatusInternalServerError)
|
||||
c.conn.writeRecord(typeStderr, req.reqId, []byte(err.Error()))
|
||||
c.conn.writeRecord(typeStderr, req.reqID, []byte(err.Error()))
|
||||
} else {
|
||||
httpReq.Body = body
|
||||
c.handler.ServeHTTP(r, httpReq)
|
||||
}
|
||||
r.Close()
|
||||
c.mu.Lock()
|
||||
delete(c.requests, req.reqId)
|
||||
delete(c.requests, req.reqID)
|
||||
c.mu.Unlock()
|
||||
c.conn.writeEndRequest(req.reqId, 0, statusRequestComplete)
|
||||
c.conn.writeEndRequest(req.reqID, 0, statusRequestComplete)
|
||||
|
||||
// Consume the entire body, so the host isn't still writing to
|
||||
// us when we close the socket below in the !keepConn case,
|
|
@ -63,7 +63,7 @@ const headerLen = 8
|
|||
type header struct {
|
||||
Version uint8
|
||||
Type recType
|
||||
Id uint16
|
||||
ID uint16
|
||||
ContentLength uint16
|
||||
PaddingLength uint8
|
||||
Reserved uint8
|
||||
|
|
@ -88,10 +88,10 @@ func (br *beginRequest) read(content []byte) error {
|
|||
// not synchronized because we don't care what the contents are
|
||||
var pad [maxPad]byte
|
||||
|
||||
func (h *header) init(recType recType, reqId uint16, contentLength int) {
|
||||
func (h *header) init(recType recType, reqID uint16, contentLength int) {
|
||||
h.Version = 1
|
||||
h.Type = recType
|
||||
h.Id = reqId
|
||||
h.ID = reqID
|
||||
h.ContentLength = uint16(contentLength)
|
||||
h.PaddingLength = uint8(-contentLength & 7)
|
||||
}
|
||||
|
|
@ -140,11 +140,11 @@ func (rec *record) content() []byte {
|
|||
}
|
||||
|
||||
// writeRecord writes and sends a single record.
|
||||
func (c *conn) writeRecord(recType recType, reqId uint16, b []byte) error {
|
||||
func (c *conn) writeRecord(recType recType, reqID uint16, b []byte) error {
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
c.buf.Reset()
|
||||
c.h.init(recType, reqId, len(b))
|
||||
c.h.init(recType, reqID, len(b))
|
||||
if err := binary.Write(&c.buf, binary.BigEndian, c.h); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -158,20 +158,20 @@ func (c *conn) writeRecord(recType recType, reqId uint16, b []byte) error {
|
|||
return err
|
||||
}
|
||||
|
||||
func (c *conn) writeBeginRequest(reqId uint16, role uint16, flags uint8) error {
|
||||
func (c *conn) writeBeginRequest(reqID uint16, role uint16, flags uint8) error {
|
||||
b := [8]byte{byte(role >> 8), byte(role), flags}
|
||||
return c.writeRecord(typeBeginRequest, reqId, b[:])
|
||||
return c.writeRecord(typeBeginRequest, reqID, b[:])
|
||||
}
|
||||
|
||||
func (c *conn) writeEndRequest(reqId uint16, appStatus int, protocolStatus uint8) error {
|
||||
func (c *conn) writeEndRequest(reqID uint16, appStatus int, protocolStatus uint8) error {
|
||||
b := make([]byte, 8)
|
||||
binary.BigEndian.PutUint32(b, uint32(appStatus))
|
||||
b[4] = protocolStatus
|
||||
return c.writeRecord(typeEndRequest, reqId, b)
|
||||
return c.writeRecord(typeEndRequest, reqID, b)
|
||||
}
|
||||
|
||||
func (c *conn) writePairs(recType recType, reqId uint16, pairs map[string]string) error {
|
||||
w := newWriter(c, recType, reqId)
|
||||
func (c *conn) writePairs(recType recType, reqID uint16, pairs map[string]string) error {
|
||||
w := newWriter(c, recType, reqID)
|
||||
b := make([]byte, 8)
|
||||
for k, v := range pairs {
|
||||
n := encodeSize(b, uint32(len(k)))
|
||||
|
|
@ -238,8 +238,8 @@ func (w *bufWriter) Close() error {
|
|||
return w.closer.Close()
|
||||
}
|
||||
|
||||
func newWriter(c *conn, recType recType, reqId uint16) *bufWriter {
|
||||
s := &streamWriter{c: c, recType: recType, reqId: reqId}
|
||||
func newWriter(c *conn, recType recType, reqID uint16) *bufWriter {
|
||||
s := &streamWriter{c: c, recType: recType, reqID: reqID}
|
||||
w := bufio.NewWriterSize(s, maxWrite)
|
||||
return &bufWriter{s, w}
|
||||
}
|
||||
|
|
@ -249,7 +249,7 @@ func newWriter(c *conn, recType recType, reqId uint16) *bufWriter {
|
|||
type streamWriter struct {
|
||||
c *conn
|
||||
recType recType
|
||||
reqId uint16
|
||||
reqID uint16
|
||||
}
|
||||
|
||||
func (w *streamWriter) Write(p []byte) (int, error) {
|
||||
|
|
@ -259,7 +259,7 @@ func (w *streamWriter) Write(p []byte) (int, error) {
|
|||
if n > maxWrite {
|
||||
n = maxWrite
|
||||
}
|
||||
if err := w.c.writeRecord(w.recType, w.reqId, p[:n]); err != nil {
|
||||
if err := w.c.writeRecord(w.recType, w.reqID, p[:n]); err != nil {
|
||||
return nn, err
|
||||
}
|
||||
nn += n
|
||||
|
|
@ -270,5 +270,5 @@ func (w *streamWriter) Write(p []byte) (int, error) {
|
|||
|
||||
func (w *streamWriter) Close() error {
|
||||
// send empty record to close the stream
|
||||
return w.c.writeRecord(w.recType, w.reqId, nil)
|
||||
return w.c.writeRecord(w.recType, w.reqID, nil)
|
||||
}
|
|
|
@ -38,20 +38,20 @@ func (c *conn) Request(
|
|||
requestData string,
|
||||
) (retout []byte, reterr []byte, err error) {
|
||||
defer c.rwc.Close()
|
||||
var reqId uint16 = 1
|
||||
var reqID uint16 = 1
|
||||
|
||||
err = c.writeBeginRequest(reqId, uint16(roleResponder), 0)
|
||||
err = c.writeBeginRequest(reqID, uint16(roleResponder), 0)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.writePairs(typeParams, reqId, env)
|
||||
err = c.writePairs(typeParams, reqID, env)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if len(requestData) > 0 {
|
||||
if err = c.writeRecord(typeStdin, reqId, []byte(requestData)); err != nil {
|
||||
if err = c.writeRecord(typeStdin, reqID, []byte(requestData)); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
|
@ -44,7 +44,7 @@ func TestSize(t *testing.T) {
|
|||
var streamTests = []struct {
|
||||
desc string
|
||||
recType recType
|
||||
reqId uint16
|
||||
reqID uint16
|
||||
content []byte
|
||||
raw []byte
|
||||
}{
|
||||
|
|
@ -90,8 +90,8 @@ outer:
|
|||
t.Errorf("%s: got type %d expected %d", test.desc, rec.h.Type, test.recType)
|
||||
continue
|
||||
}
|
||||
if rec.h.Id != test.reqId {
|
||||
t.Errorf("%s: got request ID %d expected %d", test.desc, rec.h.Id, test.reqId)
|
||||
if rec.h.ID != test.reqID {
|
||||
t.Errorf("%s: got request ID %d expected %d", test.desc, rec.h.ID, test.reqID)
|
||||
continue
|
||||
}
|
||||
if !bytes.Equal(content, test.content) {
|
||||
|
|
@ -100,7 +100,7 @@ outer:
|
|||
}
|
||||
buf.Reset()
|
||||
c := newConn(&nilCloser{buf})
|
||||
w := newWriter(c, test.recType, test.reqId)
|
||||
w := newWriter(c, test.recType, test.reqID)
|
||||
if _, err := w.Write(test.content); err != nil {
|
||||
t.Errorf("%s: error writing record: %v", test.desc, err)
|
||||
continue
|
||||
|
|
@ -164,17 +164,17 @@ func nameValuePair11(nameData, valueData string) []byte {
|
|||
|
||||
func makeRecord(
|
||||
recordType recType,
|
||||
requestId uint16,
|
||||
requestID uint16,
|
||||
contentData []byte,
|
||||
) []byte {
|
||||
requestIdB1 := byte(requestId >> 8)
|
||||
requestIdB0 := byte(requestId)
|
||||
requestIDB1 := byte(requestID >> 8)
|
||||
requestIDB0 := byte(requestID)
|
||||
|
||||
contentLength := len(contentData)
|
||||
contentLengthB1 := byte(contentLength >> 8)
|
||||
contentLengthB0 := byte(contentLength)
|
||||
return bytes.Join([][]byte{
|
||||
{1, byte(recordType), requestIdB1, requestIdB0, contentLengthB1,
|
||||
{1, byte(recordType), requestIDB1, requestIDB0, contentLengthB1,
|
||||
contentLengthB0, 0, 0},
|
||||
contentData,
|
||||
},
|
|
@ -19,19 +19,19 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
PF_POOL = "pool"
|
||||
PF_PROCESS_MANAGER = "process manager"
|
||||
PF_START_SINCE = "start since"
|
||||
PF_ACCEPTED_CONN = "accepted conn"
|
||||
PF_LISTEN_QUEUE = "listen queue"
|
||||
PF_MAX_LISTEN_QUEUE = "max listen queue"
|
||||
PF_LISTEN_QUEUE_LEN = "listen queue len"
|
||||
PF_IDLE_PROCESSES = "idle processes"
|
||||
PF_ACTIVE_PROCESSES = "active processes"
|
||||
PF_TOTAL_PROCESSES = "total processes"
|
||||
PF_MAX_ACTIVE_PROCESSES = "max active processes"
|
||||
PF_MAX_CHILDREN_REACHED = "max children reached"
|
||||
PF_SLOW_REQUESTS = "slow requests"
|
||||
PfPool = "pool"
|
||||
PfProcessManager = "process manager"
|
||||
PfStartSince = "start since"
|
||||
PfAcceptedConn = "accepted conn"
|
||||
PfListenQueue = "listen queue"
|
||||
PfMaxListenQueue = "max listen queue"
|
||||
PfListenQueueLen = "listen queue len"
|
||||
PfIdleProcesses = "idle processes"
|
||||
PfActiveProcesses = "active processes"
|
||||
PfTotalProcesses = "total processes"
|
||||
PfMaxActiveProcesses = "max active processes"
|
||||
PfMaxChildrenReached = "max children reached"
|
||||
PfSlowRequests = "slow requests"
|
||||
)
|
||||
|
||||
type metric map[string]int64
|
||||
|
|
@ -131,7 +131,7 @@ func (p *phpfpm) Gather(acc telegraf.Accumulator) error {
|
|||
// Request status page to get stat raw data and import it
|
||||
func (p *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error {
|
||||
if strings.HasPrefix(addr, "http://") || strings.HasPrefix(addr, "https://") {
|
||||
return p.gatherHttp(addr, acc)
|
||||
return p.gatherHTTP(addr, acc)
|
||||
}
|
||||
|
||||
var (
|
||||
|
|
@ -147,9 +147,9 @@ func (p *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error {
|
|||
return fmt.Errorf("unable parse server address '%s': %s", addr, err)
|
||||
}
|
||||
socketAddr := strings.Split(u.Host, ":")
|
||||
fcgiIp := socketAddr[0]
|
||||
fcgiIP := socketAddr[0]
|
||||
fcgiPort, _ := strconv.Atoi(socketAddr[1])
|
||||
fcgi, err = newFcgiClient(fcgiIp, fcgiPort)
|
||||
fcgi, err = newFcgiClient(fcgiIP, fcgiPort)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -193,7 +193,7 @@ func (p *phpfpm) gatherFcgi(fcgi *conn, statusPath string, acc telegraf.Accumula
|
|||
}
|
||||
|
||||
// Gather stat using http protocol
|
||||
func (p *phpfpm) gatherHttp(addr string, acc telegraf.Accumulator) error {
|
||||
func (p *phpfpm) gatherHTTP(addr string, acc telegraf.Accumulator) error {
|
||||
u, err := url.Parse(addr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable parse server address '%s': %v", addr, err)
|
||||
|
|
@ -233,7 +233,7 @@ func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) poolStat {
|
|||
}
|
||||
fieldName := strings.Trim(keyvalue[0], " ")
|
||||
// We start to gather data for a new pool here
|
||||
if fieldName == PF_POOL {
|
||||
if fieldName == PfPool {
|
||||
currentPool = strings.Trim(keyvalue[1], " ")
|
||||
stats[currentPool] = make(metric)
|
||||
continue
|
||||
|
|
@ -241,17 +241,17 @@ func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) poolStat {
|
|||
|
||||
// Start to parse metric for current pool
|
||||
switch fieldName {
|
||||
case PF_START_SINCE,
|
||||
PF_ACCEPTED_CONN,
|
||||
PF_LISTEN_QUEUE,
|
||||
PF_MAX_LISTEN_QUEUE,
|
||||
PF_LISTEN_QUEUE_LEN,
|
||||
PF_IDLE_PROCESSES,
|
||||
PF_ACTIVE_PROCESSES,
|
||||
PF_TOTAL_PROCESSES,
|
||||
PF_MAX_ACTIVE_PROCESSES,
|
||||
PF_MAX_CHILDREN_REACHED,
|
||||
PF_SLOW_REQUESTS:
|
||||
case PfStartSince,
|
||||
PfAcceptedConn,
|
||||
PfListenQueue,
|
||||
PfMaxListenQueue,
|
||||
PfListenQueueLen,
|
||||
PfIdleProcesses,
|
||||
PfActiveProcesses,
|
||||
PfTotalProcesses,
|
||||
PfMaxActiveProcesses,
|
||||
PfMaxChildrenReached,
|
||||
PfSlowRequests:
|
||||
fieldValue, err := strconv.ParseInt(strings.Trim(keyvalue[1], " "), 10, 64)
|
||||
if err == nil {
|
||||
stats[currentPool][fieldName] = fieldValue
|
|
@ -105,26 +105,26 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
|
|||
|
||||
query = `SELECT * FROM pg_stat_bgwriter`
|
||||
|
||||
bg_writer_row, err := p.DB.Query(query)
|
||||
bgWriterRow, err := p.DB.Query(query)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer bg_writer_row.Close()
|
||||
defer bgWriterRow.Close()
|
||||
|
||||
// grab the column information from the result
|
||||
if columns, err = bg_writer_row.Columns(); err != nil {
|
||||
if columns, err = bgWriterRow.Columns(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for bg_writer_row.Next() {
|
||||
err = p.accRow(bg_writer_row, acc, columns)
|
||||
for bgWriterRow.Next() {
|
||||
err = p.accRow(bgWriterRow, acc, columns)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return bg_writer_row.Err()
|
||||
return bgWriterRow.Err()
|
||||
}
|
||||
|
||||
type scanner interface {
|
|
@ -157,49 +157,48 @@ func ReadQueryFromFile(filePath string) (string, error) {
|
|||
func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
|
||||
var (
|
||||
err error
|
||||
sql_query string
|
||||
query_addon string
|
||||
db_version int
|
||||
sqlQuery string
|
||||
queryAddon string
|
||||
dbVersion int
|
||||
query string
|
||||
tag_value string
|
||||
meas_name string
|
||||
tagValue string
|
||||
measName string
|
||||
timestamp string
|
||||
columns []string
|
||||
)
|
||||
|
||||
// Retrieving the database version
|
||||
query = `SELECT setting::integer / 100 AS version FROM pg_settings WHERE name = 'server_version_num'`
|
||||
if err = p.DB.QueryRow(query).Scan(&db_version); err != nil {
|
||||
db_version = 0
|
||||
if err = p.DB.QueryRow(query).Scan(&dbVersion); err != nil {
|
||||
dbVersion = 0
|
||||
}
|
||||
|
||||
// We loop in order to process each query
|
||||
// Query is not run if Database version does not match the query version.
|
||||
for i := range p.Query {
|
||||
sql_query = p.Query[i].Sqlquery
|
||||
tag_value = p.Query[i].Tagvalue
|
||||
sqlQuery = p.Query[i].Sqlquery
|
||||
tagValue = p.Query[i].Tagvalue
|
||||
timestamp = p.Query[i].Timestamp
|
||||
|
||||
if p.Query[i].Measurement != "" {
|
||||
meas_name = p.Query[i].Measurement
|
||||
measName = p.Query[i].Measurement
|
||||
} else {
|
||||
meas_name = "postgresql"
|
||||
measName = "postgresql"
|
||||
}
|
||||
|
||||
if p.Query[i].Withdbname {
|
||||
if len(p.Databases) != 0 {
|
||||
query_addon = fmt.Sprintf(` IN ('%s')`,
|
||||
strings.Join(p.Databases, "','"))
|
||||
queryAddon = fmt.Sprintf(` IN ('%s')`, strings.Join(p.Databases, "','"))
|
||||
} else {
|
||||
query_addon = " is not null"
|
||||
queryAddon = " is not null"
|
||||
}
|
||||
} else {
|
||||
query_addon = ""
|
||||
queryAddon = ""
|
||||
}
|
||||
sql_query += query_addon
|
||||
sqlQuery += queryAddon
|
||||
|
||||
if p.Query[i].Version <= db_version {
|
||||
rows, err := p.DB.Query(sql_query)
|
||||
if p.Query[i].Version <= dbVersion {
|
||||
rows, err := p.DB.Query(sqlQuery)
|
||||
if err != nil {
|
||||
p.Log.Error(err.Error())
|
||||
continue
|
||||
|
|
@ -214,17 +213,17 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
|
|||
}
|
||||
|
||||
p.AdditionalTags = nil
|
||||
if tag_value != "" {
|
||||
tag_list := strings.Split(tag_value, ",")
|
||||
for t := range tag_list {
|
||||
p.AdditionalTags = append(p.AdditionalTags, tag_list[t])
|
||||
if tagValue != "" {
|
||||
tagList := strings.Split(tagValue, ",")
|
||||
for t := range tagList {
|
||||
p.AdditionalTags = append(p.AdditionalTags, tagList[t])
|
||||
}
|
||||
}
|
||||
|
||||
p.Timestamp = timestamp
|
||||
|
||||
for rows.Next() {
|
||||
err = p.accRow(meas_name, rows, acc, columns)
|
||||
err = p.accRow(measName, rows, acc, columns)
|
||||
if err != nil {
|
||||
p.Log.Error(err.Error())
|
||||
break
|
||||
|
|
@ -239,7 +238,7 @@ type scanner interface {
|
|||
Scan(dest ...interface{}) error
|
||||
}
|
||||
|
||||
func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumulator, columns []string) error {
|
||||
func (p *Postgresql) accRow(measName string, row scanner, acc telegraf.Accumulator, columns []string) error {
|
||||
var (
|
||||
err error
|
||||
columnVars []interface{}
|
||||
|
|
@ -329,7 +328,7 @@ COLUMN:
|
|||
fields[col] = *val
|
||||
}
|
||||
}
|
||||
acc.AddFields(meas_name, fields, tags, timestamp)
|
||||
acc.AddFields(measName, fields, tags, timestamp)
|
||||
return nil
|
||||
}
|
|||
|
|
@ -20,7 +20,7 @@ func NewNativeFinder() (PIDFinder, error) {
|
|||
}
|
||||
|
||||
//Uid will return all pids for the given user
|
||||
func (pg *NativeFinder) Uid(user string) ([]PID, error) {
|
||||
func (pg *NativeFinder) UID(user string) ([]PID, error) {
|
||||
var dst []PID
|
||||
procs, err := process.Processes()
|
||||
if err != nil {
|
|
@ -42,7 +42,7 @@ func TestGather_RealUserIntegration(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
pg, err := NewNativeFinder()
|
||||
require.NoError(t, err)
|
||||
pids, err := pg.Uid(user.Username)
|
||||
pids, err := pg.UID(user.Username)
|
||||
require.NoError(t, err)
|
||||
fmt.Println(pids)
|
||||
assert.Equal(t, len(pids) > 0, true)
|
|
@ -43,7 +43,7 @@ func (pg *Pgrep) Pattern(pattern string) ([]PID, error) {
|
|||
return find(pg.path, args)
|
||||
}
|
||||
|
||||
func (pg *Pgrep) Uid(user string) ([]PID, error) {
|
||||
func (pg *Pgrep) UID(user string) ([]PID, error) {
|
||||
args := []string{"-u", user}
|
||||
return find(pg.path, args)
|
||||
}
|
|
@ -31,7 +31,7 @@ type Process interface {
|
|||
type PIDFinder interface {
|
||||
PidFile(path string) ([]PID, error)
|
||||
Pattern(pattern string) ([]PID, error)
|
||||
Uid(user string) ([]PID, error)
|
||||
UID(user string) ([]PID, error)
|
||||
FullPattern(path string) ([]PID, error)
|
||||
}
|
||||
|
||||
|
|
@ -68,10 +68,10 @@ func (p *Proc) Username() (string, error) {
|
|||
}
|
||||
|
||||
func (p *Proc) Percent(interval time.Duration) (float64, error) {
|
||||
cpu_perc, err := p.Process.Percent(time.Duration(0))
|
||||
cpuPerc, err := p.Process.Percent(time.Duration(0))
|
||||
if !p.hasCPUTimes && err == nil {
|
||||
p.hasCPUTimes = true
|
||||
return 0, fmt.Errorf("must call Percent twice to compute percent cpu")
|
||||
}
|
||||
return cpu_perc, err
|
||||
return cpuPerc, err
|
||||
}
|
|
@ -388,7 +388,7 @@ func (p *Procstat) findPids() ([]PID, map[string]string, error) {
|
|||
pids, err = f.FullPattern(p.Pattern)
|
||||
tags = map[string]string{"pattern": p.Pattern}
|
||||
} else if p.User != "" {
|
||||
pids, err = f.Uid(p.User)
|
||||
pids, err = f.UID(p.User)
|
||||
tags = map[string]string{"user": p.User}
|
||||
} else if p.SystemdUnit != "" {
|
||||
pids, err = p.systemdUnitPIDs()
|
||||
|
|
|||
return pg.pids, pg.err
|
||||
}
|
||||
|
||||
func (pg *testPgrep) Uid(_ string) ([]PID, error) {
|
||||
func (pg *testPgrep) UID(_ string) ([]PID, error) {
|
||||
return pg.pids, pg.err
|
||||
}
|
|
|||
|
|
@ -81,28 +81,28 @@ func init() {
|
|||
}
|
||||
|
||||
func getNodeSearchDomain(px *Proxmox) error {
|
||||
apiUrl := "/nodes/" + px.NodeName + "/dns"
|
||||
jsonData, err := px.requestFunction(px, apiUrl, http.MethodGet, nil)
|
||||
apiURL := "/nodes/" + px.NodeName + "/dns"
|
||||
jsonData, err := px.requestFunction(px, apiURL, http.MethodGet, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var nodeDns NodeDns
|
||||
err = json.Unmarshal(jsonData, &nodeDns)
|
||||
var nodeDNS NodeDNS
|
||||
err = json.Unmarshal(jsonData, &nodeDNS)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if nodeDns.Data.Searchdomain == "" {
|
||||
if nodeDNS.Data.Searchdomain == "" {
|
||||
return errors.New("search domain is not set")
|
||||
}
|
||||
px.nodeSearchDomain = nodeDns.Data.Searchdomain
|
||||
px.nodeSearchDomain = nodeDNS.Data.Searchdomain
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func performRequest(px *Proxmox, apiUrl string, method string, data url.Values) ([]byte, error) {
|
||||
request, err := http.NewRequest(method, px.BaseURL+apiUrl, strings.NewReader(data.Encode()))
|
||||
func performRequest(px *Proxmox, apiURL string, method string, data url.Values) ([]byte, error) {
|
||||
	request, err := http.NewRequest(method, px.BaseURL+apiURL, strings.NewReader(data.Encode()))
	if err != nil {
		return nil, err
	}

@ -123,15 +123,15 @@ func performRequest(px *Proxmox, apiUrl string, method string, data url.Values)
	}

func gatherLxcData(px *Proxmox, acc telegraf.Accumulator) {
	gatherVmData(px, acc, LXC)
	gatherVMData(px, acc, LXC)
}

func gatherQemuData(px *Proxmox, acc telegraf.Accumulator) {
	gatherVmData(px, acc, QEMU)
	gatherVMData(px, acc, QEMU)
}

func gatherVmData(px *Proxmox, acc telegraf.Accumulator, rt ResourceType) {
	vmStats, err := getVmStats(px, rt)
func gatherVMData(px *Proxmox, acc telegraf.Accumulator, rt ResourceType) {
	vmStats, err := getVMStats(px, rt)
	if err != nil {
		px.Log.Error("Error getting VM stats: %v", err)
		return

@ -139,7 +139,7 @@ func gatherVmData(px *Proxmox, acc telegraf.Accumulator, rt ResourceType) {

	// For each VM add metrics to Accumulator
	for _, vmStat := range vmStats.Data {
		vmConfig, err := getVmConfig(px, vmStat.ID, rt)
		vmConfig, err := getVMConfig(px, vmStat.ID, rt)
		if err != nil {
			px.Log.Errorf("Error getting VM config: %v", err)
			return

@ -167,76 +167,76 @@ func gatherVmData(px *Proxmox, acc telegraf.Accumulator, rt ResourceType) {
	}
}

func getCurrentVMStatus(px *Proxmox, rt ResourceType, id string) (VmStat, error) {
	apiUrl := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + id + "/status/current"
func getCurrentVMStatus(px *Proxmox, rt ResourceType, id string) (VMStat, error) {
	apiURL := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + id + "/status/current"

	jsonData, err := px.requestFunction(px, apiUrl, http.MethodGet, nil)
	jsonData, err := px.requestFunction(px, apiURL, http.MethodGet, nil)
	if err != nil {
		return VmStat{}, err
		return VMStat{}, err
	}

	var currentVmStatus VmCurrentStats
	err = json.Unmarshal(jsonData, &currentVmStatus)
	var currentVMStatus VMCurrentStats
	err = json.Unmarshal(jsonData, &currentVMStatus)
	if err != nil {
		return VmStat{}, err
		return VMStat{}, err
	}

	return currentVmStatus.Data, nil
	return currentVMStatus.Data, nil
}

func getVmStats(px *Proxmox, rt ResourceType) (VmStats, error) {
	apiUrl := "/nodes/" + px.NodeName + "/" + string(rt)
	jsonData, err := px.requestFunction(px, apiUrl, http.MethodGet, nil)
func getVMStats(px *Proxmox, rt ResourceType) (VMStats, error) {
	apiURL := "/nodes/" + px.NodeName + "/" + string(rt)
	jsonData, err := px.requestFunction(px, apiURL, http.MethodGet, nil)
	if err != nil {
		return VmStats{}, err
		return VMStats{}, err
	}

	var vmStats VmStats
	var vmStats VMStats
	err = json.Unmarshal(jsonData, &vmStats)
	if err != nil {
		return VmStats{}, err
		return VMStats{}, err
	}

	return vmStats, nil
}

func getVmConfig(px *Proxmox, vmId string, rt ResourceType) (VmConfig, error) {
	apiUrl := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + vmId + "/config"
	jsonData, err := px.requestFunction(px, apiUrl, http.MethodGet, nil)
func getVMConfig(px *Proxmox, vmID string, rt ResourceType) (VMConfig, error) {
	apiURL := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + vmID + "/config"
	jsonData, err := px.requestFunction(px, apiURL, http.MethodGet, nil)
	if err != nil {
		return VmConfig{}, err
		return VMConfig{}, err
	}

	var vmConfig VmConfig
	var vmConfig VMConfig
	err = json.Unmarshal(jsonData, &vmConfig)
	if err != nil {
		return VmConfig{}, err
		return VMConfig{}, err
	}

	return vmConfig, nil
}

func getFields(vmStat VmStat) (map[string]interface{}, error) {
	mem_total, mem_used, mem_free, mem_used_percentage := getByteMetrics(vmStat.TotalMem, vmStat.UsedMem)
	swap_total, swap_used, swap_free, swap_used_percentage := getByteMetrics(vmStat.TotalSwap, vmStat.UsedSwap)
	disk_total, disk_used, disk_free, disk_used_percentage := getByteMetrics(vmStat.TotalDisk, vmStat.UsedDisk)
func getFields(vmStat VMStat) (map[string]interface{}, error) {
	memTotal, memUsed, memFree, memUsedPercentage := getByteMetrics(vmStat.TotalMem, vmStat.UsedMem)
	swapTotal, swapUsed, swapFree, swapUsedPercentage := getByteMetrics(vmStat.TotalSwap, vmStat.UsedSwap)
	diskTotal, diskUsed, diskFree, diskUsedPercentage := getByteMetrics(vmStat.TotalDisk, vmStat.UsedDisk)

	return map[string]interface{}{
		"status": vmStat.Status,
		"uptime": jsonNumberToInt64(vmStat.Uptime),
		"cpuload": jsonNumberToFloat64(vmStat.CpuLoad),
		"mem_used": mem_used,
		"mem_total": mem_total,
		"mem_free": mem_free,
		"mem_used_percentage": mem_used_percentage,
		"swap_used": swap_used,
		"swap_total": swap_total,
		"swap_free": swap_free,
		"swap_used_percentage": swap_used_percentage,
		"disk_used": disk_used,
		"disk_total": disk_total,
		"disk_free": disk_free,
		"disk_used_percentage": disk_used_percentage,
		"cpuload": jsonNumberToFloat64(vmStat.CPULoad),
		"mem_used": memUsed,
		"mem_total": memTotal,
		"mem_free": memFree,
		"mem_used_percentage": memUsedPercentage,
		"swap_used": swapUsed,
		"swap_total": swapTotal,
		"swap_free": swapFree,
		"swap_used_percentage": swapUsedPercentage,
		"disk_used": diskUsed,
		"disk_total": diskTotal,
		"disk_free": diskFree,
		"disk_used_percentage": diskUsedPercentage,
	}, nil
}

@ -270,7 +270,7 @@ func jsonNumberToFloat64(value json.Number) float64 {
	return float64Value
}

func getTags(px *Proxmox, name string, vmConfig VmConfig, rt ResourceType) map[string]string {
func getTags(px *Proxmox, name string, vmConfig VMConfig, rt ResourceType) map[string]string {
	domain := vmConfig.Data.Searchdomain
	if len(domain) == 0 {
		domain = px.nodeSearchDomain

@ -18,22 +18,22 @@ var lxcConfigTestData = `{"data":{"hostname":"container1","searchdomain":"test.e
var lxcCurrentStatusTestData = `{"data":{"vmid":"111","type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container1"}}`
var qemuCurrentStatusTestData = `{"data":{"name":"qemu1","status":"running","maxdisk":10737418240,"cpu":0.029336643550795,"vmid":"113","uptime":2159739,"disk":0,"maxmem":2147483648,"mem":1722451796}}`

func performTestRequest(px *Proxmox, apiUrl string, method string, data url.Values) ([]byte, error) {
func performTestRequest(px *Proxmox, apiURL string, method string, data url.Values) ([]byte, error) {
	var bytedata = []byte("")

	if strings.HasSuffix(apiUrl, "dns") {
	if strings.HasSuffix(apiURL, "dns") {
		bytedata = []byte(nodeSearchDomainTestData)
	} else if strings.HasSuffix(apiUrl, "qemu") {
	} else if strings.HasSuffix(apiURL, "qemu") {
		bytedata = []byte(qemuTestData)
	} else if strings.HasSuffix(apiUrl, "113/config") {
	} else if strings.HasSuffix(apiURL, "113/config") {
		bytedata = []byte(qemuConfigTestData)
	} else if strings.HasSuffix(apiUrl, "lxc") {
	} else if strings.HasSuffix(apiURL, "lxc") {
		bytedata = []byte(lxcTestData)
	} else if strings.HasSuffix(apiUrl, "111/config") {
	} else if strings.HasSuffix(apiURL, "111/config") {
		bytedata = []byte(lxcConfigTestData)
	} else if strings.HasSuffix(apiUrl, "111/status/current") {
	} else if strings.HasSuffix(apiURL, "111/status/current") {
		bytedata = []byte(lxcCurrentStatusTestData)
	} else if strings.HasSuffix(apiUrl, "113/status/current") {
	} else if strings.HasSuffix(apiURL, "113/status/current") {
		bytedata = []byte(qemuCurrentStatusTestData)
	}

@ -32,15 +32,15 @@ var (
	LXC ResourceType = "lxc"
)

type VmStats struct {
	Data []VmStat `json:"data"`
type VMStats struct {
	Data []VMStat `json:"data"`
}

type VmCurrentStats struct {
	Data VmStat `json:"data"`
type VMCurrentStats struct {
	Data VMStat `json:"data"`
}

type VmStat struct {
type VMStat struct {
	ID string `json:"vmid"`
	Name string `json:"name"`
	Status string `json:"status"`

@ -51,10 +51,10 @@ type VmStat struct {
	UsedSwap json.Number `json:"swap"`
	TotalSwap json.Number `json:"maxswap"`
	Uptime json.Number `json:"uptime"`
	CpuLoad json.Number `json:"cpu"`
	CPULoad json.Number `json:"cpu"`
}

type VmConfig struct {
type VMConfig struct {
	Data struct {
		Searchdomain string `json:"searchdomain"`
		Hostname string `json:"hostname"`

@ -62,7 +62,7 @@ type VmConfig struct {
	} `json:"data"`
}

type NodeDns struct {
type NodeDNS struct {
	Data struct {
		Searchdomain string `json:"search"`
	} `json:"data"`

@ -157,8 +157,8 @@ type Node struct {
	Uptime int64 `json:"uptime"`
	MnesiaDiskTxCount int64 `json:"mnesia_disk_tx_count"`
	MnesiaDiskTxCountDetails Details `json:"mnesia_disk_tx_count_details"`
	MnesiaRamTxCount int64 `json:"mnesia_ram_tx_count"`
	MnesiaRamTxCountDetails Details `json:"mnesia_ram_tx_count_details"`
	MnesiaRAMTxCount int64 `json:"mnesia_ram_tx_count"`
	MnesiaRAMTxCountDetails Details `json:"mnesia_ram_tx_count_details"`
	GcNum int64 `json:"gc_num"`
	GcNumDetails Details `json:"gc_num_details"`
	GcBytesReclaimed int64 `json:"gc_bytes_reclaimed"`

@ -491,8 +491,8 @@ func gatherNodes(r *RabbitMQ, acc telegraf.Accumulator) {
		"uptime": node.Uptime,
		"mnesia_disk_tx_count": node.MnesiaDiskTxCount,
		"mnesia_disk_tx_count_rate": node.MnesiaDiskTxCountDetails.Rate,
		"mnesia_ram_tx_count": node.MnesiaRamTxCount,
		"mnesia_ram_tx_count_rate": node.MnesiaRamTxCountDetails.Rate,
		"mnesia_ram_tx_count": node.MnesiaRAMTxCount,
		"mnesia_ram_tx_count_rate": node.MnesiaRAMTxCountDetails.Rate,
		"gc_num": node.GcNum,
		"gc_num_rate": node.GcNumDetails.Rate,
		"gc_bytes_reclaimed": node.GcBytesReclaimed,

@ -17,7 +17,7 @@ import (

type Raindrops struct {
	Urls []string
	http_client *http.Client
	httpClient *http.Client
}

var sampleConfig = `

@ -46,7 +46,7 @@ func (r *Raindrops) Gather(acc telegraf.Accumulator) error {
		wg.Add(1)
		go func(addr *url.URL) {
			defer wg.Done()
			acc.AddError(r.gatherUrl(addr, acc))
			acc.AddError(r.gatherURL(addr, acc))
		}(addr)
	}

@ -55,8 +55,8 @@ func (r *Raindrops) Gather(acc telegraf.Accumulator) error {
	return nil
}

func (r *Raindrops) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
	resp, err := r.http_client.Get(addr.String())
func (r *Raindrops) gatherURL(addr *url.URL, acc telegraf.Accumulator) error {
	resp, err := r.httpClient.Get(addr.String())
	if err != nil {
		return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
	}

@ -101,10 +101,10 @@ func (r *Raindrops) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
	acc.AddFields("raindrops", fields, tags)

	iterate := true
	var queued_line_str string
	var active_line_str string
	var active_err error
	var queued_err error
	var queuedLineStr string
	var activeLineStr string
	var activeErr error
	var queuedErr error

	for iterate {
		// Listen

@ -114,35 +114,35 @@ func (r *Raindrops) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
			"active": 0,
			"queued": 0,
		}
		active_line_str, active_err = buf.ReadString('\n')
		if active_err != nil {
		activeLineStr, activeErr = buf.ReadString('\n')
		if activeErr != nil {
			iterate = false
			break
		}
		if strings.Compare(active_line_str, "\n") == 0 {
		if strings.Compare(activeLineStr, "\n") == 0 {
			break
		}
		queued_line_str, queued_err = buf.ReadString('\n')
		if queued_err != nil {
		queuedLineStr, queuedErr = buf.ReadString('\n')
		if queuedErr != nil {
			iterate = false
		}
		active_line := strings.Split(active_line_str, " ")
		listen_name := active_line[0]
		activeLine := strings.Split(activeLineStr, " ")
		listenName := activeLine[0]

		active, err := strconv.ParseUint(strings.TrimSpace(active_line[2]), 10, 64)
		active, err := strconv.ParseUint(strings.TrimSpace(activeLine[2]), 10, 64)
		if err != nil {
			active = 0
		}
		lis["active"] = active

		queued_line := strings.Split(queued_line_str, " ")
		queued, err := strconv.ParseUint(strings.TrimSpace(queued_line[2]), 10, 64)
		queuedLine := strings.Split(queuedLineStr, " ")
		queued, err := strconv.ParseUint(strings.TrimSpace(queuedLine[2]), 10, 64)
		if err != nil {
			queued = 0
		}
		lis["queued"] = queued
		if strings.Contains(listen_name, ":") {
			listener := strings.Split(listen_name, ":")
		if strings.Contains(listenName, ":") {
			listener := strings.Split(listenName, ":")
			tags = map[string]string{
				"ip": listener[0],
				"port": listener[1],

@ -150,7 +150,7 @@ func (r *Raindrops) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
		} else {
			tags = map[string]string{
				"socket": listen_name,
				"socket": listenName,
			}
		}
		acc.AddFields("raindrops_listen", lis, tags)

@ -177,7 +177,7 @@ func (r *Raindrops) getTags(addr *url.URL) map[string]string {

func init() {
	inputs.Add("raindrops", func() telegraf.Input {
		return &Raindrops{http_client: &http.Client{
		return &Raindrops{httpClient: &http.Client{
			Transport: &http.Transport{
				ResponseHeaderTimeout: time.Duration(3 * time.Second),
			},

@ -61,7 +61,7 @@ func TestRaindropsGeneratesMetrics(t *testing.T) {

	n := &Raindrops{
		Urls: []string{fmt.Sprintf("%s/_raindrops", ts.URL)},
		http_client: &http.Client{Transport: &http.Transport{
		httpClient: &http.Client{Transport: &http.Transport{
			ResponseHeaderTimeout: time.Duration(3 * time.Second),
		}},
	}

@ -43,7 +43,7 @@ type Redfish struct {
	Address string `toml:"address"`
	Username string `toml:"username"`
	Password string `toml:"password"`
	ComputerSystemId string `toml:"computer_system_id"`
	ComputerSystemID string `toml:"computer_system_id"`
	Timeout config.Duration `toml:"timeout"`

	client http.Client

@ -150,7 +150,7 @@ func (r *Redfish) Init() error {
		return fmt.Errorf("did not provide username and password")
	}

	if r.ComputerSystemId == "" {
	if r.ComputerSystemID == "" {
		return fmt.Errorf("did not provide the computer system ID of the resource")
	}

@ -258,7 +258,7 @@ func (r *Redfish) Gather(acc telegraf.Accumulator) error {
		address = r.baseURL.Host
	}

	system, err := r.getComputerSystem(r.ComputerSystemId)
	system, err := r.getComputerSystem(r.ComputerSystemID)
	if err != nil {
		return err
	}

@ -44,7 +44,7 @@ func TestDellApis(t *testing.T) {
	address, _, err := net.SplitHostPort(u.Host)
	require.NoError(t, err)

	expected_metrics := []telegraf.Metric{
	expectedMetrics := []telegraf.Metric{
		testutil.MustMetric(
			"redfish_thermal_temperatures",
			map[string]string{

@ -489,7 +489,7 @@ func TestDellApis(t *testing.T) {
		Address: ts.URL,
		Username: "test",
		Password: "test",
		ComputerSystemId: "System.Embedded.1",
		ComputerSystemID: "System.Embedded.1",
	}
	plugin.Init()
	var acc testutil.Accumulator

@ -497,7 +497,7 @@ func TestDellApis(t *testing.T) {
	err = plugin.Gather(&acc)
	require.NoError(t, err)
	require.True(t, acc.HasMeasurement("redfish_thermal_temperatures"))
	testutil.RequireMetricsEqual(t, expected_metrics, acc.GetTelegrafMetrics(),
	testutil.RequireMetricsEqual(t, expectedMetrics, acc.GetTelegrafMetrics(),
		testutil.IgnoreTime())
}

@ -531,7 +531,7 @@ func TestHPApis(t *testing.T) {
	address, _, err := net.SplitHostPort(u.Host)
	require.NoError(t, err)

	expected_metrics_hp := []telegraf.Metric{
	expectedMetricsHp := []telegraf.Metric{
		testutil.MustMetric(
			"redfish_thermal_temperatures",
			map[string]string{

@ -647,19 +647,19 @@ func TestHPApis(t *testing.T) {
		),
	}

	hp_plugin := &Redfish{
	hpPlugin := &Redfish{
		Address: ts.URL,
		Username: "test",
		Password: "test",
		ComputerSystemId: "1",
		ComputerSystemID: "1",
	}
	hp_plugin.Init()
	var hp_acc testutil.Accumulator
	hpPlugin.Init()
	var hpAcc testutil.Accumulator

	err = hp_plugin.Gather(&hp_acc)
	err = hpPlugin.Gather(&hpAcc)
	require.NoError(t, err)
	require.True(t, hp_acc.HasMeasurement("redfish_thermal_temperatures"))
	testutil.RequireMetricsEqual(t, expected_metrics_hp, hp_acc.GetTelegrafMetrics(),
	require.True(t, hpAcc.HasMeasurement("redfish_thermal_temperatures"))
	testutil.RequireMetricsEqual(t, expectedMetricsHp, hpAcc.GetTelegrafMetrics(),
		testutil.IgnoreTime())
}

@ -693,7 +693,7 @@ func TestInvalidUsernameorPassword(t *testing.T) {
		Address: ts.URL,
		Username: "test",
		Password: "test",
		ComputerSystemId: "System.Embedded.1",
		ComputerSystemID: "System.Embedded.1",
	}

	var acc testutil.Accumulator

@ -723,7 +723,7 @@ func TestNoUsernameorPasswordConfiguration(t *testing.T) {

	r := &Redfish{
		Address: ts.URL,
		ComputerSystemId: "System.Embedded.1",
		ComputerSystemID: "System.Embedded.1",
	}

	err := r.Init()

@ -796,7 +796,7 @@ func TestInvalidDellJSON(t *testing.T) {
		Address: ts.URL,
		Username: "test",
		Password: "test",
		ComputerSystemId: "System.Embedded.1",
		ComputerSystemID: "System.Embedded.1",
	}

	plugin.Init()

@ -867,7 +867,7 @@ func TestInvalidHPJSON(t *testing.T) {
		Address: ts.URL,
		Username: "test",
		Password: "test",
		ComputerSystemId: "System.Embedded.2",
		ComputerSystemID: "System.Embedded.2",
	}

	plugin.Init()

@ -40,7 +40,7 @@ func (r *RethinkDB) Description() string {
	return "Read metrics from one or many RethinkDB servers"
}

var localhost = &Server{Url: &url.URL{Host: "127.0.0.1:28015"}}
var localhost = &Server{URL: &url.URL{Host: "127.0.0.1:28015"}}

// Reads stats from all configured servers accumulates stats.
// Returns one of the errors encountered while gather stats (if any).

@ -64,7 +64,7 @@ func (r *RethinkDB) Gather(acc telegraf.Accumulator) error {
		wg.Add(1)
		go func(serv string) {
			defer wg.Done()
			acc.AddError(r.gatherServer(&Server{Url: u}, acc))
			acc.AddError(r.gatherServer(&Server{URL: u}, acc))
		}(serv)
	}

@ -76,20 +76,20 @@ func (r *RethinkDB) Gather(acc telegraf.Accumulator) error {
func (r *RethinkDB) gatherServer(server *Server, acc telegraf.Accumulator) error {
	var err error
	connectOpts := gorethink.ConnectOpts{
		Address: server.Url.Host,
		Address: server.URL.Host,
		DiscoverHosts: false,
	}
	if server.Url.User != nil {
		pwd, set := server.Url.User.Password()
	if server.URL.User != nil {
		pwd, set := server.URL.User.Password()
		if set && pwd != "" {
			connectOpts.AuthKey = pwd
			connectOpts.HandshakeVersion = gorethink.HandshakeV0_4
		}
	}
	if server.Url.Scheme == "rethinkdb2" && server.Url.User != nil {
		pwd, set := server.Url.User.Password()
	if server.URL.Scheme == "rethinkdb2" && server.URL.User != nil {
		pwd, set := server.URL.User.Password()
		if set && pwd != "" {
			connectOpts.Username = server.Url.User.Username()
			connectOpts.Username = server.URL.User.Username()
			connectOpts.Password = pwd
			connectOpts.HandshakeVersion = gorethink.HandshakeV1_0
		}

@ -8,7 +8,7 @@ import (
)

type serverStatus struct {
	Id string `gorethink:"id"`
	ID string `gorethink:"id"`
	Network struct {
		Addresses []Address `gorethink:"canonical_addresses"`
		Hostname string `gorethink:"hostname"`

@ -41,7 +41,7 @@ type Engine struct {
}

type tableStatus struct {
	Id string `gorethink:"id"`
	ID string `gorethink:"id"`
	DB string `gorethink:"db"`
	Name string `gorethink:"name"`
}

@ -59,14 +59,14 @@ func TestAddEngineStatsPartial(t *testing.T) {
		"written_docs_per_sec",
	}

	missing_keys := []string{
	missingKeys := []string{
		"total_queries",
		"total_reads",
		"total_writes",
	}
	engine.AddEngineStats(keys, &acc, tags)

	for _, metric := range missing_keys {
	for _, metric := range missingKeys {
		assert.False(t, acc.HasInt64Field("rethinkdb", metric))
	}
}

@ -15,7 +15,7 @@ import (
)

type Server struct {
	Url *url.URL
	URL *url.URL
	session *gorethink.Session
	serverStatus serverStatus
}

@ -78,9 +78,9 @@ func (s *Server) getServerStatus() error {
	if err != nil {
		return errors.New("could not parse server_status results")
	}
	host, port, err := net.SplitHostPort(s.Url.Host)
	host, port, err := net.SplitHostPort(s.URL.Host)
	if err != nil {
		return fmt.Errorf("unable to determine provided hostname from %s", s.Url.Host)
		return fmt.Errorf("unable to determine provided hostname from %s", s.URL.Host)
	}
	driverPort, _ := strconv.Atoi(port)
	for _, ss := range serverStatuses {

@ -92,12 +92,12 @@ func (s *Server) getServerStatus() error {
		}
	}

	return fmt.Errorf("unable to determine host id from server_status with %s", s.Url.Host)
	return fmt.Errorf("unable to determine host id from server_status with %s", s.URL.Host)
}

func (s *Server) getDefaultTags() map[string]string {
	tags := make(map[string]string)
	tags["rethinkdb_host"] = s.Url.Host
	tags["rethinkdb_host"] = s.URL.Host
	tags["rethinkdb_hostname"] = s.serverStatus.Network.Hostname
	return tags
}

@ -139,7 +139,7 @@ var MemberTracking = []string{
}

func (s *Server) addMemberStats(acc telegraf.Accumulator) error {
	cursor, err := gorethink.DB("rethinkdb").Table("stats").Get([]string{"server", s.serverStatus.Id}).Run(s.session)
	cursor, err := gorethink.DB("rethinkdb").Table("stats").Get([]string{"server", s.serverStatus.ID}).Run(s.session)
	if err != nil {
		return fmt.Errorf("member stats query error, %s", err.Error())
	}

@ -176,7 +176,7 @@ func (s *Server) addTableStats(acc telegraf.Accumulator) error {
	}
	for _, table := range tables {
		cursor, err := gorethink.DB("rethinkdb").Table("stats").
			Get([]string{"table_server", table.Id, s.serverStatus.Id}).
			Get([]string{"table_server", table.ID, s.serverStatus.ID}).
			Run(s.session)
		if err != nil {
			return fmt.Errorf("table stats query error, %s", err.Error())

@ -28,18 +28,18 @@ func init() {

func testSetup(m *testing.M) {
	var err error
	server = &Server{Url: &url.URL{Host: connect_url}}
	server = &Server{URL: &url.URL{Host: connect_url}}

	if authKey {
		server.session, _ = gorethink.Connect(gorethink.ConnectOpts{
			Address: server.Url.Host,
			Address: server.URL.Host,
			AuthKey: authKey,
			HandshakeVersion: gorethink.HandshakeV0_4,
			DiscoverHosts: false,
		})
	} else {
		server.session, _ = gorethink.Connect(gorethink.ConnectOpts{
			Address: server.Url.Host,
			Address: server.URL.Host,
			Username: username,
			Password: password,
			HandshakeVersion: gorethink.HandshakeV1_0,

@ -31,9 +31,9 @@ func NewRiak() *Riak {

// Type riakStats represents the data that is received from Riak
type riakStats struct {
	CpuAvg1 int64 `json:"cpu_avg1"`
	CpuAvg15 int64 `json:"cpu_avg15"`
	CpuAvg5 int64 `json:"cpu_avg5"`
	CPUAvg1 int64 `json:"cpu_avg1"`
	CPUAvg15 int64 `json:"cpu_avg15"`
	CPUAvg5 int64 `json:"cpu_avg5"`
	MemoryCode int64 `json:"memory_code"`
	MemoryEts int64 `json:"memory_ets"`
	MemoryProcesses int64 `json:"memory_processes"`

@ -144,9 +144,9 @@ func (r *Riak) gatherServer(s string, acc telegraf.Accumulator) error {

	// Build a map of field values
	fields := map[string]interface{}{
		"cpu_avg1": stats.CpuAvg1,
		"cpu_avg15": stats.CpuAvg15,
		"cpu_avg5": stats.CpuAvg5,
		"cpu_avg1": stats.CPUAvg1,
		"cpu_avg15": stats.CPUAvg15,
		"cpu_avg5": stats.CPUAvg5,
		"memory_code": stats.MemoryCode,
		"memory_ets": stats.MemoryEts,
		"memory_processes": stats.MemoryProcesses,

@ -230,7 +230,7 @@ func (s *Snmp) Description() string {
	return `DEPRECATED! PLEASE USE inputs.snmp INSTEAD.`
}

func fillnode(parentNode Node, oid_name string, ids []string) {
func fillnode(parentNode Node, oidName string, ids []string) {
	// ids = ["1", "3", "6", ...]
	id, ids := ids[0], ids[1:]
	node, ok := parentNode.subnodes[id]

@ -241,12 +241,12 @@ func fillnode(parentNode Node, oid_name string, ids []string) {
			subnodes: make(map[string]Node),
		}
		if len(ids) == 0 {
			node.name = oid_name
			node.name = oidName
		}
		parentNode.subnodes[id] = node
	}
	if len(ids) > 0 {
		fillnode(node, oid_name, ids)
		fillnode(node, oidName, ids)
	}
}

@ -305,10 +305,10 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
		for _, line := range strings.Split(string(data), "\n") {
			oids := strings.Fields(line)
			if len(oids) == 2 && oids[1] != "" {
				oid_name := oids[0]
				oidName := oids[0]
				oid := oids[1]
				fillnode(s.initNode, oid_name, strings.Split(oid, "."))
				s.nameToOid[oid_name] = oid
				fillnode(s.initNode, oidName, strings.Split(oid, "."))
				s.nameToOid[oidName] = oid
			}
		}
	}

@ -348,10 +348,10 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
			host.getOids = append(host.getOids, oid)
		}

		for _, oid_name := range host.Collect {
		for _, oidName := range host.Collect {
			// Get GET oids
			for _, oid := range s.Get {
				if oid.Name == oid_name {
				if oid.Name == oidName {
					if val, ok := s.nameToOid[oid.Oid]; ok {
						// TODO should we add the 0 instance ?
						if oid.Instance != "" {

@ -367,7 +367,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
			}
			// Get GETBULK oids
			for _, oid := range s.Bulk {
				if oid.Name == oid_name {
				if oid.Name == oidName {
					if val, ok := s.nameToOid[oid.Oid]; ok {
						oid.rawOid = "." + val
					} else {

@ -473,15 +473,15 @@ func (h *Host) SNMPMap(
		// We need to query this table
		// To get mapping between instance id
		// and instance name
		oid_asked := table.mappingTable
		oid_next := oid_asked
		need_more_requests := true
		oidAsked := table.mappingTable
		oidNext := oidAsked
		needMoreRequests := true
		// Set max repetition
		maxRepetition := uint8(32)
		// Launch requests
		for need_more_requests {
		for needMoreRequests {
			// Launch request
			result, err3 := snmpClient.GetBulk([]string{oid_next}, 0, maxRepetition)
			result, err3 := snmpClient.GetBulk([]string{oidNext}, 0, maxRepetition)
			if err3 != nil {
				return err3
			}

@ -489,7 +489,7 @@ func (h *Host) SNMPMap(
			lastOid := ""
			for _, variable := range result.Variables {
				lastOid = variable.Name
				if strings.HasPrefix(variable.Name, oid_asked) {
				if strings.HasPrefix(variable.Name, oidAsked) {
					switch variable.Type {
					// handle instance names
					case gosnmp.OctetString:

@ -519,7 +519,7 @@ func (h *Host) SNMPMap(

						// remove oid table from the complete oid
						// in order to get the current instance id
						key := strings.Replace(variable.Name, oid_asked, "", 1)
						key := strings.Replace(variable.Name, oidAsked, "", 1)

						if len(table.subTables) == 0 {
							// We have a mapping table

@ -581,11 +581,11 @@ func (h *Host) SNMPMap(
				}
			}
			// Determine if we need more requests
			if strings.HasPrefix(lastOid, oid_asked) {
				need_more_requests = true
				oid_next = lastOid
			if strings.HasPrefix(lastOid, oidAsked) {
				needMoreRequests = true
				oidNext = lastOid
			} else {
				need_more_requests = false
				needMoreRequests = false
			}
		}
	}

@ -617,15 +617,15 @@ func (h *Host) SNMPGet(acc telegraf.Accumulator, initNode Node) error {

	// gosnmp.MAX_OIDS == 60
	// TODO use gosnmp.MAX_OIDS instead of hard coded value
	max_oids := 60
	maxOids := 60
	// limit 60 (MAX_OIDS) oids by requests
	for i := 0; i < len(oidsList); i = i + max_oids {
	for i := 0; i < len(oidsList); i = i + maxOids {
		// Launch request
		max_index := i + max_oids
		if i+max_oids > len(oidsList) {
			max_index = len(oidsList)
		maxIndex := i + maxOids
		if i+maxOids > len(oidsList) {
			maxIndex = len(oidsList)
		}
		result, err3 := snmpClient.Get(oidsNameList[i:max_index]) // Get() accepts up to g.MAX_OIDS
		result, err3 := snmpClient.Get(oidsNameList[i:maxIndex]) // Get() accepts up to g.MAX_OIDS
		if err3 != nil {
			return err3
		}

@ -658,31 +658,31 @@ func (h *Host) SNMPBulk(acc telegraf.Accumulator, initNode Node) error {
	// TODO Trying to make requests with more than one OID
	// to reduce the number of requests
	for _, oid := range oidsNameList {
		oid_asked := oid
		need_more_requests := true
		oidAsked := oid
		needMoreRequests := true
		// Set max repetition
		maxRepetition := oidsList[oid].MaxRepetition
		if maxRepetition <= 0 {
			maxRepetition = 32
		}
		// Launch requests
		for need_more_requests {
		for needMoreRequests {
			// Launch request
			result, err3 := snmpClient.GetBulk([]string{oid}, 0, maxRepetition)
			if err3 != nil {
				return err3
			}
			// Handle response
			last_oid, err := h.HandleResponse(oidsList, result, acc, initNode)
			lastOid, err := h.HandleResponse(oidsList, result, acc, initNode)
			if err != nil {
				return err
			}
			// Determine if we need more requests
			if strings.HasPrefix(last_oid, oid_asked) {
				need_more_requests = true
				oid = last_oid
			if strings.HasPrefix(lastOid, oidAsked) {
				needMoreRequests = true
				oid = lastOid
			} else {
				need_more_requests = false
				needMoreRequests = false
			}
		}
	}

@ -700,16 +700,16 @@ func (h *Host) GetSNMPClient() (*gosnmp.GoSNMP, error) {
		version = gosnmp.Version2c
	}
	// Prepare host and port
	host, port_str, err := net.SplitHostPort(h.Address)
	host, portStr, err := net.SplitHostPort(h.Address)
	if err != nil {
		port_str = string("161")
		portStr = string("161")
	}
	// convert port_str to port in uint16
	port_64, err := strconv.ParseUint(port_str, 10, 16)
	port64, err := strconv.ParseUint(portStr, 10, 16)
	if err != nil {
		return nil, err
	}
	port := uint16(port_64)
	port := uint16(port64)
	// Get SNMP client
	snmpClient := &gosnmp.GoSNMP{
		Target: host,

@ -739,7 +739,7 @@ func (h *Host) HandleResponse(
		lastOid = variable.Name
	nextresult:
		// Get only oid wanted
		for oid_key, oid := range oids {
		for oidKey, oid := range oids {
			// Skip oids already processed
			for _, processedOid := range h.processedOids {
				if variable.Name == processedOid {

@ -750,7 +750,7 @@ func (h *Host) HandleResponse(
			// OR
			// the result is SNMP table which "." comes right after oid_key.
			// ex: oid_key: .1.3.6.1.2.1.2.2.1.16, variable.Name: .1.3.6.1.2.1.2.2.1.16.1
			if variable.Name == oid_key || strings.HasPrefix(variable.Name, oid_key+".") {
			if variable.Name == oidKey || strings.HasPrefix(variable.Name, oidKey+".") {
				switch variable.Type {
				// handle Metrics
				case gosnmp.Boolean, gosnmp.Integer, gosnmp.Counter32, gosnmp.Gauge32,

@ -761,19 +761,19 @@ func (h *Host) HandleResponse(
						tags["unit"] = oid.Unit
					}
					// Get name and instance
					var oid_name string
					var oidName string
					var instance string
					// Get oidname and instance from translate file
					oid_name, instance = findnodename(initNode,
					oidName, instance = findnodename(initNode,
						strings.Split(string(variable.Name[1:]), "."))
					// Set instance tag
					// From mapping table
					mapping, inMappingNoSubTable := h.OidInstanceMapping[oid_key]
					mapping, inMappingNoSubTable := h.OidInstanceMapping[oidKey]
					if inMappingNoSubTable {
						// filter if the instance in not in
						// OidInstanceMapping mapping map
						if instance_name, exists := mapping[instance]; exists {
							tags["instance"] = instance_name
						if instanceName, exists := mapping[instance]; exists {
							tags["instance"] = instanceName
						} else {
							continue
						}

@ -788,24 +788,24 @@ func (h *Host) HandleResponse(
					}

					// Set name
					var field_name string
					if oid_name != "" {
					var fieldName string
					if oidName != "" {
						// Set fieldname as oid name from translate file
						field_name = oid_name
						fieldName = oidName
					} else {
						// Set fieldname as oid name from inputs.snmp.get section
						// Because the result oid is equal to inputs.snmp.get section
						field_name = oid.Name
						fieldName = oid.Name
					}
					tags["snmp_host"], _, _ = net.SplitHostPort(h.Address)
					fields := make(map[string]interface{})
					fields[string(field_name)] = variable.Value
					fields[string(fieldName)] = variable.Value

					h.processedOids = append(h.processedOids, variable.Name)
					acc.AddFields(field_name, fields, tags)
					acc.AddFields(fieldName, fields, tags)
				case gosnmp.NoSuchObject, gosnmp.NoSuchInstance:
					// Oid not found
					log.Printf("E! [inputs.snmp_legacy] oid %q not found", oid_key)
					log.Printf("E! [inputs.snmp_legacy] oid %q not found", oidKey)
				default:
					// delete other data
				}

@ -1348,7 +1348,7 @@ IF @EngineEdition IN (2,3,4) AND @MajorMinorVersion >= 1050
END
`

const sqlServerCpuV2 string = `
const sqlServerCPUV2 string = `
/*The ring buffer has a new value every minute*/
IF SERVERPROPERTY('EngineEdition') IN (2,3,4) /*Standard,Enterpris,Express*/
BEGIN

@ -154,7 +154,7 @@ func initQueries(s *SQLServer) error {
		queries["SQLServerSchedulers"] = Query{ScriptName: "SQLServerSchedulers", Script: sqlServerSchedulers, ResultByRow: false}
		queries["SQLServerRequests"] = Query{ScriptName: "SQLServerRequests", Script: sqlServerRequests, ResultByRow: false}
		queries["SQLServerVolumeSpace"] = Query{ScriptName: "SQLServerVolumeSpace", Script: sqlServerVolumeSpace, ResultByRow: false}
		queries["SQLServerCpu"] = Query{ScriptName: "SQLServerCpu", Script: sqlServerRingBufferCpu, ResultByRow: false}
		queries["SQLServerCpu"] = Query{ScriptName: "SQLServerCpu", Script: sqlServerRingBufferCPU, ResultByRow: false}
		queries["SQLServerAvailabilityReplicaStates"] = Query{ScriptName: "SQLServerAvailabilityReplicaStates", Script: sqlServerAvailabilityReplicaStates, ResultByRow: false}
		queries["SQLServerDatabaseReplicaStates"] = Query{ScriptName: "SQLServerDatabaseReplicaStates", Script: sqlServerDatabaseReplicaStates, ResultByRow: false}
	} else {

@ -174,7 +174,7 @@ func initQueries(s *SQLServer) error {
		queries["Schedulers"] = Query{ScriptName: "Schedulers", Script: sqlServerSchedulersV2, ResultByRow: false}
		queries["SqlRequests"] = Query{ScriptName: "SqlRequests", Script: sqlServerRequestsV2, ResultByRow: false}
		queries["VolumeSpace"] = Query{ScriptName: "VolumeSpace", Script: sqlServerVolumeSpaceV2, ResultByRow: false}
		queries["Cpu"] = Query{ScriptName: "Cpu", Script: sqlServerCpuV2, ResultByRow: false}
		queries["Cpu"] = Query{ScriptName: "Cpu", Script: sqlServerCPUV2, ResultByRow: false}
	} else {
		log.Println("W! DEPRECATED: query_version=1 has been deprecated in favor of database_type.")
		queries["PerformanceCounters"] = Query{ScriptName: "PerformanceCounters", Script: sqlPerformanceCounters, ResultByRow: true}

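All of the renames above follow the initialism convention enforced by revive's var-naming rule: abbreviations such as Id, Url, Cpu, Vm and Ram become ID, URL, CPU, VM and RAM, and snake_case locals become camelCase. A minimal sketch of the before/after style, using hypothetical names rather than code taken from this diff:

package main

import "fmt"

// Hypothetical example: identifiers spelled the way revive's var-naming
// rule expects, with initialisms fully capitalized and no underscores.
type vmStatus struct {
	ID      string  // was Id
	APIURL  string  // was ApiUrl
	CPULoad float64 // was CpuLoad
}

func main() {
	currentVMStatus := vmStatus{ // was current_vm_status / currentVmStatus
		ID:      "113",
		APIURL:  "/nodes/pve/qemu/113/status/current",
		CPULoad: 0.03,
	}
	fmt.Printf("%+v\n", currentVMStatus)
}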
Some files were not shown because too many files have changed in this diff.