Revive fixes - part 4: (#8981)

empty-lines

Co-authored-by: Pawel Zak <Pawel Zak>

parent 9ddd189cd3
commit 38c61c07ef
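Every hunk below makes the same kind of mechanical change: revive's empty-lines rule flags blank lines that sit directly after an opening brace or directly before a closing brace, and the fix is simply to delete them. A minimal before/after sketch of the pattern, with invented function names (this code is not taken from the commit):

package example

// Before: revive (empty-lines) reports the blank line right after "{"
// and the blank line right before "}".
func sumBefore(values []int) int {

	total := 0
	for _, v := range values {
		total += v
	}
	return total

}

// After: the leading and trailing blank lines inside the body are removed;
// behavior is unchanged.
func sumAfter(values []int) int {
	total := 0
	for _, v := range values {
		total += v
	}
	return total
}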
@@ -364,7 +364,6 @@ func (a *Agent) testStartInputs(
 if err != nil {
 log.Printf("E! [agent] Starting input %s: %v", input.LogName(), err)
 }
-
 }
 
 unit.inputs = append(unit.inputs, input)

@@ -908,7 +908,6 @@ func loadConfig(config string) ([]byte, error) {
 // If it isn't a https scheme, try it as a file.
 }
 return ioutil.ReadFile(config)
-
 }
 
 func fetchConfig(u *url.URL) ([]byte, error) {

@@ -65,7 +65,6 @@ func (r *GzipReader) Read(b []byte) (int, error) {
 return n, nil
 }
 return n, err
-
 }
 
 // NewContentEncoder returns a ContentEncoder for the encoding type.

@@ -19,7 +19,6 @@ var (
 )
 
 func TestCompileAndMatch(t *testing.T) {
-
 type test struct {
 path string
 matches int

@@ -37,7 +37,6 @@ func (h *basicAuthHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request)
 if !ok ||
 subtle.ConstantTimeCompare([]byte(reqUsername), []byte(h.username)) != 1 ||
 subtle.ConstantTimeCompare([]byte(reqPassword), []byte(h.password)) != 1 {
-
 rw.Header().Set("WWW-Authenticate", "Basic realm=\""+h.realm+"\"")
 h.onError(rw)
 http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)

@@ -73,7 +72,6 @@ func (h *genericAuthHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request
 // Scheme checking
 authorization := req.Header.Get("Authorization")
 if subtle.ConstantTimeCompare([]byte(authorization), []byte(h.credentials)) != 1 {
-
 h.onError(rw)
 http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
 return

@@ -117,7 +117,6 @@ func newTrackingMetricGroup(group []telegraf.Metric, fn NotifyFunc) ([]telegraf.
 d: d,
 }
 group[i] = dm
-
 }
 if finalizer != nil {
 runtime.SetFinalizer(d, finalizer)

@@ -402,7 +402,6 @@ func TestFilter_FilterTagsMatches(t *testing.T) {
 // both parameters were defined
 // see: https://github.com/influxdata/telegraf/issues/2860
 func TestFilter_FilterNamePassAndDrop(t *testing.T) {
-
 inputData := []string{"name1", "name2", "name3", "name4"}
 expectedResult := []bool{false, true, false, false}
 

@@ -422,7 +421,6 @@ func TestFilter_FilterNamePassAndDrop(t *testing.T) {
 // both parameters were defined
 // see: https://github.com/influxdata/telegraf/issues/2860
 func TestFilter_FilterFieldPassAndDrop(t *testing.T) {
-
 inputData := []string{"field1", "field2", "field3", "field4"}
 expectedResult := []bool{false, true, false, false}
 

@@ -479,7 +477,6 @@ func TestFilter_FilterTagsPassAndDrop(t *testing.T) {
 for i, tag := range inputData {
 require.Equal(t, f.shouldTagsPass(tag), expectedResult[i])
 }
-
 }
 
 func BenchmarkFilter(b *testing.B) {

@@ -125,7 +125,6 @@ func (r *RunningOutput) Init() error {
 if err != nil {
 return err
 }
-
 }
 return nil
 }

@@ -167,7 +167,6 @@ func (b *BasicStats) Push(acc telegraf.Accumulator) {
 for _, aggregate := range b.cache {
 fields := map[string]interface{}{}
 for k, v := range aggregate.fields {
-
 if b.statsConfig.count {
 fields[k+"_count"] = v.count
 }

@@ -184,7 +184,6 @@ func TestBasicStatsDifferentPeriods(t *testing.T) {
 
 // Test only aggregating count
 func TestBasicStatsWithOnlyCount(t *testing.T) {
-
 aggregator := NewBasicStats()
 aggregator.Stats = []string{"count"}
 aggregator.Log = testutil.Logger{}

@@ -213,7 +212,6 @@ func TestBasicStatsWithOnlyCount(t *testing.T) {
 
 // Test only aggregating minimum
 func TestBasicStatsWithOnlyMin(t *testing.T) {
-
 aggregator := NewBasicStats()
 aggregator.Stats = []string{"min"}
 aggregator.Log = testutil.Logger{}

@@ -242,7 +240,6 @@ func TestBasicStatsWithOnlyMin(t *testing.T) {
 
 // Test only aggregating maximum
 func TestBasicStatsWithOnlyMax(t *testing.T) {
-
 aggregator := NewBasicStats()
 aggregator.Stats = []string{"max"}
 aggregator.Log = testutil.Logger{}

@@ -271,7 +268,6 @@ func TestBasicStatsWithOnlyMax(t *testing.T) {
 
 // Test only aggregating mean
 func TestBasicStatsWithOnlyMean(t *testing.T) {
-
 aggregator := NewBasicStats()
 aggregator.Stats = []string{"mean"}
 aggregator.Log = testutil.Logger{}

@@ -300,7 +296,6 @@ func TestBasicStatsWithOnlyMean(t *testing.T) {
 
 // Test only aggregating sum
 func TestBasicStatsWithOnlySum(t *testing.T) {
-
 aggregator := NewBasicStats()
 aggregator.Stats = []string{"sum"}
 aggregator.Log = testutil.Logger{}

@@ -331,7 +326,6 @@ func TestBasicStatsWithOnlySum(t *testing.T) {
 // implementations of sum were calculated from mean and count, which
 // e.g. summed "1, 1, 5, 1" as "7.999999..." instead of 8.
 func TestBasicStatsWithOnlySumFloatingPointErrata(t *testing.T) {
-
 var sum1, _ = metric.New("m1",
 map[string]string{},
 map[string]interface{}{

@@ -383,7 +377,6 @@ func TestBasicStatsWithOnlySumFloatingPointErrata(t *testing.T) {
 
 // Test only aggregating variance
 func TestBasicStatsWithOnlyVariance(t *testing.T) {
-
 aggregator := NewBasicStats()
 aggregator.Stats = []string{"s2"}
 aggregator.Log = testutil.Logger{}

@@ -410,7 +403,6 @@ func TestBasicStatsWithOnlyVariance(t *testing.T) {
 
 // Test only aggregating standard deviation
 func TestBasicStatsWithOnlyStandardDeviation(t *testing.T) {
-
 aggregator := NewBasicStats()
 aggregator.Stats = []string{"stdev"}
 aggregator.Log = testutil.Logger{}

@@ -437,7 +429,6 @@ func TestBasicStatsWithOnlyStandardDeviation(t *testing.T) {
 
 // Test only aggregating minimum and maximum
 func TestBasicStatsWithMinAndMax(t *testing.T) {
-
 aggregator := NewBasicStats()
 aggregator.Stats = []string{"min", "max"}
 aggregator.Log = testutil.Logger{}

@@ -473,7 +464,6 @@ func TestBasicStatsWithMinAndMax(t *testing.T) {
 
 // Test only aggregating diff
 func TestBasicStatsWithDiff(t *testing.T) {
-
 aggregator := NewBasicStats()
 aggregator.Stats = []string{"diff"}
 aggregator.Log = testutil.Logger{}

@@ -499,7 +489,6 @@ func TestBasicStatsWithDiff(t *testing.T) {
 }
 
 func TestBasicStatsWithRate(t *testing.T) {
-
 aggregator := NewBasicStats()
 aggregator.Stats = []string{"rate"}
 aggregator.Log = testutil.Logger{}

@@ -524,7 +513,6 @@ func TestBasicStatsWithRate(t *testing.T) {
 }
 
 func TestBasicStatsWithNonNegativeRate(t *testing.T) {
-
 aggregator := NewBasicStats()
 aggregator.Stats = []string{"non_negative_rate"}
 aggregator.Log = testutil.Logger{}

@@ -548,7 +536,6 @@ func TestBasicStatsWithNonNegativeRate(t *testing.T) {
 acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
 }
 func TestBasicStatsWithInterval(t *testing.T) {
-
 aggregator := NewBasicStats()
 aggregator.Stats = []string{"interval"}
 aggregator.Log = testutil.Logger{}

@@ -575,7 +562,6 @@ func TestBasicStatsWithInterval(t *testing.T) {
 
 // Test only aggregating non_negative_diff
 func TestBasicStatsWithNonNegativeDiff(t *testing.T) {
-
 aggregator := NewBasicStats()
 aggregator.Stats = []string{"non_negative_diff"}
 aggregator.Log = testutil.Logger{}

@@ -666,7 +652,6 @@ func TestBasicStatsWithAllStats(t *testing.T) {
 
 // Test that if an empty array is passed, no points are pushed
 func TestBasicStatsWithNoStats(t *testing.T) {
-
 aggregator := NewBasicStats()
 aggregator.Stats = []string{}
 aggregator.Log = testutil.Logger{}

@@ -683,7 +668,6 @@ func TestBasicStatsWithNoStats(t *testing.T) {
 
 // Test that if an unknown stat is configured, it doesn't explode
 func TestBasicStatsWithUnknownStat(t *testing.T) {
-
 aggregator := NewBasicStats()
 aggregator.Stats = []string{"crazy"}
 aggregator.Log = testutil.Logger{}

@@ -703,7 +687,6 @@ func TestBasicStatsWithUnknownStat(t *testing.T) {
 // otherwise user's working systems will suddenly (and surprisingly) start
 // capturing sum without their input.
 func TestBasicStatsWithDefaultStats(t *testing.T) {
-
 aggregator := NewBasicStats()
 aggregator.Log = testutil.Logger{}
 aggregator.getConfiguredStats()

@@ -119,7 +119,6 @@ func TestTwoFullEventsWithoutParameter(t *testing.T) {
 "value_rate": float64(5),
 },
 )
-
 }
 
 func TestTwoFullEventsInSeperatePushes(t *testing.T) {

@@ -210,7 +210,6 @@ func TestHistogramWithAllFieldsNonCumulative(t *testing.T) {
 // TestHistogramWithTwoPeriodsAndAllFields tests two metrics getting added with a push/reset in between (simulates
 // getting added in different periods) for all fields
 func TestHistogramWithTwoPeriodsAndAllFields(t *testing.T) {
-
 var cfg []config
 cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
 histogram := NewTestHistogram(cfg, false, true)

@@ -105,5 +105,4 @@ func gssapiAuthType(authType string) int {
 default:
 return 0
 }
-
 }
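An aside on the two auth handlers above (and http_listener_v2 further down): they compare credentials with subtle.ConstantTimeCompare rather than ==, so that response latency does not leak how many leading bytes of a guessed secret matched. A standalone sketch of that technique, with invented names (not code from this commit):

package example

import "crypto/subtle"

// checkCreds compares user-supplied credentials against the configured
// ones in constant time; ConstantTimeCompare returns 1 only when the two
// byte slices are equal, taking time independent of where they differ.
func checkCreds(gotUser, gotPass, wantUser, wantPass string) bool {
	userOK := subtle.ConstantTimeCompare([]byte(gotUser), []byte(wantUser)) == 1
	passOK := subtle.ConstantTimeCompare([]byte(gotPass), []byte(wantPass)) == 1
	return userOK && passOK
}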
@@ -53,14 +53,12 @@ func LoadConfig(filePath *string) (loaded loadedConfig, err error) {
 var data string
 conf := config{}
 if filePath != nil && *filePath != "" {
-
 b, err := ioutil.ReadFile(*filePath)
 if err != nil {
 return loadedConfig{}, err
 }
 
 data = expandEnvVars(b)
-
 } else {
 conf, err = DefaultImportedPlugins()
 if err != nil {

@@ -11,7 +11,6 @@ import (
 )
 
 func TestGatherQueuesMetrics(t *testing.T) {
-
 s := `<queues>
 <queue name="sandra">
 <stats size="0" consumerCount="0" enqueueCount="0" dequeueCount="0"/>

@@ -57,7 +56,6 @@ func TestGatherQueuesMetrics(t *testing.T) {
 }
 
 func TestGatherTopicsMetrics(t *testing.T) {
-
 s := `<topics>
 <topic name="ActiveMQ.Advisory.MasterBroker ">
 <stats size="0" consumerCount="0" enqueueCount="1" dequeueCount="0"/>

@@ -104,7 +102,6 @@ func TestGatherTopicsMetrics(t *testing.T) {
 }
 
 func TestGatherSubscribersMetrics(t *testing.T) {
-
 s := `<subscribers>
 <subscriber clientId="AAA" subscriptionName="AAA" connectionId="NOTSET" destinationName="AAA" selector="AA" active="no">
 <stats pendingQueueSize="0" dispatchedQueueSize="0" dispatchedCounter="0" enqueueCounter="0" dequeueCounter="0"/>

@@ -248,7 +248,6 @@ func (a *Aerospike) parseNodeInfo(stats map[string]string, hostPort string, node
 for k, v := range stats {
 key := strings.Replace(k, "-", "_", -1)
 fields[key] = parseAerospikeValue(key, v)
-
 }
 acc.AddFields("aerospike_node", fields, tags, time.Now())
 

@@ -279,7 +278,6 @@ func (a *Aerospike) getNamespaceInfo(namespace string, n *as.Node) (map[string]s
 return stats, err
 }
 func (a *Aerospike) parseNamespaceInfo(stats map[string]string, hostPort string, namespace string, nodeName string, acc telegraf.Accumulator) {
-
 nTags := map[string]string{
 "aerospike_host": hostPort,
 "node_name": nodeName,

@@ -348,7 +346,6 @@ func (a *Aerospike) getSetInfo(namespaceSet string, n *as.Node) (map[string]stri
 }
 
 func (a *Aerospike) parseSetInfo(stats map[string]string, hostPort string, namespaceSet string, nodeName string, acc telegraf.Accumulator) {
-
 stat := strings.Split(
 strings.TrimSuffix(
 stats[fmt.Sprintf("sets/%s", namespaceSet)], ";"), ":")

@@ -383,7 +380,6 @@ func (a *Aerospike) getTTLHistogram(hostPort string, namespace string, set strin
 }
 
 func (a *Aerospike) getObjectSizeLinearHistogram(hostPort string, namespace string, set string, n *as.Node, acc telegraf.Accumulator) error {
-
 stats, err := a.getHistogram(namespace, set, "object-size-linear", n)
 if err != nil {
 return err

@@ -406,11 +402,9 @@ func (a *Aerospike) getHistogram(namespace string, set string, histogramType str
 return nil, err
 }
 return stats, nil
-
 }
 
 func (a *Aerospike) parseHistogram(stats map[string]string, hostPort string, namespace string, set string, histogramType string, nodeName string, acc telegraf.Accumulator) {
-
 nTags := map[string]string{
 "aerospike_host": hostPort,
 "node_name": nodeName,

@@ -463,7 +457,6 @@ func (a *Aerospike) parseHistogram(stats map[string]string, hostPort string, nam
 nFields[strconv.Itoa(bucketName)] = bucketSum
 }
 }
-
 }
 }
 }

@@ -31,7 +31,6 @@ func TestAerospikeStatisticsIntegration(t *testing.T) {
 
 namespaceName := acc.TagValue("aerospike_namespace", "namespace")
 assert.Equal(t, namespaceName, "test")
-
 }
 
 func TestAerospikeStatisticsPartialErrIntegration(t *testing.T) {

@@ -165,7 +164,6 @@ func TestQuerySetsIntegration(t *testing.T) {
 assert.True(t, acc.HasMeasurement("aerospike_set"))
 assert.True(t, acc.HasTag("aerospike_set", "set"))
 assert.True(t, acc.HasInt64Field("aerospike_set", "memory_data_bytes"))
-
 }
 
 func TestSelectQuerySetsIntegration(t *testing.T) {

@@ -215,7 +213,6 @@ func TestSelectQuerySetsIntegration(t *testing.T) {
 assert.True(t, acc.HasMeasurement("aerospike_set"))
 assert.True(t, acc.HasTag("aerospike_set", "set"))
 assert.True(t, acc.HasInt64Field("aerospike_set", "memory_data_bytes"))
-
 }
 
 func TestDisableTTLHistogramIntegration(t *testing.T) {

@@ -264,7 +261,6 @@ func TestTTLHistogramIntegration(t *testing.T) {
 
 assert.True(t, acc.HasMeasurement("aerospike_histogram_ttl"))
 assert.True(t, FindTagValue(&acc, "aerospike_histogram_ttl", "namespace", "test"))
-
 }
 func TestDisableObjectSizeLinearHistogramIntegration(t *testing.T) {
 if testing.Short() {

@@ -287,7 +283,6 @@ func TestDisableObjectSizeLinearHistogramIntegration(t *testing.T) {
 assert.False(t, acc.HasMeasurement("aerospike_histogram_object_size_linear"))
 }
 func TestObjectSizeLinearHistogramIntegration(t *testing.T) {
-
 if testing.Short() {
 t.Skip("Skipping aerospike integration tests.")
 } else {

@@ -419,7 +414,6 @@ func TestParseHistogramSet(t *testing.T) {
 
 a.parseHistogram(stats, "127.0.0.1:3000", "test", "foo", "object-size-linear", "TestNodeName", &acc)
 acc.AssertContainsTaggedFields(t, "aerospike_histogram_object_size_linear", expectedFields, expectedTags)
-
 }
 func TestParseHistogramNamespace(t *testing.T) {
 a := &Aerospike{

@@ -450,7 +444,6 @@ func TestParseHistogramNamespace(t *testing.T) {
 
 a.parseHistogram(stats, "127.0.0.1:3000", "test", "", "object-size-linear", "TestNodeName", &acc)
 acc.AssertContainsTaggedFields(t, "aerospike_histogram_object_size_linear", expectedFields, expectedTags)
-
 }
 func TestAerospikeParseValue(t *testing.T) {
 // uint64 with value bigger than int64 max

@@ -484,7 +477,6 @@ func FindTagValue(acc *testutil.Accumulator, measurement string, key string, val
 if ok && v == value {
 return true
 }
-
 }
 }
 return false

@@ -187,7 +187,6 @@ func (s *AliyunCMS) Description() string {
 }
 
 func (s *AliyunCMS) Init() error {
-
 if s.Project == "" {
 return errors.New("project is not set")
 }

@@ -275,7 +274,6 @@ func (s *AliyunCMS) Start(telegraf.Accumulator) error {
 
 // Gather implements telegraf.Inputs interface
 func (s *AliyunCMS) Gather(acc telegraf.Accumulator) error {
-
 s.updateWindow(time.Now())
 
 // limit concurrency or we can easily exhaust user connection limit

@@ -288,7 +286,6 @@ func (s *AliyunCMS) Gather(acc telegraf.Accumulator) error {
 s.prepareTagsAndDimensions(metric)
 wg.Add(len(metric.MetricNames))
 for _, metricName := range metric.MetricNames {
-
 <-lmtr.C
 go func(metricName string, metric *Metric) {
 defer wg.Done()

@@ -308,7 +305,6 @@ func (s *AliyunCMS) Stop() {
 }
 
 func (s *AliyunCMS) updateWindow(relativeTo time.Time) {
-
 //https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.6.701.54025679zh6wiR
 //The start and end times are executed in the mode of
 //opening left and closing right, and startTime cannot be equal

@@ -329,7 +325,6 @@ func (s *AliyunCMS) updateWindow(relativeTo time.Time) {
 
 // Gather given metric and emit error
 func (s *AliyunCMS) gatherMetric(acc telegraf.Accumulator, metricName string, metric *Metric) error {
-
 req := cms.CreateDescribeMetricListRequest()
 req.Period = strconv.FormatInt(int64(s.Period.Duration.Seconds()), 10)
 req.MetricName = metricName

@@ -368,7 +363,6 @@ func (s *AliyunCMS) gatherMetric(acc telegraf.Accumulator, metricName string, me
 case "instanceId", "BucketName":
 tags[key] = value.(string)
 if metric.discoveryTags != nil { //discovery can be not activated
-
 //Skipping data point if discovery data not exist
 if _, ok := metric.discoveryTags[value.(string)]; !ok &&
 !metric.AllowDataPointWODiscoveryData {

@@ -401,7 +395,6 @@ func (s *AliyunCMS) gatherMetric(acc telegraf.Accumulator, metricName string, me
 
 //Tag helper
 func parseTag(tagSpec string, data interface{}) (string, string, error) {
-
 tagKey := tagSpec
 queryPath := tagSpec
 

@@ -452,9 +445,8 @@ L:
 }
 }
 
-if newData || //new data arrives, process it
-len(metric.discoveryTags) == 0 { //or this is the first call
-
+//new data arrives (so process it) or this is the first call
+if newData || len(metric.discoveryTags) == 0 {
 metric.dtLock.Lock()
 defer metric.dtLock.Unlock()
 

@@ -467,14 +459,12 @@ L:
 
 //Preparing tags & dims...
 for instanceId, elem := range s.discoveryData {
-
 //Start filing tags
 //Remove old value if exist
 delete(metric.discoveryTags, instanceId)
 metric.discoveryTags[instanceId] = make(map[string]string, len(metric.TagsQueryPath)+len(defaulTags))
 
 for _, tagQueryPath := range metric.TagsQueryPath {
-
 tagKey, tagValue, err := parseTag(tagQueryPath, elem)
 if err != nil {
 s.Log.Errorf("%v", err)

@@ -510,7 +500,6 @@ L:
 metric.requestDimensions = append(
 metric.requestDimensions,
 map[string]string{s.dimensionKey: instanceId})
-
 }
 
 //Get final dimension (need to get full lis of

@@ -531,7 +520,6 @@ L:
 } else {
 metric.requestDimensionsStr = string(reqDim)
 }
-
 }
 }
 
@@ -25,7 +25,6 @@ const inputTitle = "inputs.aliyuncms"
 type mockGatherAliyunCMSClient struct{}
 
 func (m *mockGatherAliyunCMSClient) DescribeMetricList(request *cms.DescribeMetricListRequest) (*cms.DescribeMetricListResponse, error) {
-
 resp := new(cms.DescribeMetricListResponse)
 
 //switch request.Metric {

@@ -221,7 +220,6 @@ func TestUpdateWindow(t *testing.T) {
 }
 
 func TestGatherMetric(t *testing.T) {
-
 plugin := &AliyunCMS{
 Project: "acs_slb_dashboard",
 client: new(mockGatherAliyunCMSClient),

@@ -255,13 +253,11 @@ func TestGatherMetric(t *testing.T) {
 t.Run(tt.name, func(t *testing.T) {
 var acc telegraf.Accumulator
 require.EqualError(t, plugin.gatherMetric(acc, tt.metricName, metric), tt.expectedErrorString)
-
 })
 }
 }
 
 func TestGather(t *testing.T) {
-
 metric := &Metric{
 MetricNames: []string{},
 Dimensions: `{"instanceId": "i-abcdefgh123456"}`,

@@ -332,7 +328,6 @@ func TestGather(t *testing.T) {
 }
 
 func TestGetDiscoveryDataAllRegions(t *testing.T) {
-
 //test table:
 tests := []struct {
 name string

@@ -403,8 +398,6 @@ func TestGetDiscoveryDataAllRegions(t *testing.T) {
 if err != nil {
 require.EqualError(t, err, tt.expectedErrorString)
 }
-
 })
 }
-
 }

@@ -72,7 +72,6 @@ type discoveryTool struct {
 //getRpcReqFromDiscoveryRequest - utility function to map between aliyun request primitives
 //discoveryRequest represents different type of discovery requests
 func getRpcReqFromDiscoveryRequest(req discoveryRequest) (*requests.RpcRequest, error) {
-
 if reflect.ValueOf(req).Type().Kind() != reflect.Ptr ||
 reflect.ValueOf(req).IsNil() {
 return nil, errors.Errorf("Not expected type of the discovery request object: %q, %q", reflect.ValueOf(req).Type(), reflect.ValueOf(req).Kind())

@@ -81,7 +80,6 @@ func getRpcReqFromDiscoveryRequest(req discoveryRequest) (*requests.RpcRequest,
 ptrV := reflect.Indirect(reflect.ValueOf(req))
 
 for i := 0; i < ptrV.NumField(); i++ {
-
 if ptrV.Field(i).Type().String() == "*requests.RpcRequest" {
 if !ptrV.Field(i).CanInterface() {
 return nil, errors.Errorf("Can't get interface of %v", ptrV.Field(i))

@@ -323,7 +321,6 @@ func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse)
 
 //It should contain the array with discovered data
 for _, item := range rootKeyVal {
-
 if discData, foundDataItem = item.([]interface{}); foundDataItem {
 break
 }

@@ -338,7 +335,6 @@ func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse)
 case "PageNumber":
 pageNumber = int(val.(float64))
 }
-
 }
 if !foundRootKey {
 return nil, 0, 0, 0, errors.Errorf("Didn't find root key %q in discovery response", dt.respRootKey)

@@ -390,14 +386,11 @@ func (dt *discoveryTool) getDiscoveryData(cli aliyunSdkClient, req *requests.Com
 } else {
 return nil, errors.Errorf("Can't parse input data element, not a map[string]interface{} type")
 }
-
 }
 
 return preparedData, nil
 }
-
 }
-
 }
 
 func (dt *discoveryTool) getDiscoveryDataAllRegions(limiter chan bool) (map[string]interface{}, error) {

@@ -468,7 +461,6 @@ func (dt *discoveryTool) Start() {
 case <-dt.done:
 return
 case <-ticker.C:
-
 data, err = dt.getDiscoveryDataAllRegions(lmtr.C)
 if err != nil {
 dt.lg.Errorf("Can't get discovery data: %v", err)

@@ -485,14 +477,12 @@ func (dt *discoveryTool) Start() {
 //send discovery data in blocking mode
 dt.dataChan <- data
 }
-
 }
 }
 }()
 }
 
 func (dt *discoveryTool) Stop() {
-
 close(dt.done)
 
 //Shutdown timer
@@ -162,7 +162,6 @@ func (n *Apache) gatherScores(data string) map[string]interface{} {
 var S, R, W, K, D, C, L, G, I int = 0, 0, 0, 0, 0, 0, 0, 0, 0
 
 for _, s := range strings.Split(data, "") {
-
 switch s {
 case "_":
 waiting++

@@ -102,7 +102,6 @@ func TestConfig(t *testing.T) {
 }
 })
 }
-
 }
 
 func TestApcupsdGather(t *testing.T) {

@@ -155,7 +154,6 @@ func TestApcupsdGather(t *testing.T) {
 )
 
 for _, tt := range tests {
-
 t.Run(tt.name, func(t *testing.T) {
 ctx, cancel := context.WithCancel(context.Background())
 

@@ -224,5 +224,4 @@ func Test_BeatRequest(test *testing.T) {
 if err != nil {
 test.Logf("Can't gather stats")
 }
-
 }

@@ -129,9 +129,7 @@ func (j javaMetric) addTagsFields(out map[string]interface{}) {
 }
 }
 
-func addCassandraMetric(mbean string, c cassandraMetric,
-values map[string]interface{}) {
-
+func addCassandraMetric(mbean string, c cassandraMetric, values map[string]interface{}) {
 tags := make(map[string]string)
 fields := make(map[string]interface{})
 tokens := parseJmxMetricRequest(mbean)

@@ -139,11 +137,9 @@ func addCassandraMetric(mbean string, c cassandraMetric,
 tags["cassandra_host"] = c.host
 addValuesAsFields(values, fields, tags["mname"])
 c.acc.AddFields(tokens["class"]+tokens["type"], fields, tags)
-
 }
 
 func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
-
 r := out["request"]
 
 tokens := parseJmxMetricRequest(r.(map[string]interface{})["mbean"].(string))

@@ -198,9 +198,7 @@ func TestHttpJsonJavaMultiType(t *testing.T) {
 
 // Test that the proper values are ignored or collected
 func TestHttp404(t *testing.T) {
-
-jolokia := genJolokiaClientStub(invalidJSON, 404, Servers,
-[]string{HeapMetric})
+jolokia := genJolokiaClientStub(invalidJSON, 404, Servers, []string{HeapMetric})
 
 var acc testutil.Accumulator
 err := acc.GatherError(jolokia.Gather)

@@ -211,17 +211,14 @@ var findSockets = func(c *Ceph) ([]*socket, error) {
 if strings.HasPrefix(f, c.OsdPrefix) {
 sockType = typeOsd
 sockPrefix = osdPrefix
-
 }
 if strings.HasPrefix(f, c.MdsPrefix) {
 sockType = typeMds
 sockPrefix = mdsPrefix
-
 }
 if strings.HasPrefix(f, c.RgwPrefix) {
 sockType = typeRgw
 sockPrefix = rgwPrefix
-
 }
 
 if sockType == typeOsd || sockType == typeMon || sockType == typeMds || sockType == typeRgw {

@@ -105,7 +105,6 @@ func TestGather(t *testing.T) {
 acc := &testutil.Accumulator{}
 c := &Ceph{}
 c.Gather(acc)
-
 }
 
 func TestFindSockets(t *testing.T) {

@@ -49,7 +49,6 @@ func TestGather(t *testing.T) {
 t.Fatal(err)
 }
 acc.AssertContainsTaggedFields(t, "chrony", fields, tags)
-
 }
 
 // fackeExecCommand is a helper function that mock

@@ -102,7 +101,6 @@ Leap status : Not synchronized
 } else {
 fmt.Fprint(os.Stdout, "command not found")
 os.Exit(1)
-
 }
 os.Exit(0)
 }

@@ -588,5 +588,4 @@ func TestGRPCDialoutMultiple(t *testing.T) {
 tags = map[string]string{"path": "type:model/other/path", "name": "str", "source": "hostname", "subscription": "subscription"}
 fields = map[string]interface{}{"value": int64(-1)}
 acc.AssertContainsTaggedFields(t, "other", fields, tags)
-
 }

@@ -195,7 +195,6 @@ func (ch *ClickHouse) Gather(acc telegraf.Accumulator) (err error) {
 }
 
 for _, conn := range connects {
-
 metricsFuncs := []func(acc telegraf.Accumulator, conn *connect) error{
 ch.tables,
 ch.zookeeper,

@@ -212,7 +211,6 @@ func (ch *ClickHouse) Gather(acc telegraf.Accumulator) (err error) {
 if err := metricFunc(acc, &conn); err != nil {
 acc.AddError(err)
 }
-
 }
 
 for metric := range commonMetrics {

@@ -342,7 +340,6 @@ func (ch *ClickHouse) replicationQueue(acc telegraf.Accumulator, conn *connect)
 }
 
 func (ch *ClickHouse) detachedParts(acc telegraf.Accumulator, conn *connect) error {
-
 var detachedParts []struct {
 DetachedParts chUInt64 `json:"detached_parts"`
 }

@@ -363,7 +360,6 @@ func (ch *ClickHouse) detachedParts(acc telegraf.Accumulator, conn *connect) err
 }
 
 func (ch *ClickHouse) dictionaries(acc telegraf.Accumulator, conn *connect) error {
-
 var brokenDictionaries []struct {
 Origin string `json:"origin"`
 BytesAllocated chUInt64 `json:"bytes_allocated"`

@@ -397,7 +393,6 @@ func (ch *ClickHouse) dictionaries(acc telegraf.Accumulator, conn *connect) erro
 }
 
 func (ch *ClickHouse) mutations(acc telegraf.Accumulator, conn *connect) error {
-
 var mutationsStatus []struct {
 Failed chUInt64 `json:"failed"`
 Running chUInt64 `json:"running"`

@@ -424,7 +419,6 @@ func (ch *ClickHouse) mutations(acc telegraf.Accumulator, conn *connect) error {
 }
 
 func (ch *ClickHouse) disks(acc telegraf.Accumulator, conn *connect) error {
-
 var disksStatus []struct {
 Name string `json:"name"`
 Path string `json:"path"`

@@ -448,14 +442,12 @@ func (ch *ClickHouse) disks(acc telegraf.Accumulator, conn *connect) error {
 },
 tags,
 )
-
 }
 
 return nil
 }
 
 func (ch *ClickHouse) processes(acc telegraf.Accumulator, conn *connect) error {
-
 var processesStats []struct {
 QueryType string `json:"query_type"`
 Percentile50 float64 `json:"p50"`

@@ -479,7 +471,6 @@ func (ch *ClickHouse) processes(acc telegraf.Accumulator, conn *connect) error {
 },
 tags,
 )
-
 }
 
 return nil

@@ -583,5 +583,4 @@ func TestAutoDiscovery(t *testing.T) {
 )
 defer ts.Close()
 ch.Gather(acc)
-
 }

@@ -41,7 +41,6 @@ func TestGatherServer(t *testing.T) {
 }
 
 func TestSanitizeURI(t *testing.T) {
-
 var sanitizeTest = []struct {
 input string
 expected string

@@ -74,8 +74,8 @@ func init() {
 func (s *CSGO) gatherServer(
 server []string,
 request func(string, string) (string, error),
-acc telegraf.Accumulator) error {
-
+acc telegraf.Accumulator,
+) error {
 if len(server) != 2 {
 return errors.New("incorrect server config")
 }
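A second mechanical pattern appears in a handful of hunks (the CSGO gatherServer hunk just above, and the calculateData, gatherPluginsStats, and gatherQueueStats hunks further down): when a Go function signature is split across lines, the last parameter gets a trailing comma and the closing parenthesis moves to its own line, rather than gluing ") error {" onto the final parameter. A hypothetical sketch of the reflow, not taken from the diff:

package example

// Before: the closing parenthesis hides at the end of the last parameter line.
func gatherBefore(name string,
	count int) error {
	return nil
}

// After: trailing comma on the last parameter, ") error {" on its own line,
// so adding or reordering parameters produces minimal diffs.
func gatherAfter(
	name string,
	count int,
) error {
	return nil
}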
@@ -229,7 +229,6 @@ func (c *ClusterClient) GetContainers(ctx context.Context, node string) ([]Conta
 containers := make([]Container, 0, len(list))
 for _, c := range list {
 containers = append(containers, Container{ID: c})
-
 }
 
 return containers, nil

@@ -142,7 +142,6 @@ func TestGetSummary(t *testing.T) {
 require.Equal(t, tt.expectedValue, summary)
 })
 }
-
 }
 
 func TestGetNodeMetrics(t *testing.T) {

@@ -184,7 +183,6 @@ func TestGetNodeMetrics(t *testing.T) {
 require.Equal(t, tt.expectedValue, m)
 })
 }
-
 }
 
 func TestGetContainerMetrics(t *testing.T) {

@@ -226,5 +224,4 @@ func TestGetContainerMetrics(t *testing.T) {
 require.Equal(t, tt.expectedValue, m)
 })
 }
-
 }

@@ -203,7 +203,6 @@ func TestAddNodeMetrics(t *testing.T) {
 }
 })
 }
-
 }
 
 func TestAddContainerMetrics(t *testing.T) {

@@ -104,7 +104,6 @@ func (d *DiskIO) Gather(acc telegraf.Accumulator) error {
 }
 
 for _, io := range diskio {
-
 match := false
 if d.deviceFilter != nil && d.deviceFilter.Match(io.Name) {
 match = true

@@ -71,7 +71,6 @@ func TestDiskInfo(t *testing.T) {
 assert.Equal(t, "myval1", di["MY_PARAM_1"])
 assert.Equal(t, "myval2", di["MY_PARAM_2"])
 assert.Equal(t, "/dev/foo/bar/devlink /dev/foo/bar/devlink1", di["DEVLINKS"])
-
 // unfortunately we can't adjust mtime on /dev/null to test cache invalidation
 }
 

@@ -102,7 +102,6 @@ const defaultPort = "7711"
 
 func (d *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
 if d.c == nil {
-
 _, _, err := net.SplitHostPort(addr.Host)
 if err != nil {
 addr.Host = addr.Host + ":" + defaultPort

@@ -1122,7 +1122,6 @@ func TestHostnameFromID(t *testing.T) {
 }
 })
 }
-
 }
 
 func Test_parseContainerStatsPerDeviceAndTotal(t *testing.T) {

@@ -1269,7 +1268,6 @@ func Test_parseContainerStatsPerDeviceAndTotal(t *testing.T) {
 []string{"docker_container_cpu", "docker_container_net", "docker_container_blkio"})
 })
 testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.OnlyTags(), testutil.SortMetrics())
-
 })
 }
 }

@@ -1360,7 +1358,6 @@ func TestDocker_Init(t *testing.T) {
 t.Errorf("Total include: got '%v', want '%v'", d.TotalInclude, tt.wantTotalInclude)
 }
 }
-
 })
 }
 }

@@ -110,7 +110,6 @@ func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype stri
 }
 
 func gatherStats(buf *bytes.Buffer, acc telegraf.Accumulator, host string, qtype string) error {
-
 lines := strings.Split(buf.String(), "\n")
 head := strings.Split(lines[0], "\t")
 vals := lines[1:]

@@ -169,13 +168,11 @@ func splitSec(tm string) (sec int64, msec int64) {
 }
 
 func timeParser(tm string) time.Time {
-
 sec, msec := splitSec(tm)
 return time.Unix(sec, msec)
 }
 
 func secParser(tm string) float64 {
-
 sec, msec := splitSec(tm)
 return float64(sec) + (float64(msec) / 1000000.0)
 }

@@ -10,7 +10,6 @@ import (
 )
 
 func TestDovecotIntegration(t *testing.T) {
-
 if testing.Short() {
 t.Skip("Skipping integration test in short mode")
 }

@@ -103,7 +102,6 @@ func TestDovecotIntegration(t *testing.T) {
 require.NoError(t, err)
 
 acc.AssertContainsTaggedFields(t, "dovecot", fields, tags)
-
 }
 
 const sampleGlobal = `reset_timestamp last_update num_logins num_cmds num_connected_sessions user_cpu sys_cpu clock_time min_faults maj_faults vol_cs invol_cs disk_input disk_output read_count read_bytes write_count write_bytes mail_lookup_path mail_lookup_attr mail_read_count mail_read_bytes mail_cache_hits

@@ -152,7 +152,6 @@ func (c *EcsClient) ContainerStats() (map[string]types.StatsJSON, error) {
 // PollSync executes Task and ContainerStats in parallel. If both succeed, both structs are returned.
 // If either errors, a single error is returned.
 func PollSync(c Client) (*Task, map[string]types.StatsJSON, error) {
-
 var task *Task
 var stats map[string]types.StatsJSON
 var err error

@@ -27,7 +27,6 @@ func (p *pollMock) ContainerStats() (map[string]types.StatsJSON, error) {
 }
 
 func TestEcsClient_PollSync(t *testing.T) {
-
 tests := []struct {
 name string
 mock *pollMock

@@ -284,7 +284,6 @@ func blkstats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags m
 } else {
 totalStatMap[field] = uintV
 }
-
 }
 }
 

@@ -277,7 +277,6 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
 e.serverInfoMutex.Lock()
 e.serverInfo[s] = info
 e.serverInfoMutex.Unlock()
-
 }(serv, acc)
 }
 wgC.Wait()

@@ -640,7 +639,6 @@ func (e *Elasticsearch) gatherSingleIndexStats(name string, index indexStat, now
 if e.IndicesLevel == "shards" {
 for shardNumber, shards := range index.Shards {
 for _, shard := range shards {
-
 // Get Shard Stats
 flattened := jsonparser.JSONFlattener{}
 err := flattened.FullFlattenJSON("", shard, true, true)

@@ -137,7 +137,6 @@ func removeCarriageReturns(b bytes.Buffer) bytes.Buffer {
 b = buf
 }
 return b
-
 }
 
 func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync.WaitGroup) {

@@ -186,5 +186,4 @@ func runCounterProgram() {
 }
 fmt.Fprint(os.Stdout, string(b))
 }
-
 }

@@ -121,7 +121,6 @@ func (f *Fibaro) getJSON(path string, dataStruct interface{}) error {
 
 // Gather fetches all required information to output metrics
 func (f *Fibaro) Gather(acc telegraf.Accumulator) error {
-
 if f.client == nil {
 f.client = &http.Client{
 Transport: &http.Transport{

@@ -292,7 +292,6 @@ func (fc *FileCount) initGlobPaths(acc telegraf.Accumulator) {
 fc.globPaths = append(fc.globPaths, *glob)
 }
 }
-
 }
 
 func NewFileCount() *FileCount {

@@ -229,9 +229,7 @@ func getFakeFileSystem(basePath string) fakeFileSystem {
 basePath + "/subdir/nested2/qux": {name: "qux", filemode: uint32(fmask), modtime: mtime, size: int64(400)},
 }
 
-fs := fakeFileSystem{files: fileList}
-return fs
-
+return fakeFileSystem{files: fileList}
 }
 
 func fileCountEquals(t *testing.T, fc FileCount, expectedCount int, expectedSize int) {

@@ -69,5 +69,4 @@ func (f fakeFileSystem) Stat(name string) (os.FileInfo, error) {
 return fakeInfo, nil
 }
 return nil, &os.PathError{Op: "Stat", Path: name, Err: errors.New("No such file or directory")}
-
 }
@@ -89,7 +89,5 @@ func getTestFileSystem() fakeFileSystem {
 "/testdata/foo": {name: "foo", filemode: uint32(fmask), modtime: mtime},
 }
 
-fs := fakeFileSystem{files: fileList}
-return fs
-
+return fakeFileSystem{files: fileList}
 }

@@ -69,7 +69,6 @@ func (r *Fireboard) Description() string {
 
 // Init the things
 func (r *Fireboard) Init() error {
-
 if len(r.AuthToken) == 0 {
 return fmt.Errorf("You must specify an authToken")
 }

@@ -88,7 +87,6 @@ func (r *Fireboard) Init() error {
 
 // Gather Reads stats from all configured servers.
 func (r *Fireboard) Gather(acc telegraf.Accumulator) error {
-
 // Perform the GET request to the fireboard servers
 req, err := http.NewRequest("GET", r.URL, nil)
 if err != nil {

@@ -81,14 +81,12 @@ func (h *Fluentd) SampleConfig() string { return sampleConfig }
 
 // Gather - Main code responsible for gathering, processing and creating metrics
 func (h *Fluentd) Gather(acc telegraf.Accumulator) error {
-
 _, err := url.Parse(h.Endpoint)
 if err != nil {
 return fmt.Errorf("Invalid URL \"%s\"", h.Endpoint)
 }
 
 if h.client == nil {
-
 tr := &http.Transport{
 ResponseHeaderTimeout: time.Duration(3 * time.Second),
 }

@@ -127,7 +125,6 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error {
 
 // Go through all plugins one by one
 for _, p := range dataPoints {
-
 skip := false
 
 // Check if this specific type was excluded in configuration

@@ -149,7 +146,6 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error {
 
 if p.BufferQueueLength != nil {
 tmpFields["buffer_queue_length"] = *p.BufferQueueLength
-
 }
 if p.RetryCount != nil {
 tmpFields["retry_count"] = *p.RetryCount

@@ -111,14 +111,12 @@ var (
 )
 
 func Test_parse(t *testing.T) {
-
 t.Log("Testing parser function")
 _, err := parse([]byte(sampleJSON))
 
 if err != nil {
 t.Error(err)
 }
-
 }
 
 func Test_Gather(t *testing.T) {

@@ -159,5 +157,4 @@ func Test_Gather(t *testing.T) {
 assert.Equal(t, *expectedOutput[1].RetryCount, acc.Metrics[1].Fields["retry_count"])
 assert.Equal(t, *expectedOutput[1].BufferQueueLength, acc.Metrics[1].Fields["buffer_queue_length"])
 assert.Equal(t, *expectedOutput[1].BufferTotalQueuedSize, acc.Metrics[1].Fields["buffer_total_queued_size"])
-
 }

@@ -403,7 +403,6 @@ func (c *GNMI) handlePath(path *gnmi.Path, tags map[string]string, prefix string
 } else {
 tags[key] = val
 }
-
 }
 }
 }

@@ -79,7 +79,6 @@ func (h *haproxy) Gather(acc telegraf.Accumulator) error {
 endpoints := make([]string, 0, len(h.Servers))
 
 for _, endpoint := range h.Servers {
-
 if strings.HasPrefix(endpoint, "http") {
 endpoints = append(endpoints, endpoint)
 continue

@@ -27,7 +27,6 @@ func (h *mockFetcher) Fetch(address string) ([]hddtemp.Disk, error) {
 Unit: "C",
 },
 }, nil
-
 }
 func newMockFetcher() *mockFetcher {
 return &mockFetcher{}

@@ -79,5 +78,4 @@ func TestFetch(t *testing.T) {
 for _, test := range tests {
 acc.AssertContainsTaggedFields(t, "hddtemp", test.fields, test.tags)
 }
-
 }

@@ -313,7 +313,6 @@ func (h *HTTPListenerV2) authenticateIfSet(handler http.HandlerFunc, res http.Re
 if !ok ||
 subtle.ConstantTimeCompare([]byte(reqUsername), []byte(h.BasicUsername)) != 1 ||
 subtle.ConstantTimeCompare([]byte(reqPassword), []byte(h.BasicPassword)) != 1 {
-
 http.Error(res, "Unauthorized.", http.StatusUnauthorized)
 return
 }

@@ -130,5 +130,4 @@ func TestInfiniband(t *testing.T) {
 addStats("m1x5_0", "1", sampleRdmastatsEntries, &acc)
 
 acc.AssertContainsTaggedFields(t, "infiniband", fields, tags)
-
 }

@@ -327,7 +327,6 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc {
 }
 
 h.acc.AddMetric(m)
-
 }
 if err != influx.EOF {
 h.Log.Debugf("Error parsing the request body: %v", err.Error())

@@ -92,7 +92,6 @@ func (r *raplServiceImpl) getConstraintMaxPowerWatts(socketID string) (float64,
 
 socketMaxPower, _, err := r.fs.readFileToFloat64(socketMaxPowerFile)
 return convertMicroWattToWatt(socketMaxPower), err
-
 }
 
 func (r *raplServiceImpl) prepareData() {

@@ -176,8 +175,8 @@ func (r *raplServiceImpl) findDramFolder(raplFolders []string, socketID string)
 }
 
 func (r *raplServiceImpl) calculateData(socketID string, socketEnergyUjFile io.Reader, dramEnergyUjFile io.Reader,
-socketMaxEnergyUjFile io.Reader, dramMaxEnergyUjFile io.Reader) error {
-
+socketMaxEnergyUjFile io.Reader, dramMaxEnergyUjFile io.Reader,
+) error {
 newSocketEnergy, _, err := r.readEnergyInJoules(socketEnergyUjFile)
 if err != nil {
 return err

@@ -298,11 +298,9 @@ func (r *IntelRDT) processOutput(cmdReader io.ReadCloser, processesPIDsAssociati
 */
 toOmit := pqosInitOutputLinesNumber
 
-// omit first measurements which are zeroes
-if len(r.parsedCores) != 0 {
+if len(r.parsedCores) != 0 { // omit first measurements which are zeroes
 toOmit = toOmit + len(r.parsedCores)
-// specify how many lines should pass before stopping
-} else if len(processesPIDsAssociation) != 0 {
+} else if len(processesPIDsAssociation) != 0 { // specify how many lines should pass before stopping
 toOmit = toOmit + len(processesPIDsAssociation)
 }
 for omitCounter := 0; omitCounter < toOmit; omitCounter++ {

@@ -378,7 +378,6 @@ OS RealTime Mod | 0x00 | ok
 } else {
 fmt.Fprint(os.Stdout, "command not found")
 os.Exit(1)
-
 }
 os.Exit(0)
 }

@@ -573,7 +572,6 @@ Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected
 } else {
 fmt.Fprint(os.Stdout, "command not found")
 os.Exit(1)
-
 }
 os.Exit(0)
 }

@@ -197,7 +197,6 @@ func (j *Jenkins) initialize(client *http.Client) error {
 }
 
 func (j *Jenkins) gatherNodeData(n node, acc telegraf.Accumulator) error {
-
 tags := map[string]string{}
 if n.DisplayName == "" {
 return fmt.Errorf("error empty node name")

@@ -249,7 +248,6 @@ func (j *Jenkins) gatherNodeData(n node, acc telegraf.Accumulator) error {
 }
 
 func (j *Jenkins) gatherNodesData(acc telegraf.Accumulator) {
-
 nodeResp, err := j.client.getAllNodes(context.Background())
 if err != nil {
 acc.AddError(err)

@@ -846,7 +846,6 @@ func TestGatherJobs(t *testing.T) {
 }
 }
 }
-
 }
 })
 }

@@ -217,7 +217,6 @@ func (j *Jolokia) prepareRequest(server Server, metrics []Metric) (*http.Request
 }
 
 jolokiaURL = proxyURL
-
 } else {
 serverURL, err := url.Parse("http://" + server.Host + ":" + server.Port + context)
 if err != nil {
@@ -239,9 +239,7 @@ func TestHttpJsonThreeLevelMultiValue(t *testing.T) {
 
 // Test that the proper values are ignored or collected
 func TestHttp404(t *testing.T) {
-
-jolokia := genJolokiaClientStub(invalidJSON, 404, Servers,
-[]Metric{UsedHeapMetric})
+jolokia := genJolokiaClientStub(invalidJSON, 404, Servers, []Metric{UsedHeapMetric})
 
 var acc testutil.Accumulator
 acc.SetDebug(true)

@@ -254,9 +252,7 @@ func TestHttp404(t *testing.T) {
 
 // Test that the proper values are ignored or collected
 func TestHttpInvalidJson(t *testing.T) {
-
-jolokia := genJolokiaClientStub(invalidJSON, 200, Servers,
-[]Metric{UsedHeapMetric})
+jolokia := genJolokiaClientStub(invalidJSON, 200, Servers, []Metric{UsedHeapMetric})
 
 var acc testutil.Accumulator
 acc.SetDebug(true)

@@ -195,7 +195,6 @@ func tagSetsMatch(a, b map[string]string) bool {
 func makeReadRequests(metrics []Metric) []ReadRequest {
 var requests []ReadRequest
 for _, metric := range metrics {
-
 if len(metric.Paths) == 0 {
 requests = append(requests, ReadRequest{
 Mbean: metric.Mbean,

@@ -83,9 +83,8 @@ func (ja *JolokiaAgent) Gather(acc telegraf.Accumulator) error {
 
 err := ja.gatherer.Gather(client, acc)
 if err != nil {
-acc.AddError(fmt.Errorf("Unable to gather metrics for %s: %v", client.URL, err))
+acc.AddError(fmt.Errorf("unable to gather metrics for %s: %v", client.URL, err))
 }
-
 }(client)
 }
 

@@ -40,7 +40,6 @@ func (pb *pointBuilder) Build(mbean string, value interface{}) []point {
 
 points := make([]point, 0)
 for mbean, value := range valueMap {
-
 points = append(points, point{
 Tags: pb.extractTags(mbean),
 Fields: pb.extractFields(mbean, value),

@@ -99,13 +98,11 @@ func (pb *pointBuilder) extractFields(mbean string, value interface{}) map[strin
 // if there were no attributes requested,
 // then the keys are attributes
 pb.fillFields("", valueMap, fieldMap)
-
 } else if len(pb.objectAttributes) == 1 {
 // if there was a single attribute requested,
 // then the keys are the attribute's properties
 fieldName := pb.formatFieldName(pb.objectAttributes[0], pb.objectPath)
 pb.fillFields(fieldName, valueMap, fieldMap)
-
 } else {
 // if there were multiple attributes requested,
 // then the keys are the attribute names

@@ -199,7 +196,6 @@ func (pb *pointBuilder) applySubstitutions(mbean string, fieldMap map[string]int
 properties := makePropertyMap(mbean)
 
 for i, subKey := range pb.substitutions[1:] {
-
 symbol := fmt.Sprintf("$%d", i+1)
 substitution := properties[subKey]
 

@@ -272,7 +272,6 @@ func (m *OpenConfigTelemetry) splitSensorConfig() int {
 m.sensorsConfig = append(m.sensorsConfig, sensorConfig{
 measurementName: measurementName, pathList: pathlist,
 })
-
 }
 
 return len(m.sensorsConfig)

@@ -216,7 +216,6 @@ func (k *Kapacitor) gatherURL(
 
 if s.Kapacitor != nil {
 for _, obj := range *s.Kapacitor {
-
 // Strip out high-cardinality or duplicative tags
 excludeTags := []string{"host", "cluster_id", "server_id"}
 for _, key := range excludeTags {

@@ -35,7 +35,6 @@ func (k *Kernel) Description() string {
 func (k *Kernel) SampleConfig() string { return "" }
 
 func (k *Kernel) Gather(acc telegraf.Accumulator) error {
-
 data, err := k.getProcStat()
 if err != nil {
 return err

@@ -35,7 +35,6 @@ func (k *KernelVmstat) Gather(acc telegraf.Accumulator) error {
 
 dataFields := bytes.Fields(data)
 for i, field := range dataFields {
-
 // dataFields is an array of {"stat1_name", "stat1_value", "stat2_name",
 // "stat2_value", ...}
 // We only want the even number index as that contain the stat name.

@@ -183,7 +183,6 @@ func (k *Kibana) createHTTPClient() (*http.Client, error) {
 }
 
 func (k *Kibana) gatherKibanaStatus(baseURL string, acc telegraf.Accumulator) error {
-
 kibanaStatus := &kibanaStatus{}
 url := baseURL + statusPath
 

@@ -229,9 +228,7 @@ func (k *Kibana) gatherKibanaStatus(baseURL string, acc telegraf.Accumulator) er
 fields["heap_max_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapMaxInBytes
 fields["heap_total_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapMaxInBytes
 fields["heap_used_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapUsedInBytes
-
 }
-
 acc.AddFields("kibana", fields, tags)
 
 return nil

@@ -89,7 +89,6 @@ func (k *Kubernetes) Description() string {
 }
 
 func (k *Kubernetes) Init() error {
-
 // If neither are provided, use the default service account.
 if k.BearerToken == "" && k.BearerTokenString == "" {
 k.BearerToken = defaultServiceAccountPath

@@ -21,7 +21,6 @@ func TestKubernetesStats(t *testing.T) {
 w.WriteHeader(http.StatusOK)
 fmt.Fprintln(w, responsePods)
 }
-
 }))
 defer ts.Close()
 

@@ -155,7 +154,6 @@ func TestKubernetesStats(t *testing.T) {
 "pod_name": "foopod",
 }
 acc.AssertContainsTaggedFields(t, "kubernetes_pod_network", fields, tags)
-
 }
 
 var responsePods = `

@@ -48,7 +48,6 @@ func (l *Lanz) Gather(acc telegraf.Accumulator) error {
 }
 
 func (l *Lanz) Start(acc telegraf.Accumulator) error {
-
 if len(l.Servers) == 0 {
 l.Servers = append(l.Servers, "tcp://127.0.0.1:50001")
 }

@@ -51,7 +51,6 @@ var testProtoBufGlobalBufferUsageRecord = &pb.LanzRecord{
 }
 
 func TestLanzGeneratesMetrics(t *testing.T) {
-
 var acc testutil.Accumulator
 
 l := NewLanz()

@@ -133,5 +132,4 @@ func TestLanzGeneratesMetrics(t *testing.T) {
 
 acc.AssertContainsFields(t, "lanz_global_buffer_usage_record", gburVals1)
 acc.AssertContainsTaggedFields(t, "lanz_global_buffer_usage_record", gburVals1, gburTags1)
-
 }

@@ -91,7 +91,6 @@ func GetHostProc() string {
 }
 
 func init() {
-
 inputs.Add("linux_sysctl_fs", func() telegraf.Input {
 return &SysctlFS{
 path: path.Join(GetHostProc(), "/sys/fs"),

@@ -271,7 +271,6 @@ func (l *LogParserPlugin) receiver(tailer *tail.Tail) {
 
 var line *tail.Line
 for line = range tailer.Lines {
-
 if line.Err != nil {
 l.Log.Errorf("Error tailing file %s, Error: %s",
 tailer.Filename, line.Err)

@@ -321,7 +320,6 @@ func (l *LogParserPlugin) parser() {
 } else {
 l.Log.Errorf("Error parsing log line: %s", err.Error())
 }
-
 }
 }
 

@@ -270,8 +270,8 @@ func (logstash *Logstash) gatherPluginsStats(
 plugins []Plugin,
 pluginType string,
 tags map[string]string,
-accumulator telegraf.Accumulator) error {
-
+accumulator telegraf.Accumulator,
+) error {
 for _, plugin := range plugins {
 pluginTags := map[string]string{
 "plugin_name": plugin.Name,

@@ -295,9 +295,8 @@ func (logstash *Logstash) gatherPluginsStats(
 func (logstash *Logstash) gatherQueueStats(
 queue *PipelineQueue,
 tags map[string]string,
-accumulator telegraf.Accumulator) error {
-
-var err error
+accumulator telegraf.Accumulator,
+) error {
 queueTags := map[string]string{
 "queue_type": queue.Type,
 }

@@ -311,7 +310,7 @@ func (logstash *Logstash) gatherQueueStats(
 
 if queue.Type != "memory" {
 flattener := jsonParser.JSONFlattener{}
-err = flattener.FlattenJSON("", queue.Capacity)
+err := flattener.FlattenJSON("", queue.Capacity)
 if err != nil {
 return err
 }

@@ -549,7 +549,6 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) {
 "queue_type": string("persisted"),
 },
 )
-
 }
 
 func Test_Logstash5GatherJVMStats(test *testing.T) {

@@ -618,7 +617,6 @@ func Test_Logstash5GatherJVMStats(test *testing.T) {
 "node_version": string("5.3.0"),
 },
 )
-
 }
 
 func Test_Logstash6GatherJVMStats(test *testing.T) {

@@ -687,5 +685,4 @@ func Test_Logstash6GatherJVMStats(test *testing.T) {
 "node_version": string("6.4.2"),
 },
 )
-
 }
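One hunk above also rewrites an error message from "Unable to gather metrics…" to "unable to gather metrics…". That follows the standard Go convention that error strings start with a lowercase letter and carry no trailing punctuation, because they are usually wrapped inside larger messages. A small illustration with invented names (not code from this commit):

package example

import (
	"errors"
	"fmt"
)

var errConnect = errors.New("unable to connect") // lowercase, no period

func dial(host string) error {
	// When wrapped, the lowercase message reads naturally mid-sentence.
	return fmt.Errorf("dialing %s: %w", host, errConnect)
}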
@@ -133,7 +133,6 @@ const mdtJobStatsContents = `job_stats:
 `
 
 func TestLustre2GeneratesMetrics(t *testing.T) {
-
 tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/"
 ostName := "OST0001"
 

@@ -206,7 +205,6 @@ func TestLustre2GeneratesMetrics(t *testing.T) {
 }
 
 func TestLustre2GeneratesJobstatsMetrics(t *testing.T) {
-
 tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/"
 ostName := "OST0001"
 jobNames := []string{"cluster-testjob1", "testjob2"}

@@ -137,7 +137,6 @@ func TestMailChimpGatherReport(t *testing.T) {
 "industry_type": "Social Networks and Online Communities",
 }
 acc.AssertContainsTaggedFields(t, "mailchimp", fields, tags)
-
 }
 
 func TestMailChimpGatherError(t *testing.T) {

@@ -108,7 +108,6 @@ var sampleConfig = `
 
 // Init parse all source URLs and place on the Marklogic struct
 func (c *Marklogic) Init() error {
-
 if len(c.URL) == 0 {
 c.URL = "http://localhost:8002/"
 }

@@ -76,7 +76,6 @@ func TestMarklogic(t *testing.T) {
 }
 
 acc.AssertContainsTaggedFields(t, "marklogic", expectFields, expectTags)
-
 }
 
 var response = `

@@ -278,7 +278,6 @@ func generateMetrics() {
 for _, k := range slaveMetricNames {
 slaveMetrics[k] = rand.Float64()
 }
-
 // slaveTaskMetrics = map[string]interface{}{
 // "executor_id": fmt.Sprintf("task_name.%s", randUUID()),
 // "executor_name": "Some task description",

@@ -157,7 +157,6 @@ func parsePlayers(input string) ([]string, error) {
 continue
 }
 players = append(players, name)
-
 }
 return players, nil
 }

@@ -483,7 +483,6 @@ func (m *Modbus) getFields() error {
 
 register.Fields[i].value = convertDataType(register.Fields[i], valuesT)
 }
-
 }
 }
 

@@ -1131,8 +1131,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
 returnVal.NodeType = "ARB"
 } else {
 returnVal.NodeType = "UNK"
-}
-// END code modification
+} // END code modification
 } else if returnVal.IsMongos {
 returnVal.NodeType = "RTR"
 }

@@ -9,7 +9,6 @@ import (
 )
 
 func TestLatencyStats(t *testing.T) {
-
 sl := NewStatLine(
 MongoStatus{
 ServerStatus: &ServerStatus{

@@ -65,7 +64,6 @@ func TestLatencyStats(t *testing.T) {
 }
 
 func TestLatencyStatsDiffZero(t *testing.T) {
-
 sl := NewStatLine(
 MongoStatus{
 ServerStatus: &ServerStatus{

@@ -135,7 +133,6 @@ func TestLatencyStatsDiffZero(t *testing.T) {
 }
 
 func TestLatencyStatsDiff(t *testing.T) {
-
 sl := NewStatLine(
 MongoStatus{
 ServerStatus: &ServerStatus{

@@ -229,7 +229,6 @@ func (m *Monit) Init() error {
 }
 
 func (m *Monit) Gather(acc telegraf.Accumulator) error {
-
 req, err := http.NewRequest("GET", fmt.Sprintf("%s/_status?format=xml", m.Address), nil)
 if err != nil {
 return err

@@ -245,7 +244,6 @@ func (m *Monit) Gather(acc telegraf.Accumulator) error {
 defer resp.Body.Close()
 
 if resp.StatusCode == 200 {
-
 var status Status
 decoder := xml.NewDecoder(resp.Body)
 decoder.CharsetReader = charset.NewReaderLabel

@@ -345,10 +343,7 @@ func (m *Monit) Gather(acc telegraf.Accumulator) error {
 }
 }
 } else {
-return fmt.Errorf("received status code %d (%s), expected 200",
-resp.StatusCode,
-http.StatusText(resp.StatusCode))
-
+return fmt.Errorf("received status code %d (%s), expected 200", resp.StatusCode, http.StatusText(resp.StatusCode))
 }
 return nil
 }

@@ -555,7 +555,6 @@ func checkAuth(r *http.Request, username, password string) bool {
 }
 
 func TestAllowHosts(t *testing.T) {
-
 r := &Monit{
 Address: "http://127.0.0.1:2812",
 Username: "test",

@@ -591,9 +590,7 @@ func TestConnection(t *testing.T) {
 }
 
 func TestInvalidUsernameOrPassword(t *testing.T) {
-
 ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-
 if !checkAuth(r, "testing", "testing") {
 http.Error(w, "Unauthorized.", 401)
 return

@@ -625,9 +622,7 @@ func TestInvalidUsernameOrPassword(t *testing.T) {
 }
 
 func TestNoUsernameOrPasswordConfiguration(t *testing.T) {
-
 ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-
 if !checkAuth(r, "testing", "testing") {
 http.Error(w, "Unauthorized.", 401)
 return

@@ -657,7 +652,6 @@ func TestNoUsernameOrPasswordConfiguration(t *testing.T) {
 }
 
 func TestInvalidXMLAndInvalidTypes(t *testing.T) {
-
 tests := []struct {
 name string
 filename string

@@ -1439,7 +1439,6 @@ func (m *Mysql) gatherPerfSummaryPerAccountPerEvent(db *sql.DB, serv string, acc
 "sum_no_good_index_used": sumNoGoodIndexUsed,
 }
 acc.AddFields("mysql_perf_acc_event", sqlLWFields, sqlLWTags)
-
 }
 
 return nil

@@ -1662,8 +1661,8 @@ func (m *Mysql) gatherPerfFileEventsStatuses(db *sql.DB, serv string, acc telegr
 fields["file_events_seconds_total"] = sumTimerWrite / picoSeconds
 fields["file_events_bytes_totals"] = sumNumBytesWrite
 acc.AddFields("mysql_perf_schema", fields, writeTags)
-
 }
 
 return nil
 }
 

@@ -51,7 +51,6 @@ func TestGather(t *testing.T) {
 t.Errorf("Number of servers mismatch. got=%d, want=%d",
 len(acc.Errors), len(test.servers))
 }
-
 })
 }
 }

@@ -239,7 +239,6 @@ func (n *NFSClient) parseStat(mountpoint string, export string, version string,
 acc.AddFields("nfs_ops", fields, tags)
 }
 }
-
 }
 
 return nil

@@ -304,7 +303,6 @@ func (n *NFSClient) processText(scanner *bufio.Scanner, acc telegraf.Accumulator
 }
 
 func (n *NFSClient) getMountStatsPath() string {
-
 path := "/proc/self/mountstats"
 if os.Getenv("MOUNT_PROC") != "" {
 path = os.Getenv("MOUNT_PROC")

@@ -314,7 +312,6 @@ func (n *NFSClient) getMountStatsPath() string {
 }
 
 func (n *NFSClient) Gather(acc telegraf.Accumulator) error {
-
 file, err := os.Open(n.mountstatsPath)
 if err != nil {
 n.Log.Errorf("Failed opening the [%s] file: %s ", file, err)

@@ -334,7 +331,6 @@ func (n *NFSClient) Gather(acc telegraf.Accumulator) error {
 }
 
 func (n *NFSClient) Init() error {
-
 var nfs3Fields = []string{
 "NULL",
 "GETATTR",

@@ -9,7 +9,6 @@ import (
 )
 
 func getMountStatsPath() string {
-
 path := "./testdata/mountstats"
 if os.Getenv("MOUNT_PROC") != "" {
 path = os.Getenv("MOUNT_PROC")