chore: Fix linter findings for `revive:comment-spacings` (part 2) (#15897)

Paweł Żak 2024-09-19 11:03:28 +02:00 committed by GitHub
parent 43590ca730
commit 453d32bd81
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
64 changed files with 305 additions and 312 deletions
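The rule behind every hunk below is revive's comment-spacings check, which flags //-comments that have no space between the slashes and the comment text. A minimal sketch of the style being enforced (illustrative only; the treatment of directive comments is inferred from the unchanged //go:embed and //nolint: lines in the diffs below, not quoted from the project's linter configuration):

// Package example sketches the comment style enforced in this commit.
package example

//before: revive/comment-spacings flags this form (no space after "//")
// after: adding a single space after "//" satisfies the rule

//go:generate echo "tool directives keep their no-space form"
//nolint:unused // linter directives with their own syntax are likewise left as-is
var sample int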


@@ -61,16 +61,16 @@ type (
 Metric struct {
 ObjectsFilter string `toml:"objects_filter"`
 MetricNames []string `toml:"names"`
-Dimensions string `toml:"dimensions"` //String representation of JSON dimensions
+Dimensions string `toml:"dimensions"` // String representation of JSON dimensions
 TagsQueryPath []string `toml:"tag_query_path"`
-AllowDataPointWODiscoveryData bool `toml:"allow_dps_without_discovery"` //Allow data points without discovery data (if no discovery data found)
+AllowDataPointWODiscoveryData bool `toml:"allow_dps_without_discovery"` // Allow data points without discovery data (if no discovery data found)
-dtLock sync.Mutex //Guard for discoveryTags & dimensions
+dtLock sync.Mutex // Guard for discoveryTags & dimensions
-discoveryTags map[string]map[string]string //Internal data structure that can enrich metrics with tags
+discoveryTags map[string]map[string]string // Internal data structure that can enrich metrics with tags
 dimensionsUdObj map[string]string
-dimensionsUdArr []map[string]string //Parsed Dimesnsions JSON string (unmarshalled)
+dimensionsUdArr []map[string]string // Parsed Dimesnsions JSON string (unmarshalled)
-requestDimensions []map[string]string //this is the actual dimensions list that would be used in API request
+requestDimensions []map[string]string // this is the actual dimensions list that would be used in API request
-requestDimensionsStr string //String representation of the above
+requestDimensionsStr string // String representation of the above
 }
@@ -149,7 +149,7 @@ func (s *AliyunCMS) Init() error {
 return fmt.Errorf("failed to create cms client: %w", err)
 }
-//check metrics dimensions consistency
+// check metrics dimensions consistency
 for i := range s.Metrics {
 metric := s.Metrics[i]
 if metric.Dimensions == "" {
@@ -172,15 +172,15 @@ func (s *AliyunCMS) Init() error {
 s.measurement = formatMeasurement(s.Project)
-//Check regions
+// Check regions
 if len(s.Regions) == 0 {
 s.Regions = aliyunRegionList
 s.Log.Infof("'regions' is not set. Metrics will be queried across %d regions:\n%s",
 len(s.Regions), strings.Join(s.Regions, ","))
 }
-//Init discovery...
+// Init discovery...
-if s.dt == nil { //Support for tests
+if s.dt == nil { // Support for tests
 s.dt, err = newDiscoveryTool(s.Regions, s.Project, s.Log, credential, int(float32(s.RateLimit)*0.2), time.Duration(s.DiscoveryInterval))
 if err != nil {
 s.Log.Errorf("Discovery tool is not activated: %v", err)
@@ -198,7 +198,7 @@ func (s *AliyunCMS) Init() error {
 s.Log.Infof("%d object(s) discovered...", len(s.discoveryData))
-//Special setting for acs_oss project since the API differs
+// Special setting for acs_oss project since the API differs
 if s.Project == "acs_oss" {
 s.dimensionKey = "BucketName"
 }
@@ -208,7 +208,7 @@ func (s *AliyunCMS) Init() error {
 // Start plugin discovery loop, metrics are gathered through Gather
 func (s *AliyunCMS) Start(telegraf.Accumulator) error {
-//Start periodic discovery process
+// Start periodic discovery process
 if s.dt != nil {
 s.dt.start()
 }
@@ -226,7 +226,7 @@ func (s *AliyunCMS) Gather(acc telegraf.Accumulator) error {
 var wg sync.WaitGroup
 for _, metric := range s.Metrics {
-//Prepare internal structure with data from discovery
+// Prepare internal structure with data from discovery
 s.prepareTagsAndDimensions(metric)
 wg.Add(len(metric.MetricNames))
 for _, metricName := range metric.MetricNames {
@@ -250,10 +250,10 @@ func (s *AliyunCMS) Stop() {
 }
 func (s *AliyunCMS) updateWindow(relativeTo time.Time) {
-//https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.6.701.54025679zh6wiR
+// https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.6.701.54025679zh6wiR
-//The start and end times are executed in the mode of
+// The start and end times are executed in the mode of
-//opening left and closing right, and startTime cannot be equal
+// opening left and closing right, and startTime cannot be equal
-//to or greater than endTime.
+// to or greater than endTime.
 windowEnd := relativeTo.Add(-time.Duration(s.Delay))
@@ -310,8 +310,8 @@ func (s *AliyunCMS) gatherMetric(acc telegraf.Accumulator, metricName string, me
 switch key {
 case "instanceId", "BucketName":
 tags[key] = value.(string)
-if metric.discoveryTags != nil { //discovery can be not activated
+if metric.discoveryTags != nil { // discovery can be not activated
-//Skipping data point if discovery data not exist
+// Skipping data point if discovery data not exist
 _, ok := metric.discoveryTags[value.(string)]
 if !ok &&
 !metric.AllowDataPointWODiscoveryData {
@@ -349,7 +349,7 @@ func parseTag(tagSpec string, data interface{}) (tagKey, tagValue string, err er
 )
 tagKey = tagSpec
-//Split query path to tagKey and query path
+// Split query path to tagKey and query path
 if splitted := strings.Split(tagSpec, ":"); len(splitted) == 2 {
 tagKey = splitted[0]
 queryPath = splitted[1]
@@ -360,7 +360,7 @@ func parseTag(tagSpec string, data interface{}) (tagKey, tagValue string, err er
 return "", "", fmt.Errorf("can't query data from discovery data using query path %q: %w", queryPath, err)
 }
-if tagRawValue == nil { //Nothing found
+if tagRawValue == nil { // Nothing found
 return "", "", nil
 }
@@ -378,11 +378,11 @@ func (s *AliyunCMS) prepareTagsAndDimensions(metric *Metric) {
 defaultTags = []string{"RegionId:RegionId"}
 )
-if s.dt == nil { //Discovery is not activated
+if s.dt == nil { // Discovery is not activated
 return
 }
-//Reading all data from buffered channel
+// Reading all data from buffered channel
 L:
 for {
 select {
@@ -394,7 +394,7 @@ L:
 }
 }
-//new data arrives (so process it) or this is the first call
+// new data arrives (so process it) or this is the first call
 if newData || len(metric.discoveryTags) == 0 {
 metric.dtLock.Lock()
 defer metric.dtLock.Unlock()
@@ -403,13 +403,13 @@ L:
 metric.discoveryTags = make(map[string]map[string]string, len(s.discoveryData))
 }
-metric.requestDimensions = nil //erasing
+metric.requestDimensions = nil // erasing
 metric.requestDimensions = make([]map[string]string, 0, len(s.discoveryData))
-//Preparing tags & dims...
+// Preparing tags & dims...
 for instanceID, elem := range s.discoveryData {
-//Start filing tags
+// Start filing tags
-//Remove old value if exist
+// Remove old value if exist
 delete(metric.discoveryTags, instanceID)
 metric.discoveryTags[instanceID] = make(map[string]string, len(metric.TagsQueryPath)+len(defaultTags))
@@ -419,7 +419,7 @@ L:
 s.Log.Errorf("%v", err)
 continue
 }
-if err == nil && tagValue == "" { //Nothing found
+if err == nil && tagValue == "" { // Nothing found
 s.Log.Debugf("Data by query path %q: is not found, for instance %q", tagQueryPath, instanceID)
 continue
 }
@@ -427,7 +427,7 @@ L:
 metric.discoveryTags[instanceID][tagKey] = tagValue
 }
-//Adding default tags if not already there
+// Adding default tags if not already there
 for _, defaultTagQP := range defaultTags {
 tagKey, tagValue, err := parseTag(defaultTagQP, elem)
@@ -436,7 +436,7 @@ L:
 continue
 }
-if err == nil && tagValue == "" { //Nothing found
+if err == nil && tagValue == "" { // Nothing found
 s.Log.Debugf("Data by query path %q: is not found, for instance %q",
 defaultTagQP, instanceID)
 continue
@@ -445,7 +445,7 @@ L:
 metric.discoveryTags[instanceID][tagKey] = tagValue
 }
-//if no dimension configured in config file, use discovery data
+// if no dimension configured in config file, use discovery data
 if len(metric.dimensionsUdArr) == 0 && len(metric.dimensionsUdObj) == 0 {
 metric.requestDimensions = append(
 metric.requestDimensions,
@@ -453,7 +453,7 @@ L:
 }
 }
-//add dimensions filter from config file
+// add dimensions filter from config file
 if len(metric.dimensionsUdArr) != 0 {
 metric.requestDimensions = append(metric.requestDimensions, metric.dimensionsUdArr...)
 }
@@ -461,7 +461,7 @@ L:
 metric.requestDimensions = append(metric.requestDimensions, metric.dimensionsUdObj)
 }
-//Unmarshalling to string
+// Unmarshalling to string
 reqDim, err := json.Marshal(metric.requestDimensions)
 if err != nil {
 s.Log.Errorf("Can't marshal metric request dimensions %v :%v",


@@ -29,7 +29,7 @@ type mockGatherAliyunCMSClient struct{}
 func (m *mockGatherAliyunCMSClient) DescribeMetricList(request *cms.DescribeMetricListRequest) (*cms.DescribeMetricListResponse, error) {
 resp := new(cms.DescribeMetricListResponse)
-//switch request.Metric {
+// switch request.Metric {
 switch request.MetricName {
 case "InstanceActiveConnection":
 resp.Code = "200"
@@ -193,7 +193,7 @@ func TestPluginInitialize(t *testing.T) {
 } else {
 require.NoError(t, plugin.Init())
 }
-if len(tt.regions) == 0 { //Check if set to default
+if len(tt.regions) == 0 { // Check if set to default
 require.Equal(t, plugin.Regions, aliyunRegionList)
 }
 })
@@ -390,7 +390,7 @@ func TestGather(t *testing.T) {
 Log: testutil.Logger{Name: inputTitle},
 }
-//test table:
+// test table:
 tests := []struct {
 name string
 hasMeasurement bool
@@ -444,7 +444,7 @@ func TestGather(t *testing.T) {
 }
 func TestGetDiscoveryDataAcrossRegions(t *testing.T) {
-//test table:
+// test table:
 tests := []struct {
 name string
 project string


@@ -30,21 +30,21 @@ type aliyunSdkClient interface {
 ProcessCommonRequest(req *requests.CommonRequest) (response *responses.CommonResponse, err error)
 }
-// discoveryTool is a object that provides discovery feature
+// discoveryTool is an object that provides discovery feature
 type discoveryTool struct {
-req map[string]discoveryRequest //Discovery request (specific per object type)
+req map[string]discoveryRequest // Discovery request (specific per object type)
-rateLimit int //Rate limit for API query, as it is limited by API backend
+rateLimit int // Rate limit for API query, as it is limited by API backend
-reqDefaultPageSize int //Default page size while querying data from API (how many objects per request)
+reqDefaultPageSize int // Default page size while querying data from API (how many objects per request)
-cli map[string]aliyunSdkClient //API client, which perform discovery request
+cli map[string]aliyunSdkClient // API client, which perform discovery request
-respRootKey string //Root key in JSON response where to look for discovery data
+respRootKey string // Root key in JSON response where to look for discovery data
-respObjectIDKey string //Key in element of array under root key, that stores object ID
+respObjectIDKey string // Key in element of array under root key, that stores object ID
-//for ,majority of cases it would be InstanceId, for OSS it is BucketName. This key is also used in dimension filtering// )
+// for, the majority of cases it would be InstanceId, for OSS it is BucketName. This key is also used in dimension filtering
-wg sync.WaitGroup //WG for primary discovery goroutine
+wg sync.WaitGroup // WG for primary discovery goroutine
-interval time.Duration //Discovery interval
+interval time.Duration // Discovery interval
-done chan bool //Done channel to stop primary discovery goroutine
+done chan bool // Done channel to stop primary discovery goroutine
-dataChan chan map[string]interface{} //Discovery data
+dataChan chan map[string]interface{} // Discovery data
-lg telegraf.Logger //Telegraf logger (should be provided)
+lg telegraf.Logger // Telegraf logger (should be provided)
 }
 type parsedDResp struct {
@@ -111,7 +111,7 @@ func newDiscoveryTool(
 len(aliyunRegionList), strings.Join(aliyunRegionList, ","))
 }
-if rateLimit == 0 { //Can be a rounding case
+if rateLimit == 0 { // Can be a rounding case
 rateLimit = 1
 }
@@ -145,7 +145,7 @@ func newDiscoveryTool(
 case "acs_mns_new":
 return nil, noDiscoverySupportErr
 case "acs_cdn":
-//API replies are in its own format.
+// API replies are in its own format.
 return nil, noDiscoverySupportErr
 case "acs_polardb":
 return nil, noDiscoverySupportErr
@@ -260,7 +260,7 @@ func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse)
 )
 data = resp.GetHttpContentBytes()
-if data == nil { //No data
+if data == nil { // No data
 return nil, errors.New("no data in response to be parsed")
 }
@@ -277,7 +277,7 @@ func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse)
 return nil, fmt.Errorf("content of root key %q, is not an object: %q", key, val)
 }
-//It should contain the array with discovered data
+// It should contain the array with discovered data
 for _, item := range rootKeyVal {
 if pdResp.data, foundDataItem = item.([]interface{}); foundDataItem {
 break
@@ -314,7 +314,7 @@ func (dt *discoveryTool) getDiscoveryData(cli aliyunSdkClient, req *requests.Com
 for {
 if lmtr != nil {
-<-lmtr //Rate limiting
+<-lmtr // Rate limiting
 }
 resp, err = cli.ProcessCommonRequest(req)
@@ -330,12 +330,12 @@ func (dt *discoveryTool) getDiscoveryData(cli aliyunSdkClient, req *requests.Com
 pageNumber = pDResp.pageNumber
 totalCount = pDResp.totalCount
-//Pagination
+// Pagination
 pageNumber++
 req.QueryParams["PageNumber"] = strconv.Itoa(pageNumber)
-if len(discoveryData) == totalCount { //All data received
+if len(discoveryData) == totalCount { // All data received
-//Map data to appropriate shape before return
+// Map data to appropriate shape before return
 preparedData := map[string]interface{}{}
 for _, raw := range discoveryData {
@@ -359,8 +359,8 @@ func (dt *discoveryTool) getDiscoveryDataAcrossRegions(lmtr chan bool) (map[stri
 )
 for region, cli := range dt.cli {
-//Building common request, as the code below is the same no matter
+// Building common request, as the code below is the same no matter
-//which aliyun object type (project) is used
+// which aliyun object type (project) is used
 dscReq, ok := dt.req[region]
 if !ok {
 return nil, fmt.Errorf("error building common discovery request: not valid region %q", region)
@@ -382,7 +382,7 @@ func (dt *discoveryTool) getDiscoveryDataAcrossRegions(lmtr chan bool) (map[stri
 commonRequest.QueryParams["PageSize"] = strconv.Itoa(dt.reqDefaultPageSize)
 commonRequest.TransToAcsRequest()
-//Get discovery data using common request
+// Get discovery data using common request
 data, err = dt.getDiscoveryData(cli, commonRequest, lmtr)
 if err != nil {
 return nil, err
@@ -404,7 +404,7 @@ func (dt *discoveryTool) start() {
 lastData map[string]interface{}
 )
-//Initializing channel
+// Initializing channel
 dt.done = make(chan bool)
 dt.wg.Add(1)
@@ -435,7 +435,7 @@ func (dt *discoveryTool) start() {
 lastData[k] = v
 }
-//send discovery data in blocking mode
+// send discovery data in blocking mode
 dt.dataChan <- data
 }
 }
@@ -448,11 +448,11 @@ func (dt *discoveryTool) start() {
 func (dt *discoveryTool) stop() {
 close(dt.done)
-//Shutdown timer
+// Shutdown timer
 timer := time.NewTimer(time.Second * 3)
 defer timer.Stop()
 L:
-for { //Unblock go routine by reading from dt.dataChan
+for { // Unblock go routine by reading from dt.dataChan
 select {
 case <-timer.C:
 break L


@@ -61,7 +61,7 @@ func addJSONCounter(acc telegraf.Accumulator, commonTags map[string]string, stat
 grouper.Add("bind_counter", tags, ts, name, value)
 }
-//Add grouped metrics
+// Add grouped metrics
 for _, groupedMetric := range grouper.Metrics() {
 acc.AddMetric(groupedMetric)
 }
@@ -142,7 +142,7 @@ func (b *Bind) addStatsJSON(stats jsonStats, acc telegraf.Accumulator, urlTag st
 }
 }
-//Add grouped metrics
+// Add grouped metrics
 for _, groupedMetric := range grouper.Metrics() {
 acc.AddMetric(groupedMetric)
 }


@@ -78,7 +78,7 @@ func addXMLv2Counter(acc telegraf.Accumulator, commonTags map[string]string, sta
 grouper.Add("bind_counter", tags, ts, c.Name, c.Value)
 }
-//Add grouped metrics
+// Add grouped metrics
 for _, groupedMetric := range grouper.Metrics() {
 acc.AddMetric(groupedMetric)
 }


@@ -127,7 +127,7 @@ func (b *Bind) addStatsXMLv3(stats v3Stats, acc telegraf.Accumulator, hostPort s
 }
 }
-//Add grouped metrics
+// Add grouped metrics
 for _, groupedMetric := range grouper.Metrics() {
 acc.AddMetric(groupedMetric)
 }


@@ -76,7 +76,7 @@ type CiscoTelemetryMDT struct {
 dmesFuncs map[string]string
 warned map[string]struct{}
 extraTags map[string]map[string]struct{}
-nxpathMap map[string]map[string]string //per path map
+nxpathMap map[string]map[string]string // per path map
 propMap map[string]func(field *telemetry.TelemetryField, value interface{}) interface{}
 mutex sync.Mutex
 acc telegraf.Accumulator
@@ -109,9 +109,9 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error {
 c.propMap = make(map[string]func(field *telemetry.TelemetryField, value interface{}) interface{}, 100)
 c.propMap["test"] = nxosValueXformUint64Toint64
-c.propMap["asn"] = nxosValueXformUint64ToString //uint64 to string.
+c.propMap["asn"] = nxosValueXformUint64ToString // uint64 to string.
-c.propMap["subscriptionId"] = nxosValueXformUint64ToString //uint64 to string.
+c.propMap["subscriptionId"] = nxosValueXformUint64ToString // uint64 to string.
-c.propMap["operState"] = nxosValueXformUint64ToString //uint64 to string.
+c.propMap["operState"] = nxosValueXformUint64ToString // uint64 to string.
 // Invert aliases list
 c.warned = make(map[string]struct{})
@@ -530,7 +530,7 @@ func (c *CiscoTelemetryMDT) parseRib(grouper *metric.SeriesGrouper, field *telem
 // RIB
 measurement := encodingPath
 for _, subfield := range field.Fields {
-//For Every table fill the keys which are vrfName, address and masklen
+// For Every table fill the keys which are vrfName, address and masklen
 switch subfield.Name {
 case "vrfName", "address", "maskLen":
 tags[subfield.Name] = decodeTag(subfield)
@@ -541,7 +541,7 @@ func (c *CiscoTelemetryMDT) parseRib(grouper *metric.SeriesGrouper, field *telem
 if subfield.Name != "nextHop" {
 continue
 }
-//For next hop table fill the keys in the tag - which is address and vrfname
+// For next hop table fill the keys in the tag - which is address and vrfname
 for _, subf := range subfield.Fields {
 for _, ff := range subf.Fields {
 switch ff.Name {
@@ -606,12 +606,12 @@ func (c *CiscoTelemetryMDT) parseClassAttributeField(grouper *metric.SeriesGroup
 var nxAttributes *telemetry.TelemetryField
 isDme := strings.Contains(encodingPath, "sys/")
 if encodingPath == "rib" {
-//handle native data path rib
+// handle native data path rib
 c.parseRib(grouper, field, encodingPath, tags, timestamp)
 return
 }
 if encodingPath == "microburst" {
-//dump microburst
+// dump microburst
 c.parseMicroburst(grouper, field, encodingPath, tags, timestamp)
 return
 }
@@ -704,9 +704,9 @@ func (c *CiscoTelemetryMDT) parseContentField(
 nxAttributes = sub[0].Fields[1].Fields[0].Fields[0].Fields[0].Fields[0].Fields[0]
 }
 }
-//if nxAttributes == NULL then class based query.
+// if nxAttributes == NULL then class based query.
 if nxAttributes == nil {
-//call function walking over walking list.
+// call function walking over walking list.
 for _, sub := range subfield.Fields {
 c.parseClassAttributeField(grouper, sub, encodingPath, tags, timestamp)
 }
@@ -726,7 +726,7 @@ func (c *CiscoTelemetryMDT) parseContentField(
 for i, subfield := range row.Fields {
 if i == 0 { // First subfield contains the index, promote it from value to tag
 tags[prefix] = decodeTag(subfield)
-//We can have subfield so recursively handle it.
+// We can have subfield so recursively handle it.
 if len(row.Fields) == 1 {
 tags["row_number"] = strconv.FormatInt(int64(i), 10)
 c.parseContentField(grouper, subfield, "", encodingPath, tags, timestamp)


@@ -728,7 +728,7 @@ func TestHandleNXXformMulti(t *testing.T) {
 c.handleTelemetry(data)
 require.Empty(t, acc.Errors)
-//validate various transformation scenaarios newly added in the code.
+// validate various transformation scenaarios newly added in the code.
 fields := map[string]interface{}{
 "portIdV": "12",
 "portDesc": "100",


@@ -41,7 +41,7 @@ func nxosValueXformUint64Toint64(field *telemetry.TelemetryField, value interfac
 // xform string to float
 func nxosValueXformStringTofloat(field *telemetry.TelemetryField, _ interface{}) interface{} {
-//convert property to float from string.
+// convert property to float from string.
 vals := field.GetStringValue()
 if vals != "" {
 if valf, err := strconv.ParseFloat(vals, 64); err == nil {
@@ -53,7 +53,7 @@ func nxosValueXformStringTofloat(field *telemetry.TelemetryField, _ interface{})
 // xform string to uint64
 func nxosValueXformStringToUint64(field *telemetry.TelemetryField, _ interface{}) interface{} {
-//string to uint64
+// string to uint64
 vals := field.GetStringValue()
 if vals != "" {
 if val64, err := strconv.ParseUint(vals, 10, 64); err == nil {
@@ -65,7 +65,7 @@ func nxosValueXformStringToUint64(field *telemetry.TelemetryField, _ interface{}
 // xform string to int64
 func nxosValueXformStringToInt64(field *telemetry.TelemetryField, _ interface{}) interface{} {
-//string to int64
+// string to int64
 vals := field.GetStringValue()
 if vals != "" {
 if val64, err := strconv.ParseInt(vals, 10, 64); err == nil {
@@ -77,7 +77,7 @@ func nxosValueXformStringToInt64(field *telemetry.TelemetryField, _ interface{})
 // auto-xform float properties
 func nxosValueAutoXformFloatProp(field *telemetry.TelemetryField, _ interface{}) interface{} {
-//check if we want auto xformation
+// check if we want auto xformation
 vals := field.GetStringValue()
 if vals != "" {
 if valf, err := strconv.ParseFloat(vals, 64); err == nil {
@@ -109,17 +109,17 @@ func (c *CiscoTelemetryMDT) nxosValueXform(field *telemetry.TelemetryField, valu
 if _, ok := c.propMap[field.Name]; ok {
 return c.propMap[field.Name](field, value)
 }
-//check if we want auto xformation
+// check if we want auto xformation
 if _, ok := c.propMap["auto-prop-xfromi"]; ok {
 return c.propMap["auto-prop-xfrom"](field, value)
 }
-//Now check path based conversion.
+// Now check path based conversion.
-//If mapping is found then do the required transformation.
+// If mapping is found then do the required transformation.
 if c.nxpathMap[path] == nil {
 return nil
 }
 switch c.nxpathMap[path][field.Name] {
-//Xformation supported is only from String, Uint32 and Uint64
+// Xformation supported is only from String, Uint32 and Uint64
 case "integer":
 switch val := field.ValueByType.(type) {
 case *telemetry.TelemetryField_StringValue:
@@ -136,9 +136,9 @@ func (c *CiscoTelemetryMDT) nxosValueXform(field *telemetry.TelemetryField, valu
 if ok {
 return vali
 }
-} //switch
+} // switch
 return nil
-//Xformation supported is only from String
+// Xformation supported is only from String
 case "float":
 //nolint:revive // switch needed for `.(type)`
 switch val := field.ValueByType.(type) {
@@ -158,8 +158,8 @@ func (c *CiscoTelemetryMDT) nxosValueXform(field *telemetry.TelemetryField, valu
 }
 case *telemetry.TelemetryField_Uint64Value:
 return int64(value.(uint64))
-} //switch
+} // switch
-} //switch
+} // switch
 return nil
 }


@@ -541,7 +541,7 @@ func TestWrongJSONMarshalling(t *testing.T) {
 Data interface{} `json:"data"`
 }
 enc := json.NewEncoder(w)
-//wrong data section json
+// wrong data section json
 err := enc.Encode(result{
 Data: []struct{}{},
 })


@@ -250,7 +250,7 @@ func TestCollectStatsPerCpu(t *testing.T) {
 }
 require.NoError(t, err)
-//cpu0
+// cpu0
 expectedFields := map[string]interface{}{
 "entries": uint32(59),
 "searched": uint32(10),
@@ -276,7 +276,7 @@ func TestCollectStatsPerCpu(t *testing.T) {
 "cpu": "cpu0",
 })
-//cpu1
+// cpu1
 expectedFields1 := map[string]interface{}{
 "entries": uint32(79),
 "searched": uint32(10),
@@ -341,6 +341,6 @@ func TestCollectPsSystemInit(t *testing.T) {
 if err != nil && strings.Contains(err.Error(), "Is the conntrack kernel module loaded?") {
 t.Skip("Conntrack kernel module not loaded.")
 }
-//make sure Conntrack.ps gets initialized without mocking
+// make sure Conntrack.ps gets initialized without mocking
 require.NoError(t, err)
 }


@@ -11,7 +11,7 @@ import (
 docker "github.com/docker/docker/client"
 )
-/*This file is inherited from telegraf docker input plugin*/
+// This file is inherited from telegraf docker input plugin
 var (
 version = "1.24"
 defaultHeaders = map[string]string{"User-Agent": "engine-api-cli-1.0"}


@@ -13,7 +13,7 @@ import (
 )
 func TestMTime(t *testing.T) {
-//this is the time our foo file should have
+// this is the time our foo file should have
 mtime := time.Date(2015, time.December, 14, 18, 25, 5, 0, time.UTC)
 fs := getTestFileSystem()
@@ -23,7 +23,7 @@ func TestMTime(t *testing.T) {
 }
 func TestSize(t *testing.T) {
-//this is the time our foo file should have
+// this is the time our foo file should have
 size := int64(4096)
 fs := getTestFileSystem()
 fileInfo, err := fs.Stat("/testdata")
@@ -32,7 +32,7 @@ func TestSize(t *testing.T) {
 }
 func TestIsDir(t *testing.T) {
-//this is the time our foo file should have
+// this is the time our foo file should have
 dir := true
 fs := getTestFileSystem()
 fileInfo, err := fs.Stat("/testdata")
@@ -41,9 +41,9 @@ func TestIsDir(t *testing.T) {
 }
 func TestRealFS(t *testing.T) {
-//test that the default (non-test) empty FS causes expected behaviour
+// test that the default (non-test) empty FS causes expected behaviour
 var fs fileSystem = osFS{}
-//the following file exists on disk - and not in our fake fs
+// the following file exists on disk - and not in our fake fs
 fileInfo, err := fs.Stat(getTestdataDir() + "/qux")
 require.NoError(t, err)
 require.False(t, fileInfo.IsDir())


@@ -956,7 +956,7 @@ func TestNotification(t *testing.T) {
 Ext: &gnmiExt.Extension_RegisteredExt{
 RegisteredExt: &gnmiExt.RegisteredExtension{
 // Juniper Header Extension
-//EID_JUNIPER_TELEMETRY_HEADER = 1;
+// EID_JUNIPER_TELEMETRY_HEADER = 1;
 Id: 1,
 Msg: func(jnprExt *jnprHeader.GnmiJuniperTelemetryHeaderExtension) []byte {
 b, err := proto.Marshal(jnprExt)


@@ -24,7 +24,7 @@ import (
 //go:embed sample.conf
 var sampleConfig string
-//CSV format: https://cbonte.github.io/haproxy-dconv/1.5/configuration.html#9.1
+// CSV format: https://cbonte.github.io/haproxy-dconv/1.5/configuration.html#9.1
 type haproxy struct {
 Servers []string
@@ -258,14 +258,14 @@ func (h *haproxy) importCsvResult(r io.Reader, acc telegraf.Accumulator, host st
 case "lastsess":
 vi, err := strconv.ParseInt(v, 10, 64)
 if err != nil {
-//TODO log the error. And just once (per column) so we don't spam the log
+// TODO log the error. And just once (per column) so we don't spam the log
 continue
 }
 fields[fieldName] = vi
 default:
 vi, err := strconv.ParseUint(v, 10, 64)
 if err != nil {
-//TODO log the error. And just once (per column) so we don't spam the log
+// TODO log the error. And just once (per column) so we don't spam the log
 continue
 }
 fields[fieldName] = vi


@@ -44,7 +44,7 @@ func (s statServer) serverSocket(l net.Listener) {
 }
 func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) {
-//We create a fake server to return test data
+// We create a fake server to return test data
 ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 username, password, ok := r.BasicAuth()
 if !ok {
@@ -65,7 +65,7 @@ func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) {
 }))
 defer ts.Close()
-//Now we tested again above server, with our authentication data
+// Now we tested again above server, with our authentication data
 r := &haproxy{
 Servers: []string{strings.Replace(ts.URL, "http://", "http://user:password@", 1)},
 }
@@ -85,7 +85,7 @@ func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) {
 fields := HaproxyGetFieldValues()
 acc.AssertContainsTaggedFields(t, "haproxy", fields, tags)
-//Here, we should get error because we don't pass authentication data
+// Here, we should get error because we don't pass authentication data
 r = &haproxy{
 Servers: []string{ts.URL},
 }


@@ -690,7 +690,7 @@ func TestMethod(t *testing.T) {
 absentFields = []string{"response_string_match"}
 checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
-//check that lowercase methods work correctly
+// check that lowercase methods work correctly
 h = &HTTPResponse{
 Log: testutil.Logger{},
 URLs: []string{ts.URL + "/mustbepostmethod"},


@@ -28,7 +28,7 @@ func TestIcinga2Default(t *testing.T) {
 func TestIcinga2DeprecatedHostConfig(t *testing.T) {
 icinga2 := &Icinga2{
-ObjectType: "hosts", //deprecated
+ObjectType: "hosts", // deprecated
 Objects: []string{},
 }
 require.NoError(t, icinga2.Init())
@@ -38,7 +38,7 @@ func TestIcinga2DeprecatedHostConfig(t *testing.T) {
 func TestIcinga2DeprecatedServicesConfig(t *testing.T) {
 icinga2 := &Icinga2{
-ObjectType: "services", //deprecated
+ObjectType: "services", // deprecated
 Objects: []string{},
 }
 require.NoError(t, icinga2.Init())


@@ -491,8 +491,8 @@ func TestWriteLargeLine(t *testing.T) {
 resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBufferString(hugeMetricString+testMsgs))
 require.NoError(t, err)
 require.NoError(t, resp.Body.Close())
-//todo: with the new parser, long lines aren't a problem. Do we need to skip them?
+// TODO: with the new parser, long lines aren't a problem. Do we need to skip them?
-//require.EqualValues(t, 400, resp.StatusCode)
+// require.EqualValues(t, 400, resp.StatusCode)
 expected := testutil.MustMetric(
 "super_long_metric",


@@ -342,8 +342,8 @@ func TestWriteLargeLine(t *testing.T) {
 resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBufferString(hugeMetricString+testMsgs))
 require.NoError(t, err)
 require.NoError(t, resp.Body.Close())
-//todo: with the new parser, long lines aren't a problem. Do we need to skip them?
+// TODO: with the new parser, long lines aren't a problem. Do we need to skip them?
-//require.EqualValues(t, 400, resp.StatusCode)
+// require.EqualValues(t, 400, resp.StatusCode)
 expected := testutil.MustMetric(
 "super_long_metric",


@@ -46,7 +46,7 @@ type Baseband struct {
 SocketPath string `toml:"socket_path"`
 FileLogPath string `toml:"log_file_path"`
-//optional params
+// optional params
 UnreachableSocketBehavior string `toml:"unreachable_socket_behavior"`
 SocketAccessTimeout config.Duration `toml:"socket_access_timeout"`
 WaitForTelemetryTimeout config.Duration `toml:"wait_for_telemetry_timeout"`


@@ -20,7 +20,7 @@ func TestInit(t *testing.T) {
 err := baseband.Init()
-//check default variables
+// check default variables
 // check empty values
 require.Empty(t, baseband.SocketPath)
 require.Empty(t, baseband.FileLogPath)


@@ -89,7 +89,7 @@ func (lc *logConnector) checkLogFreshness() error {
 // - file is not empty
 // - file doesn't contain clear_log command (it may appear for few milliseconds, just before file is cleared)
 if !lc.lastModTime.Equal(currModTime) && fileInfo.Size() != 0 && !lc.isClearLogContainedInFile() {
-//refreshing succeed
+// refreshing succeed
 lc.lastModTime = currModTime
 return nil
 }
@@ -202,7 +202,7 @@ func (lc *logConnector) getMetric(offsetLine int, name string) (int, *logMetric,
 // infoData eg: Thu Apr 13 13:28:40 2023:INFO:12 0
 infoData := strings.Split(lc.lines[i+1], infoLine)
 if len(infoData) != 2 {
-//info data must be in format : some data + keyword "INFO:" + metrics
+// info data must be in format : some data + keyword "INFO:" + metrics
 return offsetLine, nil, fmt.Errorf("the content of the log file is incorrect, couldn't find %q separator", infoLine)
 }


@@ -92,7 +92,7 @@ func TestGenerate(t *testing.T) {
 cpuMetrics: []cpuMetricType{
 cpuC7StateResidency,
 },
-msrReadTimeout: 0, //timeout disabled
+msrReadTimeout: 0, // timeout disabled
 })
 require.Len(t, opts, 1)


@@ -106,8 +106,8 @@ func parseCoresMeasurement(measurements string) (parsedCoresMeasurement, error)
 func (p *Publisher) addToAccumulatorCores(measurement parsedCoresMeasurement) {
 for i, value := range measurement.values {
 if p.shortenedMetrics {
-//0: "IPC"
+// 0: "IPC"
-//1: "LLC_Misses"
+// 1: "LLC_Misses"
 if i == 0 || i == 1 {
 continue
 }
@@ -157,8 +157,8 @@ func parseProcessesMeasurement(measurement processMeasurement) (parsedProcessMea
 func (p *Publisher) addToAccumulatorProcesses(measurement parsedProcessMeasurement) {
 for i, value := range measurement.values {
 if p.shortenedMetrics {
-//0: "IPC"
+// 0: "IPC"
-//1: "LLC_Misses"
+// 1: "LLC_Misses"
 if i == 0 || i == 1 {
 continue
 }


@@ -411,7 +411,7 @@ func TestGatherV2(t *testing.T) {
 fields map[string]interface{}
 tags map[string]string
 }{
-//SEL | 72h | ns | 7.1 | No Reading
+// SEL | 72h | ns | 7.1 | No Reading
 {
 map[string]interface{}{
 "value": float64(0),
@@ -444,7 +444,7 @@ func TestGatherV2(t *testing.T) {
 fields map[string]interface{}
 tags map[string]string
 }{
-//SEL | 72h | ns | 7.1 | No Reading
+// SEL | 72h | ns | 7.1 | No Reading
 {
 map[string]interface{}{
 "value": float64(0),
@@ -456,7 +456,7 @@ func TestGatherV2(t *testing.T) {
 "status_desc": "no_reading",
 },
 },
-//Intrusion | 73h | ok | 7.1 |
+// Intrusion | 73h | ok | 7.1 |
 {
 map[string]interface{}{
 "value": float64(0),
@@ -468,7 +468,7 @@ func TestGatherV2(t *testing.T) {
 "status_desc": "ok",
 },
 },
-//Fan1 | 30h | ok | 7.1 | 5040 RPM
+// Fan1 | 30h | ok | 7.1 | 5040 RPM
 {
 map[string]interface{}{
 "value": float64(5040),
@@ -480,7 +480,7 @@ func TestGatherV2(t *testing.T) {
 "unit": "rpm",
 },
 },
-//Inlet Temp | 04h | ok | 7.1 | 25 degrees C
+// Inlet Temp | 04h | ok | 7.1 | 25 degrees C
 {
 map[string]interface{}{
 "value": float64(25),
@@ -492,7 +492,7 @@ func TestGatherV2(t *testing.T) {
 "unit": "degrees_c",
 },
 },
-//USB Cable Pres | 50h | ok | 7.1 | Connected
+// USB Cable Pres | 50h | ok | 7.1 | Connected
 {
 map[string]interface{}{
 "value": float64(0),
@@ -504,7 +504,7 @@ func TestGatherV2(t *testing.T) {
 "status_desc": "connected",
 },
 },
-//Current 1 | 6Ah | ok | 10.1 | 7.20 Amps
+// Current 1 | 6Ah | ok | 10.1 | 7.20 Amps
 {
 map[string]interface{}{
 "value": float64(7.2),
@@ -516,7 +516,7 @@ func TestGatherV2(t *testing.T) {
 "unit": "amps",
 },
 },
-//Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected
+// Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected
 {
 map[string]interface{}{
 "value": float64(110),


@@ -333,7 +333,7 @@ func (k *KafkaConsumer) Start(acc telegraf.Accumulator) error {
 handler.MaxMessageLen = k.MaxMessageLen
 handler.TopicTag = k.TopicTag
 handler.MsgHeaderToMetricName = k.MsgHeaderAsMetricName
-//if message headers list specified, put it as map to handler
+// if message headers list specified, put it as map to handler
 msgHeadersMap := make(map[string]bool, len(k.MsgHeadersAsTags))
 if len(k.MsgHeadersAsTags) > 0 {
 for _, header := range k.MsgHeadersAsTags {
@@ -508,7 +508,7 @@ func (h *consumerGroupHandler) Handle(session sarama.ConsumerGroupSession, msg *
 // Check if any message header should override metric name or should be pass as tag
 if len(h.MsgHeadersToTags) > 0 || h.MsgHeaderToMetricName != "" {
 for _, header := range msg.Headers {
-//convert to a string as the header and value are byte arrays.
+// convert to a string as the header and value are byte arrays.
 headerKey := string(header.Key)
 if _, exists := h.MsgHeadersToTags[headerKey]; exists {
 // If message header should be pass as tag then add it to the metrics


@@ -316,7 +316,7 @@ func TestConsumerGroupHandler_Lifecycle(t *testing.T) {
 // This produces a flappy testcase probably due to a race between context cancellation and consumption.
 // Furthermore, it is not clear what the outcome of this test should be...
 // err = cg.ConsumeClaim(session, &claim)
-//require.NoError(t, err)
+// require.NoError(t, err)
 // So stick with the line below for now.
 //nolint:errcheck // see above
 cg.ConsumeClaim(session, &claim)
@@ -660,7 +660,7 @@ func TestExponentialBackoff(t *testing.T) {
 require.NoError(t, parser.Init())
 input.SetParser(parser)
-//time how long initialization (connection) takes
+// time how long initialization (connection) takes
 start := time.Now()
 require.NoError(t, input.Init())


@@ -71,7 +71,7 @@ func TestGather(t *testing.T) {
 }
 checkKibanaStatusResult(defaultTags6_3()["version"], t, &acc1)
-//Unit test for Kibana version >= 6.4
+// Unit test for Kibana version >= 6.4
 ks.client.Transport = newTransportMock(http.StatusOK, kibanaStatusResponse6_5)
 var acc2 testutil.Accumulator
 if err := acc2.GatherError(ks.Gather); err != nil {


@@ -288,7 +288,7 @@ func (l *LogParserPlugin) Stop() {
 }
 err := t.Stop()
-//message for a stopped tailer
+// message for a stopped tailer
 l.Log.Debugf("Tail dropped for file: %v", t.Filename)
 if err != nil {


@@ -113,11 +113,11 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) {
 // test.a.log file. This seems like an issue with the tail package, it
 // is not closing the os.File properly on Stop.
 // === RUN TestGrokParseLogFilesAppearLater
-//2022/04/16 11:05:13 D! [] Tail added for file: C:\Users\circleci\AppData\Local\Temp\TestGrokParseLogFilesAppearLater3687440534\001\test_a.log
+// 2022/04/16 11:05:13 D! [] Tail added for file: C:\Users\circleci\AppData\Local\Temp\TestGrokParseLogFilesAppearLater3687440534\001\test_a.log
-//2022/04/16 11:05:13 D! [] Tail dropped for file: C:\Users\circleci\AppData\Local\Temp\TestGrokParseLogFilesAppearLater3687440534\001\test_a.log
+// 2022/04/16 11:05:13 D! [] Tail dropped for file: C:\Users\circleci\AppData\Local\Temp\TestGrokParseLogFilesAppearLater3687440534\001\test_a.log
 // testing.go:1090: TempDir RemoveAll cleanup:
 // CreateFile C:\Users\circleci\AppData\Local\Temp\TestGrokParseLogFilesAppearLater3687440534\001: Access is denied.
-//--- FAIL: TestGrokParseLogFilesAppearLater (1.68s)
+// --- FAIL: TestGrokParseLogFilesAppearLater (1.68s)
 emptydir, err := os.MkdirTemp("", "TestGrokParseLogFilesAppearLater")
 require.NoError(t, err)
 defer os.RemoveAll(emptydir)


@@ -397,7 +397,7 @@ func (l *Lustre2) GetLustreHealth() error {
 // it was moved in https://github.com/lustre/lustre-release/commit/5d368bd0b2
 filename = filepath.Join(rootdir, "proc", "fs", "lustre", "health_check")
 if _, err = os.Stat(filename); err != nil {
-return nil //nolint: nilerr // we don't want to return an error if the file doesn't exist
+return nil //nolint:nilerr // we don't want to return an error if the file doesn't exist
 }
 }
 contents, err := os.ReadFile(filename)


@@ -30,7 +30,7 @@ func TestMarklogic(t *testing.T) {
 ml := &Marklogic{
 Hosts: []string{"example1"},
 URL: ts.URL,
-//Sources: []string{"http://localhost:8002/manage/v2/hosts/hostname1?view=status&format=json"},
+// Sources: []string{"http://localhost:8002/manage/v2/hosts/hostname1?view=status&format=json"},
 }
 // Create a test accumulator


@@ -129,7 +129,7 @@ func (m *Modbus) SampleConfig() string {
 }
 func (m *Modbus) Init() error {
-//check device name
+// check device name
 if m.Name == "" {
 return errors.New("device name is empty")
 }


@@ -1,9 +1,7 @@
-/***
-The code contained here came from https://github.com/mongodb/mongo-tools/blob/master/mongostat/stat_types.go
-and contains modifications so that no other dependency from that project is needed. Other modifications included
-removing unnecessary code specific to formatting the output and determine the current state of the database. It
-is licensed under Apache Version 2.0, http://www.apache.org/licenses/LICENSE-2.0.html
-***/
+// The code contained here came from https://github.com/mongodb/mongo-tools/blob/master/mongostat/stat_types.go
+// and contains modifications so that no other dependency from that project is needed. Other modifications included
+// removing unnecessary code specific to formatting the output and determine the current state of the database. It
+// is licensed under Apache Version 2.0, http://www.apache.org/licenses/LICENSE-2.0.html
 package mongodb
@@ -706,7 +704,7 @@ type statLine struct {
 // Document fields
 DeletedD, InsertedD, ReturnedD, UpdatedD int64
-//Commands fields
+// Commands fields
 AggregateCommandTotal, AggregateCommandFailed int64
 CountCommandTotal, CountCommandFailed int64
 DeleteCommandTotal, DeleteCommandFailed int64
@@ -1289,7 +1287,7 @@ func NewStatLine(oldMongo, newMongo mongoStatus, key string, all bool, sampleSec
 if newStat.GlobalLock != nil {
 hasWT := newStat.WiredTiger != nil && oldStat.WiredTiger != nil
-//If we have wiredtiger stats, use those instead
+// If we have wiredtiger stats, use those instead
 if newStat.GlobalLock.CurrentQueue != nil {
 if hasWT {
 returnVal.QueuedReaders = newStat.GlobalLock.CurrentQueue.Readers + newStat.GlobalLock.ActiveClients.Readers -

View File

@ -164,9 +164,7 @@ func (n *NeptuneApex) parseXML(acc telegraf.Accumulator, data []byte) error {
if o.Xstatus != nil { if o.Xstatus != nil {
fields["xstatus"] = *o.Xstatus fields["xstatus"] = *o.Xstatus
} }
// Try to determine outlet type. Focus on accuracy, leaving the // Try to determine outlet type. Focus on accuracy, leaving the outlet_type "unknown" when ambiguous. 24v and vortech cannot be determined.
//outlet_type "unknown" when ambiguous. 24v and vortech cannot be
// determined.
switch { switch {
case strings.HasPrefix(o.DeviceID, "base_Var"): case strings.HasPrefix(o.DeviceID, "base_Var"):
tags["output_type"] = "variable" tags["output_type"] = "variable"

View File

@ -385,8 +385,8 @@ func (n *NginxPlusAPI) gatherHTTPUpstreamsMetrics(addr *url.URL, acc telegraf.Ac
"healthchecks_fails": peer.HealthChecks.Fails, "healthchecks_fails": peer.HealthChecks.Fails,
"healthchecks_unhealthy": peer.HealthChecks.Unhealthy, "healthchecks_unhealthy": peer.HealthChecks.Unhealthy,
"downtime": peer.Downtime, "downtime": peer.Downtime,
//"selected": peer.Selected.toInt64, // "selected": peer.Selected.toInt64,
//"downstart": peer.Downstart.toInt64, // "downstart": peer.Downstart.toInt64,
} }
if peer.HealthChecks.LastPassed != nil { if peer.HealthChecks.LastPassed != nil {
peerFields["healthchecks_last_passed"] = *peer.HealthChecks.LastPassed peerFields["healthchecks_last_passed"] = *peer.HealthChecks.LastPassed

View File

@ -19,7 +19,7 @@ func fakePassengerStatus(stat string) (string, error) {
fileExtension = ".bat" fileExtension = ".bat"
content = "@echo off\n" content = "@echo off\n"
for _, line := range strings.Split(strings.TrimSuffix(stat, "\n"), "\n") { for _, line := range strings.Split(strings.TrimSuffix(stat, "\n"), "\n") {
content += "for /f \"delims=\" %%A in (\"" + line + "\") do echo %%~A\n" //my eyes are bleeding content += "for /f \"delims=\" %%A in (\"" + line + "\") do echo %%~A\n" // my eyes are bleeding
} }
} else { } else {
content = fmt.Sprintf("#!/bin/sh\ncat << EOF\n%s\nEOF", stat) content = fmt.Sprintf("#!/bin/sh\ncat << EOF\n%s\nEOF", stat)
@ -86,7 +86,7 @@ func TestPassengerGenerateMetric(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
defer teardown(tempFilePath) defer teardown(tempFilePath)
//Now we tested again above server, with our authentication data // Now we test again against the above server, with our authentication data
r := &passenger{ r := &passenger{
Command: tempFilePath, Command: tempFilePath,
} }

View File

@ -116,7 +116,7 @@ func TestPhpFpmGeneratesMetrics_From_Fcgi(t *testing.T) {
s := statServer{} s := statServer{}
go fcgi.Serve(tcp, s) //nolint:errcheck // ignore the returned error as we cannot do anything about it anyway go fcgi.Serve(tcp, s) //nolint:errcheck // ignore the returned error as we cannot do anything about it anyway
//Now we tested again above server // Now we test again against the above server
r := &phpfpm{ r := &phpfpm{
Urls: []string{"fcgi://" + tcp.Addr().String() + "/status"}, Urls: []string{"fcgi://" + tcp.Addr().String() + "/status"},
Log: &testutil.Logger{}, Log: &testutil.Logger{},
@ -167,7 +167,7 @@ func TestPhpFpmTimeout_From_Fcgi(t *testing.T) {
time.Sleep(2 * timeout) time.Sleep(2 * timeout)
}() }()
//Now we tested again above server // Now we test again against the above server
r := &phpfpm{ r := &phpfpm{
Urls: []string{"fcgi://" + tcp.Addr().String() + "/status"}, Urls: []string{"fcgi://" + tcp.Addr().String() + "/status"},
Timeout: config.Duration(timeout), Timeout: config.Duration(timeout),
@ -199,7 +199,7 @@ func TestPhpFpmCrashWithTimeout_From_Fcgi(t *testing.T) {
const timeout = 200 * time.Millisecond const timeout = 200 * time.Millisecond
//Now we tested again above server // Now we test again against the above server
r := &phpfpm{ r := &phpfpm{
Urls: []string{"fcgi://" + tcpAddress + "/status"}, Urls: []string{"fcgi://" + tcpAddress + "/status"},
Timeout: config.Duration(timeout), Timeout: config.Duration(timeout),
@ -447,7 +447,7 @@ func TestGatherDespiteUnavailable(t *testing.T) {
s := statServer{} s := statServer{}
go fcgi.Serve(tcp, s) //nolint:errcheck // ignore the returned error as we cannot do anything about it anyway go fcgi.Serve(tcp, s) //nolint:errcheck // ignore the returned error as we cannot do anything about it anyway
//Now we tested again above server // Now we test again against the above server
r := &phpfpm{ r := &phpfpm{
Urls: []string{"fcgi://" + tcp.Addr().String() + "/status", "/lala"}, Urls: []string{"fcgi://" + tcp.Addr().String() + "/status", "/lala"},
Log: &testutil.Logger{}, Log: &testutil.Logger{},

View File

@ -276,7 +276,7 @@ func mockUnreachableHostPinger(string, float64, ...string) (string, error) {
return UnreachablePingOutput, errors.New("So very bad") return UnreachablePingOutput, errors.New("So very bad")
} }
//Reply from 185.28.251.217: TTL expired in transit. // Reply from 185.28.251.217: TTL expired in transit.
// in case of 'Destination net unreachable' the ping app returns a received packet which is not what we need // in case of 'Destination net unreachable' the ping app returns a received packet which is not what we need
// it does not contain a valid metric so treat it as a lost one // it does not contain a valid metric so treat it as a lost one

View File

@ -23,8 +23,7 @@ func (pg *NativeFinder) UID(user string) ([]PID, error) {
for _, p := range procs { for _, p := range procs {
username, err := p.Username() username, err := p.Username()
if err != nil { if err != nil {
//skip, this can happen if we don't have permissions or // skip, this can be caused by the pid no longer existing or missing permissions to access it
//the pid no longer exists
continue continue
} }
if username == user { if username == user {
@ -63,8 +62,7 @@ func (pg *NativeFinder) FullPattern(pattern string) ([]PID, error) {
for _, p := range procs { for _, p := range procs {
cmd, err := p.Cmdline() cmd, err := p.Cmdline()
if err != nil { if err != nil {
//skip, this can be caused by the pid no longer existing // skip, this can be caused by the pid no longer existing or missing permissions to access it
//or you having no permissions to access it
continue continue
} }
if regxPattern.MatchString(cmd) { if regxPattern.MatchString(cmd) {
@ -122,8 +120,7 @@ func (pg *NativeFinder) Pattern(pattern string) ([]PID, error) {
for _, p := range procs { for _, p := range procs {
name, err := processName(p) name, err := processName(p)
if err != nil { if err != nil {
//skip, this can be caused by the pid no longer existing // skip, this can be caused by the pid no longer existing or missing permissions to access it
//or you having no permissions to access it
continue continue
} }
if regxPattern.MatchString(name) { if regxPattern.MatchString(name) {

View File

@ -571,7 +571,7 @@ func TestGather_systemdUnitPIDs(t *testing.T) {
} }
func TestGather_cgroupPIDs(t *testing.T) { func TestGather_cgroupPIDs(t *testing.T) {
//no cgroups in windows // no cgroups in windows
if runtime.GOOS == "windows" { if runtime.GOOS == "windows" {
t.Skip("no cgroups in windows") t.Skip("no cgroups in windows")
} }

View File

@ -148,7 +148,7 @@ func (rsl *riemannListener) removeConnection(c net.Conn) {
rsl.connectionsMtx.Unlock() rsl.connectionsMtx.Unlock()
} }
//Utilities // Utilities
/* /*
readMessages will read Riemann messages in binary format readMessages will read Riemann messages in binary format

View File

@ -14,7 +14,7 @@ import (
func TestIPv4SW(t *testing.T) { func TestIPv4SW(t *testing.T) {
str := `00000005` + // version str := `00000005` + // version
`00000001` + //address type `00000001` + // address type
`c0a80102` + // ip address `c0a80102` + // ip address
`00000010` + // sub agent id `00000010` + // sub agent id
`0000f3d4` + // sequence number `0000f3d4` + // sequence number

View File

@ -393,18 +393,18 @@ func (*Smart) SampleConfig() string {
// Init performs one time setup of the plugin and returns an error if the configuration is invalid. // Init performs one time setup of the plugin and returns an error if the configuration is invalid.
func (m *Smart) Init() error { func (m *Smart) Init() error {
//if deprecated `path` (to smartctl binary) is provided in config and `path_smartctl` override does not exist // if deprecated `path` (to smartctl binary) is provided in config and `path_smartctl` override does not exist
if len(m.Path) > 0 && len(m.PathSmartctl) == 0 { if len(m.Path) > 0 && len(m.PathSmartctl) == 0 {
m.PathSmartctl = m.Path m.PathSmartctl = m.Path
} }
//if `path_smartctl` is not provided in config, try to find smartctl binary in PATH // if `path_smartctl` is not provided in config, try to find smartctl binary in PATH
if len(m.PathSmartctl) == 0 { if len(m.PathSmartctl) == 0 {
//nolint:errcheck // error handled later //nolint:errcheck // error handled later
m.PathSmartctl, _ = exec.LookPath("smartctl") m.PathSmartctl, _ = exec.LookPath("smartctl")
} }
//if `path_nvme` is not provided in config, try to find nvme binary in PATH // if `path_nvme` is not provided in config, try to find nvme binary in PATH
if len(m.PathNVMe) == 0 { if len(m.PathNVMe) == 0 {
//nolint:errcheck // error handled later //nolint:errcheck // error handled later
m.PathNVMe, _ = exec.LookPath("nvme") m.PathNVMe, _ = exec.LookPath("nvme")
@ -417,14 +417,14 @@ func (m *Smart) Init() error {
err := validatePath(m.PathSmartctl) err := validatePath(m.PathSmartctl)
if err != nil { if err != nil {
m.PathSmartctl = "" m.PathSmartctl = ""
//without smartctl, plugin will not be able to gather basic metrics // without smartctl, plugin will not be able to gather basic metrics
return fmt.Errorf("smartctl not found: verify that smartctl is installed and it is in your PATH (or specified in config): %w", err) return fmt.Errorf("smartctl not found: verify that smartctl is installed and it is in your PATH (or specified in config): %w", err)
} }
err = validatePath(m.PathNVMe) err = validatePath(m.PathNVMe)
if err != nil { if err != nil {
m.PathNVMe = "" m.PathNVMe = ""
//without nvme, plugin will not be able to gather vendor specific attributes (but it can work without it) // without nvme, plugin will not be able to gather vendor specific attributes (but it can work without it)
m.Log.Warnf( m.Log.Warnf(
"nvme not found: verify that nvme is installed and it is in your PATH (or specified in config) to gather vendor specific attributes: %s", "nvme not found: verify that nvme is installed and it is in your PATH (or specified in config) to gather vendor specific attributes: %s",
err.Error(), err.Error(),
@ -813,7 +813,7 @@ func (m *Smart) gatherDisk(acc telegraf.Accumulator, device string, wg *sync.Wai
fields := make(map[string]interface{}) fields := make(map[string]interface{})
if m.Attributes { if m.Attributes {
//add power mode // add power mode
keys := [...]string{"device", "device_type", "model", "serial_no", "wwn", "capacity", "enabled", "power"} keys := [...]string{"device", "device_type", "model", "serial_no", "wwn", "capacity", "enabled", "power"}
for _, key := range keys { for _, key := range keys {
if value, ok := deviceTags[key]; ok { if value, ok := deviceTags[key]; ok {

View File

@ -171,7 +171,7 @@ func TestReceiveTrap(t *testing.T) {
entries []entry entries []entry
metrics []telegraf.Metric metrics []telegraf.Metric
}{ }{
//ordinary v2c coldStart trap // ordinary v2c coldStart trap
{ {
name: "v2c coldStart", name: "v2c coldStart",
version: gosnmp.Version2c, version: gosnmp.Version2c,
@ -230,10 +230,10 @@ func TestReceiveTrap(t *testing.T) {
), ),
}, },
}, },
//Check that we're not running snmptranslate to look up oids // Check that we're not running snmptranslate to look up oids
//when we shouldn't be. This sends and receives a valid trap // when we shouldn't be. This sends and receives a valid trap
//but metric production should fail because the oids aren't in // but metric production should fail because the oids aren't in
//the cache and oid lookup is intentionally mocked to fail. // the cache and oid lookup is intentionally mocked to fail.
{ {
name: "missing oid", name: "missing oid",
version: gosnmp.Version2c, version: gosnmp.Version2c,
@ -251,10 +251,10 @@ func TestReceiveTrap(t *testing.T) {
}, },
}, },
}, },
entries: []entry{}, //nothing in cache entries: []entry{}, // nothing in cache
metrics: []telegraf.Metric{}, metrics: []telegraf.Metric{},
}, },
//v1 enterprise specific trap // v1 enterprise specific trap
{ {
name: "v1 trap enterprise", name: "v1 trap enterprise",
version: gosnmp.Version1, version: gosnmp.Version1,
@ -308,7 +308,7 @@ func TestReceiveTrap(t *testing.T) {
), ),
}, },
}, },
//v1 generic trap // v1 generic trap
{ {
name: "v1 trap generic", name: "v1 trap generic",
version: gosnmp.Version1, version: gosnmp.Version1,
@ -327,7 +327,7 @@ func TestReceiveTrap(t *testing.T) {
}, },
Enterprise: ".1.2.3", Enterprise: ".1.2.3",
AgentAddress: "10.20.30.40", AgentAddress: "10.20.30.40",
GenericTrap: 0, //coldStart GenericTrap: 0, // coldStart
SpecificTrap: 0, SpecificTrap: 0,
Timestamp: uint(now), Timestamp: uint(now),
}, },
@ -375,7 +375,7 @@ func TestReceiveTrap(t *testing.T) {
), ),
}, },
}, },
//ordinary v3 coldStart trap no auth and no priv // ordinary v3 coldStart trap no auth and no priv
{ {
name: "v3 coldStart noAuthNoPriv", name: "v3 coldStart noAuthNoPriv",
version: gosnmp.Version3, version: gosnmp.Version3,
@ -439,7 +439,7 @@ func TestReceiveTrap(t *testing.T) {
), ),
}, },
}, },
//ordinary v3 coldstart trap SHA auth and no priv // ordinary v3 coldstart trap SHA auth and no priv
{ {
name: "v3 coldStart authShaNoPriv", name: "v3 coldStart authShaNoPriv",
version: gosnmp.Version3, version: gosnmp.Version3,
@ -501,7 +501,7 @@ func TestReceiveTrap(t *testing.T) {
), ),
}, },
}, },
//ordinary v3 coldstart trap SHA224 auth and no priv // ordinary v3 coldstart trap SHA224 auth and no priv
{ {
name: "v3 coldStart authShaNoPriv", name: "v3 coldStart authShaNoPriv",
version: gosnmp.Version3, version: gosnmp.Version3,
@ -563,7 +563,7 @@ func TestReceiveTrap(t *testing.T) {
), ),
}, },
}, },
//ordinary v3 coldstart trap SHA256 auth and no priv // ordinary v3 coldstart trap SHA256 auth and no priv
{ {
name: "v3 coldStart authSha256NoPriv", name: "v3 coldStart authSha256NoPriv",
version: gosnmp.Version3, version: gosnmp.Version3,
@ -625,7 +625,7 @@ func TestReceiveTrap(t *testing.T) {
), ),
}, },
}, },
//ordinary v3 coldstart trap SHA384 auth and no priv // ordinary v3 coldstart trap SHA384 auth and no priv
{ {
name: "v3 coldStart authSha384NoPriv", name: "v3 coldStart authSha384NoPriv",
version: gosnmp.Version3, version: gosnmp.Version3,
@ -687,7 +687,7 @@ func TestReceiveTrap(t *testing.T) {
), ),
}, },
}, },
//ordinary v3 coldstart trap SHA512 auth and no priv // ordinary v3 coldstart trap SHA512 auth and no priv
{ {
name: "v3 coldStart authShaNoPriv", name: "v3 coldStart authShaNoPriv",
version: gosnmp.Version3, version: gosnmp.Version3,
@ -749,7 +749,7 @@ func TestReceiveTrap(t *testing.T) {
), ),
}, },
}, },
//ordinary v3 coldstart trap SHA auth and no priv // ordinary v3 coldstart trap SHA auth and no priv
{ {
name: "v3 coldStart authShaNoPriv", name: "v3 coldStart authShaNoPriv",
version: gosnmp.Version3, version: gosnmp.Version3,
@ -811,7 +811,7 @@ func TestReceiveTrap(t *testing.T) {
), ),
}, },
}, },
//ordinary v3 coldstart trap MD5 auth and no priv // ordinary v3 coldstart trap MD5 auth and no priv
{ {
name: "v3 coldStart authMD5NoPriv", name: "v3 coldStart authMD5NoPriv",
version: gosnmp.Version3, version: gosnmp.Version3,
@ -873,7 +873,7 @@ func TestReceiveTrap(t *testing.T) {
), ),
}, },
}, },
//ordinary v3 coldStart SHA trap auth and AES priv // ordinary v3 coldStart SHA trap auth and AES priv
{ {
name: "v3 coldStart authSHAPrivAES", name: "v3 coldStart authSHAPrivAES",
version: gosnmp.Version3, version: gosnmp.Version3,
@ -937,7 +937,7 @@ func TestReceiveTrap(t *testing.T) {
), ),
}, },
}, },
//ordinary v3 coldStart SHA trap auth and DES priv // ordinary v3 coldStart SHA trap auth and DES priv
{ {
name: "v3 coldStart authSHAPrivDES", name: "v3 coldStart authSHAPrivDES",
version: gosnmp.Version3, version: gosnmp.Version3,
@ -1001,7 +1001,7 @@ func TestReceiveTrap(t *testing.T) {
), ),
}, },
}, },
//ordinary v3 coldStart SHA trap auth and AES192 priv // ordinary v3 coldStart SHA trap auth and AES192 priv
{ {
name: "v3 coldStart authSHAPrivAES192", name: "v3 coldStart authSHAPrivAES192",
version: gosnmp.Version3, version: gosnmp.Version3,
@ -1065,7 +1065,7 @@ func TestReceiveTrap(t *testing.T) {
), ),
}, },
}, },
//ordinary v3 coldStart SHA trap auth and AES192C priv // ordinary v3 coldStart SHA trap auth and AES192C priv
{ {
name: "v3 coldStart authSHAPrivAES192C", name: "v3 coldStart authSHAPrivAES192C",
version: gosnmp.Version3, version: gosnmp.Version3,
@ -1129,7 +1129,7 @@ func TestReceiveTrap(t *testing.T) {
), ),
}, },
}, },
//ordinary v3 coldStart SHA trap auth and AES256 priv // ordinary v3 coldStart SHA trap auth and AES256 priv
{ {
name: "v3 coldStart authSHAPrivAES256", name: "v3 coldStart authSHAPrivAES256",
version: gosnmp.Version3, version: gosnmp.Version3,
@ -1193,7 +1193,7 @@ func TestReceiveTrap(t *testing.T) {
), ),
}, },
}, },
//ordinary v3 coldStart SHA trap auth and AES256C priv // ordinary v3 coldStart SHA trap auth and AES256C priv
{ {
name: "v3 coldStart authSHAPrivAES256C", name: "v3 coldStart authSHAPrivAES256C",
version: gosnmp.Version3, version: gosnmp.Version3,
@ -1284,7 +1284,7 @@ func TestReceiveTrap(t *testing.T) {
timeFunc: func() time.Time { timeFunc: func() time.Time {
return fakeTime return fakeTime
}, },
//if cold start be answer otherwise err // answer if cold start, otherwise err
Log: testutil.Logger{}, Log: testutil.Logger{},
Version: tt.version.String(), Version: tt.version.String(),
SecName: config.NewSecret([]byte(tt.secName)), SecName: config.NewSecret([]byte(tt.secName)),
@ -1298,7 +1298,7 @@ func TestReceiveTrap(t *testing.T) {
require.NoError(t, s.Init()) require.NoError(t, s.Init())
//inject test translator // inject test translator
s.transl = newTestTranslator(tt.entries) s.transl = newTestTranslator(tt.entries)
var acc testutil.Accumulator var acc testutil.Accumulator
@ -1359,7 +1359,7 @@ func TestReceiveTrapMultipleConfig(t *testing.T) {
entries []entry entries []entry
metrics []telegraf.Metric metrics []telegraf.Metric
}{ }{
//ordinary v3 coldStart SHA trap auth and AES priv // ordinary v3 coldStart SHA trap auth and AES priv
{ {
name: "v3 coldStart authSHAPrivAES", name: "v3 coldStart authSHAPrivAES",
version: gosnmp.Version3, version: gosnmp.Version3,
@ -1437,7 +1437,7 @@ func TestReceiveTrapMultipleConfig(t *testing.T) {
), ),
}, },
}, },
//ordinary v3 coldStart SHA trap auth and AES256 priv // ordinary v3 coldStart SHA trap auth and AES256 priv
{ {
name: "v3 coldStart authSHAPrivAES256", name: "v3 coldStart authSHAPrivAES256",
version: gosnmp.Version3, version: gosnmp.Version3,
@ -1514,7 +1514,7 @@ func TestReceiveTrapMultipleConfig(t *testing.T) {
fakeTime, fakeTime,
)}, )},
}, },
//ordinary v3 coldStart SHA trap auth and AES256C priv // ordinary v3 coldStart SHA trap auth and AES256C priv
{ {
name: "v3 coldStart authSHAPrivAES256C", name: "v3 coldStart authSHAPrivAES256C",
version: gosnmp.Version3, version: gosnmp.Version3,
@ -1620,7 +1620,7 @@ func TestReceiveTrapMultipleConfig(t *testing.T) {
timeFunc: func() time.Time { timeFunc: func() time.Time {
return fakeTime return fakeTime
}, },
//if cold start be answer otherwise err // answer if cold start, otherwise err
Log: testutil.Logger{}, Log: testutil.Logger{},
Version: tt.version.String(), Version: tt.version.String(),
SecName: config.NewSecret([]byte(tt.secName + "1")), SecName: config.NewSecret([]byte(tt.secName + "1")),
@ -1649,7 +1649,7 @@ func TestReceiveTrapMultipleConfig(t *testing.T) {
timeFunc: func() time.Time { timeFunc: func() time.Time {
return fakeTime return fakeTime
}, },
//if cold start be answer otherwise err // answer if cold start, otherwise err
Log: testutil.Logger{}, Log: testutil.Logger{},
Version: tt.version.String(), Version: tt.version.String(),
SecName: config.NewSecret([]byte(tt.secName + "2")), SecName: config.NewSecret([]byte(tt.secName + "2")),
@ -1664,7 +1664,7 @@ func TestReceiveTrapMultipleConfig(t *testing.T) {
require.NoError(t, s1.Init()) require.NoError(t, s1.Init())
require.NoError(t, s2.Init()) require.NoError(t, s2.Init())
//inject test translator // inject test translator
s1.transl = newTestTranslator(tt.entries) s1.transl = newTestTranslator(tt.entries)
s2.transl = newTestTranslator(tt.entries) s2.transl = newTestTranslator(tt.entries)

View File

@ -137,7 +137,7 @@ func (s *SQLServer) initQueries() error {
Query{ScriptName: "AzureArcSQLMIPerformanceCounters", Script: sqlAzureArcMIPerformanceCounters, ResultByRow: false} Query{ScriptName: "AzureArcSQLMIPerformanceCounters", Script: sqlAzureArcMIPerformanceCounters, ResultByRow: false}
queries["AzureArcSQLMIRequests"] = Query{ScriptName: "AzureArcSQLMIRequests", Script: sqlAzureArcMIRequests, ResultByRow: false} queries["AzureArcSQLMIRequests"] = Query{ScriptName: "AzureArcSQLMIRequests", Script: sqlAzureArcMIRequests, ResultByRow: false}
queries["AzureArcSQLMISchedulers"] = Query{ScriptName: "AzureArcSQLMISchedulers", Script: sqlAzureArcMISchedulers, ResultByRow: false} queries["AzureArcSQLMISchedulers"] = Query{ScriptName: "AzureArcSQLMISchedulers", Script: sqlAzureArcMISchedulers, ResultByRow: false}
} else if s.DatabaseType == typeSQLServer { //These are still V2 queries and have not been refactored yet. } else if s.DatabaseType == typeSQLServer { // These are still V2 queries and have not been refactored yet.
queries["SQLServerPerformanceCounters"] = Query{ScriptName: "SQLServerPerformanceCounters", Script: sqlServerPerformanceCounters, ResultByRow: false} queries["SQLServerPerformanceCounters"] = Query{ScriptName: "SQLServerPerformanceCounters", Script: sqlServerPerformanceCounters, ResultByRow: false}
queries["SQLServerWaitStatsCategorized"] = Query{ScriptName: "SQLServerWaitStatsCategorized", Script: sqlServerWaitStatsCategorized, ResultByRow: false} queries["SQLServerWaitStatsCategorized"] = Query{ScriptName: "SQLServerWaitStatsCategorized", Script: sqlServerWaitStatsCategorized, ResultByRow: false}
queries["SQLServerDatabaseIO"] = Query{ScriptName: "SQLServerDatabaseIO", Script: sqlServerDatabaseIO, ResultByRow: false} queries["SQLServerDatabaseIO"] = Query{ScriptName: "SQLServerDatabaseIO", Script: sqlServerDatabaseIO, ResultByRow: false}

View File

@ -187,7 +187,7 @@ func (s *Suricata) parseAlert(acc telegraf.Accumulator, result map[string]interf
totalmap := make(map[string]interface{}) totalmap := make(map[string]interface{})
for k, v := range result["alert"].(map[string]interface{}) { for k, v := range result["alert"].(map[string]interface{}) {
//source and target fields are maps // source and target fields are maps
err := flexFlatten(totalmap, k, v, s.Delimiter) err := flexFlatten(totalmap, k, v, s.Delimiter)
if err != nil { if err != nil {
s.Log.Debugf("Flattening alert failed: %v", err) s.Log.Debugf("Flattening alert failed: %v", err)
@ -196,7 +196,7 @@ func (s *Suricata) parseAlert(acc telegraf.Accumulator, result map[string]interf
} }
} }
//threads field do not exist in alert output, always global // threads field do not exist in alert output, always global
acc.AddFields("suricata_alert", totalmap, nil) acc.AddFields("suricata_alert", totalmap, nil)
} }

View File

@ -48,7 +48,7 @@ func TestSuricataLarge(t *testing.T) {
_, err = c.Write([]byte("\n")) _, err = c.Write([]byte("\n"))
require.NoError(t, err) require.NoError(t, err)
//test suricata alerts // test suricata alerts
data2, err := os.ReadFile("testdata/test2.json") data2, err := os.ReadFile("testdata/test2.json")
require.NoError(t, err) require.NoError(t, err)
_, err = c.Write(data2) _, err = c.Write(data2)

View File

@ -162,7 +162,7 @@ func TestTailDosLineEndings(t *testing.T) {
} }
func TestGrokParseLogFilesWithMultiline(t *testing.T) { func TestGrokParseLogFilesWithMultiline(t *testing.T) {
//we make sure the timeout won't kick in // we make sure the timeout won't kick in
d, err := time.ParseDuration("100s") d, err := time.ParseDuration("100s")
require.NoError(t, err) require.NoError(t, err)
duration := config.Duration(d) duration := config.Duration(d)
@ -281,7 +281,7 @@ func TestGrokParseLogFilesWithMultilineTimeout(t *testing.T) {
} }
func TestGrokParseLogFilesWithMultilineTailerCloseFlushesMultilineBuffer(t *testing.T) { func TestGrokParseLogFilesWithMultilineTailerCloseFlushesMultilineBuffer(t *testing.T) {
//we make sure the timeout won't kick in // we make sure the timeout won't kick in
duration := config.Duration(100 * time.Second) duration := config.Duration(100 * time.Second)
tt := NewTestTail() tt := NewTestTail()

View File

@ -311,7 +311,7 @@ func sensorsTemperaturesOld(syspath string) ([]host.TemperatureStat, error) {
//nolint:errcheck // skip on error //nolint:errcheck // skip on error
c, _ := os.ReadFile(filepath.Join(filepath.Dir(file), filename[0]+"_label")) c, _ := os.ReadFile(filepath.Join(filepath.Dir(file), filename[0]+"_label"))
if c != nil { if c != nil {
//format the label from "Core 0" to "core0_" // format the label from "Core 0" to "core0_"
label = strings.Join(strings.Split(strings.TrimSpace(strings.ToLower(string(c))), " "), "") + "_" label = strings.Join(strings.Split(strings.TrimSpace(strings.ToLower(string(c))), " "), "") + "_"
} }

View File

@ -121,7 +121,7 @@ func (u *Upsd) gatherUps(acc telegraf.Accumulator, upsname string, variables []n
tags := map[string]string{ tags := map[string]string{
"serial": fmt.Sprintf("%v", metrics["device.serial"]), "serial": fmt.Sprintf("%v", metrics["device.serial"]),
"ups_name": upsname, "ups_name": upsname,
//"variables": variables.Status not sure if it's a good idea to provide this // "variables": variables.Status not sure if it's a good idea to provide this
"model": fmt.Sprintf("%v", metrics["device.model"]), "model": fmt.Sprintf("%v", metrics["device.model"]),
} }
@ -194,16 +194,16 @@ func (u *Upsd) mapStatus(metrics map[string]interface{}, tags map[string]string)
status := uint64(0) status := uint64(0)
statusString := fmt.Sprintf("%v", metrics["ups.status"]) statusString := fmt.Sprintf("%v", metrics["ups.status"])
statuses := strings.Fields(statusString) statuses := strings.Fields(statusString)
//Source: 1.3.2 at http://rogerprice.org/NUT/ConfigExamples.A5.pdf // Source: 1.3.2 at http://rogerprice.org/NUT/ConfigExamples.A5.pdf
//apcupsd bits: // apcupsd bits:
//0 Runtime calibration occurring (Not reported by Smart UPS v/s and BackUPS Pro) // 0 Runtime calibration occurring (Not reported by Smart UPS v/s and BackUPS Pro)
//1 SmartTrim (Not reported by 1st and 2nd generation SmartUPS models) // 1 SmartTrim (Not reported by 1st and 2nd generation SmartUPS models)
//2 SmartBoost // 2 SmartBoost
//3 On line (this is the normal condition) // 3 On line (this is the normal condition)
//4 On battery // 4 On battery
//5 Overloaded output // 5 Overloaded output
//6 Battery low // 6 Battery low
//7 Replace battery // 7 Replace battery
if choice.Contains("CAL", statuses) { if choice.Contains("CAL", statuses) {
status |= 1 << 0 status |= 1 << 0
tags["status_CAL"] = "true" tags["status_CAL"] = "true"

View File

@ -33,25 +33,25 @@ var (
defaultAdmBinary = "/usr/bin/varnishadm" defaultAdmBinary = "/usr/bin/varnishadm"
defaultTimeout = config.Duration(time.Second) defaultTimeout = config.Duration(time.Second)
//vcl name and backend restriction regexp [A-Za-z][A-Za-z0-9_-]* // vcl name and backend restriction regexp [A-Za-z][A-Za-z0-9_-]*
defaultRegexps = []*regexp.Regexp{ defaultRegexps = []*regexp.Regexp{
//dynamic backends // dynamic backends
//nolint:lll // conditionally long line allowed to have a better understanding of following regexp //nolint:lll // conditionally long line allowed to have a better understanding of following regexp
//VBE.VCL_xxxx_xxx_VOD_SHIELD_Vxxxxxxxxxxxxx_xxxxxxxxxxxxx.goto.000007c8.(xx.xx.xxx.xx).(http://xxxxxxx-xxxxx-xxxxx-xxxxxx-xx-xxxx-x-xxxx.xx-xx-xxxx-x.amazonaws.com:80).(ttl:5.000000).fail_eaddrnotavail // VBE.VCL_xxxx_xxx_VOD_SHIELD_Vxxxxxxxxxxxxx_xxxxxxxxxxxxx.goto.000007c8.(xx.xx.xxx.xx).(http://xxxxxxx-xxxxx-xxxxx-xxxxxx-xx-xxxx-x-xxxx.xx-xx-xxxx-x.amazonaws.com:80).(ttl:5.000000).fail_eaddrnotavail
regexp.MustCompile( regexp.MustCompile(
`^VBE\.(?P<_vcl>[\w\-]*)\.goto\.[[:alnum:]]+\.\((?P<backend>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\)\.\((?P<server>.*)\)\.\(ttl:\d*\.\d*.*\)`, `^VBE\.(?P<_vcl>[\w\-]*)\.goto\.[[:alnum:]]+\.\((?P<backend>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\)\.\((?P<server>.*)\)\.\(ttl:\d*\.\d*.*\)`,
), ),
//VBE.reload_20210622_153544_23757.default.unhealthy // VBE.reload_20210622_153544_23757.default.unhealthy
regexp.MustCompile(`^VBE\.(?P<_vcl>[\w\-]*)\.(?P<backend>[\w\-]*)\.([\w\-]*)`), regexp.MustCompile(`^VBE\.(?P<_vcl>[\w\-]*)\.(?P<backend>[\w\-]*)\.([\w\-]*)`),
//KVSTORE values // KVSTORE values
regexp.MustCompile(`^KVSTORE\.(?P<id>[\w\-]*)\.(?P<_vcl>[\w\-]*)\.([\w\-]*)`), regexp.MustCompile(`^KVSTORE\.(?P<id>[\w\-]*)\.(?P<_vcl>[\w\-]*)\.([\w\-]*)`),
//XCNT.abc1234.XXX+_YYYY.cr.pass.val // XCNT.abc1234.XXX+_YYYY.cr.pass.val
regexp.MustCompile(`^XCNT\.(?P<_vcl>[\w\-]*)(\.)*(?P<group>[\w\-.+]*)\.(?P<_field>[\w\-.+]*)\.val`), regexp.MustCompile(`^XCNT\.(?P<_vcl>[\w\-]*)(\.)*(?P<group>[\w\-.+]*)\.(?P<_field>[\w\-.+]*)\.val`),
//generic metric like MSE_STORE.store-1-1.g_aio_running_bytes_write // generic metric like MSE_STORE.store-1-1.g_aio_running_bytes_write
regexp.MustCompile(`([\w\-]*)\.(?P<_field>[\w\-.]*)`), regexp.MustCompile(`([\w\-]*)\.(?P<_field>[\w\-.]*)`),
} }
) )
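To make the named groups above concrete, here is a small standalone check of the second default regexp against the backend name quoted in its own comment; the sample input comes from that comment, everything else is illustrative:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as the "VBE.reload_..." default regexp above.
	re := regexp.MustCompile(`^VBE\.(?P<_vcl>[\w\-]*)\.(?P<backend>[\w\-]*)\.([\w\-]*)`)
	name := "VBE.reload_20210622_153544_23757.default.unhealthy"

	match := re.FindStringSubmatch(name)
	for i, group := range re.SubexpNames() {
		if i > 0 && group != "" && i < len(match) {
			fmt.Printf("%s=%s\n", group, match[i])
		}
	}
	// Output:
	// _vcl=reload_20210622_153544_23757
	// backend=default
}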
@ -146,7 +146,7 @@ func (s *Varnish) Gather(acc telegraf.Accumulator) error {
} }
if s.MetricVersion == 2 { if s.MetricVersion == 2 {
//run varnishadm to get active vcl // run varnishadm to get active vcl
var activeVcl = "boot" var activeVcl = "boot"
if s.admRun != nil { if s.admRun != nil {
admOut, err := s.admRun(s.AdmBinary, s.UseSudo, admArgs, s.Timeout) admOut, err := s.admRun(s.AdmBinary, s.UseSudo, admArgs, s.Timeout)
@ -165,26 +165,26 @@ func (s *Varnish) Gather(acc telegraf.Accumulator) error {
// Prepare varnish cli tools arguments // Prepare varnish cli tools arguments
func (s *Varnish) prepareCmdArgs() ([]string, []string) { func (s *Varnish) prepareCmdArgs() ([]string, []string) {
//default varnishadm arguments // default varnishadm arguments
admArgs := []string{"vcl.list", "-j"} admArgs := []string{"vcl.list", "-j"}
//default varnish stats arguments // default varnish stats arguments
statsArgs := []string{"-j"} statsArgs := []string{"-j"}
if s.MetricVersion == 1 { if s.MetricVersion == 1 {
statsArgs = []string{"-1"} statsArgs = []string{"-1"}
} }
//add optional instance name // add optional instance name
if s.InstanceName != "" { if s.InstanceName != "" {
statsArgs = append(statsArgs, []string{"-n", s.InstanceName}...) statsArgs = append(statsArgs, []string{"-n", s.InstanceName}...)
admArgs = append([]string{"-n", s.InstanceName}, admArgs...) admArgs = append([]string{"-n", s.InstanceName}, admArgs...)
} }
//override custom arguments // override custom arguments
if len(s.AdmBinaryArgs) > 0 { if len(s.AdmBinaryArgs) > 0 {
admArgs = s.AdmBinaryArgs admArgs = s.AdmBinaryArgs
} }
//override custom arguments // override custom arguments
if len(s.BinaryArgs) > 0 { if len(s.BinaryArgs) > 0 {
statsArgs = s.BinaryArgs statsArgs = s.BinaryArgs
} }
@ -268,13 +268,13 @@ func (s *Varnish) processMetricsV2(activeVcl string, acc telegraf.Accumulator, o
if value, ok := data["value"]; ok { if value, ok := data["value"]; ok {
if number, ok := value.(json.Number); ok { if number, ok := value.(json.Number); ok {
//parse bitmap value // parse bitmap value
if flag == "b" { if flag == "b" {
if metricValue, parseError = strconv.ParseUint(number.String(), 10, 64); parseError != nil { if metricValue, parseError = strconv.ParseUint(number.String(), 10, 64); parseError != nil {
parseError = fmt.Errorf("%q value uint64 error: %w", fieldName, parseError) parseError = fmt.Errorf("%q value uint64 error: %w", fieldName, parseError)
} }
} else if metricValue, parseError = number.Int64(); parseError != nil { } else if metricValue, parseError = number.Int64(); parseError != nil {
//try parse float // try parse float
if metricValue, parseError = number.Float64(); parseError != nil { if metricValue, parseError = number.Float64(); parseError != nil {
parseError = fmt.Errorf("stat %q value %q is not valid number: %w", fieldName, value, parseError) parseError = fmt.Errorf("stat %q value %q is not valid number: %w", fieldName, value, parseError)
} }
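The parse order shown above (bitmap-flagged values as uint64, then int64, then a float64 fallback) can be condensed into a small helper; this is a sketch under that assumption, not the plugin's actual function:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// parseValue mirrors the order above: "b"-flagged bitmap values are parsed as
// uint64, everything else as int64 with a float64 fallback.
func parseValue(n json.Number, flag string) (interface{}, error) {
	if flag == "b" {
		return strconv.ParseUint(n.String(), 10, 64)
	}
	if i, err := n.Int64(); err == nil {
		return i, nil
	}
	return n.Float64()
}

func main() {
	v, err := parseValue(json.Number("3.5"), "")
	fmt.Println(v, err) // 3.5 <nil>
}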
@ -291,7 +291,7 @@ func (s *Varnish) processMetricsV2(activeVcl string, acc telegraf.Accumulator, o
metric := s.parseMetricV2(fieldName) metric := s.parseMetricV2(fieldName)
if metric.vclName != "" && activeVcl != "" && metric.vclName != activeVcl { if metric.vclName != "" && activeVcl != "" && metric.vclName != activeVcl {
//skip not active vcl // skip not active vcl
continue continue
} }
@ -336,7 +336,7 @@ func getActiveVCLJson(out io.Reader) (string, error) {
return s["name"].(string), nil return s["name"].(string), nil
} }
default: default:
//ignore // ignore
continue continue
} }
} }
@ -345,7 +345,7 @@ func getActiveVCLJson(out io.Reader) (string, error) {
// Gets the "counters" section from varnishstat json (there is change in schema structure in varnish 6.5+) // Gets the "counters" section from varnishstat json (there is change in schema structure in varnish 6.5+)
func getCountersJSON(rootJSON map[string]interface{}) map[string]interface{} { func getCountersJSON(rootJSON map[string]interface{}) map[string]interface{} {
//version 1 contains "counters" wrapper // version 1 contains "counters" wrapper
if counters, exists := rootJSON["counters"]; exists { if counters, exists := rootJSON["counters"]; exists {
return counters.(map[string]interface{}) return counters.(map[string]interface{})
} }
@ -364,7 +364,7 @@ func (s *Varnish) parseMetricV2(name string) (metric varnishMetric) {
"section": section, "section": section,
} }
//parse name using regexpsCompiled // parse name using regexpsCompiled
for _, re := range s.regexpsCompiled { for _, re := range s.regexpsCompiled {
submatch := re.FindStringSubmatch(name) submatch := re.FindStringSubmatch(name)
if len(submatch) < 1 { if len(submatch) < 1 {

View File

@ -98,7 +98,7 @@ type objectRef struct {
name string name string
altID string altID string
ref types.ManagedObjectReference ref types.ManagedObjectReference
parentRef *types.ManagedObjectReference //Pointer because it must be nillable parentRef *types.ManagedObjectReference // Pointer because it must be nillable
guest string guest string
dcname string dcname string
rpname string rpname string
@ -734,13 +734,13 @@ func getResourcePools(ctx context.Context, e *Endpoint, resourceFilter *Resource
} }
func getResourcePoolName(rp types.ManagedObjectReference, rps objectMap) string { func getResourcePoolName(rp types.ManagedObjectReference, rps objectMap) string {
//Loop through the Resource Pools objectmap to find the corresponding one // Loop through the Resource Pools objectmap to find the corresponding one
for _, r := range rps { for _, r := range rps {
if r.ref == rp { if r.ref == rp {
return r.name return r.name
} }
} }
return "Resources" //Default value return "Resources" // Default value
} }
// noinspection GoUnusedParameter // noinspection GoUnusedParameter
@ -777,7 +777,7 @@ func getVMs(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (o
if err != nil { if err != nil {
return nil, err return nil, err
} }
//Create a ResourcePool Filter and get the list of Resource Pools // Create a ResourcePool Filter and get the list of Resource Pools
rprf := ResourceFilter{ rprf := ResourceFilter{
finder: &Finder{client}, finder: &Finder{client},
resType: "ResourcePool", resType: "ResourcePool",

View File

@ -165,7 +165,7 @@ func getCmmdsMap(ctx context.Context, client *vim25.Client, clusterObj *object.C
{Type: "DISK"}, {Type: "DISK"},
} }
//Some esx host can be down or in maintenance mode. Hence cmmds query might fail on such hosts. // Some esx host can be down or in maintenance mode. Hence cmmds query might fail on such hosts.
// We iterate until we get a proper api response // We iterate until we get a proper api response
var resp *types.QueryCmmdsResponse var resp *types.QueryCmmdsResponse
for _, host := range hosts { for _, host := range hosts {

View File

@ -149,7 +149,7 @@ func createSim(folders int) (*simulator.Model, *simulator.Server, error) {
model.Folder = folders model.Folder = folders
model.Datacenter = 2 model.Datacenter = 2
//model.App = 1 // model.App = 1
err := model.Create() err := model.Create()
if err != nil { if err != nil {

View File

@ -46,47 +46,47 @@ type pdhFmtCountervalueItemDouble struct {
// pdhCounterInfo structure contains information describing the properties of a counter. This information also includes the counter path. // pdhCounterInfo structure contains information describing the properties of a counter. This information also includes the counter path.
type pdhCounterInfo struct { type pdhCounterInfo struct {
//Size of the structure, including the appended strings, in bytes. // Size of the structure, including the appended strings, in bytes.
DwLength uint32 DwLength uint32
//Counter type. For a list of counter types, // Counter type. For a list of counter types,
//see the Counter Types section of the <a "href=http://go.microsoft.com/fwlink/p/?linkid=84422">Windows Server 2003 Deployment Kit</a>. // see the Counter Types section of the <a "href=http://go.microsoft.com/fwlink/p/?linkid=84422">Windows Server 2003 Deployment Kit</a>.
//The counter type constants are defined in Winperf.h. // The counter type constants are defined in Winperf.h.
DwType uint32 DwType uint32
//Counter version information. Not used. // Counter version information. Not used.
CVersion uint32 CVersion uint32
//Counter status that indicates if the counter value is valid. For a list of possible values, // Counter status that indicates if the counter value is valid. For a list of possible values,
//see <a href="https://msdn.microsoft.com/en-us/library/windows/desktop/aa371894(v=vs.85).aspx">Checking PDH Interface Return Values</a>. // see <a href="https://msdn.microsoft.com/en-us/library/windows/desktop/aa371894(v=vs.85).aspx">Checking PDH Interface Return Values</a>.
CStatus uint32 CStatus uint32
//Scale factor to use when computing the displayable value of the counter. The scale factor is a power of ten. // Scale factor to use when computing the displayable value of the counter. The scale factor is a power of ten.
//The valid range of this parameter is PDH_MIN_SCALE (7) (the returned value is the actual value times 10⁷) to // The valid range of this parameter is PDH_MIN_SCALE (7) (the returned value is the actual value times 10⁷) to
//PDH_MAX_SCALE (+7) (the returned value is the actual value times 10⁺⁷). A value of zero will set the scale to one, so that the actual value is returned // PDH_MAX_SCALE (+7) (the returned value is the actual value times 10⁺⁷). A value of zero will set the scale to one, so that the actual value is returned
LScale int32 LScale int32
//Default scale factor as suggested by the counter's provider. // Default scale factor as suggested by the counter's provider.
LDefaultScale int32 LDefaultScale int32
//The value passed in the dwUserData parameter when calling PdhAddCounter. // The value passed in the dwUserData parameter when calling PdhAddCounter.
DwUserData *uint32 DwUserData *uint32
//The value passed in the dwUserData parameter when calling PdhOpenQuery. // The value passed in the dwUserData parameter when calling PdhOpenQuery.
DwQueryUserData *uint32 DwQueryUserData *uint32
//Null-terminated string that specifies the full counter path. The string follows this structure in memory. // Null-terminated string that specifies the full counter path. The string follows this structure in memory.
SzFullPath *uint16 // pointer to a string SzFullPath *uint16 // pointer to a string
//Null-terminated string that contains the name of the computer specified in the counter path. Is NULL, if the path does not specify a computer. // Null-terminated string that contains the name of the computer specified in the counter path. Is NULL, if the path does not specify a computer.
//The string follows this structure in memory. // The string follows this structure in memory.
SzMachineName *uint16 // pointer to a string SzMachineName *uint16 // pointer to a string
//Null-terminated string that contains the name of the performance object specified in the counter path. The string follows this structure in memory. // Null-terminated string that contains the name of the performance object specified in the counter path. The string follows this structure in memory.
SzObjectName *uint16 // pointer to a string SzObjectName *uint16 // pointer to a string
//Null-terminated string that contains the name of the object instance specified in the counter path. Is NULL, if the path does not specify an instance. // Null-terminated string that contains the name of the object instance specified in the counter path. Is NULL, if the path does not specify an instance.
//The string follows this structure in memory. // The string follows this structure in memory.
SzInstanceName *uint16 // pointer to a string SzInstanceName *uint16 // pointer to a string
//Null-terminated string that contains the name of the parent instance specified in the counter path. // Null-terminated string that contains the name of the parent instance specified in the counter path.
//Is NULL, if the path does not specify a parent instance. The string follows this structure in memory. // Is NULL, if the path does not specify a parent instance. The string follows this structure in memory.
SzParentInstance *uint16 // pointer to a string SzParentInstance *uint16 // pointer to a string
//Instance index specified in the counter path. Is 0, if the path does not specify an instance index. // Instance index specified in the counter path. Is 0, if the path does not specify an instance index.
DwInstanceIndex uint32 // instance index DwInstanceIndex uint32 // instance index
//Null-terminated string that contains the counter name. The string follows this structure in memory. // Null-terminated string that contains the counter name. The string follows this structure in memory.
SzCounterName *uint16 // pointer to a string SzCounterName *uint16 // pointer to a string
//Help text that describes the counter. Is NULL if the source is a log file. // Help text that describes the counter. Is NULL if the source is a log file.
SzExplainText *uint16 // pointer to a string SzExplainText *uint16 // pointer to a string
//Start of the string data that is appended to the structure. // Start of the string data that is appended to the structure.
DataBuffer [1]uint32 // pointer to an extra space DataBuffer [1]uint32 // pointer to an extra space
} }
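The LScale description above boils down to a power-of-ten multiplier on the raw value; a tiny illustrative helper showing that assumed usage, not part of the plugin:

package main

import (
	"fmt"
	"math"
)

// displayValue applies the documented semantics: the displayable value is the
// raw value times ten to the LScale power.
func displayValue(raw float64, lScale int32) float64 {
	return raw * math.Pow(10, float64(lScale))
}

func main() {
	fmt.Println(displayValue(1234, -3)) // 1.234
}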
@ -110,6 +110,6 @@ type pdhRawCounter struct {
type pdhRawCounterItem struct { type pdhRawCounterItem struct {
// Pointer to a null-terminated string that specifies the instance name of the counter. The string is appended to the end of this structure. // Pointer to a null-terminated string that specifies the instance name of the counter. The string is appended to the end of this structure.
SzName *uint16 SzName *uint16
//A pdhRawCounter structure that contains the raw counter value of the instance // A pdhRawCounter structure that contains the raw counter value of the instance
RawValue pdhRawCounter RawValue pdhRawCounter
} }

View File

@ -425,7 +425,7 @@ func (m *WinPerfCounters) Gather(acc telegraf.Accumulator) error {
return err return err
} }
for _, hostCounterSet := range m.hostCounters { for _, hostCounterSet := range m.hostCounters {
//some counters need two data samples before computing a value // some counters need two data samples before computing a value
if err = hostCounterSet.query.CollectData(); err != nil { if err = hostCounterSet.query.CollectData(); err != nil {
return m.checkError(err) return m.checkError(err)
} }
@ -449,7 +449,7 @@ func (m *WinPerfCounters) Gather(acc telegraf.Accumulator) error {
} }
} }
var wg sync.WaitGroup var wg sync.WaitGroup
//iterate over computers // iterate over computers
for _, hostCounterInfo := range m.hostCounters { for _, hostCounterInfo := range m.hostCounters {
wg.Add(1) wg.Add(1)
go func(hostInfo *hostCountersInfo) { go func(hostInfo *hostCountersInfo) {
@ -482,7 +482,7 @@ func (m *WinPerfCounters) gatherComputerCounters(hostCounterInfo *hostCountersIn
value, err = hostCounterInfo.query.GetFormattedCounterValueDouble(metric.counterHandle) value, err = hostCounterInfo.query.GetFormattedCounterValueDouble(metric.counterHandle)
} }
if err != nil { if err != nil {
//ignore invalid data as some counters from process instances returns this sometimes // ignore invalid data as some counters from process instances returns this sometimes
if !isKnownCounterDataError(err) { if !isKnownCounterDataError(err) {
return fmt.Errorf("error while getting value for counter %q: %w", metric.counterPath, err) return fmt.Errorf("error while getting value for counter %q: %w", metric.counterPath, err)
} }
@ -498,7 +498,7 @@ func (m *WinPerfCounters) gatherComputerCounters(hostCounterInfo *hostCountersIn
counterValues, err = hostCounterInfo.query.GetFormattedCounterArrayDouble(metric.counterHandle) counterValues, err = hostCounterInfo.query.GetFormattedCounterArrayDouble(metric.counterHandle)
} }
if err != nil { if err != nil {
//ignore invalid data as some counters from process instances returns this sometimes // ignore invalid data as some counters from process instances returns this sometimes
if !isKnownCounterDataError(err) { if !isKnownCounterDataError(err) {
return fmt.Errorf("error while getting value for counter %q: %w", metric.counterPath, err) return fmt.Errorf("error while getting value for counter %q: %w", metric.counterPath, err)
} }

View File

@ -1514,7 +1514,7 @@ func TestGatherRefreshingWithExpansion(t *testing.T) {
"source": hostname(), "source": hostname(),
} }
//test before elapsing CounterRefreshRate counters are not refreshed // test before elapsing CounterRefreshRate counters are not refreshed
err = m.Gather(&acc2) err = m.Gather(&acc2)
require.NoError(t, err) require.NoError(t, err)
counters, ok = m.hostCounters["localhost"] counters, ok = m.hostCounters["localhost"]
@ -1594,7 +1594,7 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) {
"source": hostname(), "source": hostname(),
} }
acc1.AssertContainsTaggedFields(t, measurement, fields2, tags2) acc1.AssertContainsTaggedFields(t, measurement, fields2, tags2)
//test finding new instance // test finding new instance
cps2 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2", "\\O(I3)\\C1", "\\O(I3)\\C2"} cps2 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2", "\\O(I3)\\C1", "\\O(I3)\\C2"}
fpm = &FakePerformanceQuery{ fpm = &FakePerformanceQuery{
counters: createCounterMap( counters: createCounterMap(
@ -1628,7 +1628,7 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) {
"source": hostname(), "source": hostname(),
} }
//test before elapsing CounterRefreshRate counters are not refreshed // test before elapsing CounterRefreshRate counters are not refreshed
err = m.Gather(&acc2) err = m.Gather(&acc2)
require.NoError(t, err) require.NoError(t, err)
@ -1640,7 +1640,7 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) {
acc2.AssertContainsTaggedFields(t, measurement, fields1, tags1) acc2.AssertContainsTaggedFields(t, measurement, fields1, tags1)
acc2.AssertContainsTaggedFields(t, measurement, fields2, tags2) acc2.AssertContainsTaggedFields(t, measurement, fields2, tags2)
acc2.AssertContainsTaggedFields(t, measurement, fields3, tags3) acc2.AssertContainsTaggedFields(t, measurement, fields3, tags3)
//test changed configuration // test changed configuration
perfObjects = createPerfObject("", measurement, "O", []string{"*"}, []string{"C1", "C2", "C3"}, true, false, false) perfObjects = createPerfObject("", measurement, "O", []string{"*"}, []string{"C1", "C2", "C3"}, true, false, false)
cps3 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I1)\\C3", "\\O(I2)\\C1", "\\O(I2)\\C2", "\\O(I2)\\C3"} cps3 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I1)\\C3", "\\O(I2)\\C1", "\\O(I2)\\C2", "\\O(I2)\\C3"}
fpm = &FakePerformanceQuery{ fpm = &FakePerformanceQuery{
@ -1963,7 +1963,7 @@ func TestGatherRaw(t *testing.T) {
counters, ok = m.hostCounters["localhost"] counters, ok = m.hostCounters["localhost"]
require.True(t, ok) require.True(t, ok)
require.Len(t, counters.counters, 4) //expanded counters require.Len(t, counters.counters, 4) // expanded counters
require.Len(t, acc2.Metrics, 2) require.Len(t, acc2.Metrics, 2)
acc2.AssertContainsTaggedFields(t, measurement, fields1, tags1) acc2.AssertContainsTaggedFields(t, measurement, fields1, tags1)
@ -2053,9 +2053,9 @@ func TestLocalizeWildcardsExpansion(t *testing.T) {
require.NoError(t, m.Gather(&acc)) require.NoError(t, m.Gather(&acc))
require.Len(t, acc.Metrics, 1) require.Len(t, acc.Metrics, 1)
//running on localized windows with UseWildcardsExpansion and // running on localized windows with UseWildcardsExpansion and
//with LocalizeWildcardsExpansion, this will be localized. Using LocalizeWildcardsExpansion=false it will // with LocalizeWildcardsExpansion, this will be localized. Using LocalizeWildcardsExpansion=false it will
//be English. // be English.
require.Contains(t, acc.Metrics[0].Fields, sanitizedChars.Replace(counter)) require.Contains(t, acc.Metrics[0].Fields, sanitizedChars.Replace(counter))
} }

View File

@ -167,7 +167,7 @@ func (m *WinServices) Gather(acc telegraf.Accumulator) error {
tags := map[string]string{ tags := map[string]string{
"service_name": service.ServiceName, "service_name": service.ServiceName,
} }
//display name could be empty, but still valid service // display name could be empty, but still valid service
if len(service.DisplayName) > 0 { if len(service.DisplayName) > 0 {
tags["display_name"] = service.DisplayName tags["display_name"] = service.DisplayName
} }

View File

@ -18,7 +18,7 @@ import (
// testData is DD wrapper for unit testing of WinServices // testData is DD wrapper for unit testing of WinServices
type testData struct { type testData struct {
//collection that will be returned in ListServices if service array passed into WinServices constructor is empty // collection that will be returned in ListServices if service array passed into WinServices constructor is empty
queryServiceList []string queryServiceList []string
mgrConnectError error mgrConnectError error
mgrListServicesError error mgrListServicesError error
@ -124,7 +124,7 @@ var testErrors = []testData{
} }
func TestMgrErrors(t *testing.T) { func TestMgrErrors(t *testing.T) {
//mgr.connect error // mgr.connect error
winServices := &WinServices{ winServices := &WinServices{
Log: testutil.Logger{}, Log: testutil.Logger{},
mgrProvider: &FakeMgProvider{testErrors[0]}, mgrProvider: &FakeMgProvider{testErrors[0]},
@ -134,7 +134,7 @@ func TestMgrErrors(t *testing.T) {
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), testErrors[0].mgrConnectError.Error()) require.Contains(t, err.Error(), testErrors[0].mgrConnectError.Error())
////mgr.listServices error // mgr.listServices error
winServices = &WinServices{ winServices = &WinServices{
Log: testutil.Logger{}, Log: testutil.Logger{},
mgrProvider: &FakeMgProvider{testErrors[1]}, mgrProvider: &FakeMgProvider{testErrors[1]},
@ -144,7 +144,7 @@ func TestMgrErrors(t *testing.T) {
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), testErrors[1].mgrListServicesError.Error()) require.Contains(t, err.Error(), testErrors[1].mgrListServicesError.Error())
////mgr.listServices error 2 // mgr.listServices error 2
winServices = &WinServices{ winServices = &WinServices{
Log: testutil.Logger{}, Log: testutil.Logger{},
ServiceNames: []string{"Fake service 1"}, ServiceNames: []string{"Fake service 1"},
@ -174,11 +174,11 @@ func TestServiceErrors(t *testing.T) {
log.SetOutput(buf) log.SetOutput(buf)
require.NoError(t, winServices.Gather(&acc1)) require.NoError(t, winServices.Gather(&acc1))
//open service error // open service error
require.Contains(t, buf.String(), testErrors[2].services[0].serviceOpenError.Error()) require.Contains(t, buf.String(), testErrors[2].services[0].serviceOpenError.Error())
//query service error // query service error
require.Contains(t, buf.String(), testErrors[2].services[1].serviceQueryError.Error()) require.Contains(t, buf.String(), testErrors[2].services[1].serviceQueryError.Error())
//config service error // config service error
require.Contains(t, buf.String(), testErrors[2].services[2].serviceConfigError.Error()) require.Contains(t, buf.String(), testErrors[2].services[2].serviceConfigError.Error())
} }

View File

@ -252,7 +252,7 @@ func TestZfsPoolMetrics(t *testing.T) {
err = z.Gather(&acc) err = z.Gather(&acc)
require.NoError(t, err) require.NoError(t, err)
//one pool, all metrics // one pool, all metrics
tags := map[string]string{ tags := map[string]string{
"pool": "HOME", "pool": "HOME",
} }
@ -318,7 +318,7 @@ func TestZfsGeneratesMetrics(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
//one pool, all metrics // one pool, all metrics
tags := map[string]string{ tags := map[string]string{
"pools": "HOME", "pools": "HOME",
} }
@ -330,7 +330,7 @@ func TestZfsGeneratesMetrics(t *testing.T) {
acc.AssertContainsTaggedFields(t, "zfs", intMetrics, tags) acc.AssertContainsTaggedFields(t, "zfs", intMetrics, tags)
acc.Metrics = nil acc.Metrics = nil
//two pools, all metrics // two pools, all metrics
err = os.MkdirAll(testKstatPath+"/STORAGE", 0750) err = os.MkdirAll(testKstatPath+"/STORAGE", 0750)
require.NoError(t, err) require.NoError(t, err)
@ -351,7 +351,7 @@ func TestZfsGeneratesMetrics(t *testing.T) {
intMetrics = getKstatMetricsArcOnly() intMetrics = getKstatMetricsArcOnly()
//two pools, one metric // two pools, one metric
z = &Zfs{KstatPath: testKstatPath, KstatMetrics: []string{"arcstats"}} z = &Zfs{KstatPath: testKstatPath, KstatMetrics: []string{"arcstats"}}
acc3 := testutil.Accumulator{} acc3 := testutil.Accumulator{}
err = z.Gather(&acc3) err = z.Gather(&acc3)

View File

@ -262,7 +262,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
wantErr: false, wantErr: false,
}, },
//// Test data from distributed trace repo sample json // Test data from distributed trace repo sample json
// https://github.com/mattkanwisher/distributedtrace/blob/master/testclient/sample.json // https://github.com/mattkanwisher/distributedtrace/blob/master/testclient/sample.json
{ {
name: "distributed_trace_sample", name: "distributed_trace_sample",

View File

@ -53,7 +53,7 @@ func TestZipkinPlugin(t *testing.T) {
"trace_id": "22c4fc8ab3669045", "trace_id": "22c4fc8ab3669045",
"name": "child", "name": "child",
"service_name": "trivial", "service_name": "trivial",
"annotation": "trivial", //base64: dHJpdmlhbA== "annotation": "trivial", // base64: dHJpdmlhbA==
"endpoint_host": "127.0.0.1", "endpoint_host": "127.0.0.1",
"annotation_key": "lc", "annotation_key": "lc",
}, },
@ -86,7 +86,7 @@ func TestZipkinPlugin(t *testing.T) {
"trace_id": "22c4fc8ab3669045", "trace_id": "22c4fc8ab3669045",
"name": "child", "name": "child",
"service_name": "trivial", "service_name": "trivial",
"annotation": "trivial", //base64: dHJpdmlhbA== "annotation": "trivial", // base64: dHJpdmlhbA==
"endpoint_host": "127.0.0.1", "endpoint_host": "127.0.0.1",
"annotation_key": "lc", "annotation_key": "lc",
}, },
@ -167,7 +167,7 @@ func TestZipkinPlugin(t *testing.T) {
Tags: map[string]string{ Tags: map[string]string{
"trace_id": "22c4fc8ab3669045", "trace_id": "22c4fc8ab3669045",
"service_name": "trivial", "service_name": "trivial",
"annotation": "trivial", //base64: dHJpdmlhbA== "annotation": "trivial", // base64: dHJpdmlhbA==
"annotation_key": "lc", "annotation_key": "lc",
"id": "5195e96239641e", "id": "5195e96239641e",
"parent_id": "5195e96239641e", "parent_id": "5195e96239641e",
@ -618,7 +618,7 @@ func TestZipkinPlugin(t *testing.T) {
} }
mockAcc.Wait( mockAcc.Wait(
len(tt.want), len(tt.want),
) //Since the server is running concurrently, we need to wait for the number of data points we want to test to be added to the Accumulator. ) // Since the server is running concurrently, we need to wait for the number of data points we want to test to be added to the Accumulator.
if len(mockAcc.Errors) > 0 != tt.wantErr { if len(mockAcc.Errors) > 0 != tt.wantErr {
t.Fatalf("Got unexpected errors. want error = %v, errors = %v\n", tt.wantErr, mockAcc.Errors) t.Fatalf("Got unexpected errors. want error = %v, errors = %v\n", tt.wantErr, mockAcc.Errors)
} }