chore: Fix linter findings for `revive:comment-spacings` (part 2) (#15897)
Parent: 43590ca730
Commit: 453d32bd81
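For context, revive's `comment-spacings` rule flags `//` comments that have no space between the comment marker and the text; every change below simply inserts that space. A minimal sketch of what the linter complains about (hypothetical identifiers, not code from this commit):

package example

//bad: flagged by revive:comment-spacings (no space after "//")
var retries = 3

// good: a single space separates "//" from the comment text
var timeout = 30

Machine-readable directives such as `//go:embed sample.conf` and `//nolint:errcheck` intentionally keep the no-space form, which is why they appear unchanged as context lines in the diff below.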
@@ -61,16 +61,16 @@ type (
 	Metric struct {
 		ObjectsFilter string `toml:"objects_filter"`
 		MetricNames []string `toml:"names"`
-		Dimensions string `toml:"dimensions"` //String representation of JSON dimensions
+		Dimensions string `toml:"dimensions"` // String representation of JSON dimensions
 		TagsQueryPath []string `toml:"tag_query_path"`
-		AllowDataPointWODiscoveryData bool `toml:"allow_dps_without_discovery"` //Allow data points without discovery data (if no discovery data found)
+		AllowDataPointWODiscoveryData bool `toml:"allow_dps_without_discovery"` // Allow data points without discovery data (if no discovery data found)

-		dtLock sync.Mutex //Guard for discoveryTags & dimensions
-		discoveryTags map[string]map[string]string //Internal data structure that can enrich metrics with tags
+		dtLock sync.Mutex // Guard for discoveryTags & dimensions
+		discoveryTags map[string]map[string]string // Internal data structure that can enrich metrics with tags
 		dimensionsUdObj map[string]string
-		dimensionsUdArr []map[string]string //Parsed Dimesnsions JSON string (unmarshalled)
-		requestDimensions []map[string]string //this is the actual dimensions list that would be used in API request
-		requestDimensionsStr string //String representation of the above
+		dimensionsUdArr []map[string]string // Parsed Dimesnsions JSON string (unmarshalled)
+		requestDimensions []map[string]string // this is the actual dimensions list that would be used in API request
+		requestDimensionsStr string // String representation of the above

 	}

@@ -149,7 +149,7 @@ func (s *AliyunCMS) Init() error {
 		return fmt.Errorf("failed to create cms client: %w", err)
 	}

-	//check metrics dimensions consistency
+	// check metrics dimensions consistency
 	for i := range s.Metrics {
 		metric := s.Metrics[i]
 		if metric.Dimensions == "" {

@@ -172,15 +172,15 @@ func (s *AliyunCMS) Init() error {

 	s.measurement = formatMeasurement(s.Project)

-	//Check regions
+	// Check regions
 	if len(s.Regions) == 0 {
 		s.Regions = aliyunRegionList
 		s.Log.Infof("'regions' is not set. Metrics will be queried across %d regions:\n%s",
 			len(s.Regions), strings.Join(s.Regions, ","))
 	}

-	//Init discovery...
-	if s.dt == nil { //Support for tests
+	// Init discovery...
+	if s.dt == nil { // Support for tests
 		s.dt, err = newDiscoveryTool(s.Regions, s.Project, s.Log, credential, int(float32(s.RateLimit)*0.2), time.Duration(s.DiscoveryInterval))
 		if err != nil {
 			s.Log.Errorf("Discovery tool is not activated: %v", err)

@@ -198,7 +198,7 @@ func (s *AliyunCMS) Init() error {

 	s.Log.Infof("%d object(s) discovered...", len(s.discoveryData))

-	//Special setting for acs_oss project since the API differs
+	// Special setting for acs_oss project since the API differs
 	if s.Project == "acs_oss" {
 		s.dimensionKey = "BucketName"
 	}

@@ -208,7 +208,7 @@ func (s *AliyunCMS) Init() error {

 // Start plugin discovery loop, metrics are gathered through Gather
 func (s *AliyunCMS) Start(telegraf.Accumulator) error {
-	//Start periodic discovery process
+	// Start periodic discovery process
 	if s.dt != nil {
 		s.dt.start()
 	}

@@ -226,7 +226,7 @@ func (s *AliyunCMS) Gather(acc telegraf.Accumulator) error {

 	var wg sync.WaitGroup
 	for _, metric := range s.Metrics {
-		//Prepare internal structure with data from discovery
+		// Prepare internal structure with data from discovery
 		s.prepareTagsAndDimensions(metric)
 		wg.Add(len(metric.MetricNames))
 		for _, metricName := range metric.MetricNames {

@@ -250,10 +250,10 @@ func (s *AliyunCMS) Stop() {
 }

 func (s *AliyunCMS) updateWindow(relativeTo time.Time) {
-	//https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.6.701.54025679zh6wiR
-	//The start and end times are executed in the mode of
-	//opening left and closing right, and startTime cannot be equal
-	//to or greater than endTime.
+	// https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.6.701.54025679zh6wiR
+	// The start and end times are executed in the mode of
+	// opening left and closing right, and startTime cannot be equal
+	// to or greater than endTime.

 	windowEnd := relativeTo.Add(-time.Duration(s.Delay))

@@ -310,8 +310,8 @@ func (s *AliyunCMS) gatherMetric(acc telegraf.Accumulator, metricName string, me
 		switch key {
 		case "instanceId", "BucketName":
 			tags[key] = value.(string)
-			if metric.discoveryTags != nil { //discovery can be not activated
-				//Skipping data point if discovery data not exist
+			if metric.discoveryTags != nil { // discovery can be not activated
+				// Skipping data point if discovery data not exist
 				_, ok := metric.discoveryTags[value.(string)]
 				if !ok &&
 					!metric.AllowDataPointWODiscoveryData {

@@ -349,7 +349,7 @@ func parseTag(tagSpec string, data interface{}) (tagKey, tagValue string, err er
 	)
 	tagKey = tagSpec

-	//Split query path to tagKey and query path
+	// Split query path to tagKey and query path
 	if splitted := strings.Split(tagSpec, ":"); len(splitted) == 2 {
 		tagKey = splitted[0]
 		queryPath = splitted[1]

@@ -360,7 +360,7 @@ func parseTag(tagSpec string, data interface{}) (tagKey, tagValue string, err er
 		return "", "", fmt.Errorf("can't query data from discovery data using query path %q: %w", queryPath, err)
 	}

-	if tagRawValue == nil { //Nothing found
+	if tagRawValue == nil { // Nothing found
 		return "", "", nil
 	}

@@ -378,11 +378,11 @@ func (s *AliyunCMS) prepareTagsAndDimensions(metric *Metric) {
 		defaultTags = []string{"RegionId:RegionId"}
 	)

-	if s.dt == nil { //Discovery is not activated
+	if s.dt == nil { // Discovery is not activated
 		return
 	}

-	//Reading all data from buffered channel
+	// Reading all data from buffered channel
 L:
 	for {
 		select {

@@ -394,7 +394,7 @@ L:
 		}
 	}

-	//new data arrives (so process it) or this is the first call
+	// new data arrives (so process it) or this is the first call
 	if newData || len(metric.discoveryTags) == 0 {
 		metric.dtLock.Lock()
 		defer metric.dtLock.Unlock()

@@ -403,13 +403,13 @@ L:
 			metric.discoveryTags = make(map[string]map[string]string, len(s.discoveryData))
 		}

-		metric.requestDimensions = nil //erasing
+		metric.requestDimensions = nil // erasing
 		metric.requestDimensions = make([]map[string]string, 0, len(s.discoveryData))

-		//Preparing tags & dims...
+		// Preparing tags & dims...
 		for instanceID, elem := range s.discoveryData {
-			//Start filing tags
-			//Remove old value if exist
+			// Start filing tags
+			// Remove old value if exist
 			delete(metric.discoveryTags, instanceID)
 			metric.discoveryTags[instanceID] = make(map[string]string, len(metric.TagsQueryPath)+len(defaultTags))

@@ -419,7 +419,7 @@ L:
 				s.Log.Errorf("%v", err)
 				continue
 			}
-			if err == nil && tagValue == "" { //Nothing found
+			if err == nil && tagValue == "" { // Nothing found
 				s.Log.Debugf("Data by query path %q: is not found, for instance %q", tagQueryPath, instanceID)
 				continue
 			}

@@ -427,7 +427,7 @@ L:
 			metric.discoveryTags[instanceID][tagKey] = tagValue
 		}

-		//Adding default tags if not already there
+		// Adding default tags if not already there
 		for _, defaultTagQP := range defaultTags {
 			tagKey, tagValue, err := parseTag(defaultTagQP, elem)

@@ -436,7 +436,7 @@ L:
 				continue
 			}

-			if err == nil && tagValue == "" { //Nothing found
+			if err == nil && tagValue == "" { // Nothing found
 				s.Log.Debugf("Data by query path %q: is not found, for instance %q",
 					defaultTagQP, instanceID)
 				continue

@@ -445,7 +445,7 @@ L:
 			metric.discoveryTags[instanceID][tagKey] = tagValue
 		}

-		//if no dimension configured in config file, use discovery data
+		// if no dimension configured in config file, use discovery data
 		if len(metric.dimensionsUdArr) == 0 && len(metric.dimensionsUdObj) == 0 {
 			metric.requestDimensions = append(
 				metric.requestDimensions,

@@ -453,7 +453,7 @@ L:
 		}
 	}

-	//add dimensions filter from config file
+	// add dimensions filter from config file
 	if len(metric.dimensionsUdArr) != 0 {
 		metric.requestDimensions = append(metric.requestDimensions, metric.dimensionsUdArr...)
 	}

@@ -461,7 +461,7 @@ L:
 		metric.requestDimensions = append(metric.requestDimensions, metric.dimensionsUdObj)
 	}

-	//Unmarshalling to string
+	// Unmarshalling to string
 	reqDim, err := json.Marshal(metric.requestDimensions)
 	if err != nil {
 		s.Log.Errorf("Can't marshal metric request dimensions %v :%v",
@@ -29,7 +29,7 @@ type mockGatherAliyunCMSClient struct{}
 func (m *mockGatherAliyunCMSClient) DescribeMetricList(request *cms.DescribeMetricListRequest) (*cms.DescribeMetricListResponse, error) {
 	resp := new(cms.DescribeMetricListResponse)

-	//switch request.Metric {
+	// switch request.Metric {
 	switch request.MetricName {
 	case "InstanceActiveConnection":
 		resp.Code = "200"

@@ -193,7 +193,7 @@ func TestPluginInitialize(t *testing.T) {
 			} else {
 				require.NoError(t, plugin.Init())
 			}
-			if len(tt.regions) == 0 { //Check if set to default
+			if len(tt.regions) == 0 { // Check if set to default
 				require.Equal(t, plugin.Regions, aliyunRegionList)
 			}
 		})

@@ -390,7 +390,7 @@ func TestGather(t *testing.T) {
 		Log: testutil.Logger{Name: inputTitle},
 	}

-	//test table:
+	// test table:
 	tests := []struct {
 		name string
 		hasMeasurement bool

@@ -444,7 +444,7 @@ func TestGather(t *testing.T) {
 }

 func TestGetDiscoveryDataAcrossRegions(t *testing.T) {
-	//test table:
+	// test table:
 	tests := []struct {
 		name string
 		project string
@@ -30,21 +30,21 @@ type aliyunSdkClient interface {
 	ProcessCommonRequest(req *requests.CommonRequest) (response *responses.CommonResponse, err error)
 }

-// discoveryTool is a object that provides discovery feature
+// discoveryTool is an object that provides discovery feature
 type discoveryTool struct {
-	req map[string]discoveryRequest //Discovery request (specific per object type)
-	rateLimit int //Rate limit for API query, as it is limited by API backend
-	reqDefaultPageSize int //Default page size while querying data from API (how many objects per request)
-	cli map[string]aliyunSdkClient //API client, which perform discovery request
+	req map[string]discoveryRequest // Discovery request (specific per object type)
+	rateLimit int // Rate limit for API query, as it is limited by API backend
+	reqDefaultPageSize int // Default page size while querying data from API (how many objects per request)
+	cli map[string]aliyunSdkClient // API client, which perform discovery request

-	respRootKey string //Root key in JSON response where to look for discovery data
-	respObjectIDKey string //Key in element of array under root key, that stores object ID
-	//for ,majority of cases it would be InstanceId, for OSS it is BucketName. This key is also used in dimension filtering// )
-	wg sync.WaitGroup //WG for primary discovery goroutine
-	interval time.Duration //Discovery interval
-	done chan bool //Done channel to stop primary discovery goroutine
-	dataChan chan map[string]interface{} //Discovery data
-	lg telegraf.Logger //Telegraf logger (should be provided)
+	respRootKey string // Root key in JSON response where to look for discovery data
+	respObjectIDKey string // Key in element of array under root key, that stores object ID
+	// for, the majority of cases it would be InstanceId, for OSS it is BucketName. This key is also used in dimension filtering
+	wg sync.WaitGroup // WG for primary discovery goroutine
+	interval time.Duration // Discovery interval
+	done chan bool // Done channel to stop primary discovery goroutine
+	dataChan chan map[string]interface{} // Discovery data
+	lg telegraf.Logger // Telegraf logger (should be provided)
 }

 type parsedDResp struct {

@@ -111,7 +111,7 @@ func newDiscoveryTool(
 			len(aliyunRegionList), strings.Join(aliyunRegionList, ","))
 	}

-	if rateLimit == 0 { //Can be a rounding case
+	if rateLimit == 0 { // Can be a rounding case
 		rateLimit = 1
 	}

@@ -145,7 +145,7 @@ func newDiscoveryTool(
 	case "acs_mns_new":
 		return nil, noDiscoverySupportErr
 	case "acs_cdn":
-		//API replies are in its own format.
+		// API replies are in its own format.
 		return nil, noDiscoverySupportErr
 	case "acs_polardb":
 		return nil, noDiscoverySupportErr

@@ -260,7 +260,7 @@ func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse)
 	)

 	data = resp.GetHttpContentBytes()
-	if data == nil { //No data
+	if data == nil { // No data
 		return nil, errors.New("no data in response to be parsed")
 	}

@@ -277,7 +277,7 @@ func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse)
 			return nil, fmt.Errorf("content of root key %q, is not an object: %q", key, val)
 		}

-		//It should contain the array with discovered data
+		// It should contain the array with discovered data
 		for _, item := range rootKeyVal {
 			if pdResp.data, foundDataItem = item.([]interface{}); foundDataItem {
 				break

@@ -314,7 +314,7 @@ func (dt *discoveryTool) getDiscoveryData(cli aliyunSdkClient, req *requests.Com

 	for {
 		if lmtr != nil {
-			<-lmtr //Rate limiting
+			<-lmtr // Rate limiting
 		}

 		resp, err = cli.ProcessCommonRequest(req)

@@ -330,12 +330,12 @@ func (dt *discoveryTool) getDiscoveryData(cli aliyunSdkClient, req *requests.Com
 		pageNumber = pDResp.pageNumber
 		totalCount = pDResp.totalCount

-		//Pagination
+		// Pagination
 		pageNumber++
 		req.QueryParams["PageNumber"] = strconv.Itoa(pageNumber)

-		if len(discoveryData) == totalCount { //All data received
-			//Map data to appropriate shape before return
+		if len(discoveryData) == totalCount { // All data received
+			// Map data to appropriate shape before return
 			preparedData := map[string]interface{}{}

 			for _, raw := range discoveryData {

@@ -359,8 +359,8 @@ func (dt *discoveryTool) getDiscoveryDataAcrossRegions(lmtr chan bool) (map[stri
 	)

 	for region, cli := range dt.cli {
-		//Building common request, as the code below is the same no matter
-		//which aliyun object type (project) is used
+		// Building common request, as the code below is the same no matter
+		// which aliyun object type (project) is used
 		dscReq, ok := dt.req[region]
 		if !ok {
 			return nil, fmt.Errorf("error building common discovery request: not valid region %q", region)

@@ -382,7 +382,7 @@ func (dt *discoveryTool) getDiscoveryDataAcrossRegions(lmtr chan bool) (map[stri
 		commonRequest.QueryParams["PageSize"] = strconv.Itoa(dt.reqDefaultPageSize)
 		commonRequest.TransToAcsRequest()

-		//Get discovery data using common request
+		// Get discovery data using common request
 		data, err = dt.getDiscoveryData(cli, commonRequest, lmtr)
 		if err != nil {
 			return nil, err

@@ -404,7 +404,7 @@ func (dt *discoveryTool) start() {
 		lastData map[string]interface{}
 	)

-	//Initializing channel
+	// Initializing channel
 	dt.done = make(chan bool)

 	dt.wg.Add(1)

@@ -435,7 +435,7 @@ func (dt *discoveryTool) start() {
 					lastData[k] = v
 				}

-				//send discovery data in blocking mode
+				// send discovery data in blocking mode
 				dt.dataChan <- data
 			}
 		}

@@ -448,11 +448,11 @@ func (dt *discoveryTool) start() {
 func (dt *discoveryTool) stop() {
 	close(dt.done)

-	//Shutdown timer
+	// Shutdown timer
 	timer := time.NewTimer(time.Second * 3)
 	defer timer.Stop()
L:
-	for { //Unblock go routine by reading from dt.dataChan
+	for { // Unblock go routine by reading from dt.dataChan
 		select {
 		case <-timer.C:
 			break L
@@ -61,7 +61,7 @@ func addJSONCounter(acc telegraf.Accumulator, commonTags map[string]string, stat
 		grouper.Add("bind_counter", tags, ts, name, value)
 	}

-	//Add grouped metrics
+	// Add grouped metrics
 	for _, groupedMetric := range grouper.Metrics() {
 		acc.AddMetric(groupedMetric)
 	}

@@ -142,7 +142,7 @@ func (b *Bind) addStatsJSON(stats jsonStats, acc telegraf.Accumulator, urlTag st
 		}
 	}

-	//Add grouped metrics
+	// Add grouped metrics
 	for _, groupedMetric := range grouper.Metrics() {
 		acc.AddMetric(groupedMetric)
 	}
@@ -78,7 +78,7 @@ func addXMLv2Counter(acc telegraf.Accumulator, commonTags map[string]string, sta
 		grouper.Add("bind_counter", tags, ts, c.Name, c.Value)
 	}

-	//Add grouped metrics
+	// Add grouped metrics
 	for _, groupedMetric := range grouper.Metrics() {
 		acc.AddMetric(groupedMetric)
 	}
@@ -127,7 +127,7 @@ func (b *Bind) addStatsXMLv3(stats v3Stats, acc telegraf.Accumulator, hostPort s
 		}
 	}

-	//Add grouped metrics
+	// Add grouped metrics
 	for _, groupedMetric := range grouper.Metrics() {
 		acc.AddMetric(groupedMetric)
 	}
@@ -76,7 +76,7 @@ type CiscoTelemetryMDT struct {
 	dmesFuncs map[string]string
 	warned map[string]struct{}
 	extraTags map[string]map[string]struct{}
-	nxpathMap map[string]map[string]string //per path map
+	nxpathMap map[string]map[string]string // per path map
 	propMap map[string]func(field *telemetry.TelemetryField, value interface{}) interface{}
 	mutex sync.Mutex
 	acc telegraf.Accumulator

@@ -109,9 +109,9 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error {

 	c.propMap = make(map[string]func(field *telemetry.TelemetryField, value interface{}) interface{}, 100)
 	c.propMap["test"] = nxosValueXformUint64Toint64
-	c.propMap["asn"] = nxosValueXformUint64ToString //uint64 to string.
-	c.propMap["subscriptionId"] = nxosValueXformUint64ToString //uint64 to string.
-	c.propMap["operState"] = nxosValueXformUint64ToString //uint64 to string.
+	c.propMap["asn"] = nxosValueXformUint64ToString // uint64 to string.
+	c.propMap["subscriptionId"] = nxosValueXformUint64ToString // uint64 to string.
+	c.propMap["operState"] = nxosValueXformUint64ToString // uint64 to string.

 	// Invert aliases list
 	c.warned = make(map[string]struct{})

@@ -530,7 +530,7 @@ func (c *CiscoTelemetryMDT) parseRib(grouper *metric.SeriesGrouper, field *telem
 	// RIB
 	measurement := encodingPath
 	for _, subfield := range field.Fields {
-		//For Every table fill the keys which are vrfName, address and masklen
+		// For Every table fill the keys which are vrfName, address and masklen
 		switch subfield.Name {
 		case "vrfName", "address", "maskLen":
 			tags[subfield.Name] = decodeTag(subfield)

@@ -541,7 +541,7 @@ func (c *CiscoTelemetryMDT) parseRib(grouper *metric.SeriesGrouper, field *telem
 		if subfield.Name != "nextHop" {
 			continue
 		}
-		//For next hop table fill the keys in the tag - which is address and vrfname
+		// For next hop table fill the keys in the tag - which is address and vrfname
 		for _, subf := range subfield.Fields {
 			for _, ff := range subf.Fields {
 				switch ff.Name {

@@ -606,12 +606,12 @@ func (c *CiscoTelemetryMDT) parseClassAttributeField(grouper *metric.SeriesGroup
 	var nxAttributes *telemetry.TelemetryField
 	isDme := strings.Contains(encodingPath, "sys/")
 	if encodingPath == "rib" {
-		//handle native data path rib
+		// handle native data path rib
 		c.parseRib(grouper, field, encodingPath, tags, timestamp)
 		return
 	}
 	if encodingPath == "microburst" {
-		//dump microburst
+		// dump microburst
 		c.parseMicroburst(grouper, field, encodingPath, tags, timestamp)
 		return
 	}

@@ -704,9 +704,9 @@ func (c *CiscoTelemetryMDT) parseContentField(
 			nxAttributes = sub[0].Fields[1].Fields[0].Fields[0].Fields[0].Fields[0].Fields[0]
 		}
 	}
-	//if nxAttributes == NULL then class based query.
+	// if nxAttributes == NULL then class based query.
 	if nxAttributes == nil {
-		//call function walking over walking list.
+		// call function walking over walking list.
 		for _, sub := range subfield.Fields {
 			c.parseClassAttributeField(grouper, sub, encodingPath, tags, timestamp)
 		}

@@ -726,7 +726,7 @@ func (c *CiscoTelemetryMDT) parseContentField(
 		for i, subfield := range row.Fields {
 			if i == 0 { // First subfield contains the index, promote it from value to tag
 				tags[prefix] = decodeTag(subfield)
-				//We can have subfield so recursively handle it.
+				// We can have subfield so recursively handle it.
 				if len(row.Fields) == 1 {
 					tags["row_number"] = strconv.FormatInt(int64(i), 10)
 					c.parseContentField(grouper, subfield, "", encodingPath, tags, timestamp)
@@ -728,7 +728,7 @@ func TestHandleNXXformMulti(t *testing.T) {

 	c.handleTelemetry(data)
 	require.Empty(t, acc.Errors)
-	//validate various transformation scenaarios newly added in the code.
+	// validate various transformation scenaarios newly added in the code.
 	fields := map[string]interface{}{
 		"portIdV": "12",
 		"portDesc": "100",
@@ -41,7 +41,7 @@ func nxosValueXformUint64Toint64(field *telemetry.TelemetryField, value interfac

 // xform string to float
 func nxosValueXformStringTofloat(field *telemetry.TelemetryField, _ interface{}) interface{} {
-	//convert property to float from string.
+	// convert property to float from string.
 	vals := field.GetStringValue()
 	if vals != "" {
 		if valf, err := strconv.ParseFloat(vals, 64); err == nil {

@@ -53,7 +53,7 @@ func nxosValueXformStringTofloat(field *telemetry.TelemetryField, _ interface{})

 // xform string to uint64
 func nxosValueXformStringToUint64(field *telemetry.TelemetryField, _ interface{}) interface{} {
-	//string to uint64
+	// string to uint64
 	vals := field.GetStringValue()
 	if vals != "" {
 		if val64, err := strconv.ParseUint(vals, 10, 64); err == nil {

@@ -65,7 +65,7 @@ func nxosValueXformStringToUint64(field *telemetry.TelemetryField, _ interface{}

 // xform string to int64
 func nxosValueXformStringToInt64(field *telemetry.TelemetryField, _ interface{}) interface{} {
-	//string to int64
+	// string to int64
 	vals := field.GetStringValue()
 	if vals != "" {
 		if val64, err := strconv.ParseInt(vals, 10, 64); err == nil {

@@ -77,7 +77,7 @@ func nxosValueXformStringToInt64(field *telemetry.TelemetryField, _ interface{})

 // auto-xform float properties
 func nxosValueAutoXformFloatProp(field *telemetry.TelemetryField, _ interface{}) interface{} {
-	//check if we want auto xformation
+	// check if we want auto xformation
 	vals := field.GetStringValue()
 	if vals != "" {
 		if valf, err := strconv.ParseFloat(vals, 64); err == nil {

@@ -109,17 +109,17 @@ func (c *CiscoTelemetryMDT) nxosValueXform(field *telemetry.TelemetryField, valu
 	if _, ok := c.propMap[field.Name]; ok {
 		return c.propMap[field.Name](field, value)
 	}
-	//check if we want auto xformation
+	// check if we want auto xformation
 	if _, ok := c.propMap["auto-prop-xfromi"]; ok {
 		return c.propMap["auto-prop-xfrom"](field, value)
 	}
-	//Now check path based conversion.
-	//If mapping is found then do the required transformation.
+	// Now check path based conversion.
+	// If mapping is found then do the required transformation.
 	if c.nxpathMap[path] == nil {
 		return nil
 	}
 	switch c.nxpathMap[path][field.Name] {
-	//Xformation supported is only from String, Uint32 and Uint64
+	// Xformation supported is only from String, Uint32 and Uint64
 	case "integer":
 		switch val := field.ValueByType.(type) {
 		case *telemetry.TelemetryField_StringValue:

@@ -136,9 +136,9 @@ func (c *CiscoTelemetryMDT) nxosValueXform(field *telemetry.TelemetryField, valu
 			if ok {
 				return vali
 			}
-		} //switch
+		} // switch
 		return nil
-	//Xformation supported is only from String
+	// Xformation supported is only from String
 	case "float":
 		//nolint:revive // switch needed for `.(type)`
 		switch val := field.ValueByType.(type) {

@@ -158,8 +158,8 @@ func (c *CiscoTelemetryMDT) nxosValueXform(field *telemetry.TelemetryField, valu
 		}
 	case *telemetry.TelemetryField_Uint64Value:
 		return int64(value.(uint64))
-	} //switch
-	} //switch
+	} // switch
+	} // switch
 	return nil
 }

@@ -541,7 +541,7 @@ func TestWrongJSONMarshalling(t *testing.T) {
 		Data interface{} `json:"data"`
 	}
 	enc := json.NewEncoder(w)
-	//wrong data section json
+	// wrong data section json
 	err := enc.Encode(result{
 		Data: []struct{}{},
 	})
@@ -250,7 +250,7 @@ func TestCollectStatsPerCpu(t *testing.T) {
 	}
 	require.NoError(t, err)

-	//cpu0
+	// cpu0
 	expectedFields := map[string]interface{}{
 		"entries": uint32(59),
 		"searched": uint32(10),

@@ -276,7 +276,7 @@ func TestCollectStatsPerCpu(t *testing.T) {
 		"cpu": "cpu0",
 	})

-	//cpu1
+	// cpu1
 	expectedFields1 := map[string]interface{}{
 		"entries": uint32(79),
 		"searched": uint32(10),

@@ -341,6 +341,6 @@ func TestCollectPsSystemInit(t *testing.T) {
 	if err != nil && strings.Contains(err.Error(), "Is the conntrack kernel module loaded?") {
 		t.Skip("Conntrack kernel module not loaded.")
 	}
-	//make sure Conntrack.ps gets initialized without mocking
+	// make sure Conntrack.ps gets initialized without mocking
 	require.NoError(t, err)
 }
@@ -11,7 +11,7 @@ import (
 	docker "github.com/docker/docker/client"
 )

-/*This file is inherited from telegraf docker input plugin*/
+// This file is inherited from telegraf docker input plugin
 var (
 	version = "1.24"
 	defaultHeaders = map[string]string{"User-Agent": "engine-api-cli-1.0"}
@@ -13,7 +13,7 @@ import (
 )

 func TestMTime(t *testing.T) {
-	//this is the time our foo file should have
+	// this is the time our foo file should have
 	mtime := time.Date(2015, time.December, 14, 18, 25, 5, 0, time.UTC)

 	fs := getTestFileSystem()

@@ -23,7 +23,7 @@ func TestMTime(t *testing.T) {
 }

 func TestSize(t *testing.T) {
-	//this is the time our foo file should have
+	// this is the time our foo file should have
 	size := int64(4096)
 	fs := getTestFileSystem()
 	fileInfo, err := fs.Stat("/testdata")

@@ -32,7 +32,7 @@ func TestSize(t *testing.T) {
 }

 func TestIsDir(t *testing.T) {
-	//this is the time our foo file should have
+	// this is the time our foo file should have
 	dir := true
 	fs := getTestFileSystem()
 	fileInfo, err := fs.Stat("/testdata")

@@ -41,9 +41,9 @@ func TestIsDir(t *testing.T) {
 }

 func TestRealFS(t *testing.T) {
-	//test that the default (non-test) empty FS causes expected behaviour
+	// test that the default (non-test) empty FS causes expected behaviour
 	var fs fileSystem = osFS{}
-	//the following file exists on disk - and not in our fake fs
+	// the following file exists on disk - and not in our fake fs
 	fileInfo, err := fs.Stat(getTestdataDir() + "/qux")
 	require.NoError(t, err)
 	require.False(t, fileInfo.IsDir())
@@ -956,7 +956,7 @@ func TestNotification(t *testing.T) {
 				Ext: &gnmiExt.Extension_RegisteredExt{
 					RegisteredExt: &gnmiExt.RegisteredExtension{
 						// Juniper Header Extension
-						//EID_JUNIPER_TELEMETRY_HEADER = 1;
+						// EID_JUNIPER_TELEMETRY_HEADER = 1;
 						Id: 1,
 						Msg: func(jnprExt *jnprHeader.GnmiJuniperTelemetryHeaderExtension) []byte {
 							b, err := proto.Marshal(jnprExt)
@@ -24,7 +24,7 @@ import (
 //go:embed sample.conf
 var sampleConfig string

-//CSV format: https://cbonte.github.io/haproxy-dconv/1.5/configuration.html#9.1
+// CSV format: https://cbonte.github.io/haproxy-dconv/1.5/configuration.html#9.1

 type haproxy struct {
 	Servers []string

@@ -258,14 +258,14 @@ func (h *haproxy) importCsvResult(r io.Reader, acc telegraf.Accumulator, host st
 		case "lastsess":
 			vi, err := strconv.ParseInt(v, 10, 64)
 			if err != nil {
-				//TODO log the error. And just once (per column) so we don't spam the log
+				// TODO log the error. And just once (per column) so we don't spam the log
 				continue
 			}
 			fields[fieldName] = vi
 		default:
 			vi, err := strconv.ParseUint(v, 10, 64)
 			if err != nil {
-				//TODO log the error. And just once (per column) so we don't spam the log
+				// TODO log the error. And just once (per column) so we don't spam the log
 				continue
 			}
 			fields[fieldName] = vi
@@ -44,7 +44,7 @@ func (s statServer) serverSocket(l net.Listener) {
 }

 func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) {
-	//We create a fake server to return test data
+	// We create a fake server to return test data
 	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		username, password, ok := r.BasicAuth()
 		if !ok {

@@ -65,7 +65,7 @@ func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) {
 	}))
 	defer ts.Close()

-	//Now we tested again above server, with our authentication data
+	// Now we tested again above server, with our authentication data
 	r := &haproxy{
 		Servers: []string{strings.Replace(ts.URL, "http://", "http://user:password@", 1)},
 	}

@@ -85,7 +85,7 @@ func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) {
 	fields := HaproxyGetFieldValues()
 	acc.AssertContainsTaggedFields(t, "haproxy", fields, tags)

-	//Here, we should get error because we don't pass authentication data
+	// Here, we should get error because we don't pass authentication data
 	r = &haproxy{
 		Servers: []string{ts.URL},
 	}
@@ -690,7 +690,7 @@ func TestMethod(t *testing.T) {
 	absentFields = []string{"response_string_match"}
 	checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)

-	//check that lowercase methods work correctly
+	// check that lowercase methods work correctly
 	h = &HTTPResponse{
 		Log: testutil.Logger{},
 		URLs: []string{ts.URL + "/mustbepostmethod"},
@@ -28,7 +28,7 @@ func TestIcinga2Default(t *testing.T) {

 func TestIcinga2DeprecatedHostConfig(t *testing.T) {
 	icinga2 := &Icinga2{
-		ObjectType: "hosts", //deprecated
+		ObjectType: "hosts", // deprecated
 		Objects: []string{},
 	}
 	require.NoError(t, icinga2.Init())

@@ -38,7 +38,7 @@ func TestIcinga2DeprecatedHostConfig(t *testing.T) {

 func TestIcinga2DeprecatedServicesConfig(t *testing.T) {
 	icinga2 := &Icinga2{
-		ObjectType: "services", //deprecated
+		ObjectType: "services", // deprecated
 		Objects: []string{},
 	}
 	require.NoError(t, icinga2.Init())
@@ -491,8 +491,8 @@ func TestWriteLargeLine(t *testing.T) {
 	resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBufferString(hugeMetricString+testMsgs))
 	require.NoError(t, err)
 	require.NoError(t, resp.Body.Close())
-	//todo: with the new parser, long lines aren't a problem. Do we need to skip them?
-	//require.EqualValues(t, 400, resp.StatusCode)
+	// TODO: with the new parser, long lines aren't a problem. Do we need to skip them?
+	// require.EqualValues(t, 400, resp.StatusCode)

 	expected := testutil.MustMetric(
 		"super_long_metric",
@@ -342,8 +342,8 @@ func TestWriteLargeLine(t *testing.T) {
 	resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBufferString(hugeMetricString+testMsgs))
 	require.NoError(t, err)
 	require.NoError(t, resp.Body.Close())
-	//todo: with the new parser, long lines aren't a problem. Do we need to skip them?
-	//require.EqualValues(t, 400, resp.StatusCode)
+	// TODO: with the new parser, long lines aren't a problem. Do we need to skip them?
+	// require.EqualValues(t, 400, resp.StatusCode)

 	expected := testutil.MustMetric(
 		"super_long_metric",
@@ -46,7 +46,7 @@ type Baseband struct {
 	SocketPath string `toml:"socket_path"`
 	FileLogPath string `toml:"log_file_path"`

-	//optional params
+	// optional params
 	UnreachableSocketBehavior string `toml:"unreachable_socket_behavior"`
 	SocketAccessTimeout config.Duration `toml:"socket_access_timeout"`
 	WaitForTelemetryTimeout config.Duration `toml:"wait_for_telemetry_timeout"`
@@ -20,7 +20,7 @@ func TestInit(t *testing.T) {

 	err := baseband.Init()

-	//check default variables
+	// check default variables
 	// check empty values
 	require.Empty(t, baseband.SocketPath)
 	require.Empty(t, baseband.FileLogPath)
@@ -89,7 +89,7 @@ func (lc *logConnector) checkLogFreshness() error {
 	// - file is not empty
 	// - file doesn't contain clear_log command (it may appear for few milliseconds, just before file is cleared)
 	if !lc.lastModTime.Equal(currModTime) && fileInfo.Size() != 0 && !lc.isClearLogContainedInFile() {
-		//refreshing succeed
+		// refreshing succeed
 		lc.lastModTime = currModTime
 		return nil
 	}

@@ -202,7 +202,7 @@ func (lc *logConnector) getMetric(offsetLine int, name string) (int, *logMetric,
 	// infoData eg: Thu Apr 13 13:28:40 2023:INFO:12 0
 	infoData := strings.Split(lc.lines[i+1], infoLine)
 	if len(infoData) != 2 {
-		//info data must be in format : some data + keyword "INFO:" + metrics
+		// info data must be in format : some data + keyword "INFO:" + metrics
 		return offsetLine, nil, fmt.Errorf("the content of the log file is incorrect, couldn't find %q separator", infoLine)
 	}

@@ -92,7 +92,7 @@ func TestGenerate(t *testing.T) {
 		cpuMetrics: []cpuMetricType{
 			cpuC7StateResidency,
 		},
-		msrReadTimeout: 0, //timeout disabled
+		msrReadTimeout: 0, // timeout disabled
 	})

 	require.Len(t, opts, 1)
@@ -106,8 +106,8 @@ func parseCoresMeasurement(measurements string) (parsedCoresMeasurement, error)
 func (p *Publisher) addToAccumulatorCores(measurement parsedCoresMeasurement) {
 	for i, value := range measurement.values {
 		if p.shortenedMetrics {
-			//0: "IPC"
-			//1: "LLC_Misses"
+			// 0: "IPC"
+			// 1: "LLC_Misses"
 			if i == 0 || i == 1 {
 				continue
 			}

@@ -157,8 +157,8 @@ func parseProcessesMeasurement(measurement processMeasurement) (parsedProcessMea
 func (p *Publisher) addToAccumulatorProcesses(measurement parsedProcessMeasurement) {
 	for i, value := range measurement.values {
 		if p.shortenedMetrics {
-			//0: "IPC"
-			//1: "LLC_Misses"
+			// 0: "IPC"
+			// 1: "LLC_Misses"
 			if i == 0 || i == 1 {
 				continue
 			}
@@ -411,7 +411,7 @@ func TestGatherV2(t *testing.T) {
 		fields map[string]interface{}
 		tags map[string]string
 	}{
-		//SEL | 72h | ns | 7.1 | No Reading
+		// SEL | 72h | ns | 7.1 | No Reading
 		{
 			map[string]interface{}{
 				"value": float64(0),

@@ -444,7 +444,7 @@ func TestGatherV2(t *testing.T) {
 		fields map[string]interface{}
 		tags map[string]string
 	}{
-		//SEL | 72h | ns | 7.1 | No Reading
+		// SEL | 72h | ns | 7.1 | No Reading
 		{
 			map[string]interface{}{
 				"value": float64(0),

@@ -456,7 +456,7 @@ func TestGatherV2(t *testing.T) {
 				"status_desc": "no_reading",
 			},
 		},
-		//Intrusion | 73h | ok | 7.1 |
+		// Intrusion | 73h | ok | 7.1 |
 		{
 			map[string]interface{}{
 				"value": float64(0),

@@ -468,7 +468,7 @@ func TestGatherV2(t *testing.T) {
 				"status_desc": "ok",
 			},
 		},
-		//Fan1 | 30h | ok | 7.1 | 5040 RPM
+		// Fan1 | 30h | ok | 7.1 | 5040 RPM
 		{
 			map[string]interface{}{
 				"value": float64(5040),

@@ -480,7 +480,7 @@ func TestGatherV2(t *testing.T) {
 				"unit": "rpm",
 			},
 		},
-		//Inlet Temp | 04h | ok | 7.1 | 25 degrees C
+		// Inlet Temp | 04h | ok | 7.1 | 25 degrees C
 		{
 			map[string]interface{}{
 				"value": float64(25),

@@ -492,7 +492,7 @@ func TestGatherV2(t *testing.T) {
 				"unit": "degrees_c",
 			},
 		},
-		//USB Cable Pres | 50h | ok | 7.1 | Connected
+		// USB Cable Pres | 50h | ok | 7.1 | Connected
 		{
 			map[string]interface{}{
 				"value": float64(0),

@@ -504,7 +504,7 @@ func TestGatherV2(t *testing.T) {
 				"status_desc": "connected",
 			},
 		},
-		//Current 1 | 6Ah | ok | 10.1 | 7.20 Amps
+		// Current 1 | 6Ah | ok | 10.1 | 7.20 Amps
 		{
 			map[string]interface{}{
 				"value": float64(7.2),

@@ -516,7 +516,7 @@ func TestGatherV2(t *testing.T) {
 				"unit": "amps",
 			},
 		},
-		//Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected
+		// Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected
 		{
 			map[string]interface{}{
 				"value": float64(110),
@@ -333,7 +333,7 @@ func (k *KafkaConsumer) Start(acc telegraf.Accumulator) error {
 	handler.MaxMessageLen = k.MaxMessageLen
 	handler.TopicTag = k.TopicTag
 	handler.MsgHeaderToMetricName = k.MsgHeaderAsMetricName
-	//if message headers list specified, put it as map to handler
+	// if message headers list specified, put it as map to handler
 	msgHeadersMap := make(map[string]bool, len(k.MsgHeadersAsTags))
 	if len(k.MsgHeadersAsTags) > 0 {
 		for _, header := range k.MsgHeadersAsTags {

@@ -508,7 +508,7 @@ func (h *consumerGroupHandler) Handle(session sarama.ConsumerGroupSession, msg *
 	// Check if any message header should override metric name or should be pass as tag
 	if len(h.MsgHeadersToTags) > 0 || h.MsgHeaderToMetricName != "" {
 		for _, header := range msg.Headers {
-			//convert to a string as the header and value are byte arrays.
+			// convert to a string as the header and value are byte arrays.
 			headerKey := string(header.Key)
 			if _, exists := h.MsgHeadersToTags[headerKey]; exists {
 				// If message header should be pass as tag then add it to the metrics
@@ -316,7 +316,7 @@ func TestConsumerGroupHandler_Lifecycle(t *testing.T) {
 	// This produces a flappy testcase probably due to a race between context cancellation and consumption.
 	// Furthermore, it is not clear what the outcome of this test should be...
 	// err = cg.ConsumeClaim(session, &claim)
-	//require.NoError(t, err)
+	// require.NoError(t, err)
 	// So stick with the line below for now.
 	//nolint:errcheck // see above
 	cg.ConsumeClaim(session, &claim)

@@ -660,7 +660,7 @@ func TestExponentialBackoff(t *testing.T) {
 	require.NoError(t, parser.Init())
 	input.SetParser(parser)

-	//time how long initialization (connection) takes
+	// time how long initialization (connection) takes
 	start := time.Now()
 	require.NoError(t, input.Init())

@@ -71,7 +71,7 @@ func TestGather(t *testing.T) {
 	}
 	checkKibanaStatusResult(defaultTags6_3()["version"], t, &acc1)

-	//Unit test for Kibana version >= 6.4
+	// Unit test for Kibana version >= 6.4
 	ks.client.Transport = newTransportMock(http.StatusOK, kibanaStatusResponse6_5)
 	var acc2 testutil.Accumulator
 	if err := acc2.GatherError(ks.Gather); err != nil {
@@ -288,7 +288,7 @@ func (l *LogParserPlugin) Stop() {
 	}
 	err := t.Stop()

-	//message for a stopped tailer
+	// message for a stopped tailer
 	l.Log.Debugf("Tail dropped for file: %v", t.Filename)

 	if err != nil {
@@ -113,11 +113,11 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) {
 	// test.a.log file. This seems like an issue with the tail package, it
 	// is not closing the os.File properly on Stop.
 	// === RUN TestGrokParseLogFilesAppearLater
-	//2022/04/16 11:05:13 D! [] Tail added for file: C:\Users\circleci\AppData\Local\Temp\TestGrokParseLogFilesAppearLater3687440534\001\test_a.log
-	//2022/04/16 11:05:13 D! [] Tail dropped for file: C:\Users\circleci\AppData\Local\Temp\TestGrokParseLogFilesAppearLater3687440534\001\test_a.log
+	// 2022/04/16 11:05:13 D! [] Tail added for file: C:\Users\circleci\AppData\Local\Temp\TestGrokParseLogFilesAppearLater3687440534\001\test_a.log
+	// 2022/04/16 11:05:13 D! [] Tail dropped for file: C:\Users\circleci\AppData\Local\Temp\TestGrokParseLogFilesAppearLater3687440534\001\test_a.log
 	// testing.go:1090: TempDir RemoveAll cleanup:
 	// CreateFile C:\Users\circleci\AppData\Local\Temp\TestGrokParseLogFilesAppearLater3687440534\001: Access is denied.
-	//--- FAIL: TestGrokParseLogFilesAppearLater (1.68s)
+	// --- FAIL: TestGrokParseLogFilesAppearLater (1.68s)
 	emptydir, err := os.MkdirTemp("", "TestGrokParseLogFilesAppearLater")
 	require.NoError(t, err)
 	defer os.RemoveAll(emptydir)
@@ -397,7 +397,7 @@ func (l *Lustre2) GetLustreHealth() error {
 		// it was moved in https://github.com/lustre/lustre-release/commit/5d368bd0b2
 		filename = filepath.Join(rootdir, "proc", "fs", "lustre", "health_check")
 		if _, err = os.Stat(filename); err != nil {
-			return nil //nolint: nilerr // we don't want to return an error if the file doesn't exist
+			return nil //nolint:nilerr // we don't want to return an error if the file doesn't exist
 		}
 	}
 	contents, err := os.ReadFile(filename)
@@ -30,7 +30,7 @@ func TestMarklogic(t *testing.T) {
 	ml := &Marklogic{
 		Hosts: []string{"example1"},
 		URL: ts.URL,
-		//Sources: []string{"http://localhost:8002/manage/v2/hosts/hostname1?view=status&format=json"},
+		// Sources: []string{"http://localhost:8002/manage/v2/hosts/hostname1?view=status&format=json"},
 	}

 	// Create a test accumulator
@@ -129,7 +129,7 @@ func (m *Modbus) SampleConfig() string {
 }

 func (m *Modbus) Init() error {
-	//check device name
+	// check device name
 	if m.Name == "" {
 		return errors.New("device name is empty")
 	}
@@ -1,9 +1,7 @@
-/***
-The code contained here came from https://github.com/mongodb/mongo-tools/blob/master/mongostat/stat_types.go
-and contains modifications so that no other dependency from that project is needed. Other modifications included
-removing unnecessary code specific to formatting the output and determine the current state of the database. It
-is licensed under Apache Version 2.0, http://www.apache.org/licenses/LICENSE-2.0.html
-***/
+// The code contained here came from https://github.com/mongodb/mongo-tools/blob/master/mongostat/stat_types.go
+// and contains modifications so that no other dependency from that project is needed. Other modifications included
+// removing unnecessary code specific to formatting the output and determine the current state of the database. It
+// is licensed under Apache Version 2.0, http://www.apache.org/licenses/LICENSE-2.0.html

 package mongodb

@@ -706,7 +704,7 @@ type statLine struct {
 	// Document fields
 	DeletedD, InsertedD, ReturnedD, UpdatedD int64

-	//Commands fields
+	// Commands fields
 	AggregateCommandTotal, AggregateCommandFailed int64
 	CountCommandTotal, CountCommandFailed int64
 	DeleteCommandTotal, DeleteCommandFailed int64

@@ -1289,7 +1287,7 @@ func NewStatLine(oldMongo, newMongo mongoStatus, key string, all bool, sampleSec

 	if newStat.GlobalLock != nil {
 		hasWT := newStat.WiredTiger != nil && oldStat.WiredTiger != nil
-		//If we have wiredtiger stats, use those instead
+		// If we have wiredtiger stats, use those instead
 		if newStat.GlobalLock.CurrentQueue != nil {
 			if hasWT {
 				returnVal.QueuedReaders = newStat.GlobalLock.CurrentQueue.Readers + newStat.GlobalLock.ActiveClients.Readers -
@@ -164,9 +164,7 @@ func (n *NeptuneApex) parseXML(acc telegraf.Accumulator, data []byte) error {
 		if o.Xstatus != nil {
 			fields["xstatus"] = *o.Xstatus
 		}
-		// Try to determine outlet type. Focus on accuracy, leaving the
-		//outlet_type "unknown" when ambiguous. 24v and vortech cannot be
-		// determined.
+		// Try to determine outlet type. Focus on accuracy, leaving the outlet_type "unknown" when ambiguous. 24v and vortech cannot be determined.
 		switch {
 		case strings.HasPrefix(o.DeviceID, "base_Var"):
 			tags["output_type"] = "variable"
@@ -385,8 +385,8 @@ func (n *NginxPlusAPI) gatherHTTPUpstreamsMetrics(addr *url.URL, acc telegraf.Ac
 				"healthchecks_fails": peer.HealthChecks.Fails,
 				"healthchecks_unhealthy": peer.HealthChecks.Unhealthy,
 				"downtime": peer.Downtime,
-				//"selected": peer.Selected.toInt64,
-				//"downstart": peer.Downstart.toInt64,
+				// "selected": peer.Selected.toInt64,
+				// "downstart": peer.Downstart.toInt64,
 			}
 			if peer.HealthChecks.LastPassed != nil {
 				peerFields["healthchecks_last_passed"] = *peer.HealthChecks.LastPassed
@@ -19,7 +19,7 @@ func fakePassengerStatus(stat string) (string, error) {
 		fileExtension = ".bat"
 		content = "@echo off\n"
 		for _, line := range strings.Split(strings.TrimSuffix(stat, "\n"), "\n") {
-			content += "for /f \"delims=\" %%A in (\"" + line + "\") do echo %%~A\n" //my eyes are bleeding
+			content += "for /f \"delims=\" %%A in (\"" + line + "\") do echo %%~A\n" // my eyes are bleeding
 		}
 	} else {
 		content = fmt.Sprintf("#!/bin/sh\ncat << EOF\n%s\nEOF", stat)

@@ -86,7 +86,7 @@ func TestPassengerGenerateMetric(t *testing.T) {
 	require.NoError(t, err)
 	defer teardown(tempFilePath)

-	//Now we tested again above server, with our authentication data
+	// Now we tested again above server, with our authentication data
 	r := &passenger{
 		Command: tempFilePath,
 	}
@@ -116,7 +116,7 @@ func TestPhpFpmGeneratesMetrics_From_Fcgi(t *testing.T) {
 	s := statServer{}
 	go fcgi.Serve(tcp, s) //nolint:errcheck // ignore the returned error as we cannot do anything about it anyway

-	//Now we tested again above server
+	// Now we tested again above server
 	r := &phpfpm{
 		Urls: []string{"fcgi://" + tcp.Addr().String() + "/status"},
 		Log: &testutil.Logger{},

@@ -167,7 +167,7 @@ func TestPhpFpmTimeout_From_Fcgi(t *testing.T) {
 		time.Sleep(2 * timeout)
 	}()

-	//Now we tested again above server
+	// Now we tested again above server
 	r := &phpfpm{
 		Urls: []string{"fcgi://" + tcp.Addr().String() + "/status"},
 		Timeout: config.Duration(timeout),

@@ -199,7 +199,7 @@ func TestPhpFpmCrashWithTimeout_From_Fcgi(t *testing.T) {

 	const timeout = 200 * time.Millisecond

-	//Now we tested again above server
+	// Now we tested again above server
 	r := &phpfpm{
 		Urls: []string{"fcgi://" + tcpAddress + "/status"},
 		Timeout: config.Duration(timeout),

@@ -447,7 +447,7 @@ func TestGatherDespiteUnavailable(t *testing.T) {
 	s := statServer{}
 	go fcgi.Serve(tcp, s) //nolint:errcheck // ignore the returned error as we cannot do anything about it anyway

-	//Now we tested again above server
+	// Now we tested again above server
 	r := &phpfpm{
 		Urls: []string{"fcgi://" + tcp.Addr().String() + "/status", "/lala"},
 		Log: &testutil.Logger{},
@@ -276,7 +276,7 @@ func mockUnreachableHostPinger(string, float64, ...string) (string, error) {
 	return UnreachablePingOutput, errors.New("So very bad")
 }

-//Reply from 185.28.251.217: TTL expired in transit.
+// Reply from 185.28.251.217: TTL expired in transit.

 // in case 'Destination net unreachable' ping app return receive packet which is not what we need
 // it's not contain valid metric so treat it as lost one
@@ -23,8 +23,7 @@ func (pg *NativeFinder) UID(user string) ([]PID, error) {
 	for _, p := range procs {
 		username, err := p.Username()
 		if err != nil {
-			//skip, this can happen if we don't have permissions or
-			//the pid no longer exists
+			// skip, this can be caused by the pid no longer exists, or you don't have permissions to access it
 			continue
 		}
 		if username == user {

@@ -63,8 +62,7 @@ func (pg *NativeFinder) FullPattern(pattern string) ([]PID, error) {
 	for _, p := range procs {
 		cmd, err := p.Cmdline()
 		if err != nil {
-			//skip, this can be caused by the pid no longer existing
-			//or you having no permissions to access it
+			// skip, this can be caused by the pid no longer exists, or you don't have permissions to access it
 			continue
 		}
 		if regxPattern.MatchString(cmd) {

@@ -122,8 +120,7 @@ func (pg *NativeFinder) Pattern(pattern string) ([]PID, error) {
 	for _, p := range procs {
 		name, err := processName(p)
 		if err != nil {
-			//skip, this can be caused by the pid no longer existing
-			//or you having no permissions to access it
+			// skip, this can be caused by the pid no longer exists, or you don't have permissions to access it
 			continue
 		}
 		if regxPattern.MatchString(name) {
@@ -571,7 +571,7 @@ func TestGather_systemdUnitPIDs(t *testing.T) {
 }

 func TestGather_cgroupPIDs(t *testing.T) {
-	//no cgroups in windows
+	// no cgroups in windows
 	if runtime.GOOS == "windows" {
 		t.Skip("no cgroups in windows")
 	}
@@ -148,7 +148,7 @@ func (rsl *riemannListener) removeConnection(c net.Conn) {
 	rsl.connectionsMtx.Unlock()
 }

-//Utilities
+// Utilities

 /*
 readMessages will read Riemann messages in binary format
@@ -14,7 +14,7 @@ import (

 func TestIPv4SW(t *testing.T) {
 	str := `00000005` + // version
-		`00000001` + //address type
+		`00000001` + // address type
 		`c0a80102` + // ip address
 		`00000010` + // sub agent id
 		`0000f3d4` + // sequence number
@@ -393,18 +393,18 @@ func (*Smart) SampleConfig() string {

 // Init performs one time setup of the plugin and returns an error if the configuration is invalid.
 func (m *Smart) Init() error {
-	//if deprecated `path` (to smartctl binary) is provided in config and `path_smartctl` override does not exist
+	// if deprecated `path` (to smartctl binary) is provided in config and `path_smartctl` override does not exist
 	if len(m.Path) > 0 && len(m.PathSmartctl) == 0 {
 		m.PathSmartctl = m.Path
 	}

-	//if `path_smartctl` is not provided in config, try to find smartctl binary in PATH
+	// if `path_smartctl` is not provided in config, try to find smartctl binary in PATH
 	if len(m.PathSmartctl) == 0 {
 		//nolint:errcheck // error handled later
 		m.PathSmartctl, _ = exec.LookPath("smartctl")
 	}

-	//if `path_nvme` is not provided in config, try to find nvme binary in PATH
+	// if `path_nvme` is not provided in config, try to find nvme binary in PATH
 	if len(m.PathNVMe) == 0 {
 		//nolint:errcheck // error handled later
 		m.PathNVMe, _ = exec.LookPath("nvme")

@@ -417,14 +417,14 @@ func (m *Smart) Init() error {
 	err := validatePath(m.PathSmartctl)
 	if err != nil {
 		m.PathSmartctl = ""
-		//without smartctl, plugin will not be able to gather basic metrics
+		// without smartctl, plugin will not be able to gather basic metrics
 		return fmt.Errorf("smartctl not found: verify that smartctl is installed and it is in your PATH (or specified in config): %w", err)
 	}

 	err = validatePath(m.PathNVMe)
 	if err != nil {
 		m.PathNVMe = ""
-		//without nvme, plugin will not be able to gather vendor specific attributes (but it can work without it)
+		// without nvme, plugin will not be able to gather vendor specific attributes (but it can work without it)
 		m.Log.Warnf(
 			"nvme not found: verify that nvme is installed and it is in your PATH (or specified in config) to gather vendor specific attributes: %s",
 			err.Error(),

@@ -813,7 +813,7 @@ func (m *Smart) gatherDisk(acc telegraf.Accumulator, device string, wg *sync.Wai
 	fields := make(map[string]interface{})

 	if m.Attributes {
-		//add power mode
+		// add power mode
 		keys := [...]string{"device", "device_type", "model", "serial_no", "wwn", "capacity", "enabled", "power"}
 		for _, key := range keys {
 			if value, ok := deviceTags[key]; ok {
@@ -171,7 +171,7 @@ func TestReceiveTrap(t *testing.T) {
         entries  []entry
         metrics  []telegraf.Metric
     }{
-        //ordinary v2c coldStart trap
+        // ordinary v2c coldStart trap
         {
             name:    "v2c coldStart",
             version: gosnmp.Version2c,

@@ -230,10 +230,10 @@ func TestReceiveTrap(t *testing.T) {
                 ),
             },
         },
-        //Check that we're not running snmptranslate to look up oids
-        //when we shouldn't be. This sends and receives a valid trap
-        //but metric production should fail because the oids aren't in
-        //the cache and oid lookup is intentionally mocked to fail.
+        // Check that we're not running snmptranslate to look up oids
+        // when we shouldn't be. This sends and receives a valid trap
+        // but metric production should fail because the oids aren't in
+        // the cache and oid lookup is intentionally mocked to fail.
         {
             name:    "missing oid",
             version: gosnmp.Version2c,

@@ -251,10 +251,10 @@ func TestReceiveTrap(t *testing.T) {
                 },
             },
         },
-            entries: []entry{}, //nothing in cache
+            entries: []entry{}, // nothing in cache
             metrics: []telegraf.Metric{},
         },
-        //v1 enterprise specific trap
+        // v1 enterprise specific trap
         {
             name:    "v1 trap enterprise",
             version: gosnmp.Version1,

@@ -308,7 +308,7 @@ func TestReceiveTrap(t *testing.T) {
                 ),
             },
         },
-        //v1 generic trap
+        // v1 generic trap
         {
             name:    "v1 trap generic",
             version: gosnmp.Version1,

@@ -327,7 +327,7 @@ func TestReceiveTrap(t *testing.T) {
             },
             Enterprise:   ".1.2.3",
             AgentAddress: "10.20.30.40",
-            GenericTrap:  0, //coldStart
+            GenericTrap:  0, // coldStart
             SpecificTrap: 0,
             Timestamp:    uint(now),
         },

@@ -375,7 +375,7 @@ func TestReceiveTrap(t *testing.T) {
                 ),
             },
         },
-        //ordinary v3 coldStart trap no auth and no priv
+        // ordinary v3 coldStart trap no auth and no priv
         {
             name:    "v3 coldStart noAuthNoPriv",
             version: gosnmp.Version3,

@@ -439,7 +439,7 @@ func TestReceiveTrap(t *testing.T) {
                 ),
             },
         },
-        //ordinary v3 coldstart trap SHA auth and no priv
+        // ordinary v3 coldstart trap SHA auth and no priv
         {
             name:    "v3 coldStart authShaNoPriv",
             version: gosnmp.Version3,

@@ -501,7 +501,7 @@ func TestReceiveTrap(t *testing.T) {
                 ),
             },
         },
-        //ordinary v3 coldstart trap SHA224 auth and no priv
+        // ordinary v3 coldstart trap SHA224 auth and no priv
         {
             name:    "v3 coldStart authShaNoPriv",
             version: gosnmp.Version3,

@@ -563,7 +563,7 @@ func TestReceiveTrap(t *testing.T) {
                 ),
             },
         },
-        //ordinary v3 coldstart trap SHA256 auth and no priv
+        // ordinary v3 coldstart trap SHA256 auth and no priv
         {
             name:    "v3 coldStart authSha256NoPriv",
             version: gosnmp.Version3,

@@ -625,7 +625,7 @@ func TestReceiveTrap(t *testing.T) {
                 ),
             },
         },
-        //ordinary v3 coldstart trap SHA384 auth and no priv
+        // ordinary v3 coldstart trap SHA384 auth and no priv
         {
             name:    "v3 coldStart authSha384NoPriv",
             version: gosnmp.Version3,

@@ -687,7 +687,7 @@ func TestReceiveTrap(t *testing.T) {
                 ),
             },
         },
-        //ordinary v3 coldstart trap SHA512 auth and no priv
+        // ordinary v3 coldstart trap SHA512 auth and no priv
         {
             name:    "v3 coldStart authShaNoPriv",
             version: gosnmp.Version3,

@@ -749,7 +749,7 @@ func TestReceiveTrap(t *testing.T) {
                 ),
             },
         },
-        //ordinary v3 coldstart trap SHA auth and no priv
+        // ordinary v3 coldstart trap SHA auth and no priv
         {
             name:    "v3 coldStart authShaNoPriv",
             version: gosnmp.Version3,

@@ -811,7 +811,7 @@ func TestReceiveTrap(t *testing.T) {
                 ),
             },
         },
-        //ordinary v3 coldstart trap MD5 auth and no priv
+        // ordinary v3 coldstart trap MD5 auth and no priv
         {
             name:    "v3 coldStart authMD5NoPriv",
             version: gosnmp.Version3,

@@ -873,7 +873,7 @@ func TestReceiveTrap(t *testing.T) {
                 ),
             },
         },
-        //ordinary v3 coldStart SHA trap auth and AES priv
+        // ordinary v3 coldStart SHA trap auth and AES priv
         {
             name:    "v3 coldStart authSHAPrivAES",
             version: gosnmp.Version3,

@@ -937,7 +937,7 @@ func TestReceiveTrap(t *testing.T) {
                 ),
             },
         },
-        //ordinary v3 coldStart SHA trap auth and DES priv
+        // ordinary v3 coldStart SHA trap auth and DES priv
         {
             name:    "v3 coldStart authSHAPrivDES",
             version: gosnmp.Version3,

@@ -1001,7 +1001,7 @@ func TestReceiveTrap(t *testing.T) {
                 ),
             },
         },
-        //ordinary v3 coldStart SHA trap auth and AES192 priv
+        // ordinary v3 coldStart SHA trap auth and AES192 priv
         {
             name:    "v3 coldStart authSHAPrivAES192",
             version: gosnmp.Version3,

@@ -1065,7 +1065,7 @@ func TestReceiveTrap(t *testing.T) {
                 ),
             },
         },
-        //ordinary v3 coldStart SHA trap auth and AES192C priv
+        // ordinary v3 coldStart SHA trap auth and AES192C priv
         {
             name:    "v3 coldStart authSHAPrivAES192C",
             version: gosnmp.Version3,

@@ -1129,7 +1129,7 @@ func TestReceiveTrap(t *testing.T) {
                 ),
             },
         },
-        //ordinary v3 coldStart SHA trap auth and AES256 priv
+        // ordinary v3 coldStart SHA trap auth and AES256 priv
         {
             name:    "v3 coldStart authSHAPrivAES256",
             version: gosnmp.Version3,

@@ -1193,7 +1193,7 @@ func TestReceiveTrap(t *testing.T) {
                 ),
             },
         },
-        //ordinary v3 coldStart SHA trap auth and AES256C priv
+        // ordinary v3 coldStart SHA trap auth and AES256C priv
         {
             name:    "v3 coldStart authSHAPrivAES256C",
             version: gosnmp.Version3,

@@ -1284,7 +1284,7 @@ func TestReceiveTrap(t *testing.T) {
                 timeFunc: func() time.Time {
                     return fakeTime
                 },
-                //if cold start be answer otherwise err
+                // if cold start be answer otherwise err
                 Log:     testutil.Logger{},
                 Version: tt.version.String(),
                 SecName: config.NewSecret([]byte(tt.secName)),

@@ -1298,7 +1298,7 @@ func TestReceiveTrap(t *testing.T) {

             require.NoError(t, s.Init())

-            //inject test translator
+            // inject test translator
             s.transl = newTestTranslator(tt.entries)

             var acc testutil.Accumulator

@@ -1359,7 +1359,7 @@ func TestReceiveTrapMultipleConfig(t *testing.T) {
         entries  []entry
         metrics  []telegraf.Metric
     }{
-        //ordinary v3 coldStart SHA trap auth and AES priv
+        // ordinary v3 coldStart SHA trap auth and AES priv
         {
             name:    "v3 coldStart authSHAPrivAES",
             version: gosnmp.Version3,

@@ -1437,7 +1437,7 @@ func TestReceiveTrapMultipleConfig(t *testing.T) {
                 ),
             },
         },
-        //ordinary v3 coldStart SHA trap auth and AES256 priv
+        // ordinary v3 coldStart SHA trap auth and AES256 priv
         {
             name:    "v3 coldStart authSHAPrivAES256",
             version: gosnmp.Version3,

@@ -1514,7 +1514,7 @@ func TestReceiveTrapMultipleConfig(t *testing.T) {
                 fakeTime,
             )},
         },
-        //ordinary v3 coldStart SHA trap auth and AES256C priv
+        // ordinary v3 coldStart SHA trap auth and AES256C priv
         {
             name:    "v3 coldStart authSHAPrivAES256C",
             version: gosnmp.Version3,

@@ -1620,7 +1620,7 @@ func TestReceiveTrapMultipleConfig(t *testing.T) {
                 timeFunc: func() time.Time {
                     return fakeTime
                 },
-                //if cold start be answer otherwise err
+                // if cold start be answer otherwise err
                 Log:     testutil.Logger{},
                 Version: tt.version.String(),
                 SecName: config.NewSecret([]byte(tt.secName + "1")),

@@ -1649,7 +1649,7 @@ func TestReceiveTrapMultipleConfig(t *testing.T) {
                 timeFunc: func() time.Time {
                     return fakeTime
                 },
-                //if cold start be answer otherwise err
+                // if cold start be answer otherwise err
                 Log:     testutil.Logger{},
                 Version: tt.version.String(),
                 SecName: config.NewSecret([]byte(tt.secName + "2")),

@@ -1664,7 +1664,7 @@ func TestReceiveTrapMultipleConfig(t *testing.T) {
             require.NoError(t, s1.Init())
             require.NoError(t, s2.Init())

-            //inject test translator
+            // inject test translator
             s1.transl = newTestTranslator(tt.entries)
             s2.transl = newTestTranslator(tt.entries)

@@ -137,7 +137,7 @@ func (s *SQLServer) initQueries() error {
             Query{ScriptName: "AzureArcSQLMIPerformanceCounters", Script: sqlAzureArcMIPerformanceCounters, ResultByRow: false}
         queries["AzureArcSQLMIRequests"] = Query{ScriptName: "AzureArcSQLMIRequests", Script: sqlAzureArcMIRequests, ResultByRow: false}
         queries["AzureArcSQLMISchedulers"] = Query{ScriptName: "AzureArcSQLMISchedulers", Script: sqlAzureArcMISchedulers, ResultByRow: false}
-    } else if s.DatabaseType == typeSQLServer { //These are still V2 queries and have not been refactored yet.
+    } else if s.DatabaseType == typeSQLServer { // These are still V2 queries and have not been refactored yet.
         queries["SQLServerPerformanceCounters"] = Query{ScriptName: "SQLServerPerformanceCounters", Script: sqlServerPerformanceCounters, ResultByRow: false}
         queries["SQLServerWaitStatsCategorized"] = Query{ScriptName: "SQLServerWaitStatsCategorized", Script: sqlServerWaitStatsCategorized, ResultByRow: false}
         queries["SQLServerDatabaseIO"] = Query{ScriptName: "SQLServerDatabaseIO", Script: sqlServerDatabaseIO, ResultByRow: false}

@@ -187,7 +187,7 @@ func (s *Suricata) parseAlert(acc telegraf.Accumulator, result map[string]interf

     totalmap := make(map[string]interface{})
     for k, v := range result["alert"].(map[string]interface{}) {
-        //source and target fields are maps
+        // source and target fields are maps
         err := flexFlatten(totalmap, k, v, s.Delimiter)
         if err != nil {
             s.Log.Debugf("Flattening alert failed: %v", err)

@@ -196,7 +196,7 @@ func (s *Suricata) parseAlert(acc telegraf.Accumulator, result map[string]interf
         }
     }

-    //threads field do not exist in alert output, always global
+    // threads field do not exist in alert output, always global
     acc.AddFields("suricata_alert", totalmap, nil)
 }

@@ -48,7 +48,7 @@ func TestSuricataLarge(t *testing.T) {
     _, err = c.Write([]byte("\n"))
     require.NoError(t, err)

-    //test suricata alerts
+    // test suricata alerts
     data2, err := os.ReadFile("testdata/test2.json")
     require.NoError(t, err)
     _, err = c.Write(data2)

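For context on the flexFlatten calls above: alert fields such as source and target arrive as nested JSON maps and get collapsed into delimiter-joined keys before being emitted as fields. A rough sketch of that kind of flattening, not the plugin's exact implementation:

// flatten collapses nested maps into delimiter-joined keys, so
// {"source": {"ip": "10.0.0.1"}} with sep "." becomes {"source.ip": "10.0.0.1"}.
func flatten(out map[string]interface{}, key string, val interface{}, sep string) {
    switch v := val.(type) {
    case map[string]interface{}:
        for k, child := range v {
            flatten(out, key+sep+k, child, sep)
        }
    default:
        out[key] = v
    }
}

It would be called once per top-level key, in the same shape as the loop above: flatten(totalmap, k, v, s.Delimiter).
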
@@ -162,7 +162,7 @@ func TestTailDosLineEndings(t *testing.T) {
 }

 func TestGrokParseLogFilesWithMultiline(t *testing.T) {
-    //we make sure the timeout won't kick in
+    // we make sure the timeout won't kick in
     d, err := time.ParseDuration("100s")
     require.NoError(t, err)
     duration := config.Duration(d)

@@ -281,7 +281,7 @@ func TestGrokParseLogFilesWithMultilineTimeout(t *testing.T) {
 }

 func TestGrokParseLogFilesWithMultilineTailerCloseFlushesMultilineBuffer(t *testing.T) {
-    //we make sure the timeout won't kick in
+    // we make sure the timeout won't kick in
     duration := config.Duration(100 * time.Second)

     tt := NewTestTail()

@@ -311,7 +311,7 @@ func sensorsTemperaturesOld(syspath string) ([]host.TemperatureStat, error) {
         //nolint:errcheck // skip on error
         c, _ := os.ReadFile(filepath.Join(filepath.Dir(file), filename[0]+"_label"))
         if c != nil {
-            //format the label from "Core 0" to "core0_"
+            // format the label from "Core 0" to "core0_"
             label = strings.Join(strings.Split(strings.TrimSpace(strings.ToLower(string(c))), " "), "") + "_"
         }

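As a worked example of the transform in that comment, the chain lowercases, trims the trailing newline that sysfs label files usually carry, and strips spaces before appending the underscore:

label := strings.Join(strings.Split(strings.TrimSpace(strings.ToLower("Core 0\n")), " "), "") + "_"
// label == "core0_"
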
@@ -121,7 +121,7 @@ func (u *Upsd) gatherUps(acc telegraf.Accumulator, upsname string, variables []n
     tags := map[string]string{
         "serial":   fmt.Sprintf("%v", metrics["device.serial"]),
         "ups_name": upsname,
-        //"variables": variables.Status not sure if it's a good idea to provide this
+        // "variables": variables.Status not sure if it's a good idea to provide this
         "model": fmt.Sprintf("%v", metrics["device.model"]),
     }

@@ -194,16 +194,16 @@ func (u *Upsd) mapStatus(metrics map[string]interface{}, tags map[string]string)
     status := uint64(0)
     statusString := fmt.Sprintf("%v", metrics["ups.status"])
     statuses := strings.Fields(statusString)
-    //Source: 1.3.2 at http://rogerprice.org/NUT/ConfigExamples.A5.pdf
-    //apcupsd bits:
-    //0 Runtime calibration occurring (Not reported by Smart UPS v/s and BackUPS Pro)
-    //1 SmartTrim (Not reported by 1st and 2nd generation SmartUPS models)
-    //2 SmartBoost
-    //3 On line (this is the normal condition)
-    //4 On battery
-    //5 Overloaded output
-    //6 Battery low
-    //7 Replace battery
+    // Source: 1.3.2 at http://rogerprice.org/NUT/ConfigExamples.A5.pdf
+    // apcupsd bits:
+    // 0 Runtime calibration occurring (Not reported by Smart UPS v/s and BackUPS Pro)
+    // 1 SmartTrim (Not reported by 1st and 2nd generation SmartUPS models)
+    // 2 SmartBoost
+    // 3 On line (this is the normal condition)
+    // 4 On battery
+    // 5 Overloaded output
+    // 6 Battery low
+    // 7 Replace battery
     if choice.Contains("CAL", statuses) {
         status |= 1 << 0
         tags["status_CAL"] = "true"

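The comment block above is effectively a lookup table, and the choice.Contains checks that follow it (only the CAL case is visible here) all share one shape. A condensed sketch of the same mapping, assuming the remaining bits use NUT's documented status tokens (TRIM, BOOST, OL, OB, OVER, LB, RB):

var statusBits = map[string]uint{
    "CAL": 0, "TRIM": 1, "BOOST": 2, "OL": 3,
    "OB": 4, "OVER": 5, "LB": 6, "RB": 7,
}

func mapStatusBits(statuses []string) (uint64, map[string]string) {
    var status uint64
    tags := make(map[string]string)
    for _, token := range statuses {
        if bit, ok := statusBits[token]; ok {
            status |= 1 << bit // set the apcupsd-style bit for this token
            tags["status_"+token] = "true"
        }
    }
    return status, tags
}
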
@@ -33,25 +33,25 @@ var (
     defaultAdmBinary = "/usr/bin/varnishadm"
     defaultTimeout   = config.Duration(time.Second)

-    //vcl name and backend restriction regexp [A-Za-z][A-Za-z0-9_-]*
+    // vcl name and backend restriction regexp [A-Za-z][A-Za-z0-9_-]*
     defaultRegexps = []*regexp.Regexp{
-        //dynamic backends
+        // dynamic backends
         //nolint:lll // conditionally long line allowed to have a better understanding of following regexp
-        //VBE.VCL_xxxx_xxx_VOD_SHIELD_Vxxxxxxxxxxxxx_xxxxxxxxxxxxx.goto.000007c8.(xx.xx.xxx.xx).(http://xxxxxxx-xxxxx-xxxxx-xxxxxx-xx-xxxx-x-xxxx.xx-xx-xxxx-x.amazonaws.com:80).(ttl:5.000000).fail_eaddrnotavail
+        // VBE.VCL_xxxx_xxx_VOD_SHIELD_Vxxxxxxxxxxxxx_xxxxxxxxxxxxx.goto.000007c8.(xx.xx.xxx.xx).(http://xxxxxxx-xxxxx-xxxxx-xxxxxx-xx-xxxx-x-xxxx.xx-xx-xxxx-x.amazonaws.com:80).(ttl:5.000000).fail_eaddrnotavail
         regexp.MustCompile(
             `^VBE\.(?P<_vcl>[\w\-]*)\.goto\.[[:alnum:]]+\.\((?P<backend>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\)\.\((?P<server>.*)\)\.\(ttl:\d*\.\d*.*\)`,
         ),

-        //VBE.reload_20210622_153544_23757.default.unhealthy
+        // VBE.reload_20210622_153544_23757.default.unhealthy
         regexp.MustCompile(`^VBE\.(?P<_vcl>[\w\-]*)\.(?P<backend>[\w\-]*)\.([\w\-]*)`),

-        //KVSTORE values
+        // KVSTORE values
         regexp.MustCompile(`^KVSTORE\.(?P<id>[\w\-]*)\.(?P<_vcl>[\w\-]*)\.([\w\-]*)`),

-        //XCNT.abc1234.XXX+_YYYY.cr.pass.val
+        // XCNT.abc1234.XXX+_YYYY.cr.pass.val
         regexp.MustCompile(`^XCNT\.(?P<_vcl>[\w\-]*)(\.)*(?P<group>[\w\-.+]*)\.(?P<_field>[\w\-.+]*)\.val`),

-        //generic metric like MSE_STORE.store-1-1.g_aio_running_bytes_write
+        // generic metric like MSE_STORE.store-1-1.g_aio_running_bytes_write
         regexp.MustCompile(`([\w\-]*)\.(?P<_field>[\w\-.]*)`),
     }
 )

@@ -146,7 +146,7 @@ func (s *Varnish) Gather(acc telegraf.Accumulator) error {
     }

     if s.MetricVersion == 2 {
-        //run varnishadm to get active vcl
+        // run varnishadm to get active vcl
         var activeVcl = "boot"
         if s.admRun != nil {
             admOut, err := s.admRun(s.AdmBinary, s.UseSudo, admArgs, s.Timeout)

@@ -165,26 +165,26 @@ func (s *Varnish) Gather(acc telegraf.Accumulator) error {

 // Prepare varnish cli tools arguments
 func (s *Varnish) prepareCmdArgs() ([]string, []string) {
-    //default varnishadm arguments
+    // default varnishadm arguments
     admArgs := []string{"vcl.list", "-j"}

-    //default varnish stats arguments
+    // default varnish stats arguments
     statsArgs := []string{"-j"}
     if s.MetricVersion == 1 {
         statsArgs = []string{"-1"}
     }

-    //add optional instance name
+    // add optional instance name
     if s.InstanceName != "" {
         statsArgs = append(statsArgs, []string{"-n", s.InstanceName}...)
         admArgs = append([]string{"-n", s.InstanceName}, admArgs...)
     }

-    //override custom arguments
+    // override custom arguments
     if len(s.AdmBinaryArgs) > 0 {
         admArgs = s.AdmBinaryArgs
     }
-    //override custom arguments
+    // override custom arguments
     if len(s.BinaryArgs) > 0 {
         statsArgs = s.BinaryArgs
     }

@@ -268,13 +268,13 @@ func (s *Varnish) processMetricsV2(activeVcl string, acc telegraf.Accumulator, o

     if value, ok := data["value"]; ok {
         if number, ok := value.(json.Number); ok {
-            //parse bitmap value
+            // parse bitmap value
             if flag == "b" {
                 if metricValue, parseError = strconv.ParseUint(number.String(), 10, 64); parseError != nil {
                     parseError = fmt.Errorf("%q value uint64 error: %w", fieldName, parseError)
                 }
             } else if metricValue, parseError = number.Int64(); parseError != nil {
-                //try parse float
+                // try parse float
                 if metricValue, parseError = number.Float64(); parseError != nil {
                     parseError = fmt.Errorf("stat %q value %q is not valid number: %w", fieldName, value, parseError)
                 }

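The branch above reads a json.Number three ways: bitmap stats (flag "b") as uint64 via strconv, everything else as int64 with a float64 fallback. Isolated, the fallback chain looks like this:

number := json.Number("8.5")
var metricValue interface{}
if v, err := number.Int64(); err == nil {
    metricValue = v // integral value
} else if f, err := number.Float64(); err == nil {
    metricValue = f // non-integral value such as 8.5
} else {
    // neither parse succeeded: the stat is not a valid number
}
_ = metricValue // would be written to the accumulator here
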
@@ -291,7 +291,7 @@ func (s *Varnish) processMetricsV2(activeVcl string, acc telegraf.Accumulator, o

         metric := s.parseMetricV2(fieldName)
         if metric.vclName != "" && activeVcl != "" && metric.vclName != activeVcl {
-            //skip not active vcl
+            // skip not active vcl
             continue
         }

@@ -336,7 +336,7 @@ func getActiveVCLJson(out io.Reader) (string, error) {
                 return s["name"].(string), nil
             }
         default:
-            //ignore
+            // ignore
             continue
         }
     }

@@ -345,7 +345,7 @@ func getActiveVCLJson(out io.Reader) (string, error) {

 // Gets the "counters" section from varnishstat json (there is change in schema structure in varnish 6.5+)
 func getCountersJSON(rootJSON map[string]interface{}) map[string]interface{} {
-    //version 1 contains "counters" wrapper
+    // version 1 contains "counters" wrapper
     if counters, exists := rootJSON["counters"]; exists {
         return counters.(map[string]interface{})
     }

@@ -364,7 +364,7 @@ func (s *Varnish) parseMetricV2(name string) (metric varnishMetric) {
         "section": section,
     }

-    //parse name using regexpsCompiled
+    // parse name using regexpsCompiled
     for _, re := range s.regexpsCompiled {
         submatch := re.FindStringSubmatch(name)
         if len(submatch) < 1 {

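Every pattern in defaultRegexps leans on named capture groups ((?P<_vcl>...), (?P<backend>...), (?P<_field>...)), which is what lets the single parse loop above fill metric fields generically. A small demonstration of recovering the groups with SubexpNames, using one of the patterns and the example name from the comments:

re := regexp.MustCompile(`^VBE\.(?P<_vcl>[\w\-]*)\.(?P<backend>[\w\-]*)\.([\w\-]*)`)
submatch := re.FindStringSubmatch("VBE.reload_20210622_153544_23757.default.unhealthy")
groups := make(map[string]string)
for i, name := range re.SubexpNames() {
    if i > 0 && i < len(submatch) && name != "" {
        groups[name] = submatch[i]
    }
}
// groups["_vcl"] == "reload_20210622_153544_23757", groups["backend"] == "default"
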
@@ -98,7 +98,7 @@ type objectRef struct {
     name      string
     altID     string
     ref       types.ManagedObjectReference
-    parentRef *types.ManagedObjectReference //Pointer because it must be nillable
+    parentRef *types.ManagedObjectReference // Pointer because it must be nillable
     guest     string
     dcname    string
     rpname    string

@@ -734,13 +734,13 @@ func getResourcePools(ctx context.Context, e *Endpoint, resourceFilter *Resource
 }

 func getResourcePoolName(rp types.ManagedObjectReference, rps objectMap) string {
-    //Loop through the Resource Pools objectmap to find the corresponding one
+    // Loop through the Resource Pools objectmap to find the corresponding one
     for _, r := range rps {
         if r.ref == rp {
             return r.name
         }
     }
-    return "Resources" //Default value
+    return "Resources" // Default value
 }

 // noinspection GoUnusedParameter

@@ -777,7 +777,7 @@ func getVMs(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (o
     if err != nil {
         return nil, err
     }
-    //Create a ResourcePool Filter and get the list of Resource Pools
+    // Create a ResourcePool Filter and get the list of Resource Pools
     rprf := ResourceFilter{
         finder:  &Finder{client},
         resType: "ResourcePool",

@@ -165,7 +165,7 @@ func getCmmdsMap(ctx context.Context, client *vim25.Client, clusterObj *object.C
         {Type: "DISK"},
     }

-    //Some esx host can be down or in maintenance mode. Hence cmmds query might fail on such hosts.
+    // Some esx host can be down or in maintenance mode. Hence cmmds query might fail on such hosts.
     // We iterate until be get proper api response
     var resp *types.QueryCmmdsResponse
     for _, host := range hosts {

@@ -149,7 +149,7 @@ func createSim(folders int) (*simulator.Model, *simulator.Server, error) {

     model.Folder = folders
     model.Datacenter = 2
-    //model.App = 1
+    // model.App = 1

     err := model.Create()
     if err != nil {

@@ -46,47 +46,47 @@ type pdhFmtCountervalueItemDouble struct {

 // pdhCounterInfo structure contains information describing the properties of a counter. This information also includes the counter path.
 type pdhCounterInfo struct {
-    //Size of the structure, including the appended strings, in bytes.
+    // Size of the structure, including the appended strings, in bytes.
     DwLength uint32
-    //Counter type. For a list of counter types,
-    //see the Counter Types section of the <a "href=http://go.microsoft.com/fwlink/p/?linkid=84422">Windows Server 2003 Deployment Kit</a>.
-    //The counter type constants are defined in Winperf.h.
+    // Counter type. For a list of counter types,
+    // see the Counter Types section of the <a "href=http://go.microsoft.com/fwlink/p/?linkid=84422">Windows Server 2003 Deployment Kit</a>.
+    // The counter type constants are defined in Winperf.h.
     DwType uint32
-    //Counter version information. Not used.
+    // Counter version information. Not used.
     CVersion uint32
-    //Counter status that indicates if the counter value is valid. For a list of possible values,
-    //see <a href="https://msdn.microsoft.com/en-us/library/windows/desktop/aa371894(v=vs.85).aspx">Checking PDH Interface Return Values</a>.
+    // Counter status that indicates if the counter value is valid. For a list of possible values,
+    // see <a href="https://msdn.microsoft.com/en-us/library/windows/desktop/aa371894(v=vs.85).aspx">Checking PDH Interface Return Values</a>.
     CStatus uint32
-    //Scale factor to use when computing the displayable value of the counter. The scale factor is a power of ten.
-    //The valid range of this parameter is PDH_MIN_SCALE (–7) (the returned value is the actual value times 10–⁷) to
-    //PDH_MAX_SCALE (+7) (the returned value is the actual value times 10⁺⁷). A value of zero will set the scale to one, so that the actual value is returned
+    // Scale factor to use when computing the displayable value of the counter. The scale factor is a power of ten.
+    // The valid range of this parameter is PDH_MIN_SCALE (–7) (the returned value is the actual value times 10–⁷) to
+    // PDH_MAX_SCALE (+7) (the returned value is the actual value times 10⁺⁷). A value of zero will set the scale to one, so that the actual value is returned
     LScale int32
-    //Default scale factor as suggested by the counter's provider.
+    // Default scale factor as suggested by the counter's provider.
     LDefaultScale int32
-    //The value passed in the dwUserData parameter when calling PdhAddCounter.
+    // The value passed in the dwUserData parameter when calling PdhAddCounter.
     DwUserData *uint32
-    //The value passed in the dwUserData parameter when calling PdhOpenQuery.
+    // The value passed in the dwUserData parameter when calling PdhOpenQuery.
     DwQueryUserData *uint32
-    //Null-terminated string that specifies the full counter path. The string follows this structure in memory.
+    // Null-terminated string that specifies the full counter path. The string follows this structure in memory.
     SzFullPath *uint16 // pointer to a string
-    //Null-terminated string that contains the name of the computer specified in the counter path. Is NULL, if the path does not specify a computer.
-    //The string follows this structure in memory.
+    // Null-terminated string that contains the name of the computer specified in the counter path. Is NULL, if the path does not specify a computer.
+    // The string follows this structure in memory.
     SzMachineName *uint16 // pointer to a string
-    //Null-terminated string that contains the name of the performance object specified in the counter path. The string follows this structure in memory.
+    // Null-terminated string that contains the name of the performance object specified in the counter path. The string follows this structure in memory.
     SzObjectName *uint16 // pointer to a string
-    //Null-terminated string that contains the name of the object instance specified in the counter path. Is NULL, if the path does not specify an instance.
-    //The string follows this structure in memory.
+    // Null-terminated string that contains the name of the object instance specified in the counter path. Is NULL, if the path does not specify an instance.
+    // The string follows this structure in memory.
     SzInstanceName *uint16 // pointer to a string
-    //Null-terminated string that contains the name of the parent instance specified in the counter path.
-    //Is NULL, if the path does not specify a parent instance. The string follows this structure in memory.
+    // Null-terminated string that contains the name of the parent instance specified in the counter path.
+    // Is NULL, if the path does not specify a parent instance. The string follows this structure in memory.
     SzParentInstance *uint16 // pointer to a string
-    //Instance index specified in the counter path. Is 0, if the path does not specify an instance index.
+    // Instance index specified in the counter path. Is 0, if the path does not specify an instance index.
     DwInstanceIndex uint32 // pointer to a string
-    //Null-terminated string that contains the counter name. The string follows this structure in memory.
+    // Null-terminated string that contains the counter name. The string follows this structure in memory.
     SzCounterName *uint16 // pointer to a string
-    //Help text that describes the counter. Is NULL if the source is a log file.
+    // Help text that describes the counter. Is NULL if the source is a log file.
     SzExplainText *uint16 // pointer to a string
-    //Start of the string data that is appended to the structure.
+    // Start of the string data that is appended to the structure.
     DataBuffer [1]uint32 // pointer to an extra space
 }

@@ -110,6 +110,6 @@ type pdhRawCounter struct {
 type pdhRawCounterItem struct {
     // Pointer to a null-terminated string that specifies the instance name of the counter. The string is appended to the end of this structure.
     SzName *uint16
-    //A pdhRawCounter structure that contains the raw counter value of the instance
+    // A pdhRawCounter structure that contains the raw counter value of the instance
     RawValue pdhRawCounter
 }

@@ -425,7 +425,7 @@ func (m *WinPerfCounters) Gather(acc telegraf.Accumulator) error {
         return err
     }
     for _, hostCounterSet := range m.hostCounters {
-        //some counters need two data samples before computing a value
+        // some counters need two data samples before computing a value
         if err = hostCounterSet.query.CollectData(); err != nil {
             return m.checkError(err)
         }

@@ -449,7 +449,7 @@ func (m *WinPerfCounters) Gather(acc telegraf.Accumulator) error {
         }
     }
     var wg sync.WaitGroup
-    //iterate over computers
+    // iterate over computers
     for _, hostCounterInfo := range m.hostCounters {
         wg.Add(1)
         go func(hostInfo *hostCountersInfo) {

@@ -482,7 +482,7 @@ func (m *WinPerfCounters) gatherComputerCounters(hostCounterInfo *hostCountersIn
             value, err = hostCounterInfo.query.GetFormattedCounterValueDouble(metric.counterHandle)
         }
         if err != nil {
-            //ignore invalid data as some counters from process instances returns this sometimes
+            // ignore invalid data as some counters from process instances returns this sometimes
             if !isKnownCounterDataError(err) {
                 return fmt.Errorf("error while getting value for counter %q: %w", metric.counterPath, err)
             }

@@ -498,7 +498,7 @@ func (m *WinPerfCounters) gatherComputerCounters(hostCounterInfo *hostCountersIn
             counterValues, err = hostCounterInfo.query.GetFormattedCounterArrayDouble(metric.counterHandle)
         }
         if err != nil {
-            //ignore invalid data as some counters from process instances returns this sometimes
+            // ignore invalid data as some counters from process instances returns this sometimes
            if !isKnownCounterDataError(err) {
                 return fmt.Errorf("error while getting value for counter %q: %w", metric.counterPath, err)
             }

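The "two data samples" comment in the first hunk is the constraint driving this Gather sequence: PDH rate counters only yield a value from the delta between two collections, so one collection has to happen before formatted values are read. Schematically, against a stand-in for the plugin's internal query interface (names here are illustrative, not the plugin's exact API):

type performanceQuery interface {
    CollectData() error
    GetFormattedCounterValueDouble(handle uintptr) (float64, error)
}

func sampleRateCounter(q performanceQuery, handle uintptr) (float64, error) {
    // rate counters need two samples: collect, wait an interval, collect again
    if err := q.CollectData(); err != nil {
        return 0, err
    }
    time.Sleep(time.Second)
    if err := q.CollectData(); err != nil {
        return 0, err
    }
    return q.GetFormattedCounterValueDouble(handle)
}
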
@@ -1514,7 +1514,7 @@ func TestGatherRefreshingWithExpansion(t *testing.T) {
         "source": hostname(),
     }

-    //test before elapsing CounterRefreshRate counters are not refreshed
+    // test before elapsing CounterRefreshRate counters are not refreshed
     err = m.Gather(&acc2)
     require.NoError(t, err)
     counters, ok = m.hostCounters["localhost"]

@@ -1594,7 +1594,7 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) {
         "source": hostname(),
     }
     acc1.AssertContainsTaggedFields(t, measurement, fields2, tags2)
-    //test finding new instance
+    // test finding new instance
     cps2 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2", "\\O(I3)\\C1", "\\O(I3)\\C2"}
     fpm = &FakePerformanceQuery{
         counters: createCounterMap(

@@ -1628,7 +1628,7 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) {
         "source": hostname(),
     }

-    //test before elapsing CounterRefreshRate counters are not refreshed
+    // test before elapsing CounterRefreshRate counters are not refreshed

     err = m.Gather(&acc2)
     require.NoError(t, err)

@@ -1640,7 +1640,7 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) {
     acc2.AssertContainsTaggedFields(t, measurement, fields1, tags1)
     acc2.AssertContainsTaggedFields(t, measurement, fields2, tags2)
     acc2.AssertContainsTaggedFields(t, measurement, fields3, tags3)
-    //test changed configuration
+    // test changed configuration
     perfObjects = createPerfObject("", measurement, "O", []string{"*"}, []string{"C1", "C2", "C3"}, true, false, false)
     cps3 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I1)\\C3", "\\O(I2)\\C1", "\\O(I2)\\C2", "\\O(I2)\\C3"}
     fpm = &FakePerformanceQuery{

@@ -1963,7 +1963,7 @@ func TestGatherRaw(t *testing.T) {

     counters, ok = m.hostCounters["localhost"]
     require.True(t, ok)
-    require.Len(t, counters.counters, 4) //expanded counters
+    require.Len(t, counters.counters, 4) // expanded counters
     require.Len(t, acc2.Metrics, 2)

     acc2.AssertContainsTaggedFields(t, measurement, fields1, tags1)

@@ -2053,9 +2053,9 @@ func TestLocalizeWildcardsExpansion(t *testing.T) {
     require.NoError(t, m.Gather(&acc))
     require.Len(t, acc.Metrics, 1)

-    //running on localized windows with UseWildcardsExpansion and
-    //with LocalizeWildcardsExpansion, this will be localized. Using LocalizeWildcardsExpansion=false it will
-    //be English.
+    // running on localized windows with UseWildcardsExpansion and
+    // with LocalizeWildcardsExpansion, this will be localized. Using LocalizeWildcardsExpansion=false it will
+    // be English.
     require.Contains(t, acc.Metrics[0].Fields, sanitizedChars.Replace(counter))
 }

@@ -167,7 +167,7 @@ func (m *WinServices) Gather(acc telegraf.Accumulator) error {
         tags := map[string]string{
             "service_name": service.ServiceName,
         }
-        //display name could be empty, but still valid service
+        // display name could be empty, but still valid service
         if len(service.DisplayName) > 0 {
             tags["display_name"] = service.DisplayName
         }

@@ -18,7 +18,7 @@ import (

 // testData is DD wrapper for unit testing of WinServices
 type testData struct {
-    //collection that will be returned in ListServices if service array passed into WinServices constructor is empty
+    // collection that will be returned in ListServices if service array passed into WinServices constructor is empty
     queryServiceList     []string
     mgrConnectError      error
     mgrListServicesError error

@@ -124,7 +124,7 @@ var testErrors = []testData{
 }

 func TestMgrErrors(t *testing.T) {
-    //mgr.connect error
+    // mgr.connect error
     winServices := &WinServices{
         Log:         testutil.Logger{},
         mgrProvider: &FakeMgProvider{testErrors[0]},

@@ -134,7 +134,7 @@ func TestMgrErrors(t *testing.T) {
     require.Error(t, err)
     require.Contains(t, err.Error(), testErrors[0].mgrConnectError.Error())

-    ////mgr.listServices error
+    // mgr.listServices error
     winServices = &WinServices{
         Log:         testutil.Logger{},
         mgrProvider: &FakeMgProvider{testErrors[1]},

@@ -144,7 +144,7 @@ func TestMgrErrors(t *testing.T) {
     require.Error(t, err)
     require.Contains(t, err.Error(), testErrors[1].mgrListServicesError.Error())

-    ////mgr.listServices error 2
+    // mgr.listServices error 2
     winServices = &WinServices{
         Log:          testutil.Logger{},
         ServiceNames: []string{"Fake service 1"},

@@ -174,11 +174,11 @@ func TestServiceErrors(t *testing.T) {
     log.SetOutput(buf)
     require.NoError(t, winServices.Gather(&acc1))

-    //open service error
+    // open service error
     require.Contains(t, buf.String(), testErrors[2].services[0].serviceOpenError.Error())
-    //query service error
+    // query service error
     require.Contains(t, buf.String(), testErrors[2].services[1].serviceQueryError.Error())
-    //config service error
+    // config service error
     require.Contains(t, buf.String(), testErrors[2].services[2].serviceConfigError.Error())
 }

@@ -252,7 +252,7 @@ func TestZfsPoolMetrics(t *testing.T) {
     err = z.Gather(&acc)
     require.NoError(t, err)

-    //one pool, all metrics
+    // one pool, all metrics
     tags := map[string]string{
         "pool": "HOME",
     }

@@ -318,7 +318,7 @@ func TestZfsGeneratesMetrics(t *testing.T) {

     var acc testutil.Accumulator

-    //one pool, all metrics
+    // one pool, all metrics
     tags := map[string]string{
         "pools": "HOME",
     }

@@ -330,7 +330,7 @@ func TestZfsGeneratesMetrics(t *testing.T) {
     acc.AssertContainsTaggedFields(t, "zfs", intMetrics, tags)
     acc.Metrics = nil

-    //two pools, all metrics
+    // two pools, all metrics
     err = os.MkdirAll(testKstatPath+"/STORAGE", 0750)
     require.NoError(t, err)

@@ -351,7 +351,7 @@ func TestZfsGeneratesMetrics(t *testing.T) {

     intMetrics = getKstatMetricsArcOnly()

-    //two pools, one metric
+    // two pools, one metric
     z = &Zfs{KstatPath: testKstatPath, KstatMetrics: []string{"arcstats"}}
     acc3 := testutil.Accumulator{}
     err = z.Gather(&acc3)

@@ -262,7 +262,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
             wantErr: false,
         },

-        //// Test data from distributed trace repo sample json
+        // Test data from distributed trace repo sample json
         // https://github.com/mattkanwisher/distributedtrace/blob/master/testclient/sample.json
         {
             name: "distributed_trace_sample",

@@ -53,7 +53,7 @@ func TestZipkinPlugin(t *testing.T) {
                 "trace_id":     "22c4fc8ab3669045",
                 "name":         "child",
                 "service_name": "trivial",
-                "annotation":   "trivial", //base64: dHJpdmlhbA==
+                "annotation":   "trivial", // base64: dHJpdmlhbA==
                 "endpoint_host":  "127.0.0.1",
                 "annotation_key": "lc",
             },

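The //base64: dHJpdmlhbA== notes record the encoded form in which the annotation value arrived on the wire; the decode checks out:

b, _ := base64.StdEncoding.DecodeString("dHJpdmlhbA==")
fmt.Println(string(b)) // trivial
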
@@ -86,7 +86,7 @@ func TestZipkinPlugin(t *testing.T) {
                 "trace_id":     "22c4fc8ab3669045",
                 "name":         "child",
                 "service_name": "trivial",
-                "annotation":   "trivial", //base64: dHJpdmlhbA==
+                "annotation":   "trivial", // base64: dHJpdmlhbA==
                 "endpoint_host":  "127.0.0.1",
                 "annotation_key": "lc",
             },

@@ -167,7 +167,7 @@ func TestZipkinPlugin(t *testing.T) {
             Tags: map[string]string{
                 "trace_id":     "22c4fc8ab3669045",
                 "service_name": "trivial",
-                "annotation":   "trivial", //base64: dHJpdmlhbA==
+                "annotation":   "trivial", // base64: dHJpdmlhbA==
                 "annotation_key": "lc",
                 "id":             "5195e96239641e",
                 "parent_id":      "5195e96239641e",

@@ -618,7 +618,7 @@ func TestZipkinPlugin(t *testing.T) {
             }
             mockAcc.Wait(
                 len(tt.want),
-            ) //Since the server is running concurrently, we need to wait for the number of data points we want to test to be added to the Accumulator.
+            ) // Since the server is running concurrently, we need to wait for the number of data points we want to test to be added to the Accumulator.
             if len(mockAcc.Errors) > 0 != tt.wantErr {
                 t.Fatalf("Got unexpected errors. want error = %v, errors = %v\n", tt.wantErr, mockAcc.Errors)
             }
