chore: Bump golangci-lint from v1.50.1 to v1.51.0 (#12605)
Co-authored-by: Pawel Zak <Pawel Zak>
This commit is contained in:
parent e466cab0c0
commit 17c77df228
@@ -25,5 +25,5 @@ jobs:
       - name: golangci-lint
         uses: golangci/golangci-lint-action@v3
         with:
-          version: v1.50.1
+          version: v1.51.0
           args: --timeout 15m0s --verbose
@@ -11,6 +11,7 @@ linters:
     - errcheck
     - errname
     - exportloopref
+    - gocheckcompilerdirectives
     - goprintffuncname
     - gosimple
    - govet
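Note: gocheckcompilerdirectives is newly available in golangci-lint v1.51.0; it reports //go: compiler directives that are malformed and therefore silently ignored. A minimal, hypothetical sketch of the kind of mistake it is meant to catch (the stringer directive here is only illustrative):

```go
package main

import "fmt"

// go:generate stringer -type=Level
// ^ the space after "//" turns the directive into an ordinary comment;
//   gocheckcompilerdirectives reports it so the typo is not silently ignored.

//go:generate stringer -type=Level
// ^ valid form: no space between "//" and "go:".

func main() {
	fmt.Println("compiler directives are comments; only the linter inspects them")
}
```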
@@ -130,7 +131,7 @@ linters-settings:
       - name: unconditional-recursion
       - name: unexported-naming
       - name: unhandled-error
-        arguments: [ "outputBuffer.Write", "fmt.Printf", "fmt.Println", "fmt.Print", "fmt.Fprintf", "fmt.Fprint", "fmt.Fprintln" ]
+        arguments: [ "fmt.Printf", "fmt.Println", "fmt.Print", "fmt.Fprintf", "fmt.Fprint", "fmt.Fprintln" ]
      - name: unnecessary-stmt
      - name: unreachable-code
      - name: unused-parameter
@@ -183,6 +184,9 @@ issues:
     - path: cmd/telegraf/(main|printer).go
       text: "Error return value of `outputBuffer.Write` is not checked"
 
+    - path: cmd/telegraf/(main|printer).go
+      text: "unhandled-error: Unhandled error in call to function outputBuffer.Write"
+
 # output configuration options
 output:
   # Format: colored-line-number|line-number|json|tab|checkstyle|code-climate|junit-xml|github-actions
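Together with the revive change above, outputBuffer.Write is no longer whitelisted globally for the unhandled-error rule; only cmd/telegraf/(main|printer).go keeps a per-path exclusion. A self-contained sketch of what the rule reports, using an illustrative bytes.Buffer named outputBuffer:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	var outputBuffer bytes.Buffer

	// fmt.Println stays listed in the rule's arguments, so its ignored
	// error return is tolerated everywhere.
	fmt.Println("hello")

	// Buffer.Write also returns (int, error); with "outputBuffer.Write"
	// removed from the arguments, an unchecked call like this would be
	// reported as unhandled-error unless the file matches the
	// cmd/telegraf/(main|printer).go exclusions above.
	outputBuffer.Write([]byte("world"))
}
```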
Makefile
@@ -167,7 +167,7 @@ vet:
 .PHONY: lint-install
 lint-install:
 	@echo "Installing golangci-lint"
-	go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.50.1
+	go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.0
 
 	@echo "Installing markdownlint"
 	npm install -g markdownlint-cli
@@ -30,12 +30,8 @@ func (a CollectionByKeys) IsAvailable(tags map[string]string) *DataGroup {
 
 		matchFound := true
 		for k, v := range tags {
-			if val, ok := group.tags[k]; ok {
-				if val != v {
-					matchFound = false
-					break
-				}
-			} else {
+			val, ok := group.tags[k]
+			if !ok || val != v {
 				matchFound = false
 				break
 			}
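The hunk above collapses a nested comma-ok if/else into a single lookup with one combined condition. A stand-alone sketch of the same pattern, with invented names (matches, have, want):

```go
package main

import "fmt"

// matches reports whether every key/value pair in want is present in have.
// One comma-ok lookup plus a combined condition replaces the nested if/else.
func matches(have, want map[string]string) bool {
	for k, v := range want {
		val, ok := have[k]
		if !ok || val != v {
			return false
		}
	}
	return true
}

func main() {
	group := map[string]string{"region": "eu", "host": "a"}
	fmt.Println(matches(group, map[string]string{"region": "eu"})) // true
	fmt.Println(matches(group, map[string]string{"region": "us"})) // false
}
```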
@@ -165,15 +165,15 @@ func (l *LeoFS) Gather(acc telegraf.Accumulator) error {
 
 		port := "4020"
 		if len(results) > 2 {
-			acc.AddError(fmt.Errorf("Unable to parse address %q", endpoint))
+			acc.AddError(fmt.Errorf("unable to parse address %q", endpoint))
 			continue
 		} else if len(results) == 2 {
-			if _, err := strconv.Atoi(results[1]); err == nil {
-				port = results[1]
-			} else {
-				acc.AddError(fmt.Errorf("Unable to parse port from %q", endpoint))
+			_, err := strconv.Atoi(results[1])
+			if err != nil {
+				acc.AddError(fmt.Errorf("unable to parse port from %q", endpoint))
 				continue
 			}
+			port = results[1]
 		}
 
 		st, ok := serverTypeMapping[port]
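This hunk does two things: error strings become lower-case (so they compose well when wrapped), and the port check becomes a guard clause that bails out before assigning. A runnable sketch of the same shape, with an invented parsePort helper:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parsePort validates first, returns early on failure, then assigns.
func parsePort(endpoint string) (string, error) {
	results := strings.Split(endpoint, ":")
	port := "4020"
	if len(results) > 2 {
		// error strings start lower-case so they read well when wrapped
		return "", fmt.Errorf("unable to parse address %q", endpoint)
	}
	if len(results) == 2 {
		if _, err := strconv.Atoi(results[1]); err != nil {
			return "", fmt.Errorf("unable to parse port from %q", endpoint)
		}
		port = results[1]
	}
	return port, nil
}

func main() {
	fmt.Println(parsePort("leo1:4021"))
	fmt.Println(parsePort("leo1:not-a-port"))
}
```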
@@ -92,11 +92,10 @@ func (pf *PF) parsePfctlOutput(pfoutput string, acc telegraf.Accumulator) error
 		for !anyTableHeaderRE.MatchString(line) {
 			stanzaLines = append(stanzaLines, line)
 			more := scanner.Scan()
-			if more {
-				line = scanner.Text()
-			} else {
+			if !more {
 				break
 			}
+			line = scanner.Text()
 		}
 		if perr := s.ParseFunc(stanzaLines, fields); perr != nil {
 			return perr
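The scanner loop now checks the terminating condition first and breaks, instead of nesting the happy path in an if/else. A small stand-alone sketch of that early-break shape, with invented names (collectStanza, stop):

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

// collectStanza gathers lines until stop matches, breaking early when the
// scanner runs out of input instead of branching on the success case.
func collectStanza(scanner *bufio.Scanner, stop func(string) bool) []string {
	var lines []string
	line := scanner.Text()
	for !stop(line) {
		lines = append(lines, line)
		if !scanner.Scan() {
			break
		}
		line = scanner.Text()
	}
	return lines
}

func main() {
	s := bufio.NewScanner(strings.NewReader("a\nb\nEND\nc"))
	s.Scan() // position on the first line
	fmt.Println(collectStanza(s, func(l string) bool { return l == "END" })) // [a b]
}
```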
@@ -147,9 +147,7 @@ func (p *Postgresql) gatherMetricsFromQuery(acc telegraf.Accumulator, sqlQuery s
 	p.AdditionalTags = nil
 	if tagValue != "" {
 		tagList := strings.Split(tagValue, ",")
-		for t := range tagList {
-			p.AdditionalTags = append(p.AdditionalTags, tagList[t])
-		}
+		p.AdditionalTags = append(p.AdditionalTags, tagList...)
 	}
 
 	p.Timestamp = timestamp
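The element-by-element loop is replaced by a single variadic append of the split result. A minimal illustration with made-up tag data:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	tagValue := "db,replica,primary"
	var additionalTags []string

	// Appending the whole slice with the ... spread replaces the
	// element-by-element loop removed above.
	if tagValue != "" {
		additionalTags = append(additionalTags, strings.Split(tagValue, ",")...)
	}
	fmt.Println(additionalTags) // [db replica primary]
}
```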
@@ -384,96 +384,95 @@ func (h *Host) SNMPMap(
 	lastOid := ""
 	for _, variable := range result.Variables {
 		lastOid = variable.Name
-		if strings.HasPrefix(variable.Name, oidAsked) {
-			switch variable.Type {
-			// handle instance names
-			case gosnmp.OctetString:
-				// Check if instance is in includes instances
-				getInstances := true
-				if len(table.IncludeInstances) > 0 {
-					getInstances = false
-					for _, instance := range table.IncludeInstances {
-						if instance == string(variable.Value.([]byte)) {
-							getInstances = true
-						}
-					}
-				}
-				// Check if instance is in excludes instances
-				if len(table.ExcludeInstances) > 0 {
-					getInstances = true
-					for _, instance := range table.ExcludeInstances {
-						if instance == string(variable.Value.([]byte)) {
-							getInstances = false
-						}
-					}
-				}
-				// We don't want this instance
-				if !getInstances {
-					continue
-				}
-
-				// remove oid table from the complete oid
-				// in order to get the current instance id
-				key := strings.Replace(variable.Name, oidAsked, "", 1)
-
-				if len(table.subTables) == 0 {
-					// We have a mapping table
-					// but no subtables
-					// This is just a bulk request
-
-					// Building mapping table
-					mapping := map[string]string{strings.Trim(key, "."): string(variable.Value.([]byte))}
-					_, exists := h.OidInstanceMapping[table.oid]
-					if exists {
-						h.OidInstanceMapping[table.oid][strings.Trim(key, ".")] = string(variable.Value.([]byte))
-					} else {
-						h.OidInstanceMapping[table.oid] = mapping
-					}
-
-					// Add table oid in bulk oid list
-					oid := Data{}
-					oid.Oid = table.oid
-					if val, ok := nameToOid[oid.Oid]; ok {
-						oid.rawOid = "." + val
-					} else {
-						oid.rawOid = oid.Oid
-					}
-					h.bulkOids = append(h.bulkOids, oid)
-				} else {
-					// We have a mapping table
-					// and some subtables
-					// This is a bunch of get requests
-					// This is the best case :)
-
-					// For each subtable ...
-					for _, sb := range table.subTables {
-						// ... we create a new Data (oid) object
-						oid := Data{}
-						// Looking for more information about this subtable
-						ssb, exists := subTableMap[sb]
-						if exists {
-							// We found a subtable section in config files
-							oid.Oid = ssb.Oid + key
-							oid.rawOid = ssb.Oid + key
-							oid.Unit = ssb.Unit
-							oid.Instance = string(variable.Value.([]byte))
-						} else {
-							// We did NOT find a subtable section in config files
-							oid.Oid = sb + key
-							oid.rawOid = sb + key
-							oid.Instance = string(variable.Value.([]byte))
-						}
-						// TODO check oid validity
-
-						// Add the new oid to internalGetOids list
-						h.internalGetOids = append(h.internalGetOids, oid)
-					}
-				}
-			default:
-			}
-		} else {
+		if !strings.HasPrefix(variable.Name, oidAsked) {
 			break
 		}
+		switch variable.Type {
+		// handle instance names
+		case gosnmp.OctetString:
+			// Check if instance is in includes instances
+			getInstances := true
+			if len(table.IncludeInstances) > 0 {
+				getInstances = false
+				for _, instance := range table.IncludeInstances {
+					if instance == string(variable.Value.([]byte)) {
+						getInstances = true
+					}
+				}
+			}
+			// Check if instance is in excludes instances
+			if len(table.ExcludeInstances) > 0 {
+				getInstances = true
+				for _, instance := range table.ExcludeInstances {
+					if instance == string(variable.Value.([]byte)) {
+						getInstances = false
+					}
+				}
+			}
+			// We don't want this instance
+			if !getInstances {
+				continue
+			}
+
+			// remove oid table from the complete oid
+			// in order to get the current instance id
+			key := strings.Replace(variable.Name, oidAsked, "", 1)
+
+			if len(table.subTables) == 0 {
+				// We have a mapping table
+				// but no subtables
+				// This is just a bulk request
+
+				// Building mapping table
+				mapping := map[string]string{strings.Trim(key, "."): string(variable.Value.([]byte))}
+				_, exists := h.OidInstanceMapping[table.oid]
+				if exists {
+					h.OidInstanceMapping[table.oid][strings.Trim(key, ".")] = string(variable.Value.([]byte))
+				} else {
+					h.OidInstanceMapping[table.oid] = mapping
+				}
+
+				// Add table oid in bulk oid list
+				oid := Data{}
+				oid.Oid = table.oid
+				if val, ok := nameToOid[oid.Oid]; ok {
+					oid.rawOid = "." + val
+				} else {
+					oid.rawOid = oid.Oid
+				}
+				h.bulkOids = append(h.bulkOids, oid)
+			} else {
+				// We have a mapping table
+				// and some subtables
+				// This is a bunch of get requests
+				// This is the best case :)
+
+				// For each subtable ...
+				for _, sb := range table.subTables {
+					// ... we create a new Data (oid) object
+					oid := Data{}
+					// Looking for more information about this subtable
+					ssb, exists := subTableMap[sb]
+					if exists {
+						// We found a subtable section in config files
+						oid.Oid = ssb.Oid + key
+						oid.rawOid = ssb.Oid + key
+						oid.Unit = ssb.Unit
+						oid.Instance = string(variable.Value.([]byte))
+					} else {
+						// We did NOT find a subtable section in config files
+						oid.Oid = sb + key
+						oid.rawOid = sb + key
+						oid.Instance = string(variable.Value.([]byte))
+					}
+					// TODO check oid validity
+
+					// Add the new oid to internalGetOids list
+					h.internalGetOids = append(h.internalGetOids, oid)
+				}
+			}
+		default:
+		}
 	}
 	// Determine if we need more requests
 	if strings.HasPrefix(lastOid, oidAsked) {
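The large hunk above only inverts the prefix check into an early break, which lets the whole switch body drop one indentation level; the body itself is unchanged. A tiny sketch of that guard-clause rewrite, with an invented collectWithPrefix helper:

```go
package main

import (
	"fmt"
	"strings"
)

// collectWithPrefix mirrors the rewrite above: instead of wrapping the body
// in `if strings.HasPrefix(...) { ... } else { break }`, the negated check
// breaks early and the body stays at one indentation level.
func collectWithPrefix(names []string, prefix string) []string {
	var out []string
	for _, name := range names {
		if !strings.HasPrefix(name, prefix) {
			break
		}
		out = append(out, name)
	}
	return out
}

func main() {
	oids := []string{".1.3.6.1.2.1", ".1.3.6.1.2.2", ".1.3.6.2.9"}
	fmt.Println(collectWithPrefix(oids, ".1.3.6.1")) // [.1.3.6.1.2.1 .1.3.6.1.2.2]
}
```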
@@ -659,13 +658,12 @@ func (h *Host) HandleResponse(
 			// From mapping table
 			mapping, inMappingNoSubTable := h.OidInstanceMapping[oidKey]
 			if inMappingNoSubTable {
-				// filter if the instance in not in
-				// OidInstanceMapping mapping map
-				if instanceName, exists := mapping[instance]; exists {
-					tags["instance"] = instanceName
-				} else {
+				// filter if the instance in not in OidInstanceMapping mapping map
+				instanceName, exists := mapping[instance]
+				if !exists {
 					continue
 				}
+				tags["instance"] = instanceName
 			} else if oid.Instance != "" {
 				// From config files
 				tags["instance"] = oid.Instance
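Same idea as the earlier map-lookup rewrites: look the instance up once, skip the iteration when it is missing, then use the value. A minimal stand-alone sketch with made-up mapping data:

```go
package main

import "fmt"

func main() {
	mapping := map[string]string{"1": "eth0", "3": "lo"}
	instances := []string{"1", "2", "3"}

	// Look the key up once, continue when it is absent, then use the value.
	for _, instance := range instances {
		name, exists := mapping[instance]
		if !exists {
			continue
		}
		fmt.Println("instance", instance, "->", name)
	}
}
```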
@@ -196,9 +196,7 @@ func setSystemctl(timeout config.Duration, unitType string, pattern string) (*by
 	// create patterns parameters if provided in config
 	if pattern != "" {
 		psplit := strings.SplitN(pattern, " ", -1)
-		for v := range psplit {
-			params = append(params, psplit[v])
-		}
+		params = append(params, psplit...)
 	}
 	params = append(params, "--all", "--plain")
 	// add type as configured in config
@@ -213,18 +213,18 @@ func (g *Graphite) send(batch []byte) error {
 			g.failedServers = append(g.failedServers, g.conns[n].RemoteAddr().String())
 			break
 		}
-		if _, e := g.conns[n].Write(batch); e != nil {
-			// Error
-			g.Log.Debugf("Graphite Error: " + e.Error())
-			// Close explicitly and let's try the next one
-			err := g.conns[n].Close()
-			g.Log.Debugf("Failed to close the connection: %v", err)
-			// Mark server as failed so a new connection will be made
-			g.failedServers = append(g.failedServers, g.conns[n].RemoteAddr().String())
-		} else {
+		_, e := g.conns[n].Write(batch)
+		if e == nil {
 			globalErr = nil
 			break
 		}
+		// Error
+		g.Log.Debugf("Graphite Error: " + e.Error())
+		// Close explicitly and let's try the next one
+		err = g.conns[n].Close()
+		g.Log.Debugf("Failed to close the connection: %v", err)
+		// Mark server as failed so a new connection will be made
+		g.failedServers = append(g.failedServers, g.conns[n].RemoteAddr().String())
 	}
 
 	return globalErr
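The send loop is restructured so the success path breaks out immediately and the error handling sits at the top level of the loop instead of inside an else branch. A runnable sketch of that shape, with an invented trySend helper standing in for the Graphite connections:

```go
package main

import (
	"errors"
	"fmt"
)

// trySend attempts each target in turn, breaking on the first success and
// falling through to the error handling otherwise.
func trySend(targets []string, send func(string) error) error {
	globalErr := errors.New("all targets failed")
	for _, t := range targets {
		err := send(t)
		if err == nil {
			globalErr = nil
			break
		}
		fmt.Printf("sending to %s failed: %v, trying next\n", t, err)
	}
	return globalErr
}

func main() {
	err := trySend([]string{"graphite-1:2003", "graphite-2:2003"}, func(addr string) error {
		if addr == "graphite-1:2003" {
			return errors.New("connection refused")
		}
		return nil
	})
	fmt.Println("result:", err)
}
```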