chore: Fix linter findings for nolintlint (part3) (#12613)

Co-authored-by: Pawel Zak <Pawel Zak>
This commit is contained in:
Paweł Żak 2023-02-07 17:06:12 +01:00 committed by GitHub
parent 0339432753
commit 9a0cecc788
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
18 changed files with 48 additions and 87 deletions

View File

@ -21,6 +21,7 @@ linters:
- makezero
- nakedret
- nilerr
- nolintlint
- prealloc
- predeclared
- revive
@ -80,6 +81,13 @@ linters-settings:
# Tab width in spaces.
# Default: 1
tab-width: 4
nolintlint:
# Enable to require an explanation of nonzero length after each nolint directive.
# Default: false
require-explanation: true
# Enable to require nolint directives to mention the specific linter being suppressed.
# Default: false
require-specific: true
prealloc:
# Report pre-allocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them.
# Default: true

View File

@ -84,7 +84,7 @@ func (s *Shim) Run(pollInterval time.Duration) error {
if err != nil {
return fmt.Errorf("RunProcessor error: %w", err)
}
} else if s.Output != nil { //nolint:revive // Not simplifying here to stay in the structure for better understanding the code
} else if s.Output != nil {
err := s.RunOutput()
if err != nil {
return fmt.Errorf("RunOutput error: %w", err)

View File

@ -876,10 +876,7 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) {
// connection cleanup function
defer func() {
s.wg.Done()
// Ignore the returned error as we cannot do anything about it anyway
//nolint:errcheck,revive
conn.Close()
conn.Close() //nolint:revive // Ignore the returned error as we cannot do anything about it anyway
// Add one connection potential back to channel when this one closes
s.accept <- true
@ -911,11 +908,8 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) {
b := s.bufPool.Get().(*bytes.Buffer)
b.Reset()
// Writes to a bytes buffer always succeed, so do not check the errors here
//nolint:errcheck,revive
b.Write(scanner.Bytes())
//nolint:errcheck,revive
b.WriteByte('\n')
b.Write(scanner.Bytes()) //nolint:revive // Writes to a bytes buffer always succeed, so do not check the errors here
b.WriteByte('\n') //nolint:revive // Writes to a bytes buffer always succeed, so do not check the errors here
select {
case s.in <- input{Buffer: b, Time: time.Now(), Addr: remoteIP}:
@ -933,9 +927,7 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) {
// refuser refuses a TCP connection
func (s *Statsd) refuser(conn *net.TCPConn) {
// Ignore the returned error as we cannot do anything about it anyway
//nolint:errcheck,revive
conn.Close()
conn.Close() //nolint:revive // Ignore the returned error as we cannot do anything about it anyway
s.Log.Infof("Refused TCP Connection from %s", conn.RemoteAddr())
s.Log.Warn("Maximum TCP Connections reached, you may want to adjust max_tcp_connections")
}
@ -959,13 +951,9 @@ func (s *Statsd) Stop() {
s.Log.Infof("Stopping the statsd service")
close(s.done)
if s.isUDP() {
// Ignore the returned error as we cannot do anything about it anyway
//nolint:errcheck,revive
s.UDPlistener.Close()
s.UDPlistener.Close() //nolint:revive // Ignore the returned error as we cannot do anything about it anyway
} else {
// Ignore the returned error as we cannot do anything about it anyway
//nolint:errcheck,revive
s.TCPlistener.Close()
s.TCPlistener.Close() //nolint:revive // Ignore the returned error as we cannot do anything about it anyway
// Close all open TCP connections
// - get all conns from the s.conns map and put into slice
// - this is so the forget() function doesn't conflict with looping
@ -977,9 +965,7 @@ func (s *Statsd) Stop() {
}
s.cleanup.Unlock()
for _, conn := range conns {
// Ignore the returned error as we cannot do anything about it anyway
//nolint:errcheck,revive
conn.Close()
conn.Close() //nolint:revive // Ignore the returned error as we cannot do anything about it anyway
}
}
s.Unlock()

View File

@ -69,9 +69,7 @@ func (s *Suricata) Start(acc telegraf.Accumulator) error {
// Stop causes the plugin to cease collecting JSON data from the socket provided
// to Suricata.
func (s *Suricata) Stop() {
// Ignore the returned error as we cannot do anything about it anyway
//nolint:errcheck,revive
s.inputListener.Close()
s.inputListener.Close() //nolint:revive // Ignore the returned error as we cannot do anything about it anyway
if s.cancel != nil {
s.cancel()
}
@ -184,7 +182,7 @@ func (s *Suricata) parseStats(acc telegraf.Accumulator, result map[string]interf
return
}
fields := make(map[string](map[string]interface{}))
fields := make(map[string]map[string]interface{})
totalmap := make(map[string]interface{})
for k, v := range result["stats"].(map[string]interface{}) {
if k == "threads" {

View File

@ -58,9 +58,7 @@ func TestSynproxyFileInvalidHex(t *testing.T) {
func TestNoSynproxyFile(t *testing.T) {
tmpfile := makeFakeSynproxyFile([]byte(synproxyFileNormal))
// Remove file to generate "no such file" error
// Ignore errors if file does not yet exist
//nolint:errcheck,revive
os.Remove(tmpfile)
os.Remove(tmpfile) //nolint:revive // Ignore errors if file does not yet exist
k := Synproxy{
statFile: tmpfile,

View File

@ -97,9 +97,7 @@ func (s *Syslog) Start(acc telegraf.Accumulator) error {
}
if scheme == "unix" || scheme == "unixpacket" || scheme == "unixgram" {
// Accept success and failure in case the file does not exist
//nolint:errcheck,revive
os.Remove(s.Address)
os.Remove(s.Address) //nolint:revive // Accept success and failure in case the file does not exist
}
if s.isStream {
@ -141,9 +139,7 @@ func (s *Syslog) Stop() {
defer s.mu.Unlock()
if s.Closer != nil {
// Ignore the returned error as we cannot do anything about it anyway
//nolint:errcheck,revive
s.Close()
s.Close() //nolint:revive // Ignore the returned error as we cannot do anything about it anyway
}
s.wg.Wait()
}
@ -269,9 +265,7 @@ func (s *Syslog) removeConnection(c net.Conn) {
func (s *Syslog) handle(conn net.Conn, acc telegraf.Accumulator) {
defer func() {
s.removeConnection(conn)
// Ignore the returned error as we cannot do anything about it anyway
//nolint:errcheck,revive
conn.Close()
conn.Close() //nolint:revive // Ignore the returned error as we cannot do anything about it anyway
}()
var p syslog.Parser
@ -422,9 +416,7 @@ type unixCloser struct {
func (uc unixCloser) Close() error {
err := uc.closer.Close()
// Accept success and failure in case the file does not exist
//nolint:errcheck,revive
os.Remove(uc.path)
os.Remove(uc.path) //nolint:revive // Accept success and failure in case the file does not exist
return err
}

View File

@ -106,10 +106,7 @@ func formatUptime(uptime uint64) string {
minutes %= 60
fmt.Fprintf(w, "%2d:%02d", hours, minutes)
// This will always succeed, so skip checking the error
//nolint:errcheck,revive
w.Flush()
w.Flush() //nolint:revive // This will always succeed, so skip checking the error
return buf.String()
}

View File

@ -127,9 +127,8 @@ func (t *TCPListener) Stop() {
t.Lock()
defer t.Unlock()
close(t.done)
// Ignore the returned error as we cannot do anything about it anyway
//nolint:errcheck,revive
t.listener.Close()
t.listener.Close() //nolint:revive // Ignore the returned error as we cannot do anything about it anyway
// Close all open TCP connections
// - get all conns from the t.conns map and put into slice
@ -142,9 +141,7 @@ func (t *TCPListener) Stop() {
}
t.cleanup.Unlock()
for _, conn := range conns {
// Ignore the returned error as we cannot do anything about it anyway
//nolint:errcheck,revive
conn.Close()
conn.Close() //nolint:revive // Ignore the returned error as we cannot do anything about it anyway
}
t.wg.Wait()
@ -196,8 +193,8 @@ func (t *TCPListener) refuser(conn *net.TCPConn) {
fmt.Fprintf(conn, "Telegraf maximum concurrent TCP connections (%d)"+
" reached, closing.\nYou may want to increase max_tcp_connections in"+
" the Telegraf tcp listener configuration.\n", t.MaxTCPConnections)
//nolint:errcheck,revive
conn.Close()
conn.Close() //nolint:revive // Ignore the returned error as we cannot do anything about it anyway
t.Log.Infof("Refused TCP Connection from %s", conn.RemoteAddr())
t.Log.Warn("Maximum TCP Connections reached, you may want to adjust max_tcp_connections")
}

View File

@ -120,9 +120,7 @@ func (u *UDPListener) Stop() {
defer u.Unlock()
close(u.done)
u.wg.Wait()
// Ignore the returned error as we cannot do anything about it anyway
//nolint:errcheck,revive
u.listener.Close()
u.listener.Close() //nolint:revive // Ignore the returned error as we cannot do anything about it anyway
close(u.in)
u.Log.Infof("Stopped service on %q", u.ServiceAddress)
}

View File

@ -99,9 +99,7 @@ func (f *Finder) descend(ctx context.Context, root types.ManagedObjectReference,
if err != nil {
return err
}
// Ignore the returned error as we cannot do anything about it anyway
//nolint:errcheck,revive
defer v.Destroy(ctx)
defer v.Destroy(ctx) //nolint:errcheck // Ignore the returned error as we cannot do anything about it anyway
var content []types.ObjectContent
fields := []string{"name"}
@ -119,9 +117,7 @@ func (f *Finder) descend(ctx context.Context, root types.ManagedObjectReference,
if err != nil {
return err
}
// Ignore the returned error as we cannot do anything about it anyway
//nolint:errcheck,revive
defer v2.Destroy(ctx)
defer v2.Destroy(ctx) //nolint:errcheck // Ignore the returned error as we cannot do anything about it anyway
err = v2.Retrieve(ctx, []string{resType}, fields, &content)
if err != nil {
return err

View File

@ -73,7 +73,7 @@ func (pt *PapertrailWebhook) eventHandler(w http.ResponseWriter, r *http.Request
}
pt.acc.AddFields("papertrail", fields, tags, e.ReceivedAt)
}
} else if payload.Counts != nil { //nolint:revive // Not simplifying here to stay in the structure for better understanding the code
} else if payload.Counts != nil {
// Handle count-based payload
for _, c := range payload.Counts {
for ts, count := range *c.TimeSeries {

View File

@ -109,8 +109,6 @@ func (wb *Webhooks) Start(acc telegraf.Accumulator) error {
}
func (wb *Webhooks) Stop() {
// Ignore the returned error as we cannot do anything about it anyway
//nolint:errcheck,revive
wb.srv.Close()
wb.srv.Close() //nolint:revive // Ignore the returned error as we cannot do anything about it anyway
wb.Log.Infof("Stopping the Webhooks service")
}

View File

@ -120,9 +120,7 @@ func (z *Zipkin) Stop() {
defer z.waitGroup.Wait()
defer cancel()
// Ignore the returned error as we cannot do anything about it anyway
//nolint:errcheck,revive
z.server.Shutdown(ctx)
z.server.Shutdown(ctx) //nolint:errcheck,revive // Ignore the returned error as we cannot do anything about it anyway
}
// Listen creates an http server on the zipkin instance it is called with, and

View File

@ -240,7 +240,7 @@ func (p *Postgresql) writeSequential(tableSources map[string]*TableSource) error
if err != nil {
return fmt.Errorf("starting transaction: %w", err)
}
defer tx.Rollback(p.dbContext) //nolint:errcheck
defer tx.Rollback(p.dbContext) //nolint:errcheck // In case of failure during commit, "err" from commit will be returned
for _, tableSource := range tableSources {
sp := tx
@ -317,19 +317,16 @@ func isTempError(err error) bool {
errClass := pgErr.Code[:2]
switch errClass {
case "23": // Integrity Constraint Violation
switch pgErr.Code { //nolint:revive
case "23505": // unique_violation
if strings.Contains(err.Error(), "pg_type_typname_nsp_index") {
//23505 - unique_violation
if pgErr.Code == "23505" && strings.Contains(err.Error(), "pg_type_typname_nsp_index") {
// Happens when you try to create 2 tables simultaneously.
return true
}
}
case "25": // Invalid Transaction State
// If we're here, this is a bug, but recoverable
return true
case "40": // Transaction Rollback
switch pgErr.Code { //nolint:revive
case "40P01": // deadlock_detected
if pgErr.Code == "40P01" { // deadlock_detected
return true
}
case "42": // Syntax Error or Access Rule Violation
@ -433,7 +430,7 @@ func (p *Postgresql) writeTagTable(ctx context.Context, db dbh, tableSource *Tab
if err != nil {
return err
}
defer tx.Rollback(ctx) //nolint:errcheck
defer tx.Rollback(ctx) //nolint:errcheck // In case of failure during commit, "err" from commit will be returned
ident := pgx.Identifier{ttsrc.postgresql.Schema, ttsrc.Name()}
identTemp := pgx.Identifier{ttsrc.Name() + "_temp"}

View File

@ -354,7 +354,7 @@ func TestWriteIntegration_concurrent(t *testing.T) {
// Lock the table so that we ensure the writes hangs and the plugin has to open another connection.
tx, err := p.db.Begin(ctx)
require.NoError(t, err)
defer tx.Rollback(ctx) //nolint:errcheck
defer tx.Rollback(ctx) //nolint:errcheck // ignore the returned error as we cannot do anything about it anyway
_, err = tx.Exec(ctx, "LOCK TABLE "+utils.QuoteIdentifier(t.Name()+"_a"))
require.NoError(t, err)

View File

@ -148,7 +148,7 @@ func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *Tabl
// If the table cannot be modified, the returned column list is the columns which are missing from the table. This
// includes when an error is returned.
//
//nolint:revive
//nolint:revive //argument-limit conditionally more arguments allowed
func (tm *TableManager) EnsureStructure(
ctx context.Context,
db dbh,
@ -248,7 +248,7 @@ func (tm *TableManager) EnsureStructure(
if err != nil {
return append(addColumns, invalidColumns...), err
}
defer tx.Rollback(ctx) //nolint:errcheck
defer tx.Rollback(ctx) //nolint:errcheck // In case of failure during commit, "err" from commit will be returned
// It's possible to have multiple telegraf processes, in which we can't ensure they all lock tables in the same
// order. So to prevent possible deadlocks, we have to have a single lock for all schema modifications.
if _, err := tx.Exec(ctx, "SELECT pg_advisory_xact_lock($1)", schemaAdvisoryLockID); err != nil {
@ -346,7 +346,7 @@ func (tm *TableManager) getColumns(ctx context.Context, db dbh, name string) (ma
return cols, rows.Err()
}
//nolint:revive
//nolint:revive //argument-limit conditionally more arguments allowed
func (tm *TableManager) update(ctx context.Context,
tx pgx.Tx,
state *tableState,

View File

@ -92,7 +92,7 @@ func (s *SignalFx) Connect() error {
if s.IngestURL != "" {
client.DatapointEndpoint = datapointEndpointForIngestURL(s.IngestURL)
client.EventEndpoint = eventEndpointForIngestURL(s.IngestURL)
} else if s.SignalFxRealm != "" { //nolint: revive // "Simplifying" if c {...} else {... return } would not simplify anything at all in this case
} else if s.SignalFxRealm != "" {
client.DatapointEndpoint = datapointEndpointForRealm(s.SignalFxRealm)
client.EventEndpoint = eventEndpointForRealm(s.SignalFxRealm)
} else {

View File

@ -69,9 +69,7 @@ func TestMain(m *testing.M) {
log.Fatal(err)
}
// Ignore the returned error as the tests will fail anyway
//nolint:errcheck,revive
go serv.Serve(lis)
go serv.Serve(lis) //nolint:errcheck // Ignore the returned error as the tests will fail anyway
opt := grpc.WithTransportCredentials(insecure.NewCredentials())
conn, err := grpc.Dial(lis.Addr().String(), opt)