chore: Enable additional gocritic (#13714)

Joshua Powers authored 2023-08-03 10:58:27 -06:00, committed by GitHub
parent 0f8957f0e9
commit 2ac45b8d25
20 changed files with 19 additions and 47 deletions

View File

@@ -83,8 +83,10 @@ linters-settings:
- builtinShadowDecl
- caseOrder
- codegenComment
- commentedOutCode
- deferInLoop
- deprecatedComment
- dupArg
- dupBranchBody
- dupCase
- dupSubExpr
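
For reference: commentedOutCode reports comments that parse as Go statements (the pattern most of the hunks below clean up), and deferInLoop reports defer statements inside loops. A minimal hypothetical sketch of both (processAll and its identifiers are invented for illustration):

package example

import "os"

func processAll(paths []string) error {
	// commentedOutCode would report the next comment, since it parses as code:
	// total := len(paths)
	for _, p := range paths {
		f, err := os.Open(p)
		if err != nil {
			return err
		}
		// deferInLoop: each defer below runs only when processAll returns,
		// not at the end of the iteration, so all files stay open at once.
		defer f.Close()
	}
	return nil
}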

View File

@@ -463,7 +463,7 @@ func TestCommandVersion(t *testing.T) {
}
}
// Deprecated in favor of command version
// Users should use the version subcommand
func TestFlagVersion(t *testing.T) {
tests := []struct {
Version string
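
The reworded comment likely satisfies gocritic's deprecatedComment check, which expects deprecation notices to use the canonical "Deprecated: " prefix and reports other phrasings that open with "Deprecated". A hypothetical before/after sketch (Parse, OldParse, and ParseV2 are invented names):

package example

// Deprecated in favor of ParseV2
// (reported: the notice does not use the "Deprecated: " form)
func Parse(s string) string { return ParseV2(s) }

// Deprecated: use ParseV2 instead. (canonical form, not reported)
func OldParse(s string) string { return ParseV2(s) }

func ParseV2(s string) string { return s }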

View File

@@ -22,8 +22,6 @@ func TestShimSetsUpLogger(t *testing.T) {
_, err := stdinWriter.Write([]byte("\n"))
require.NoError(t, err)
// <-metricProcessed
r := bufio.NewReader(stderrReader)
out, err := r.ReadString('\n')
require.NoError(t, err)

View File

@@ -329,7 +329,6 @@ func (s *AliyunCMS) gatherMetric(acc telegraf.Accumulator, metricName string, me
fields[formatField(metricName, key)] = value
}
}
//Log.logW("Datapoint time: %s, now: %s", time.Unix(datapointTime, 0).Format(time.RFC3339), time.Now().Format(time.RFC3339))
acc.AddFields(s.measurement, fields, tags, time.Unix(datapointTime, 0))
}

View File

@@ -134,17 +134,8 @@ func newDiscoveryTool(
case "acs_ocs":
return nil, noDiscoverySupportErr
case "acs_oss":
//oss is really complicated
//it is on it's own format
// oss is really complicated and uses its own format
return nil, noDiscoverySupportErr
//As a possible solution we can
//mimic to request format supported by oss
//req := DescribeLOSSRequest{
// RpcRequest: &requests.RpcRequest{},
//}
//req.InitWithApiInfo("oss", "2014-08-15", "DescribeDBInstances", "oss", "openAPI")
case "acs_vpc_eip":
dscReq[region] = vpc.CreateDescribeEipAddressesRequest()
responseRootKey = "EipAddresses"

View File

@@ -246,7 +246,6 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc {
var readErr error
var bytes []byte
//body = http.MaxBytesReader(res, req.Body, 1000000) //p.MaxBodySize.Size)
bytes, readErr = io.ReadAll(body)
if readErr != nil {
h.Log.Debugf("Error parsing the request body: %v", readErr.Error())

View File

@@ -463,8 +463,8 @@ func (p *PowerStat) addTurboRatioLimit(socketID string, acc telegraf.Accumulator
}
if (model != strconv.FormatInt(0x37, 10)) && // INTEL_FAM6_ATOM_SILVERMONT
(model != strconv.FormatInt(0x4A, 10)) && // INTEL_FAM6_ATOM_SILVERMONT_MID:
(model != strconv.FormatInt(0x5A, 10)) && // INTEL_FAM6_ATOM_AIRMONT_MID:
(model != strconv.FormatInt(0x4A, 10)) && // INTEL_FAM6_ATOM_SILVERMONT_MID
(model != strconv.FormatInt(0x5A, 10)) && // INTEL_FAM6_ATOM_AIRMONT_MID
(model != strconv.FormatInt(0x2E, 10)) && // INTEL_FAM6_NEHALEM_EX
(model != strconv.FormatInt(0x2F, 10)) && // INTEL_FAM6_WESTMERE_EX
(model != strconv.FormatInt(0x57, 10)) && // INTEL_FAM6_XEON_PHI_KNL
@@ -492,7 +492,7 @@ func (p *PowerStat) addTurboRatioLimit(socketID string, acc telegraf.Accumulator
}
// dump_atom_turbo_ratio_limits
if model == strconv.FormatInt(0x37, 10) || // INTEL_FAM6_ATOM_SILVERMONT
model == strconv.FormatInt(0x4A, 10) || // INTEL_FAM6_ATOM_SILVERMONT_MID:
model == strconv.FormatInt(0x4A, 10) || // INTEL_FAM6_ATOM_SILVERMONT_MID
model == strconv.FormatInt(0x5A, 10) { // INTEL_FAM6_ATOM_AIRMONT_MID
coreCounts := uint64(0x04030201) // counting the number of active cores 1 to 4
msrTurboRatioLimit, err := p.msr.readSingleMsr(cpuID, msrAtomCoreTurboRatiosString)
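
The only change in these two hunks is dropping the trailing colons from the CPU-model comments: a line like "// INTEL_FAM6_ATOM_SILVERMONT_MID:" looks like a Go labeled statement to commentedOutCode (presumably why it was reported), while the colon-free form reads as plain prose. A tiny invented illustration:

package example

func modelNote() {
	// INTEL_FAM6_ATOM_AIRMONT_MID:
	// The comment above parses as a Go label, so commentedOutCode can report
	// it; dropping the colon, as this diff does, turns it back into prose.
}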

View File

@@ -106,7 +106,6 @@ func TestLibvirt_Gather(t *testing.T) {
err := l.Gather(&acc)
require.NoError(t, err)
// require.Contains(t, err.Error(), "couldn't find any domains on system")
mockLibvirtUtils.AssertExpectations(t)
})

View File

@@ -115,7 +115,7 @@ func Test_Logstash6GatherProcessStats(test *testing.T) {
}
func Test_Logstash5GatherPipelineStats(test *testing.T) {
//logstash5accPipelineStats.SetDebug(true)
logstash5accPipelineStats.SetDebug(true)
fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
writer.Header().Set("Content-Type", "application/json")
_, err := fmt.Fprintf(writer, "%s", string(logstash5PipelineJSON))
@@ -213,7 +213,7 @@ func Test_Logstash5GatherPipelineStats(test *testing.T) {
}
func Test_Logstash6GatherPipelinesStats(test *testing.T) {
//logstash6accPipelinesStats.SetDebug(true)
logstash6accPipelinesStats.SetDebug(true)
fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
writer.Header().Set("Content-Type", "application/json")
_, err := fmt.Fprintf(writer, "%s", string(logstash6PipelinesJSON))

View File

@@ -381,7 +381,6 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping) e
client = ""
}
//lines, err := internal.ReadLines(file)
wholeFile, err := os.ReadFile(file)
if err != nil {
return err
@@ -443,7 +442,6 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping) e
// Gather reads stats from all lustre targets
func (l *Lustre2) Gather(acc telegraf.Accumulator) error {
//l.allFields = make(map[string]map[string]interface{})
l.allFields = make(map[tags]map[string]interface{})
if len(l.OstProcfiles) == 0 {

View File

@@ -128,7 +128,6 @@ func (c *Marklogic) Gather(accumulator telegraf.Accumulator) error {
// Range over all source URLs appended to the struct
for _, serv := range c.Sources {
//fmt.Printf("Encoded URL is %q\n", serv)
wg.Add(1)
go func(serv string) {
defer wg.Done()

View File

@@ -161,7 +161,7 @@ func TestMetricResult(t *testing.T) {
0x00, 0x00, 0x08, 0x98, // 2200
0x00, 0x00, 0x08, 0x99, // 2201
0x00, 0x00, 0x08, 0x9A, // 2202
0x40, 0x49, 0x0f, 0xdb, // float32(3.1415927410125732421875)
0x40, 0x49, 0x0f, 0xdb, // float32 of 3.1415927410125732421875
}
// Write the data to a fake server

View File

@@ -394,7 +394,7 @@ func (d *PacketDecoder) decodeIPv6Header(r io.Reader) (h IPV6Header, err error)
h.DSCP = uint8((fourByteBlock & 0xFC00000) >> 22)
h.ECN = uint8((fourByteBlock & 0x300000) >> 20)
// flowLabel := fourByteBlock & 0xFFFFF // not currently being used.
// The flowLabel is available via fourByteBlock & 0xFFFFF
if err := read(r, &h.PayloadLength, "PayloadLength"); err != nil {
return h, err
}
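
For the masks above: the first 32-bit word of an IPv6 header packs the version (4 bits), traffic class (8 bits), and flow label (20 bits); DSCP is the upper 6 bits of the traffic class and ECN the lower 2, which is where the 22- and 20-bit shifts come from. A standalone sketch of the same bit arithmetic (an illustration, not the plugin's decoder):

package main

import "fmt"

func splitIPv6FirstWord(word uint32) (version, dscp, ecn uint8, flowLabel uint32) {
	version = uint8(word >> 28)             // bits 31..28
	dscp = uint8((word & 0x0FC00000) >> 22) // upper 6 bits of the traffic class
	ecn = uint8((word & 0x00300000) >> 20)  // lower 2 bits of the traffic class
	flowLabel = word & 0x000FFFFF           // bits 19..0
	return version, dscp, ecn, flowLabel
}

func main() {
	v, d, e, f := splitIPv6FirstWord(0x60ABCDEF) // invented sample word
	fmt.Println(v, d, e, f)                      // prints: 6 2 2 773615
}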

View File

@@ -54,7 +54,7 @@ func TestIPv4Header(t *testing.T) {
0x00, 0x00, // identification
0x00, 0x00, // flags + frag offset
0x00, // ttl
0x11, // protocol; 0x11 = udp
0x11, // protocol udp (0x11)
0x00, 0x00, // header checksum
0x7f, 0x00, 0x00, 0x01, // src ip
0x7f, 0x00, 0x00, 0x02, // dst ip
@@ -104,7 +104,7 @@ func TestIPv4HeaderSwitch(t *testing.T) {
0x00, 0x00, // identification
0x00, 0x00, // flags + frag offset
0x00, // ttl
0x11, // protocol; 0x11 = udp
0x11, // protocol udp (0x11)
0x00, 0x00, // header checksum
0x7f, 0x00, 0x00, 0x01, // src ip
0x7f, 0x00, 0x00, 0x02, // dst ip
@@ -126,7 +126,7 @@ func TestIPv4HeaderSwitch(t *testing.T) {
0x00, 0x00, // identification
0x00, 0x00, // flags + frag offset
0x00, // ttl
0x06, // protocol; 0x06 = tcp
0x06, // protocol tcp (0x06)
0x00, 0x00, // header checksum
0x7f, 0x00, 0x00, 0x01, // src ip
0x7f, 0x00, 0x00, 0x02, // dst ip

View File

@@ -39,7 +39,6 @@ var (
// collectVsan is the entry point for vsan metrics collection
func (e *Endpoint) collectVsan(ctx context.Context, acc telegraf.Accumulator) error {
//resourceType := "vsan"
lower := versionLowerThan(e.apiVersion, 5, 5)
if lower {
return fmt.Errorf("a minimum API version of 5.5 is required for vSAN. Found: %s. Skipping vCenter: %s", e.apiVersion, e.URL.Host)

View File

@@ -341,8 +341,7 @@ func (c *CloudWatchLogs) Write(metrics []telegraf.Metric) error {
// Sorting out log events by TS and sending them to cloud watch logs
for logStream, elem := range c.ls {
for index, batch := range elem.messageBatches {
if len(batch.logEvents) == 0 { //can't push empty batch
//c.Log.Warnf("Empty batch detected, skipping...")
if len(batch.logEvents) == 0 {
continue
}
//Sorting

View File

@@ -278,11 +278,11 @@ func (tsrc *TableSource) getValues() ([]interface{}, error) {
}
values = append(values, tagValues...)
} else {
// tags_as_foreign_key=false, tags_as_json=true
// tags_as_foreign_key is false and tags_as_json is true
values = append(values, utils.TagListToJSON(metric.TagList()))
}
} else {
// tags_as_foreignkey=true
// tags_as_foreignkey is true
tagID := utils.GetTagID(metric)
if tsrc.postgresql.ForeignTagConstraint {
if _, ok := tsrc.tagSets[tagID]; !ok {
@@ -294,7 +294,7 @@ func (tsrc *TableSource) getValues() ([]interface{}, error) {
}
if !tsrc.postgresql.FieldsAsJsonb {
// fields_as_json=false
// fields_as_json is false
fieldValues := make([]interface{}, len(tsrc.fieldColumns.columns))
fieldsEmpty := true
for _, field := range metric.FieldList() {
@@ -310,7 +310,7 @@ func (tsrc *TableSource) getValues() ([]interface{}, error) {
}
values = append(values, fieldValues...)
} else {
// fields_as_json=true
// fields_as_json is true
value, err := utils.FieldListToJSON(metric.FieldList())
if err != nil {
return nil, err

View File

@@ -143,16 +143,12 @@ func (p *SQL) deriveDatatype(value interface{}) string {
func (p *SQL) generateCreateTable(metric telegraf.Metric) string {
columns := make([]string, 0, len(metric.TagList())+len(metric.FieldList())+1)
// ## {KEY_COLUMNS} is a comma-separated list of key columns (timestamp and tags)
//var pk []string
if p.TimestampColumn != "" {
//pk = append(pk, quoteIdent(p.TimestampColumn))
columns = append(columns, fmt.Sprintf("%s %s", quoteIdent(p.TimestampColumn), p.Convert.Timestamp))
}
for _, tag := range metric.TagList() {
//pk = append(pk, quoteIdent(tag.Key))
columns = append(columns, fmt.Sprintf("%s %s", quoteIdent(tag.Key), p.Convert.Text))
}
@@ -166,7 +162,6 @@ func (p *SQL) generateCreateTable(metric telegraf.Metric) string {
query = strings.ReplaceAll(query, "{TABLE}", quoteIdent(metric.Name()))
query = strings.ReplaceAll(query, "{TABLELITERAL}", quoteStr(metric.Name()))
query = strings.ReplaceAll(query, "{COLUMNS}", strings.Join(columns, ","))
//query = strings.ReplaceAll(query, "{KEY_COLUMNS}", strings.Join(pk, ","))
return query
}

View File

@@ -200,7 +200,6 @@ func TestMysqlIntegration(t *testing.T) {
p.Log = testutil.Logger{}
p.Driver = "mysql"
p.DataSourceName = address
//p.Convert.Timestamp = "TEXT" //disable mysql default current_timestamp()
p.InitSQL = "SET sql_mode='ANSI_QUOTES';"
require.NoError(t, p.Connect())
@@ -332,7 +331,6 @@ func TestClickHouseIntegration(t *testing.T) {
}
initdb, err := filepath.Abs("testdata/clickhouse/initdb")
// confd, err := filepath.Abs("testdata/clickhouse/config.d")
require.NoError(t, err)
// initdb/init.sql creates this database

View File

@@ -105,8 +105,6 @@ func noLongLinesInParagraphs(threshold int) func(*T, ast.Node) error {
for _, seg := range segs.Sliced(0, segs.Len()) {
line := t.line(seg.Start)
paraLines = append(paraLines, line)
// t.printFileLine(line)
// fmt.Printf("paragraph line\n")
}
}
@@ -117,8 +115,6 @@ func noLongLinesInParagraphs(threshold int) func(*T, ast.Node) error {
length := cur - last - 1 // -1 to exclude the newline
if length > threshold {
longLines = append(longLines, i)
// t.printFileLine(i)
// fmt.Printf("long line\n")
}
last = cur
}
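
For the arithmetic in the second hunk: with the previous newline at byte offset last and the current one at cur, the characters strictly between them number cur - last - 1, which is why the newline itself is excluded from the measured line length. A tiny check with invented offsets:

package main

import "fmt"

func main() {
	last, cur := 10, 92 // invented byte offsets of two consecutive newlines
	fmt.Println(cur - last - 1) // 81: characters on the line between them
}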