chore: Fix linter findings for prealloc (part2) (#12242)

Authored by Paweł Żak on 2022-11-15 09:31:51 +01:00; committed by GitHub
parent 4c67b956bf
commit 74703d092b
19 changed files with 49 additions and 51 deletions
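
The recurring change in the hunks below is the one the prealloc linter asks for: a slice declared as nil and grown only through append is replaced with one created by make() with length 0 and a known capacity, so append never has to reallocate the backing array. A minimal, self-contained sketch of the before/after shape (illustrative names only, not code from this commit):

    package main

    import "fmt"

    // keysBefore shows the shape the prealloc linter flags: a nil slice that is
    // only ever grown through append, so the backing array may be reallocated
    // several times as it fills up.
    func keysBefore(m map[string]int) []string {
    	var keys []string
    	for k := range m {
    		keys = append(keys, k)
    	}
    	return keys
    }

    // keysAfter shows the fix applied throughout this commit: length 0, capacity
    // known up front, so append fills the slice without reallocating.
    func keysAfter(m map[string]int) []string {
    	keys := make([]string, 0, len(m))
    	for k := range m {
    		keys = append(keys, k)
    	}
    	return keys
    }

    func main() {
    	m := map[string]int{"disk": 1, "cpu": 2}
    	fmt.Println(len(keysBefore(m)), len(keysAfter(m)))
    }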

View File

@@ -33,7 +33,7 @@ type NFSClient struct {
 func convertToUint64(line []string) ([]uint64, error) {
 	/* A "line" of input data (a pre-split array of strings) is
 	processed one field at a time. Each field is converted to
-	an uint64 value, and appened to an array of return values.
+	an uint64 value, and appended to an array of return values.
 	On an error, check for ErrRange, and returns an error
 	if found. This situation indicates a pretty major issue in
 	the /proc/self/mountstats file, and returning faulty data
@@ -41,12 +41,11 @@ func convertToUint64(line []string) ([]uint64, error) {
 	whatever we got in the first place (probably 0).
 	Yes, this is ugly. */
-	var nline []uint64
 	if len(line) < 2 {
-		return nline, nil
+		return nil, nil
 	}
+	nline := make([]uint64, 0, len(line[1:]))
 	// Skip the first field; it's handled specially as the "first" variable
 	for _, l := range line[1:] {
 		val, err := strconv.ParseUint(l, 10, 64)

View File

@@ -412,14 +412,14 @@ func (p *Procstat) systemdUnitPIDs() []PidsTags {
 }
 func (p *Procstat) simpleSystemdUnitPIDs() ([]PID, error) {
-	var pids []PID
-	cmd := execCommand("systemctl", "show", p.SystemdUnit)
-	out, err := cmd.Output()
+	out, err := execCommand("systemctl", "show", p.SystemdUnit).Output()
 	if err != nil {
 		return nil, err
 	}
-	for _, line := range bytes.Split(out, []byte{'\n'}) {
+	lines := bytes.Split(out, []byte{'\n'})
+	pids := make([]PID, 0, len(lines))
+	for _, line := range lines {
 		kv := bytes.SplitN(line, []byte{'='}, 2)
 		if len(kv) != 2 {
 			continue
@@ -441,17 +441,17 @@ func (p *Procstat) simpleSystemdUnitPIDs() ([]PID, error) {
 }
 func (p *Procstat) cgroupPIDs() []PidsTags {
-	var pidTags []PidsTags
 	procsPath := p.CGroup
 	if procsPath[0] != '/' {
 		procsPath = "/sys/fs/cgroup/" + procsPath
 	}
 	items, err := filepath.Glob(procsPath)
 	if err != nil {
-		pidTags = append(pidTags, PidsTags{nil, nil, fmt.Errorf("glob failed '%s'", err)})
-		return pidTags
+		return []PidsTags{{nil, nil, fmt.Errorf("glob failed '%s'", err)}}
 	}
+	pidTags := make([]PidsTags, 0, len(items))
 	for _, item := range items {
 		pids, err := p.singleCgroupPIDs(item)
 		tags := map[string]string{"cgroup": p.CGroup, "cgroup_full": item}
@@ -462,8 +462,6 @@ func (p *Procstat) cgroupPIDs() []PidsTags {
 }
 func (p *Procstat) singleCgroupPIDs(path string) ([]PID, error) {
-	var pids []PID
 	ok, err := isDir(path)
 	if err != nil {
 		return nil, err
@@ -476,7 +474,10 @@ func (p *Procstat) singleCgroupPIDs(path string) ([]PID, error) {
 	if err != nil {
 		return nil, err
 	}
-	for _, pidBS := range bytes.Split(out, []byte{'\n'}) {
+	lines := bytes.Split(out, []byte{'\n'})
+	pids := make([]PID, 0, len(lines))
+	for _, pidBS := range lines {
 		if len(pidBS) == 0 {
 			continue
 		}

View File

@@ -27,7 +27,7 @@ func mockExecCommand(arg0 string, args ...string) *exec.Cmd {
 	return cmd
 }
 func TestMockExecCommand(_ *testing.T) {
-	var cmd []string
+	var cmd []string //nolint:prealloc // Pre-allocated this slice would break the algorithm
 	for _, arg := range os.Args {
 		if arg == "--" {
 			cmd = []string{}
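
The //nolint:prealloc annotation above is kept because the loop (not fully shown in this hunk) appears to rely on cmd staying nil until the "--" separator is reached. A hedged, self-contained sketch of that kind of algorithm, with the nil check assumed here as a stand-in for the rest of the test helper:

    package main

    import "fmt"

    // splitAfterSeparator mirrors the assumed shape of TestMockExecCommand: cmd
    // stays nil until "--" is seen, and that nil-ness is what tells the loop to
    // keep skipping earlier arguments. Creating cmd with make() would make it
    // non-nil from the start and defeat that check, hence the nolint directive.
    func splitAfterSeparator(args []string) []string {
    	var cmd []string //nolint:prealloc // pre-allocating would break the nil check below
    	for _, arg := range args {
    		if arg == "--" {
    			cmd = []string{} // switch from "skipping" to "collecting"
    			continue
    		}
    		if cmd == nil {
    			continue // still before the separator
    		}
    		cmd = append(cmd, arg)
    	}
    	return cmd
    }

    func main() {
    	fmt.Println(splitAfterSeparator([]string{"test.exe", "-run", "X", "--", "echo", "hi"}))
    }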

View File

@@ -587,8 +587,7 @@ func (m *Smart) getVendorNVMeAttributes(acc telegraf.Accumulator, devices []stri
 }
 func getDeviceInfoForNVMeDisks(acc telegraf.Accumulator, devices []string, nvme string, timeout config.Duration, useSudo bool) []nvmeDevice {
-	var nvmeDevices []nvmeDevice
+	nvmeDevices := make([]nvmeDevice, 0, len(devices))
 	for _, device := range devices {
 		newDevice, err := gatherNVMeDeviceInfo(nvme, device, timeout, useSudo)
 		if err != nil {

View File

@@ -25,7 +25,7 @@ func mockExecCommand(arg0 string, args ...string) *exec.Cmd {
 //
 // Idea based on https://github.com/golang/go/blob/7c31043/src/os/exec/exec_test.go#L568
 func TestMockExecCommand(_ *testing.T) {
-	var cmd []string
+	var cmd []string //nolint:prealloc // Pre-allocated this slice would break the algorithm
 	for _, arg := range os.Args {
 		if arg == "--" {
 			cmd = []string{}

View File

@@ -179,11 +179,11 @@ func (s *SQLServer) initQueries() error {
 		}
 	}
-	var querylist []string
+	queryList := make([]string, 0, len(queries))
 	for query := range queries {
-		querylist = append(querylist, query)
+		queryList = append(queryList, query)
 	}
-	s.Log.Infof("Config: Effective Queries: %#v\n", querylist)
+	s.Log.Infof("Config: Effective Queries: %#v\n", queryList)
 	return nil
 }

View File

@@ -133,10 +133,10 @@ func (t *TCPListener) Stop() {
 	// Close all open TCP connections
 	// - get all conns from the t.conns map and put into slice
-	// - this is so the forget() function doesnt conflict with looping
+	// - this is so the forget() function doesn't conflict with looping
 	// over the t.conns map
-	var conns []*net.TCPConn
 	t.cleanup.Lock()
+	conns := make([]*net.TCPConn, 0, len(t.conns))
 	for _, conn := range t.conns {
 		conns = append(conns, conn)
 	}

View File

@@ -103,7 +103,7 @@ func (*Varnish) SampleConfig() string {
 }
 func (s *Varnish) Init() error {
-	var customRegexps []*regexp.Regexp
+	customRegexps := make([]*regexp.Regexp, 0, len(s.Regexps))
 	for _, re := range s.Regexps {
 		compiled, err := regexp.Compile(re)
 		if err != nil {

View File

@@ -67,8 +67,6 @@ func (wg *Wireguard) Gather(acc telegraf.Accumulator) error {
 }
 func (wg *Wireguard) enumerateDevices() ([]*wgtypes.Device, error) {
-	var devices []*wgtypes.Device
 	// If no device names are specified, defer to the library to enumerate
 	// all of them
 	if len(wg.Devices) == 0 {
@@ -76,6 +74,7 @@ func (wg *Wireguard) enumerateDevices() ([]*wgtypes.Device, error) {
 	}
 	// Otherwise, explicitly populate only device names specified in config
+	devices := make([]*wgtypes.Device, 0, len(wg.Devices))
 	for _, name := range wg.Devices {
 		dev, err := wg.client.Device(name)
 		if err != nil {
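
Besides switching to make(), this hunk moves the declaration past the early-return branch, so the slice is only allocated on the path that actually appends to it. A rough, self-contained sketch of that shape with hypothetical names (Device, fetchAll, fetchOne are stand-ins, not the plugin's API):

    package main

    import "fmt"

    // Device, fetchAll, and fetchOne are hypothetical stand-ins for the wgtypes
    // and wgctrl calls used by the plugin; only the control flow mirrors the hunk.
    type Device struct{ Name string }

    func fetchAll() ([]*Device, error) { return []*Device{{Name: "wg0"}}, nil }

    func fetchOne(name string) (*Device, error) { return &Device{Name: name}, nil }

    func collect(names []string) ([]*Device, error) {
    	// Fast path: nothing configured, so return the bulk enumeration directly;
    	// no slice is allocated on this branch at all.
    	if len(names) == 0 {
    		return fetchAll()
    	}
    	// Only here is the maximum result count known, so size the backing array
    	// once and let append fill it without reallocating.
    	devices := make([]*Device, 0, len(names))
    	for _, name := range names {
    		dev, err := fetchOne(name)
    		if err != nil {
    			continue
    		}
    		devices = append(devices, dev)
    	}
    	return devices, nil
    }

    func main() {
    	devs, _ := collect([]string{"wg0", "wg1"})
    	fmt.Println(len(devs))
    }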

View File

@@ -277,7 +277,7 @@ func (a *AzureMonitor) Write(metrics []telegraf.Metric) error {
 		return nil
 	}
-	var body []byte
+	var body []byte //nolint:prealloc // There is no point in guessing the final capacity of this slice
 	for _, m := range azmetrics {
 		// Azure Monitor accepts new batches of points in new-line delimited
 		// JSON, following RFC 4288 (see https://github.com/ndjson/ndjson-spec).
@@ -364,8 +364,8 @@ func hashIDWithTagKeysOnly(m telegraf.Metric) uint64 {
 }
 func translate(m telegraf.Metric, prefix string) (*azureMonitorMetric, error) {
-	var dimensionNames []string
-	var dimensionValues []string
+	dimensionNames := make([]string, 0, len(m.TagList()))
+	dimensionValues := make([]string, 0, len(m.TagList()))
 	for _, tag := range m.TagList() {
 		// Azure custom metrics service supports up to 10 dimensions
 		if len(dimensionNames) >= 10 {
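
The first change above silences the linter instead of pre-allocating, because the final length of the newline-delimited JSON body depends on how large each serialized metric turns out to be. A small illustrative sketch of that pattern (the point type and buildNDJSON are placeholders, not the plugin's real types):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // point is a placeholder; the real plugin serializes azureMonitorMetric values.
    type point struct {
    	Name  string  `json:"name"`
    	Value float64 `json:"value"`
    }

    // buildNDJSON mirrors the loop above: each element is marshaled and appended
    // with a trailing newline. The total size depends on the serialized length of
    // every element, so no capacity hint is meaningful and the slice is left to
    // grow on demand, as the //nolint:prealloc comment in the hunk explains.
    func buildNDJSON(points []point) ([]byte, error) {
    	var body []byte //nolint:prealloc // final size unknown until everything is serialized
    	for _, p := range points {
    		jsonBytes, err := json.Marshal(p)
    		if err != nil {
    			return nil, err
    		}
    		body = append(body, jsonBytes...)
    		body = append(body, '\n')
    	}
    	return body, nil
    }

    func main() {
    	b, _ := buildNDJSON([]point{{Name: "cpu", Value: 0.5}, {Name: "mem", Value: 0.25}})
    	fmt.Print(string(b))
    }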

View File

@@ -109,8 +109,7 @@ func (e *EventHubs) SetSerializer(serializer serializers.Serializer) {
 }
 func (e *EventHubs) Write(metrics []telegraf.Metric) error {
-	var events []*eventhub.Event
+	events := make([]*eventhub.Event, 0, len(metrics))
 	for _, metric := range metrics {
 		payload, err := e.serializer.Serialize(metric)

View File

@@ -327,7 +327,6 @@ func (*Graylog) SampleConfig() string {
 }
 func (g *Graylog) Connect() error {
-	var writers []io.Writer
 	dialer := &net.Dialer{Timeout: time.Duration(g.Timeout)}
 	if len(g.Servers) == 0 {
@@ -339,6 +338,7 @@ func (g *Graylog) Connect() error {
 		return err
 	}
+	writers := make([]io.Writer, 0, len(g.Servers))
 	for _, server := range g.Servers {
 		w := newGelfWriter(gelfConfig{Endpoint: server}, dialer, tlsCfg)
 		err := w.Connect()

View File

@@ -149,7 +149,7 @@ func (g *Groundwork) Write(metrics []telegraf.Metric) error {
 		})
 	}
-	var resources []transit.MonitoredResource
+	resources := make([]transit.MonitoredResource, 0, len(resourceToServicesMap))
 	for resourceName, services := range resourceToServicesMap {
 		resources = append(resources, transit.MonitoredResource{
 			BaseResource: transit.BaseResource{

View File

@@ -158,12 +158,12 @@ func (s *IoTDB) convertTimestampOfMetric(m telegraf.Metric) (int64, error) {
 // convert Metrics to Records with tags
 func (s *IoTDB) convertMetricsToRecordsWithTags(metrics []telegraf.Metric) (*recordsWithTags, error) {
-	var deviceidList []string
-	var measurementsList [][]string
-	var valuesList [][]interface{}
-	var dataTypesList [][]client.TSDataType
-	var timestampList []int64
-	var tagsList [][]*telegraf.Tag
+	timestampList := make([]int64, 0, len(metrics))
+	deviceidList := make([]string, 0, len(metrics))
+	measurementsList := make([][]string, 0, len(metrics))
+	valuesList := make([][]interface{}, 0, len(metrics))
+	dataTypesList := make([][]client.TSDataType, 0, len(metrics))
+	tagsList := make([][]*telegraf.Tag, 0, len(metrics))
 	for _, metric := range metrics {
 		// write `metric` to the output sink here

View File

@@ -3,8 +3,9 @@ package loki
 import (
 	"testing"
+	"github.com/influxdata/telegraf"
 	"github.com/stretchr/testify/require"
-	"github.com/influxdata/telegraf"
 )
 type tuple struct {
@@ -13,8 +14,7 @@ type tuple struct {
 func generateLabelsAndTag(tt ...tuple) (map[string]string, []*telegraf.Tag) {
 	labels := map[string]string{}
-	var tags []*telegraf.Tag
+	tags := make([]*telegraf.Tag, 0, len(tt))
 	for _, t := range tt {
 		labels[t.key] = t.value
 		tags = append(tags, &telegraf.Tag{Key: t.key, Value: t.value})

View File

@@ -179,7 +179,7 @@ func (tm *TableManager) EnsureStructure(
 	// check that the missing columns are columns that can be added
 	addColumns := make([]utils.Column, 0, len(missingCols))
-	var invalidColumns []utils.Column
+	invalidColumns := make([]utils.Column, 0, len(missingCols))
 	for _, col := range missingCols {
 		if tm.validateColumnName(col.Name) {
 			addColumns = append(addColumns, col)

View File

@@ -169,7 +169,7 @@ func (r *Riemann) tags(tags map[string]string) []string {
 	}
 	// otherwise add all values from telegraf tag key/value pairs
-	var keys []string
+	keys := make([]string, 0, len(tags))
 	for key := range tags {
 		keys = append(keys, key)
 	}

View File

@@ -141,7 +141,7 @@ func (p *SQL) deriveDatatype(value interface{}) string {
 }
 func (p *SQL) generateCreateTable(metric telegraf.Metric) string {
-	var columns []string
+	columns := make([]string, 0, len(metric.TagList())+len(metric.FieldList())+1)
 	// ## {KEY_COLUMNS} is a comma-separated list of key columns (timestamp and tags)
 	//var pk []string
@@ -171,7 +171,8 @@ func (p *SQL) generateCreateTable(metric telegraf.Metric) string {
 }
 func (p *SQL) generateInsert(tablename string, columns []string) string {
-	var placeholders, quotedColumns []string
+	placeholders := make([]string, 0, len(columns))
+	quotedColumns := make([]string, 0, len(columns))
 	for _, column := range columns {
 		quotedColumns = append(quotedColumns, quoteIdent(column))
 	}

View File

@@ -7,10 +7,10 @@ import (
 	"sync"
 	"time"
+	"github.com/influxdata/telegraf"
 	monpb "google.golang.org/genproto/googleapis/monitoring/v3"
 	tspb "google.golang.org/protobuf/types/known/timestamppb"
-	"github.com/influxdata/telegraf"
 )
 type counterCache struct {
@@ -87,7 +87,7 @@ func NewCounterCacheEntry(value *monpb.TypedValue, ts *tspb.Timestamp) *counterC
 func GetCounterCacheKey(m telegraf.Metric, f *telegraf.Field) string {
 	// normalize tag list to form a predictable key
-	var tags []string
+	tags := make([]string, 0, len(m.TagList()))
 	for _, t := range m.TagList() {
 		tags = append(tags, strings.Join([]string{t.Key, t.Value}, "="))
 	}