chore: Fix linter findings for prealloc (part2) (#12242)
parent 4c67b956bf
commit 74703d092b
@@ -33,7 +33,7 @@ type NFSClient struct {
 func convertToUint64(line []string) ([]uint64, error) {
     /* A "line" of input data (a pre-split array of strings) is
        processed one field at a time. Each field is converted to
-       an uint64 value, and appened to an array of return values.
+       an uint64 value, and appended to an array of return values.
        On an error, check for ErrRange, and returns an error
        if found. This situation indicates a pretty major issue in
        the /proc/self/mountstats file, and returning faulty data

@@ -41,12 +41,11 @@ func convertToUint64(line []string) ([]uint64, error) {
        whatever we got in the first place (probably 0).
        Yes, this is ugly. */

-    var nline []uint64
-
     if len(line) < 2 {
-        return nline, nil
+        return nil, nil
     }

+    nline := make([]uint64, 0, len(line[1:]))
     // Skip the first field; it's handled specially as the "first" variable
     for _, l := range line[1:] {
         val, err := strconv.ParseUint(l, 10, 64)

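For reference, this is the pattern the commit applies wherever a slice's size is known before the loop: allocate once with make(T, 0, cap) and append into the reserved capacity instead of growing a nil slice. A minimal, self-contained sketch of the same shape (the convert helper below is illustrative, not code from the repository):

package main

import (
	"fmt"
	"strconv"
)

// convert parses every field after the first one. Because the number of
// results is known before the loop, the slice is created once with its
// final capacity instead of being grown from nil by repeated appends.
func convert(fields []string) ([]uint64, error) {
	if len(fields) < 2 {
		return nil, nil
	}

	out := make([]uint64, 0, len(fields[1:])) // single allocation, no regrowth
	for _, f := range fields[1:] {
		v, err := strconv.ParseUint(f, 10, 64)
		if err != nil {
			return nil, err
		}
		out = append(out, v)
	}
	return out, nil
}

func main() {
	vals, err := convert([]string{"label", "1", "2", "3"})
	fmt.Println(vals, err) // [1 2 3] <nil>
}
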
@@ -412,14 +412,14 @@ func (p *Procstat) systemdUnitPIDs() []PidsTags {
 }

 func (p *Procstat) simpleSystemdUnitPIDs() ([]PID, error) {
-    var pids []PID
-
-    cmd := execCommand("systemctl", "show", p.SystemdUnit)
-    out, err := cmd.Output()
+    out, err := execCommand("systemctl", "show", p.SystemdUnit).Output()
     if err != nil {
         return nil, err
     }
-    for _, line := range bytes.Split(out, []byte{'\n'}) {
+
+    lines := bytes.Split(out, []byte{'\n'})
+    pids := make([]PID, 0, len(lines))
+    for _, line := range lines {
         kv := bytes.SplitN(line, []byte{'='}, 2)
         if len(kv) != 2 {
             continue

@@ -441,17 +441,17 @@ func (p *Procstat) simpleSystemdUnitPIDs() ([]PID, error) {
 }

 func (p *Procstat) cgroupPIDs() []PidsTags {
-    var pidTags []PidsTags
-
     procsPath := p.CGroup
     if procsPath[0] != '/' {
         procsPath = "/sys/fs/cgroup/" + procsPath
     }

     items, err := filepath.Glob(procsPath)
     if err != nil {
-        pidTags = append(pidTags, PidsTags{nil, nil, fmt.Errorf("glob failed '%s'", err)})
-        return pidTags
+        return []PidsTags{{nil, nil, fmt.Errorf("glob failed '%s'", err)}}
     }

+    pidTags := make([]PidsTags, 0, len(items))
     for _, item := range items {
         pids, err := p.singleCgroupPIDs(item)
         tags := map[string]string{"cgroup": p.CGroup, "cgroup_full": item}

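The procstat change above combines two moves: the error path returns a slice literal directly instead of appending to a shared accumulator, and the success path preallocates from the length Glob returned. A rough standalone sketch of that restructuring (result and globTags are made-up names for illustration):

package main

import (
	"fmt"
	"path/filepath"
)

// result and globTags mirror the shape of the change: the error path returns
// a slice literal directly, and the success path preallocates using the
// length returned by Glob.
type result struct {
	path string
	err  error
}

func globTags(pattern string) []result {
	items, err := filepath.Glob(pattern)
	if err != nil {
		// No shared accumulator variable is needed just for this case.
		return []result{{err: fmt.Errorf("glob failed: %w", err)}}
	}

	results := make([]result, 0, len(items)) // capacity known from the glob
	for _, item := range items {
		results = append(results, result{path: item})
	}
	return results
}

func main() {
	fmt.Println(globTags("/tmp/*"))
}
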
@@ -462,8 +462,6 @@ func (p *Procstat) cgroupPIDs() []PidsTags {
 }

 func (p *Procstat) singleCgroupPIDs(path string) ([]PID, error) {
-    var pids []PID
-
     ok, err := isDir(path)
     if err != nil {
         return nil, err

@@ -476,7 +474,10 @@ func (p *Procstat) singleCgroupPIDs(path string) ([]PID, error) {
     if err != nil {
         return nil, err
     }
-    for _, pidBS := range bytes.Split(out, []byte{'\n'}) {
+
+    lines := bytes.Split(out, []byte{'\n'})
+    pids := make([]PID, 0, len(lines))
+    for _, pidBS := range lines {
         if len(pidBS) == 0 {
             continue
         }

@@ -27,7 +27,7 @@ func mockExecCommand(arg0 string, args ...string) *exec.Cmd {
     return cmd
 }
 func TestMockExecCommand(_ *testing.T) {
-    var cmd []string
+    var cmd []string //nolint:prealloc // Pre-allocated this slice would break the algorithm
     for _, arg := range os.Args {
         if arg == "--" {
             cmd = []string{}

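This is one of the places where the linter is silenced rather than fixed: cmd is deliberately re-initialized inside the loop once "--" is seen, so no capacity reserved before the loop would survive. A small sketch of why preallocation does not apply here (argsAfterDashDash is an illustrative stand-in for the test helper):

package main

import (
	"fmt"
	"os"
)

// argsAfterDashDash collects only the arguments that follow "--". The slice
// is reset the moment the separator shows up, so there is no sensible
// capacity to reserve up front and the linter warning is suppressed instead.
func argsAfterDashDash(args []string) []string {
	var cmd []string //nolint:prealloc // the slice is reset mid-loop; preallocating buys nothing
	for _, arg := range args {
		if arg == "--" {
			cmd = []string{}
			continue
		}
		if cmd == nil {
			continue
		}
		cmd = append(cmd, arg)
	}
	return cmd
}

func main() {
	fmt.Println(argsAfterDashDash([]string{"helper.test", "--", "echo", "hello"}))
	fmt.Println(argsAfterDashDash(os.Args))
}
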
@@ -587,8 +587,7 @@ func (m *Smart) getVendorNVMeAttributes(acc telegraf.Accumulator, devices []stri
 }

 func getDeviceInfoForNVMeDisks(acc telegraf.Accumulator, devices []string, nvme string, timeout config.Duration, useSudo bool) []nvmeDevice {
-    var nvmeDevices []nvmeDevice
-
+    nvmeDevices := make([]nvmeDevice, 0, len(devices))
     for _, device := range devices {
         newDevice, err := gatherNVMeDeviceInfo(nvme, device, timeout, useSudo)
         if err != nil {

@@ -25,7 +25,7 @@ func mockExecCommand(arg0 string, args ...string) *exec.Cmd {
 //
 // Idea based on https://github.com/golang/go/blob/7c31043/src/os/exec/exec_test.go#L568
 func TestMockExecCommand(_ *testing.T) {
-    var cmd []string
+    var cmd []string //nolint:prealloc // Pre-allocated this slice would break the algorithm
     for _, arg := range os.Args {
         if arg == "--" {
             cmd = []string{}

@@ -179,11 +179,11 @@ func (s *SQLServer) initQueries() error {
         }
     }

-    var querylist []string
+    queryList := make([]string, 0, len(queries))
     for query := range queries {
-        querylist = append(querylist, query)
+        queryList = append(queryList, query)
     }
-    s.Log.Infof("Config: Effective Queries: %#v\n", querylist)
+    s.Log.Infof("Config: Effective Queries: %#v\n", queryList)

     return nil
 }

@@ -133,10 +133,10 @@ func (t *TCPListener) Stop() {

     // Close all open TCP connections
     // - get all conns from the t.conns map and put into slice
-    // - this is so the forget() function doesnt conflict with looping
+    // - this is so the forget() function doesn't conflict with looping
     // over the t.conns map
-    var conns []*net.TCPConn
     t.cleanup.Lock()
+    conns := make([]*net.TCPConn, 0, len(t.conns))
     for _, conn := range t.conns {
         conns = append(conns, conn)
     }

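Note that in the listener the make call moves to after t.cleanup.Lock(), so len(t.conns) is read while the map is protected, and the copy gives a stable slice to iterate over while the map keeps changing elsewhere. A minimal sketch of that shape, using made-up registry and snapshot names:

package main

import (
	"fmt"
	"sync"
)

// registry holds a mutex-protected map; snapshot copies its values into a
// slice sized under the same lock, so later map mutations (e.g. by a
// forget-style callback) cannot race with iteration over the copy.
type registry struct {
	mu    sync.Mutex
	conns map[string]int
}

func (r *registry) snapshot() []int {
	r.mu.Lock()
	defer r.mu.Unlock()

	conns := make([]int, 0, len(r.conns)) // len(r.conns) is only stable while locked
	for _, c := range r.conns {
		conns = append(conns, c)
	}
	return conns
}

func main() {
	r := &registry{conns: map[string]int{"a": 1, "b": 2}}
	fmt.Println(r.snapshot())
}
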
@@ -103,7 +103,7 @@ func (*Varnish) SampleConfig() string {
 }

 func (s *Varnish) Init() error {
-    var customRegexps []*regexp.Regexp
+    customRegexps := make([]*regexp.Regexp, 0, len(s.Regexps))
     for _, re := range s.Regexps {
         compiled, err := regexp.Compile(re)
         if err != nil {

@@ -67,8 +67,6 @@ func (wg *Wireguard) Gather(acc telegraf.Accumulator) error {
 }

 func (wg *Wireguard) enumerateDevices() ([]*wgtypes.Device, error) {
-    var devices []*wgtypes.Device
-
     // If no device names are specified, defer to the library to enumerate
     // all of them
     if len(wg.Devices) == 0 {

@@ -76,6 +74,7 @@ func (wg *Wireguard) enumerateDevices() ([]*wgtypes.Device, error) {
     }

     // Otherwise, explicitly populate only device names specified in config
+    devices := make([]*wgtypes.Device, 0, len(wg.Devices))
     for _, name := range wg.Devices {
         dev, err := wg.client.Device(name)
         if err != nil {

@@ -277,7 +277,7 @@ func (a *AzureMonitor) Write(metrics []telegraf.Metric) error {
         return nil
     }

-    var body []byte
+    var body []byte //nolint:prealloc // There is no point in guessing the final capacity of this slice
     for _, m := range azmetrics {
         // Azure Monitor accepts new batches of points in new-line delimited
         // JSON, following RFC 4288 (see https://github.com/ndjson/ndjson-spec).

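The Azure Monitor Write case keeps var body []byte and silences the linter instead, since each serialized metric has a different encoded length and no useful capacity hint exists. A standalone sketch of the same trade-off (buildNDJSON is illustrative, not the plugin's code):

package main

import (
	"encoding/json"
	"fmt"
)

// buildNDJSON appends one JSON document plus a newline per record. Each
// document encodes to a different length, so there is no meaningful capacity
// to reserve up front; the nolint annotation documents that decision.
func buildNDJSON(records []map[string]any) ([]byte, error) {
	var body []byte //nolint:prealloc // final size depends on each encoded record
	for _, r := range records {
		jsonBytes, err := json.Marshal(r)
		if err != nil {
			return nil, err
		}
		body = append(body, jsonBytes...)
		body = append(body, '\n')
	}
	return body, nil
}

func main() {
	b, err := buildNDJSON([]map[string]any{
		{"metric": "cpu", "value": 0.5},
		{"metric": "mem"},
	})
	fmt.Print(string(b), err, "\n")
}
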
@@ -364,8 +364,8 @@ func hashIDWithTagKeysOnly(m telegraf.Metric) uint64 {
 }

 func translate(m telegraf.Metric, prefix string) (*azureMonitorMetric, error) {
-    var dimensionNames []string
-    var dimensionValues []string
+    dimensionNames := make([]string, 0, len(m.TagList()))
+    dimensionValues := make([]string, 0, len(m.TagList()))
     for _, tag := range m.TagList() {
         // Azure custom metrics service supports up to 10 dimensions
         if len(dimensionNames) >= 10 {

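The translate change also shows that the capacity passed to make is only an upper bound: the loop may stop after ten dimensions, and a shorter result is fine because cap reserves memory while len tracks what was actually appended. A small illustration (dimensionNames is a made-up helper):

package main

import (
	"fmt"
	"sort"
)

// dimensionNames reserves room for every tag but may append fewer entries;
// the unused capacity is harmless and the slice's length stays accurate.
func dimensionNames(tags map[string]string) []string {
	names := make([]string, 0, len(tags)) // upper bound, not an exact count
	for name := range tags {
		if len(names) >= 10 {
			break
		}
		names = append(names, name)
	}
	sort.Strings(names)
	return names
}

func main() {
	fmt.Println(dimensionNames(map[string]string{"host": "a", "region": "b"}))
}
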
@@ -109,8 +109,7 @@ func (e *EventHubs) SetSerializer(serializer serializers.Serializer) {
 }

 func (e *EventHubs) Write(metrics []telegraf.Metric) error {
-    var events []*eventhub.Event
-
+    events := make([]*eventhub.Event, 0, len(metrics))
     for _, metric := range metrics {
         payload, err := e.serializer.Serialize(metric)

@@ -327,7 +327,6 @@ func (*Graylog) SampleConfig() string {
 }

 func (g *Graylog) Connect() error {
-    var writers []io.Writer
     dialer := &net.Dialer{Timeout: time.Duration(g.Timeout)}

     if len(g.Servers) == 0 {

@@ -339,6 +338,7 @@ func (g *Graylog) Connect() error {
         return err
     }

+    writers := make([]io.Writer, 0, len(g.Servers))
     for _, server := range g.Servers {
         w := newGelfWriter(gelfConfig{Endpoint: server}, dialer, tlsCfg)
         err := w.Connect()

@@ -149,7 +149,7 @@ func (g *Groundwork) Write(metrics []telegraf.Metric) error {
         })
     }

-    var resources []transit.MonitoredResource
+    resources := make([]transit.MonitoredResource, 0, len(resourceToServicesMap))
     for resourceName, services := range resourceToServicesMap {
         resources = append(resources, transit.MonitoredResource{
             BaseResource: transit.BaseResource{

@@ -158,12 +158,12 @@ func (s *IoTDB) convertTimestampOfMetric(m telegraf.Metric) (int64, error) {

 // convert Metrics to Records with tags
 func (s *IoTDB) convertMetricsToRecordsWithTags(metrics []telegraf.Metric) (*recordsWithTags, error) {
-    var deviceidList []string
-    var measurementsList [][]string
-    var valuesList [][]interface{}
-    var dataTypesList [][]client.TSDataType
-    var timestampList []int64
-    var tagsList [][]*telegraf.Tag
+    timestampList := make([]int64, 0, len(metrics))
+    deviceidList := make([]string, 0, len(metrics))
+    measurementsList := make([][]string, 0, len(metrics))
+    valuesList := make([][]interface{}, 0, len(metrics))
+    dataTypesList := make([][]client.TSDataType, 0, len(metrics))
+    tagsList := make([][]*telegraf.Tag, 0, len(metrics))

     for _, metric := range metrics {
         // write `metric` to the output sink here

@@ -3,8 +3,9 @@ package loki
 import (
     "testing"

-    "github.com/influxdata/telegraf"
     "github.com/stretchr/testify/require"
+
+    "github.com/influxdata/telegraf"
 )

 type tuple struct {

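The loki test change only reorders imports into the grouping used across the project: standard library first, then third-party modules, then telegraf's own packages, each group separated by a blank line (gofmt sorts within groups but does not create them). A sketch of that layout; the package and test names below are illustrative:

// Package loki_example exists only to exercise the import grouping shown in
// the diff above: standard library, third-party, then influxdata/telegraf.
package loki_example

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/influxdata/telegraf"
)

func TestImportGrouping(t *testing.T) {
	var m telegraf.Metric // nil interface value, used only so the imports are referenced
	require.Nil(t, m)
}
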
@@ -13,8 +14,7 @@ type tuple struct {

 func generateLabelsAndTag(tt ...tuple) (map[string]string, []*telegraf.Tag) {
     labels := map[string]string{}
-    var tags []*telegraf.Tag
-
+    tags := make([]*telegraf.Tag, 0, len(tt))
     for _, t := range tt {
         labels[t.key] = t.value
         tags = append(tags, &telegraf.Tag{Key: t.key, Value: t.value})

@@ -179,7 +179,7 @@ func (tm *TableManager) EnsureStructure(

     // check that the missing columns are columns that can be added
     addColumns := make([]utils.Column, 0, len(missingCols))
-    var invalidColumns []utils.Column
+    invalidColumns := make([]utils.Column, 0, len(missingCols))
     for _, col := range missingCols {
         if tm.validateColumnName(col.Name) {
             addColumns = append(addColumns, col)

@@ -169,7 +169,7 @@ func (r *Riemann) tags(tags map[string]string) []string {
     }

     // otherwise add all values from telegraf tag key/value pairs
-    var keys []string
+    keys := make([]string, 0, len(tags))
     for key := range tags {
         keys = append(keys, key)
     }

@@ -141,7 +141,7 @@ func (p *SQL) deriveDatatype(value interface{}) string {
 }

 func (p *SQL) generateCreateTable(metric telegraf.Metric) string {
-    var columns []string
+    columns := make([]string, 0, len(metric.TagList())+len(metric.FieldList())+1)
     // ## {KEY_COLUMNS} is a comma-separated list of key columns (timestamp and tags)
     //var pk []string

@@ -171,7 +171,8 @@ func (p *SQL) generateCreateTable(metric telegraf.Metric) string {
 }

 func (p *SQL) generateInsert(tablename string, columns []string) string {
-    var placeholders, quotedColumns []string
+    placeholders := make([]string, 0, len(columns))
+    quotedColumns := make([]string, 0, len(columns))
     for _, column := range columns {
         quotedColumns = append(quotedColumns, quoteIdent(column))
     }

@@ -7,10 +7,10 @@ import (
     "sync"
     "time"

-    "github.com/influxdata/telegraf"
-
     monpb "google.golang.org/genproto/googleapis/monitoring/v3"
     tspb "google.golang.org/protobuf/types/known/timestamppb"
+
+    "github.com/influxdata/telegraf"
 )

 type counterCache struct {

@@ -87,7 +87,7 @@ func NewCounterCacheEntry(value *monpb.TypedValue, ts *tspb.Timestamp) *counterC

 func GetCounterCacheKey(m telegraf.Metric, f *telegraf.Field) string {
     // normalize tag list to form a predictable key
-    var tags []string
+    tags := make([]string, 0, len(m.TagList()))
     for _, t := range m.TagList() {
         tags = append(tags, strings.Join([]string{t.Key, t.Value}, "="))
     }

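To see the effect the prealloc linter is chasing, a pair of benchmarks like the following (illustrative, not part of this commit; place them in a _test.go file and run go test -bench=.) contrasts growing a nil slice with appending into reserved capacity; the exact gap depends on element size and count:

package prealloc

import "testing"

// BenchmarkAppendGrow grows a nil slice, triggering repeated grow-and-copy
// steps as the backing array is reallocated.
func BenchmarkAppendGrow(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var s []int
		for j := 0; j < 1024; j++ {
			s = append(s, j)
		}
		_ = s
	}
}

// BenchmarkAppendPrealloc reserves the final capacity once, so every append
// writes into memory that is already allocated.
func BenchmarkAppendPrealloc(b *testing.B) {
	for i := 0; i < b.N; i++ {
		s := make([]int, 0, 1024)
		for j := 0; j < 1024; j++ {
			s = append(s, j)
		}
		_ = s
	}
}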