chore: fix linter findings for makezero (part3) (#12371)
parent 6fb840085d
commit c6663aca4f
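Every hunk below applies the same fix for the makezero linter: a slice that was allocated with a non-zero length and then filled by index is now allocated with zero length plus a pre-sized capacity and built with append. The following is a minimal sketch of that pattern, not taken from the Telegraf sources; the items slice and the format string are illustrative only.

package main

import "fmt"

func main() {
	items := []string{"a", "b", "c"}

	// Old pattern (what makezero flags): the slice starts with len(items)
	// zero values and is filled by index.
	//   formatted := make([]string, len(items))
	//   for i, v := range items {
	//   	formatted[i] = fmt.Sprintf("item=%s", v)
	//   }
	// Appending to a slice declared like that would grow it past the
	// pre-filled zero values, which is the mistake the linter guards against.

	// New pattern used throughout this commit: zero length, capacity
	// reserved up front, elements added with append only.
	formatted := make([]string, 0, len(items))
	for _, v := range items {
		formatted = append(formatted, fmt.Sprintf("item=%s", v))
	}

	fmt.Println(formatted) // [item=a item=b item=c]
}

Both forms of the sketch produce the same slice and the same single allocation; only the way elements are written changes.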
@@ -245,17 +245,20 @@ func (s *Subscription) buildSubscription() (*gnmiLib.Subscription, error) {
 // Create a new gNMI SubscribeRequest
 func (c *GNMI) newSubscribeRequest() (*gnmiLib.SubscribeRequest, error) {
 	// Create subscription objects
-	var err error
-	subscriptions := make([]*gnmiLib.Subscription, len(c.Subscriptions)+len(c.TagSubscriptions))
-	for i, subscription := range c.TagSubscriptions {
-		if subscriptions[i], err = subscription.buildSubscription(); err != nil {
+	subscriptions := make([]*gnmiLib.Subscription, 0, len(c.Subscriptions)+len(c.TagSubscriptions))
+	for _, subscription := range c.TagSubscriptions {
+		sub, err := subscription.buildSubscription()
+		if err != nil {
 			return nil, err
 		}
+		subscriptions = append(subscriptions, sub)
 	}
-	for i, subscription := range c.Subscriptions {
-		if subscriptions[i+len(c.TagSubscriptions)], err = subscription.buildSubscription(); err != nil {
+	for _, subscription := range c.Subscriptions {
+		sub, err := subscription.buildSubscription()
+		if err != nil {
 			return nil, err
 		}
+		subscriptions = append(subscriptions, sub)
 	}
 
 	// Construct subscribe request

@@ -83,23 +83,23 @@ func BenchmarkInfluxDBListener_serveWrite(b *testing.B) {
 }
 
 func lines(lines, numTags, numFields int) string {
-	lp := make([]string, lines)
+	lp := make([]string, 0, lines)
 	for i := 0; i < lines; i++ {
-		tags := make([]string, numTags)
+		tags := make([]string, 0, numTags)
 		for j := 0; j < numTags; j++ {
-			tags[j] = fmt.Sprintf("t%d=v%d", j, j)
+			tags = append(tags, fmt.Sprintf("t%d=v%d", j, j))
 		}
 
-		fields := make([]string, numFields)
+		fields := make([]string, 0, numFields)
 		for k := 0; k < numFields; k++ {
-			fields[k] = fmt.Sprintf("f%d=%d", k, k)
+			fields = append(fields, fmt.Sprintf("f%d=%d", k, k))
 		}
 
-		lp[i] = fmt.Sprintf("m%d,%s %s",
+		lp = append(lp, fmt.Sprintf("m%d,%s %s",
 			i,
 			strings.Join(tags, ","),
 			strings.Join(fields, ","),
-		)
+		))
 	}
 
 	return strings.Join(lp, "\n")

@@ -83,23 +83,23 @@ func BenchmarkInfluxDBV2Listener_serveWrite(b *testing.B) {
 }
 
 func lines(lines, numTags, numFields int) string {
-	lp := make([]string, lines)
+	lp := make([]string, 0, lines)
 	for i := 0; i < lines; i++ {
-		tags := make([]string, numTags)
+		tags := make([]string, 0, numTags)
 		for j := 0; j < numTags; j++ {
-			tags[j] = fmt.Sprintf("t%d=v%d", j, j)
+			tags = append(tags, fmt.Sprintf("t%d=v%d", j, j))
 		}
 
-		fields := make([]string, numFields)
+		fields := make([]string, 0, numFields)
 		for k := 0; k < numFields; k++ {
-			fields[k] = fmt.Sprintf("f%d=%d", k, k)
+			fields = append(fields, fmt.Sprintf("f%d=%d", k, k))
 		}
 
-		lp[i] = fmt.Sprintf("m%d,%s %s",
+		lp = append(lp, fmt.Sprintf("m%d,%s %s",
 			i,
 			strings.Join(tags, ","),
 			strings.Join(fields, ","),
-		)
+		))
 	}
 
 	return strings.Join(lp, "\n")

@@ -606,12 +606,14 @@ func (m *Mysql) gatherSlaveStatuses(db *sql.DB, serv string, acc telegraf.Accumu
 	if err != nil {
 		return err
 	}
-	vals := make([]sql.RawBytes, len(cols))
-	valPtrs := make([]interface{}, len(cols))
+
+	vals := make([]sql.RawBytes, 0, len(cols))
+	valPtrs := make([]interface{}, 0, len(cols))
 	// fill the array with sql.Rawbytes
-	for i := range vals {
-		vals[i] = sql.RawBytes{}
-		valPtrs[i] = &vals[i]
+	for range cols {
+		rawBytes := sql.RawBytes{}
+		vals = append(vals, rawBytes)
+		valPtrs = append(valPtrs, &rawBytes)
 	}
 	if err = rows.Scan(valPtrs...); err != nil {
 		return err

@@ -57,10 +57,10 @@ func (o *ReadClient) Connect() error {
 		return err
 	}
 
-	readValueIds := make([]*ua.ReadValueID, len(o.NodeIDs))
+	readValueIds := make([]*ua.ReadValueID, 0, len(o.NodeIDs))
 	if o.Workarounds.UseUnregisteredReads {
-		for i, nid := range o.NodeIDs {
-			readValueIds[i] = &ua.ReadValueID{NodeID: nid}
+		for _, nid := range o.NodeIDs {
+			readValueIds = append(readValueIds, &ua.ReadValueID{NodeID: nid})
 		}
 	} else {
 		regResp, err := o.Client.RegisterNodes(&ua.RegisterNodesRequest{
@@ -70,8 +70,8 @@ func (o *ReadClient) Connect() error {
 			return fmt.Errorf("registerNodes failed: %v", err)
 		}
 
-		for i, v := range regResp.RegisteredNodeIDs {
-			readValueIds[i] = &ua.ReadValueID{NodeID: v}
+		for _, v := range regResp.RegisteredNodeIDs {
+			readValueIds = append(readValueIds, &ua.ReadValueID{NodeID: v})
 		}
 	}
 

@@ -87,9 +87,9 @@ func (pg *NativeFinder) FastProcessList() ([]*process.Process, error) {
 		return nil, err
 	}
 
-	result := make([]*process.Process, len(pids))
-	for i, pid := range pids {
-		result[i] = &process.Process{Pid: pid}
+	result := make([]*process.Process, 0, len(pids))
+	for _, pid := range pids {
+		result = append(result, &process.Process{Pid: pid})
 	}
 	return result, nil
 }

@@ -16,6 +16,7 @@ import (
 	"time"
 
 	"github.com/go-redis/redis/v8"
+
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/plugins/common/tls"
 	"github.com/influxdata/telegraf/plugins/inputs"
@@ -228,9 +229,8 @@ func (r *Redis) connect() error {
 		r.Servers = []string{"tcp://localhost:6379"}
 	}
 
-	r.clients = make([]Client, len(r.Servers))
-
-	for i, serv := range r.Servers {
+	r.clients = make([]Client, 0, len(r.Servers))
+	for _, serv := range r.Servers {
 		if !strings.HasPrefix(serv, "tcp://") && !strings.HasPrefix(serv, "unix://") {
 			r.Log.Warn("Server URL found without scheme; please update your configuration file")
 			serv = "tcp://" + serv
@@ -288,10 +288,10 @@ func (r *Redis) connect() error {
 			tags["port"] = u.Port()
 		}
 
-		r.clients[i] = &RedisClient{
+		r.clients = append(r.clients, &RedisClient{
 			client: client,
 			tags:   tags,
-		}
+		})
 	}
 
 	r.connected = true

@@ -53,14 +53,13 @@ func (r *RedisSentinel) Init() error {
 		r.Servers = []string{"tcp://localhost:26379"}
 	}
 
-	r.clients = make([]*RedisSentinelClient, len(r.Servers))
-
 	tlsConfig, err := r.ClientConfig.TLSConfig()
 	if err != nil {
 		return err
 	}
 
-	for i, serv := range r.Servers {
+	r.clients = make([]*RedisSentinelClient, 0, len(r.Servers))
+	for _, serv := range r.Servers {
 		u, err := url.Parse(serv)
 		if err != nil {
 			return fmt.Errorf("unable to parse to address %q: %v", serv, err)
@@ -96,10 +95,10 @@ func (r *RedisSentinel) Init() error {
 			},
 		)
 
-		r.clients[i] = &RedisSentinelClient{
+		r.clients = append(r.clients, &RedisSentinelClient{
 			sentinel: sentinel,
 			tags:     tags,
-		}
+		})
 	}
 
 	return nil

@@ -18,12 +18,11 @@ import (
 
 func pwgen(n int) string {
 	charset := []byte("abcdedfghijklmnopqrstABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
 
 	nchars := len(charset)
-	buffer := make([]byte, n)
-
-	for i := range buffer {
-		buffer[i] = charset[rand.Intn(nchars)]
+	buffer := make([]byte, 0, n)
+	for i := 0; i < n; i++ {
+		buffer = append(buffer, charset[rand.Intn(nchars)])
 	}
 
 	return string(buffer)

@@ -270,15 +270,15 @@ func (s *Stackdriver) newListTimeSeriesFilter(metricType string) string {
 
 	var valueFmt string
 	if len(s.Filter.ResourceLabels) > 0 {
-		resourceLabelsFilter := make([]string, len(s.Filter.ResourceLabels))
-		for i, resourceLabel := range s.Filter.ResourceLabels {
+		resourceLabelsFilter := make([]string, 0, len(s.Filter.ResourceLabels))
+		for _, resourceLabel := range s.Filter.ResourceLabels {
 			// check if resource label value contains function
 			if includeExcludeHelper(resourceLabel.Value, functions, nil) {
 				valueFmt = `resource.labels.%s = %s`
 			} else {
 				valueFmt = `resource.labels.%s = "%s"`
 			}
-			resourceLabelsFilter[i] = fmt.Sprintf(valueFmt, resourceLabel.Key, resourceLabel.Value)
+			resourceLabelsFilter = append(resourceLabelsFilter, fmt.Sprintf(valueFmt, resourceLabel.Key, resourceLabel.Value))
 		}
 		if len(resourceLabelsFilter) == 1 {
 			filterString += fmt.Sprintf(" AND %s", resourceLabelsFilter[0])
@@ -288,15 +288,15 @@ func (s *Stackdriver) newListTimeSeriesFilter(metricType string) string {
 	}
 
 	if len(s.Filter.MetricLabels) > 0 {
-		metricLabelsFilter := make([]string, len(s.Filter.MetricLabels))
-		for i, metricLabel := range s.Filter.MetricLabels {
+		metricLabelsFilter := make([]string, 0, len(s.Filter.MetricLabels))
+		for _, metricLabel := range s.Filter.MetricLabels {
 			// check if metric label value contains function
 			if includeExcludeHelper(metricLabel.Value, functions, nil) {
 				valueFmt = `metric.labels.%s = %s`
 			} else {
 				valueFmt = `metric.labels.%s = "%s"`
 			}
-			metricLabelsFilter[i] = fmt.Sprintf(valueFmt, metricLabel.Key, metricLabel.Value)
+			metricLabelsFilter = append(metricLabelsFilter, fmt.Sprintf(valueFmt, metricLabel.Key, metricLabel.Value))
 		}
 		if len(metricLabelsFilter) == 1 {
 			filterString += fmt.Sprintf(" AND %s", metricLabelsFilter[0])
@@ -429,9 +429,9 @@ func (s *Stackdriver) newListMetricDescriptorsFilters() []string {
 		return nil
 	}
 
-	metricTypeFilters := make([]string, len(s.MetricTypePrefixInclude))
-	for i, metricTypePrefix := range s.MetricTypePrefixInclude {
-		metricTypeFilters[i] = fmt.Sprintf(`metric.type = starts_with(%q)`, metricTypePrefix)
+	metricTypeFilters := make([]string, 0, len(s.MetricTypePrefixInclude))
+	for _, metricTypePrefix := range s.MetricTypePrefixInclude {
+		metricTypeFilters = append(metricTypeFilters, fmt.Sprintf(`metric.type = starts_with(%q)`, metricTypePrefix))
 	}
 	return metricTypeFilters
 }

@@ -551,11 +551,9 @@ func (e *Endpoint) simpleMetadataSelect(ctx context.Context, client *Client, res
 func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, objects objectMap) {
 	// We're only going to get metadata from maxMetadataSamples resources. If we have
 	// more resources than that, we pick maxMetadataSamples samples at random.
-	sampledObjects := make([]*objectRef, len(objects))
-	i := 0
+	sampledObjects := make([]*objectRef, 0, len(objects))
 	for _, obj := range objects {
-		sampledObjects[i] = obj
-		i++
+		sampledObjects = append(sampledObjects, obj)
 	}
 	n := len(sampledObjects)
 	if n > maxMetadataSamples {

@@ -91,8 +91,8 @@ func (v *VSphere) Start(_ telegraf.Accumulator) error {
 	v.cancel = cancel
 
 	// Create endpoints, one for each vCenter we're monitoring
-	v.endpoints = make([]*Endpoint, len(v.Vcenters))
-	for i, rawURL := range v.Vcenters {
+	v.endpoints = make([]*Endpoint, 0, len(v.Vcenters))
+	for _, rawURL := range v.Vcenters {
 		u, err := soap.ParseURL(rawURL)
 		if err != nil {
 			return err
@@ -101,7 +101,7 @@ func (v *VSphere) Start(_ telegraf.Accumulator) error {
 		if err != nil {
 			return err
 		}
-		v.endpoints[i] = ep
+		v.endpoints = append(v.endpoints, ep)
 	}
 	return nil
 }

@@ -164,14 +164,14 @@ func createSim(folders int) (*simulator.Model, *simulator.Server, error) {
 
 func testAlignUniform(t *testing.T, n int) {
 	now := time.Now().Truncate(60 * time.Second)
-	info := make([]types.PerfSampleInfo, n)
-	values := make([]int64, n)
+	info := make([]types.PerfSampleInfo, 0, n)
+	values := make([]int64, 0, n)
 	for i := 0; i < n; i++ {
-		info[i] = types.PerfSampleInfo{
+		info = append(info, types.PerfSampleInfo{
 			Timestamp: now.Add(time.Duration(20*i) * time.Second),
 			Interval:  20,
-		}
-		values[i] = 1
+		})
+		values = append(values, 1)
 	}
 	e := Endpoint{log: testutil.Logger{}}
 	newInfo, newValues := e.alignSamples(info, values, 60*time.Second)
@@ -190,14 +190,14 @@ func TestAlignMetrics(t *testing.T) {
 	// 20s to 60s of 1,2,3,1,2,3... (should average to 2)
 	n := 30
 	now := time.Now().Truncate(60 * time.Second)
-	info := make([]types.PerfSampleInfo, n)
-	values := make([]int64, n)
+	info := make([]types.PerfSampleInfo, 0, n)
+	values := make([]int64, 0, n)
 	for i := 0; i < n; i++ {
-		info[i] = types.PerfSampleInfo{
+		info = append(info, types.PerfSampleInfo{
 			Timestamp: now.Add(time.Duration(20*i) * time.Second),
 			Interval:  20,
-		}
-		values[i] = int64(i%3 + 1)
+		})
+		values = append(values, int64(i%3+1))
 	}
 	e := Endpoint{log: testutil.Logger{}}
 	newInfo, newValues := e.alignSamples(info, values, 60*time.Second)

@@ -105,14 +105,14 @@ func NewTrace(spans []Span) (trace.Trace, error) {
 
 // NewAnnotations converts a slice of Annotation into a slice of new Annotations
 func NewAnnotations(annotations []Annotation, endpoint Endpoint) []trace.Annotation {
-	formatted := make([]trace.Annotation, len(annotations))
-	for i, annotation := range annotations {
-		formatted[i] = trace.Annotation{
+	formatted := make([]trace.Annotation, 0, len(annotations))
+	for _, annotation := range annotations {
+		formatted = append(formatted, trace.Annotation{
 			Host:        endpoint.Host(),
 			ServiceName: endpoint.Name(),
 			Timestamp:   annotation.Timestamp(),
 			Value:       annotation.Value(),
-		}
+		})
 	}
 
 	return formatted
@@ -121,14 +121,14 @@ func NewAnnotations(annotations []Annotation, endpoint Endpoint) []trace.Annotat
 // NewBinaryAnnotations is very similar to NewAnnotations, but it
 // converts BinaryAnnotations instead of the normal Annotation
 func NewBinaryAnnotations(annotations []BinaryAnnotation, endpoint Endpoint) []trace.BinaryAnnotation {
-	formatted := make([]trace.BinaryAnnotation, len(annotations))
-	for i, annotation := range annotations {
-		formatted[i] = trace.BinaryAnnotation{
+	formatted := make([]trace.BinaryAnnotation, 0, len(annotations))
+	for _, annotation := range annotations {
+		formatted = append(formatted, trace.BinaryAnnotation{
 			Host:        endpoint.Host(),
 			ServiceName: endpoint.Name(),
 			Key:         annotation.Key(),
 			Value:       annotation.Value(),
-		}
+		})
 	}
 	return formatted
 }

@@ -21,12 +21,12 @@ func (j *JSON) Decode(octets []byte) ([]codec.Span, error) {
 		return nil, err
 	}
 
-	res := make([]codec.Span, len(spans))
+	res := make([]codec.Span, 0, len(spans))
 	for i := range spans {
 		if err := spans[i].Validate(); err != nil {
 			return nil, err
 		}
-		res[i] = &spans[i]
+		res = append(res, &spans[i])
 	}
 	return res, nil
 }
@@ -89,23 +89,23 @@ func (s *span) Name() string {
 }
 
 func (s *span) Annotations() []codec.Annotation {
-	res := make([]codec.Annotation, len(s.Anno))
+	res := make([]codec.Annotation, 0, len(s.Anno))
 	for i := range s.Anno {
-		res[i] = &s.Anno[i]
+		res = append(res, &s.Anno[i])
 	}
 	return res
 }
 
 func (s *span) BinaryAnnotations() ([]codec.BinaryAnnotation, error) {
-	res := make([]codec.BinaryAnnotation, len(s.BAnno))
+	res := make([]codec.BinaryAnnotation, 0, len(s.BAnno))
 	for i, a := range s.BAnno {
 		if a.Key() != "" && a.Value() == "" {
-			return nil, fmt.Errorf("No value for key %s at binaryAnnotations[%d]", a.K, i)
+			return nil, fmt.Errorf("no value for key %s at binaryAnnotations[%d]", a.K, i)
 		}
 		if a.Value() != "" && a.Key() == "" {
-			return nil, fmt.Errorf("No key at binaryAnnotations[%d]", i)
+			return nil, fmt.Errorf("no key at binaryAnnotations[%d]", i)
 		}
-		res[i] = &s.BAnno[i]
+		res = append(res, &s.BAnno[i])
 	}
 	return res, nil
 }