chore: Fix linter findings for `revive:enforce-map-style` in `plugins/inputs/[n-z]*` (#16072)

Paweł Żak 2024-10-25 12:54:05 +02:00 committed by GitHub
parent f8999c716b
commit 22b153ac65
Signature: GPG key ID B5690EEEBB952194 (no known key found for this signature in database)
36 changed files with 106 additions and 105 deletions
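For context, revive's `enforce-map-style` rule flags empty map literals such as `map[string]string{}` and asks for `make(map[string]string)` instead (presumably the rule is configured to require the `make` style, given the direction of every change below); non-empty literals with fixed keys are left untouched, and many of the hunks also add a capacity hint when the final size is known. A minimal sketch of the pattern, using made-up identifiers rather than code from any of the plugins:

package main

import "fmt"

func main() {
	base := map[string]string{"region": "eu", "host": "node-1"}

	// Flagged by enforce-map-style when set to "make":
	//   tags := map[string]string{}
	// Preferred form, with a capacity hint because the final size is known here:
	tags := make(map[string]string, len(base)+1)
	for k, v := range base {
		tags[k] = v
	}
	tags["source"] = "example"

	fmt.Println(tags)
}

The capacity hint only pre-sizes the map's backing storage and does not cap it, so slightly over-estimating, as the `len(tags)+1` and `len(upstreamTags)+2` hints below do, is harmless.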

View File

@ -63,7 +63,7 @@ func (n *NetIOStats) Gather(acc telegraf.Accumulator) error {
if err != nil {
return fmt.Errorf("error getting list of interfaces: %w", err)
}
interfacesByName := map[string]net.Interface{}
interfacesByName := make(map[string]net.Interface, len(interfaces))
for _, iface := range interfaces {
interfacesByName[iface.Name] = iface
}

View File

@ -31,7 +31,7 @@ func (ns *NetStats) Gather(acc telegraf.Accumulator) error {
counts["UDP"] = 0
// TODO: add family to tags or else
tags := map[string]string{}
tags := make(map[string]string)
for _, netcon := range netconns {
if netcon.Type == syscall.SOCK_DGRAM {
counts["UDP"]++

View File

@ -345,7 +345,7 @@ func (s *status) gatherRequestMetrics(tags map[string]string, acc telegraf.Accum
func (s *status) gatherZoneMetrics(tags map[string]string, acc telegraf.Accumulator) {
for zoneName, zone := range s.ServerZones {
zoneTags := map[string]string{}
zoneTags := make(map[string]string, len(tags)+1)
for k, v := range tags {
zoneTags[k] = v
}
@ -377,7 +377,7 @@ func (s *status) gatherZoneMetrics(tags map[string]string, acc telegraf.Accumula
func (s *status) gatherUpstreamMetrics(tags map[string]string, acc telegraf.Accumulator) {
for upstreamName, upstream := range s.Upstreams {
upstreamTags := map[string]string{}
upstreamTags := make(map[string]string, len(tags)+1)
for k, v := range tags {
upstreamTags[k] = v
}
@ -438,7 +438,7 @@ func (s *status) gatherUpstreamMetrics(tags map[string]string, acc telegraf.Accu
if peer.MaxConns != nil {
peerFields["max_conns"] = *peer.MaxConns
}
peerTags := map[string]string{}
peerTags := make(map[string]string, len(upstreamTags)+2)
for k, v := range upstreamTags {
peerTags[k] = v
}
@ -453,7 +453,7 @@ func (s *status) gatherUpstreamMetrics(tags map[string]string, acc telegraf.Accu
func (s *status) gatherCacheMetrics(tags map[string]string, acc telegraf.Accumulator) {
for cacheName, cache := range s.Caches {
cacheTags := map[string]string{}
cacheTags := make(map[string]string, len(tags)+1)
for k, v := range tags {
cacheTags[k] = v
}
@ -492,7 +492,7 @@ func (s *status) gatherCacheMetrics(tags map[string]string, acc telegraf.Accumul
func (s *status) gatherStreamMetrics(tags map[string]string, acc telegraf.Accumulator) {
for zoneName, zone := range s.Stream.ServerZones {
zoneTags := map[string]string{}
zoneTags := make(map[string]string, len(tags)+1)
for k, v := range tags {
zoneTags[k] = v
}
@ -509,7 +509,7 @@ func (s *status) gatherStreamMetrics(tags map[string]string, acc telegraf.Accumu
)
}
for upstreamName, upstream := range s.Stream.Upstreams {
upstreamTags := map[string]string{}
upstreamTags := make(map[string]string, len(tags)+1)
for k, v := range tags {
upstreamTags[k] = v
}
@ -551,7 +551,7 @@ func (s *status) gatherStreamMetrics(tags map[string]string, acc telegraf.Accumu
if peer.ResponseTime != nil {
peerFields["response_time"] = *peer.ResponseTime
}
peerTags := map[string]string{}
peerTags := make(map[string]string, len(upstreamTags)+2)
for k, v := range upstreamTags {
peerTags[k] = v
}

View File

@ -149,7 +149,7 @@ func (n *NginxPlusAPI) gatherSlabsMetrics(addr *url.URL, acc telegraf.Accumulato
tags := getTags(addr)
for zoneName, slab := range slabs {
slabTags := map[string]string{}
slabTags := make(map[string]string, len(tags)+1)
for k, v := range tags {
slabTags[k] = v
}
@ -165,7 +165,7 @@ func (n *NginxPlusAPI) gatherSlabsMetrics(addr *url.URL, acc telegraf.Accumulato
)
for slotID, slot := range slab.Slots {
slotTags := map[string]string{}
slotTags := make(map[string]string, len(slabTags)+1)
for k, v := range slabTags {
slotTags[k] = v
}
@ -249,9 +249,8 @@ func (n *NginxPlusAPI) gatherHTTPServerZonesMetrics(addr *url.URL, acc telegraf.
}
tags := getTags(addr)
for zoneName, zone := range httpServerZones {
zoneTags := map[string]string{}
zoneTags := make(map[string]string, len(tags)+1)
for k, v := range tags {
zoneTags[k] = v
}
@ -299,7 +298,7 @@ func (n *NginxPlusAPI) gatherHTTPLocationZonesMetrics(addr *url.URL, acc telegra
tags := getTags(addr)
for zoneName, zone := range httpLocationZones {
zoneTags := map[string]string{}
zoneTags := make(map[string]string, len(tags)+1)
for k, v := range tags {
zoneTags[k] = v
}
@ -345,7 +344,7 @@ func (n *NginxPlusAPI) gatherHTTPUpstreamsMetrics(addr *url.URL, acc telegraf.Ac
tags := getTags(addr)
for upstreamName, upstream := range httpUpstreams {
upstreamTags := map[string]string{}
upstreamTags := make(map[string]string, len(tags)+1)
for k, v := range tags {
upstreamTags[k] = v
}
@ -400,7 +399,7 @@ func (n *NginxPlusAPI) gatherHTTPUpstreamsMetrics(addr *url.URL, acc telegraf.Ac
if peer.MaxConns != nil {
peerFields["max_conns"] = *peer.MaxConns
}
peerTags := map[string]string{}
peerTags := make(map[string]string, len(upstreamTags)+2)
for k, v := range upstreamTags {
peerTags[k] = v
}
@ -429,7 +428,7 @@ func (n *NginxPlusAPI) gatherHTTPCachesMetrics(addr *url.URL, acc telegraf.Accum
tags := getTags(addr)
for cacheName, cache := range httpCaches {
cacheTags := map[string]string{}
cacheTags := make(map[string]string, len(tags)+1)
for k, v := range tags {
cacheTags[k] = v
}
@ -483,7 +482,7 @@ func (n *NginxPlusAPI) gatherStreamServerZonesMetrics(addr *url.URL, acc telegra
tags := getTags(addr)
for zoneName, zone := range streamServerZones {
zoneTags := map[string]string{}
zoneTags := make(map[string]string, len(tags)+1)
for k, v := range tags {
zoneTags[k] = v
}
@ -519,7 +518,7 @@ func (n *NginxPlusAPI) gatherResolverZonesMetrics(addr *url.URL, acc telegraf.Ac
tags := getTags(addr)
for zoneName, resolver := range resolverZones {
zoneTags := map[string]string{}
zoneTags := make(map[string]string, len(tags)+1)
for k, v := range tags {
zoneTags[k] = v
}
@ -562,7 +561,7 @@ func (n *NginxPlusAPI) gatherStreamUpstreamsMetrics(addr *url.URL, acc telegraf.
tags := getTags(addr)
for upstreamName, upstream := range streamUpstreams {
upstreamTags := map[string]string{}
upstreamTags := make(map[string]string, len(tags)+1)
for k, v := range tags {
upstreamTags[k] = v
}
@ -602,7 +601,7 @@ func (n *NginxPlusAPI) gatherStreamUpstreamsMetrics(addr *url.URL, acc telegraf.
if peer.ResponseTime != nil {
peerFields["response_time"] = *peer.ResponseTime
}
peerTags := map[string]string{}
peerTags := make(map[string]string, len(upstreamTags)+2)
for k, v := range upstreamTags {
peerTags[k] = v
}
@ -632,7 +631,7 @@ func (n *NginxPlusAPI) gatherHTTPLimitReqsMetrics(addr *url.URL, acc telegraf.Ac
tags := getTags(addr)
for limitReqName, limit := range httpLimitReqs {
limitReqsTags := map[string]string{}
limitReqsTags := make(map[string]string, len(tags)+1)
for k, v := range tags {
limitReqsTags[k] = v
}

View File

@ -182,7 +182,7 @@ func gatherStatusURL(r *bufio.Reader, tags map[string]string, acc telegraf.Accum
}, tags)
for zoneName, zone := range status.StreamServerZones {
zoneTags := map[string]string{}
zoneTags := make(map[string]string, len(tags)+1)
for k, v := range tags {
zoneTags[k] = v
}
@ -205,7 +205,7 @@ func gatherStatusURL(r *bufio.Reader, tags map[string]string, acc telegraf.Accum
for filterName, filters := range status.StreamFilterZones {
for filterKey, upstream := range filters {
filterTags := map[string]string{}
filterTags := make(map[string]string, len(tags)+2)
for k, v := range tags {
filterTags[k] = v
}
@ -230,7 +230,7 @@ func gatherStatusURL(r *bufio.Reader, tags map[string]string, acc telegraf.Accum
for upstreamName, upstreams := range status.StreamUpstreamZones {
for _, upstream := range upstreams {
upstreamServerTags := map[string]string{}
upstreamServerTags := make(map[string]string, len(tags)+2)
for k, v := range tags {
upstreamServerTags[k] = v
}

View File

@ -200,7 +200,7 @@ func gatherStatusURL(r *bufio.Reader, tags map[string]string, acc telegraf.Accum
}, tags)
for zoneName, zone := range status.ServerZones {
zoneTags := map[string]string{}
zoneTags := make(map[string]string, len(tags)+1)
for k, v := range tags {
zoneTags[k] = v
}
@ -231,7 +231,7 @@ func gatherStatusURL(r *bufio.Reader, tags map[string]string, acc telegraf.Accum
for filterName, filters := range status.FilterZones {
for filterKey, upstream := range filters {
filterTags := map[string]string{}
filterTags := make(map[string]string, len(tags)+2)
for k, v := range tags {
filterTags[k] = v
}
@ -264,7 +264,7 @@ func gatherStatusURL(r *bufio.Reader, tags map[string]string, acc telegraf.Accum
for upstreamName, upstreams := range status.UpstreamZones {
for _, upstream := range upstreams {
upstreamServerTags := map[string]string{}
upstreamServerTags := make(map[string]string, len(tags)+2)
for k, v := range tags {
upstreamServerTags[k] = v
}
@ -293,7 +293,7 @@ func gatherStatusURL(r *bufio.Reader, tags map[string]string, acc telegraf.Accum
}
for zoneName, zone := range status.CacheZones {
zoneTags := map[string]string{}
zoneTags := make(map[string]string, len(tags)+1)
for k, v := range tags {
zoneTags[k] = v
}

View File

@ -117,7 +117,7 @@ func (ns *Nstat) loadPaths() {
// loadGoodTable can be used to parse string heap that
// headers and values are arranged in right order
func (ns *Nstat) loadGoodTable(table []byte) map[string]interface{} {
entries := map[string]interface{}{}
entries := make(map[string]interface{})
fields := bytes.Fields(table)
var value int64
var err error
@ -145,7 +145,7 @@ func (ns *Nstat) loadGoodTable(table []byte) map[string]interface{} {
// loadUglyTable can be used to parse string heap that
// the headers and values are split with a newline
func (ns *Nstat) loadUglyTable(table []byte) map[string]interface{} {
entries := map[string]interface{}{}
entries := make(map[string]interface{})
// split the lines by newline
lines := bytes.Split(table, newLineByte)
var value int64

View File

@ -20,7 +20,7 @@ func Parse(acc telegraf.Accumulator, buf []byte) error {
tags := map[string]string{
"index": strconv.Itoa(i),
}
fields := map[string]interface{}{}
fields := make(map[string]interface{}, 39)
common.SetTagIfUsed(tags, "pstate", gpu.PState)
common.SetTagIfUsed(tags, "name", gpu.ProdName)

View File

@ -28,7 +28,7 @@ func Parse(acc telegraf.Accumulator, buf []byte) error {
tags := map[string]string{
"index": strconv.Itoa(i),
}
fields := map[string]interface{}{}
fields := make(map[string]interface{}, 44)
common.SetTagIfUsed(tags, "pstate", gpu.PerformanceState)
common.SetTagIfUsed(tags, "name", gpu.ProductName)
@ -83,7 +83,7 @@ func Parse(acc telegraf.Accumulator, buf []byte) error {
acc.AddFields("nvidia_smi", fields, tags, timestamp)
for _, device := range gpu.MigDevices.MigDevice {
tags := map[string]string{}
tags := make(map[string]string, 8)
common.SetTagIfUsed(tags, "index", device.Index)
common.SetTagIfUsed(tags, "gpu_index", device.GpuInstanceID)
common.SetTagIfUsed(tags, "compute_index", device.ComputeInstanceID)
@ -93,7 +93,7 @@ func Parse(acc telegraf.Accumulator, buf []byte) error {
common.SetTagIfUsed(tags, "uuid", gpu.UUID)
common.SetTagIfUsed(tags, "compute_mode", gpu.ComputeMode)
fields := map[string]interface{}{}
fields := make(map[string]interface{}, 8)
common.SetIfUsed("int", fields, "sram_uncorrectable", device.EccErrorCount.VolatileCount.SramUncorrectable)
common.SetIfUsed("int", fields, "memory_fb_total", device.FbMemoryUsage.Total)
common.SetIfUsed("int", fields, "memory_fb_reserved", device.FbMemoryUsage.Reserved)
@ -107,11 +107,11 @@ func Parse(acc telegraf.Accumulator, buf []byte) error {
}
for _, process := range gpu.Processes.ProcessInfo {
tags := map[string]string{}
tags := make(map[string]string, 2)
common.SetTagIfUsed(tags, "name", process.ProcessName)
common.SetTagIfUsed(tags, "type", process.Type)
fields := map[string]interface{}{}
fields := make(map[string]interface{}, 2)
common.SetIfUsed("int", fields, "pid", process.Pid)
common.SetIfUsed("int", fields, "used_memory", process.UsedMemory)

View File

@ -154,7 +154,7 @@ func (o *Openldap) Gather(acc telegraf.Accumulator) error {
}
func gatherSearchResult(sr *ldap.SearchResult, o *Openldap, acc telegraf.Accumulator) {
fields := map[string]interface{}{}
fields := make(map[string]interface{})
tags := map[string]string{
"server": o.Host,
"port": strconv.Itoa(o.Port),

View File

@ -47,7 +47,7 @@ func (a *aggregationResponse) GetMetrics(acc telegraf.Accumulator, measurement s
return nil
}
return a.Aggregations.GetMetrics(acc, measurement, a.Hits.TotalHits.Value, map[string]string{})
return a.Aggregations.GetMetrics(acc, measurement, a.Hits.TotalHits.Value, make(map[string]string))
}
func (a *aggregation) GetMetrics(acc telegraf.Accumulator, measurement string, docCount int64, tags map[string]string) error {

View File

@ -233,8 +233,8 @@ func (o *OpenStack) Start(telegraf.Accumulator) error {
}
// Prepare cross-dependency information
o.openstackFlavors = map[string]flavors.Flavor{}
o.openstackProjects = map[string]projects.Project{}
o.openstackFlavors = make(map[string]flavors.Flavor)
o.openstackProjects = make(map[string]projects.Project)
if slices.Contains(o.EnabledServices, "servers") {
// We need the flavors to output machine details for servers
page, err := flavors.ListDetail(o.compute, nil).AllPages(ctx)
@ -337,7 +337,7 @@ func (o *OpenStack) Gather(acc telegraf.Accumulator) error {
if o.MeasureRequest {
for service, duration := range callDuration {
acc.AddFields("openstack_request_duration", map[string]interface{}{service: duration}, map[string]string{})
acc.AddFields("openstack_request_duration", map[string]interface{}{service: duration}, make(map[string]string))
}
}

View File

@ -33,7 +33,7 @@ type request struct {
func newRequest(reqID uint16, flags uint8) *request {
r := &request{
reqID: reqID,
params: map[string]string{},
params: make(map[string]string),
keepConn: flags&flagKeepConn != 0,
}
r.rawParams = r.buf[:0]

View File

@ -202,11 +202,11 @@ func (p *Ping) nativePing(destination string) (*pingStats, error) {
func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) {
tags := map[string]string{"url": destination}
fields := map[string]interface{}{}
stats, err := p.nativePingFunc(destination)
if err != nil {
p.Log.Errorf("ping failed: %s", err.Error())
fields := make(map[string]interface{}, 1)
if strings.Contains(err.Error(), "unknown") {
fields["result_code"] = 1
} else {
@ -216,7 +216,7 @@ func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) {
return
}
fields = map[string]interface{}{
fields := map[string]interface{}{
"result_code": 0,
"packets_transmitted": stats.PacketsSent,
"packets_received": stats.PacketsRecv,

View File

@ -397,7 +397,7 @@ func registerPod(pod *corev1.Pod, p *Prometheus) {
}
p.Log.Debugf("will scrape metrics from %q", targetURL.String())
tags := map[string]string{}
tags := make(map[string]string, len(pod.Annotations)+len(pod.Labels)+2)
// add annotation as metrics tags, subject to include/exclude filters
for k, v := range pod.Annotations {

View File

@ -234,7 +234,7 @@ func (p *Prometheus) Init() error {
"Accept": acceptHeader,
}
p.kubernetesPods = map[PodID]URLAndAddress{}
p.kubernetesPods = make(map[PodID]URLAndAddress)
return nil
}
@ -377,7 +377,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) (map[s
var req *http.Request
var uClient *http.Client
requestFields := make(map[string]interface{})
tags := map[string]string{}
tags := make(map[string]string, len(u.Tags)+2)
if p.URLTag != "" {
tags[p.URLTag] = u.OriginalURL.String()
}
@ -629,8 +629,8 @@ func (p *Prometheus) Stop() {
func init() {
inputs.Add("prometheus", func() telegraf.Input {
return &Prometheus{
kubernetesPods: map[PodID]URLAndAddress{},
consulServices: map[string]URLAndAddress{},
kubernetesPods: make(map[PodID]URLAndAddress),
consulServices: make(map[string]URLAndAddress),
URLTag: "url",
}
})

View File

@ -300,7 +300,7 @@ func addServerMetrics(acc telegraf.Accumulator, counters map[string]int64) {
fields[errorName] = count
}
acc.AddCounter("ras", fields, map[string]string{})
acc.AddCounter("ras", fields, make(map[string]string))
}
func fetchMachineCheckError(rows *sql.Rows) (*machineCheckError, error) {

View File

@ -365,7 +365,7 @@ func (r *Redfish) gatherThermal(acc telegraf.Accumulator, address string, system
}
for _, j := range thermal.Temperatures {
tags := map[string]string{}
tags := make(map[string]string, 19)
tags["member_id"] = j.MemberID
tags["address"] = address
tags["name"] = j.Name
@ -392,8 +392,8 @@ func (r *Redfish) gatherThermal(acc telegraf.Accumulator, address string, system
}
for _, j := range thermal.Fans {
tags := map[string]string{}
fields := make(map[string]interface{})
tags := make(map[string]string, 20)
fields := make(map[string]interface{}, 5)
tags["member_id"] = j.MemberID
tags["address"] = address
tags["name"] = j.Name
@ -469,7 +469,7 @@ func (r *Redfish) gatherPower(acc telegraf.Accumulator, address string, system *
}
for _, j := range power.PowerSupplies {
tags := map[string]string{}
tags := make(map[string]string, 19)
tags["member_id"] = j.MemberID
tags["address"] = address
tags["name"] = j.Name
@ -496,7 +496,7 @@ func (r *Redfish) gatherPower(acc telegraf.Accumulator, address string, system *
}
for _, j := range power.Voltages {
tags := map[string]string{}
tags := make(map[string]string, 19)
tags["member_id"] = j.MemberID
tags["address"] = address
tags["name"] = j.Name

View File

@ -281,7 +281,7 @@ func (r *Redis) connect() error {
},
)
tags := map[string]string{}
tags := make(map[string]string, 2)
if u.Scheme == "unix" {
tags["socket"] = u.Path
} else {

View File

@ -77,8 +77,7 @@ func (r *RedisSentinel) Init() error {
}
var address string
tags := map[string]string{}
tags := make(map[string]string, 2)
switch u.Scheme {
case "tcp":
address = u.Host

View File

@ -4,7 +4,7 @@ import "github.com/influxdata/telegraf"
type Creator func() telegraf.Input
var Inputs = map[string]Creator{}
var Inputs = make(map[string]Creator)
func Add(name string, creator Creator) {
Inputs[name] = creator

View File

@ -60,7 +60,7 @@ type riemannListener struct {
}
func (rsl *riemannListener) listen(ctx context.Context) {
rsl.connections = map[string]net.Conn{}
rsl.connections = make(map[string]net.Conn)
wg := sync.WaitGroup{}
@ -208,16 +208,17 @@ func (rsl *riemannListener) read(conn net.Conn) {
rsl.riemannReturnErrorResponse(conn, "No Service Name")
return
}
tags := make(map[string]string)
fieldValues := map[string]interface{}{}
tags := make(map[string]string, len(m.Tags)+3)
for _, tag := range m.Tags {
tags[strings.ReplaceAll(tag, " ", "_")] = tag
}
tags["Host"] = m.Host
tags["Description"] = m.Description
tags["State"] = m.State
fieldValues["Metric"] = m.Metric
fieldValues["TTL"] = m.TTL.Seconds()
fieldValues := map[string]interface{}{
"Metric": m.Metric,
"TTL": m.TTL.Seconds(),
}
singleMetric := metric.New(m.Service, tags, fieldValues, m.Time, telegraf.Untyped)
rsl.AddMetric(singleMetric)
}

View File

@ -131,7 +131,7 @@ func (s *Salesforce) fetchLimits() (limits, error) {
return l, fmt.Errorf("salesforce responded with unexpected status code %d", resp.StatusCode)
}
l = limits{}
l = make(limits)
err = json.NewDecoder(resp.Body).Decode(&l)
return l, err
}

View File

@ -72,8 +72,8 @@ func (s *Sensors) Gather(acc telegraf.Accumulator) error {
//
// and parses the output to add it to the telegraf.Accumulator.
func (s *Sensors) parse(acc telegraf.Accumulator) error {
tags := map[string]string{}
fields := map[string]interface{}{}
tags := make(map[string]string)
fields := make(map[string]interface{})
chip := ""
cmd := execCommand(s.path, "-A", "-u")
out, err := internal.StdOutputTimeout(cmd, time.Duration(s.Timeout))
@ -85,8 +85,8 @@ func (s *Sensors) parse(acc telegraf.Accumulator) error {
if len(line) == 0 {
acc.AddFields("sensors", fields, tags)
chip = ""
tags = map[string]string{}
fields = map[string]interface{}{}
tags = make(map[string]string)
fields = make(map[string]interface{})
continue
}
if len(chip) == 0 {
@ -98,7 +98,7 @@ func (s *Sensors) parse(acc telegraf.Accumulator) error {
if len(tags) > 1 {
acc.AddFields("sensors", fields, tags)
}
fields = map[string]interface{}{}
fields = make(map[string]interface{})
tags = map[string]string{
"chip": chip,
"feature": strings.TrimRight(snake(line), ":"),

View File

@ -14,7 +14,7 @@ func makeMetrics(p *v5Format) []telegraf.Metric {
tags := map[string]string{
"agent_address": p.AgentAddress.String(),
}
fields := map[string]interface{}{}
fields := make(map[string]interface{}, 2)
for _, sample := range p.Samples {
tags["input_ifindex"] = strconv.FormatUint(uint64(sample.SampleData.InputIfIndex), 10)
tags["output_ifindex"] = strconv.FormatUint(uint64(sample.SampleData.OutputIfIndex), 10)

View File

@ -110,7 +110,7 @@ func (h rawPacketHeaderFlowData) getTags() map[string]string {
if h.Header != nil {
t = h.Header.getTags()
} else {
t = map[string]string{}
t = make(map[string]string, 1)
}
t["header_protocol"] = headerProtocolMap[h.HeaderProtocol]
return t
@ -120,7 +120,7 @@ func (h rawPacketHeaderFlowData) getFields() map[string]interface{} {
if h.Header != nil {
f = h.Header.getFields()
} else {
f = map[string]interface{}{}
f = make(map[string]interface{}, 3)
}
f["bytes"] = h.Bytes
f["frame_length"] = h.FrameLength
@ -145,7 +145,7 @@ func (h ethHeader) getTags() map[string]string {
if h.IPHeader != nil {
t = h.IPHeader.getTags()
} else {
t = map[string]string{}
t = make(map[string]string, 3)
}
t["src_mac"] = net.HardwareAddr(h.SourceMAC[:]).String()
t["dst_mac"] = net.HardwareAddr(h.DestinationMAC[:]).String()
@ -156,7 +156,7 @@ func (h ethHeader) getFields() map[string]interface{} {
if h.IPHeader != nil {
return h.IPHeader.getFields()
}
return map[string]interface{}{}
return make(map[string]interface{})
}
type protocolHeader containsMetricData
@ -184,7 +184,7 @@ func (h ipV4Header) getTags() map[string]string {
if h.ProtocolHeader != nil {
t = h.ProtocolHeader.getTags()
} else {
t = map[string]string{}
t = make(map[string]string, 2)
}
t["src_ip"] = net.IP(h.SourceIP[:]).String()
t["dst_ip"] = net.IP(h.DestIP[:]).String()
@ -195,7 +195,7 @@ func (h ipV4Header) getFields() map[string]interface{} {
if h.ProtocolHeader != nil {
f = h.ProtocolHeader.getFields()
} else {
f = map[string]interface{}{}
f = make(map[string]interface{}, 6)
}
f["ip_dscp"] = strconv.FormatUint(uint64(h.DSCP), 10)
f["ip_ecn"] = strconv.FormatUint(uint64(h.ECN), 10)
@ -223,7 +223,7 @@ func (h ipV6Header) getTags() map[string]string {
if h.ProtocolHeader != nil {
t = h.ProtocolHeader.getTags()
} else {
t = map[string]string{}
t = make(map[string]string, 2)
}
t["src_ip"] = net.IP(h.SourceIP[:]).String()
t["dst_ip"] = net.IP(h.DestIP[:]).String()
@ -234,7 +234,7 @@ func (h ipV6Header) getFields() map[string]interface{} {
if h.ProtocolHeader != nil {
f = h.ProtocolHeader.getFields()
} else {
f = map[string]interface{}{}
f = make(map[string]interface{}, 3)
}
f["ip_dscp"] = strconv.FormatUint(uint64(h.DSCP), 10)
f["ip_ecn"] = strconv.FormatUint(uint64(h.ECN), 10)

View File

@ -50,8 +50,6 @@ func (ss *SlabStats) Gather(acc telegraf.Accumulator) error {
}
func (ss *SlabStats) getSlabStats() (map[string]interface{}, error) {
fields := map[string]interface{}{}
out, err := ss.runCmd("/bin/cat", []string{ss.statFile})
if err != nil {
return nil, err
@ -64,6 +62,7 @@ func (ss *SlabStats) getSlabStats() (map[string]interface{}, error) {
scanner.Scan() // for "slabinfo - version: 2.1"
scanner.Scan() // for "# name <active_objs> <num_objs> <objsize> ..."
fields := make(map[string]interface{})
// Read data rows
for scanner.Scan() {
line := scanner.Text()

View File

@ -683,12 +683,12 @@ func gatherIntelNVMeDisk(acc telegraf.Accumulator, timeout config.Duration, uses
for scanner.Scan() {
line := scanner.Text()
tags := map[string]string{}
fields := make(map[string]interface{})
tags["device"] = path.Base(device.name)
tags["model"] = device.model
tags["serial_no"] = device.serialNumber
tags := map[string]string{
"device": path.Base(device.name),
"model": device.model,
"serial_no": device.serialNumber,
}
// Create struct to initialize later with intel attributes.
var (
@ -748,7 +748,7 @@ func (m *Smart) gatherDisk(acc telegraf.Accumulator, device string, wg *sync.Wai
return
}
deviceTags := map[string]string{}
deviceTags := make(map[string]string)
if m.TagWithDeviceType {
deviceNode := strings.SplitN(device, " ", 2)
deviceTags["device"] = path.Base(deviceNode[0])
@ -809,7 +809,7 @@ func (m *Smart) gatherDisk(acc telegraf.Accumulator, device string, wg *sync.Wai
}
}
tags := map[string]string{}
tags := make(map[string]string)
fields := make(map[string]interface{})
if m.Attributes {

View File

@ -112,7 +112,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
Name: s.Name,
Fields: s.Fields,
}
topTags := map[string]string{}
topTags := make(map[string]string)
if err := s.gatherTable(acc, gs, t, topTags, false); err != nil {
acc.AddError(fmt.Errorf("agent %s: %w", agent, err))
}

View File

@ -276,11 +276,11 @@ func setTrapOid(tags map[string]string, oid string, e snmp.MibEntry) {
func makeTrapHandler(s *SnmpTrap) gosnmp.TrapHandlerFunc {
return func(packet *gosnmp.SnmpPacket, addr *net.UDPAddr) {
tm := s.timeFunc()
fields := map[string]interface{}{}
tags := map[string]string{}
tags["version"] = packet.Version.String()
tags["source"] = addr.IP.String()
fields := make(map[string]interface{}, len(packet.Variables)+1)
tags := map[string]string{
"version": packet.Version.String(),
"source": addr.IP.String(),
}
if packet.Version == gosnmp.Version1 {
// Follow the procedure described in RFC 2576 3.1 to

View File

@ -75,7 +75,7 @@ func socketList(cmdName, proto string, timeout config.Duration) (*bytes.Buffer,
func (ss *Socketstat) parseAndGather(acc telegraf.Accumulator, data *bytes.Buffer, proto string) {
scanner := bufio.NewScanner(data)
tags := map[string]string{}
tags := make(map[string]string)
fields := make(map[string]interface{})
// ss output can have blank lines, and/or socket basic info lines and more advanced

View File

@ -390,7 +390,7 @@ func (s *SQLServer) accRow(query Query, acc telegraf.Accumulator, row scanner) e
// measurement: identified by the header
// tags: all other fields of type string
tags := map[string]string{}
tags := make(map[string]string, len(columnMap)+1)
var measurement string
for header, val := range columnMap {
if str, ok := (*val).(string); ok {

View File

@ -127,11 +127,14 @@ func (s *Supervisor) parseInstanceData(status supervisorInfo) (map[string]string
if err != nil {
return nil, nil, fmt.Errorf("failed to parse server string: %w", err)
}
tags := map[string]string{}
tags["id"] = status.Ident
tags["source"] = splittedURL[0]
tags["port"] = splittedURL[1]
fields := map[string]interface{}{"state": status.StateCode}
tags := map[string]string{
"id": status.Ident,
"source": splittedURL[0],
"port": splittedURL[1],
}
fields := map[string]interface{}{
"state": status.StateCode,
}
return tags, fields, nil
}

View File

@ -19,7 +19,7 @@ func (k *Synproxy) Gather(acc telegraf.Accumulator) error {
return err
}
acc.AddCounter("synproxy", data, map[string]string{})
acc.AddCounter("synproxy", data, make(map[string]string))
return nil
}

View File

@ -235,7 +235,7 @@ func (s *Sysstat) parse(acc telegraf.Accumulator, option, tmpfile string, ts tim
return err
}
tags := map[string]string{}
tags := make(map[string]string)
if device != "-" {
tags["device"] = device
if addTags, ok := s.DeviceTags[device]; ok {

View File

@ -913,7 +913,7 @@ func getDatastores(ctx context.Context, e *Endpoint, resourceFilter *ResourceFil
func (e *Endpoint) loadCustomAttributes(entity mo.ManagedEntity) map[string]string {
if !e.customAttrEnabled {
return map[string]string{}
return make(map[string]string)
}
cvs := make(map[string]string)
for _, v := range entity.CustomValue {