chore: Fix linter findings for `revive:enforce-map-style` in `plugins/inputs/[a-m]*` (#16042)

Paweł Żak authored on 2024-10-21 13:11:11 +02:00 · committed by GitHub
parent e257c14ec1
commit 6c48fbb1f2
33 changed files with 80 additions and 89 deletions
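
For context, revive's enforce-map-style rule (evidently configured here to require "make") flags maps initialized through an empty literal and asks for make instead; literals that carry initial entries are left alone. A minimal sketch of the pattern applied throughout this commit, with illustrative names:

package main

import "fmt"

func main() {
	// tags := map[string]string{}   // flagged: empty map literal

	// Preferred form. The optional capacity hint sizes the buckets up
	// front when the final element count is known.
	tags := make(map[string]string, 2)
	tags["host"] = "example-host" // illustrative key/value
	tags["region"] = "example-region"

	// Not flagged: a literal with initial entries stays idiomatic.
	fields := map[string]interface{}{"uptime": 42}

	fmt.Println(tags, fields)
}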


@@ -148,7 +148,7 @@ func (s *AliyunCMS) Init() error {
 		if metric.Dimensions == "" {
 			continue
 		}
-		metric.dimensionsUdObj = map[string]string{}
+		metric.dimensionsUdObj = make(map[string]string)
 		metric.dimensionsUdArr = []map[string]string{}
 		// first try to unmarshal as an object
@@ -295,9 +295,9 @@ func (s *AliyunCMS) gatherMetric(acc telegraf.Accumulator, metricName string, me
 NextDataPoint:
 	for _, datapoint := range datapoints {
-		fields := map[string]interface{}{}
+		fields := make(map[string]interface{}, len(datapoint))
+		tags := make(map[string]string, len(datapoint))
 		datapointTime := int64(0)
-		tags := map[string]string{}
 		for key, value := range datapoint {
			switch key {
			case "instanceId", "BucketName":

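Beyond satisfying the linter, the AliyunCMS hunks above also add capacity hints such as len(datapoint) and len(regions). A small sketch of why presizing helps when the element count is known up front (the datapoint map below is a stand-in for the monitoring API response, not the plugin's actual type):

package main

import "fmt"

func main() {
	// Stand-in for a datapoint returned by the monitoring API.
	datapoint := map[string]interface{}{
		"instanceId": "i-12345",
		"Average":    1.5,
		"timestamp":  int64(1700000000),
	}

	// Presized map: the runtime allocates enough buckets once instead of
	// growing and rehashing incrementally while the loop copies entries.
	fields := make(map[string]interface{}, len(datapoint))
	for key, value := range datapoint {
		fields[key] = value
	}
	fmt.Println(fields)
}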

@@ -97,8 +97,6 @@ func newDiscoveryTool(
 	discoveryInterval time.Duration,
 ) (*discoveryTool, error) {
 	var (
-		dscReq = map[string]discoveryRequest{}
-		cli = map[string]aliyunSdkClient{}
 		responseRootKey string
 		responseObjectIDKey string
 		err error
@@ -115,6 +113,8 @@
 		rateLimit = 1
 	}
+	dscReq := make(map[string]discoveryRequest, len(regions))
+	cli := make(map[string]aliyunSdkClient, len(regions))
 	for _, region := range regions {
 		switch project {
 		case "acs_ecs_dashboard":
@@ -252,7 +252,7 @@
 func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse) (*parsedDResp, error) {
 	var (
-		fullOutput = map[string]interface{}{}
+		fullOutput = make(map[string]interface{})
 		data []byte
 		foundDataItem bool
 		foundRootKey bool
@@ -335,8 +335,8 @@ func (dt *discoveryTool) getDiscoveryData(cli aliyunSdkClient, req *requests.Com
 	req.QueryParams["PageNumber"] = strconv.Itoa(pageNumber)
 	if len(discoveryData) == totalCount { // All data received
-		// Map data to appropriate shape before return
-		preparedData := map[string]interface{}{}
+		// Map data to the appropriate shape before return
+		preparedData := make(map[string]interface{}, len(discoveryData))
 		for _, raw := range discoveryData {
 			elem, ok := raw.(map[string]interface{})
@@ -353,10 +353,7 @@ func (dt *discoveryTool) getDiscoveryData(cli aliyunSdkClient, req *requests.Com
 }
 func (dt *discoveryTool) getDiscoveryDataAcrossRegions(lmtr chan bool) (map[string]interface{}, error) {
-	var (
-		data map[string]interface{}
-		resultData = map[string]interface{}{}
-	)
+	resultData := make(map[string]interface{})
 	for region, cli := range dt.cli {
 		// Building common request, as the code below is the same no matter
@@ -383,7 +380,7 @@ func (dt *discoveryTool) getDiscoveryDataAcrossRegions(lmtr chan bool) (map[stri
 		commonRequest.TransToAcsRequest()
 		// Get discovery data using common request
-		data, err = dt.getDiscoveryData(cli, commonRequest, lmtr)
+		data, err := dt.getDiscoveryData(cli, commonRequest, lmtr)
 		if err != nil {
 			return nil, err
 		}
@@ -428,8 +425,7 @@ func (dt *discoveryTool) start() {
 			}
 			if !reflect.DeepEqual(data, lastData) {
-				lastData = nil
-				lastData = map[string]interface{}{}
+				lastData = make(map[string]interface{}, len(data))
 				for k, v := range data {
 					lastData[k] = v
 				}


@@ -186,7 +186,6 @@ func genTagsFields(gpus map[string]gpu, system map[string]sysInfo) []metric {
 		tags := map[string]string{
 			"name": cardID,
 		}
-		fields := map[string]interface{}{}
 		payload := gpus[cardID]
 		//nolint:errcheck // silently treat as zero if malformed
@@ -202,6 +201,7 @@ func genTagsFields(gpus map[string]gpu, system map[string]sysInfo) []metric {
 		setTagIfUsed(tags, "gpu_unique_id", payload.GpuUniqueID)
+		fields := make(map[string]interface{}, 20)
 		setIfUsed("int", fields, "driver_version", strings.ReplaceAll(system["system"].DriverVersion, ".", ""))
 		setIfUsed("int", fields, "fan_speed", payload.GpuFanSpeedPercentage)
 		setIfUsed("int64", fields, "memory_total", payload.GpuVRAMTotalMemory)


@@ -505,7 +505,7 @@ func decodeStatusFsmap(acc telegraf.Accumulator, data *status) error {
 		"up_standby": data.FSMap.NumUpStandby,
 		"up": data.FSMap.NumUp,
 	}
-	acc.AddFields("ceph_fsmap", fields, map[string]string{})
+	acc.AddFields("ceph_fsmap", fields, make(map[string]string))
 	return nil
 }
@@ -521,7 +521,7 @@ func decodeStatusHealth(acc telegraf.Accumulator, data *status) error {
 		"status_code": statusCodes[data.Health.Status],
 		"status": data.Health.Status,
 	}
-	acc.AddFields("ceph_health", fields, map[string]string{})
+	acc.AddFields("ceph_health", fields, make(map[string]string))
 	return nil
 }
@@ -530,7 +530,7 @@ func decodeStatusMonmap(acc telegraf.Accumulator, data *status) error {
 	fields := map[string]interface{}{
 		"num_mons": data.MonMap.NumMons,
 	}
-	acc.AddFields("ceph_monmap", fields, map[string]string{})
+	acc.AddFields("ceph_monmap", fields, make(map[string]string))
 	return nil
 }
@@ -555,7 +555,7 @@ func decodeStatusOsdmap(acc telegraf.Accumulator, data *status) error {
 		}
 	}
-	acc.AddFields("ceph_osdmap", fields, map[string]string{})
+	acc.AddFields("ceph_osdmap", fields, make(map[string]string))
 	return nil
 }
@@ -586,7 +586,7 @@ func decodeStatusPgmap(acc telegraf.Accumulator, data *status) error {
 		"write_bytes_sec": data.PGMap.WriteBytesSec,
 		"write_op_per_sec": data.PGMap.WriteOpPerSec,
 	}
-	acc.AddFields("ceph_pgmap", fields, map[string]string{})
+	acc.AddFields("ceph_pgmap", fields, make(map[string]string))
 	return nil
 }
@@ -654,14 +654,14 @@ func decodeDf(acc telegraf.Accumulator, input string) error {
 		"total_used_raw_ratio": data.Stats.TotalUsedRawRatio,
 		"total_used": data.Stats.TotalUsed, // pre ceph 0.84
 	}
-	acc.AddFields("ceph_usage", fields, map[string]string{})
+	acc.AddFields("ceph_usage", fields, make(map[string]string))
 	// ceph.stats_by_class: records per device-class usage
 	for class, stats := range data.StatsbyClass {
 		tags := map[string]string{
 			"class": class,
 		}
-		fields := map[string]interface{}{}
+		fields := make(map[string]interface{})
 		for key, value := range stats {
 			fields[key] = value
 		}


@@ -227,7 +227,7 @@ func (c *Chrony) gatherActivity(acc telegraf.Accumulator) error {
 		return fmt.Errorf("got unexpected response type %T while waiting for activity data", r)
 	}
-	tags := map[string]string{}
+	tags := make(map[string]string, 1)
 	if c.source != "" {
 		tags["source"] = c.source
 	}
@@ -300,7 +300,7 @@ func (c *Chrony) gatherServerStats(acc telegraf.Accumulator) error {
 		return fmt.Errorf("querying server statistics failed: %w", err)
 	}
-	tags := map[string]string{}
+	tags := make(map[string]string, 1)
 	if c.source != "" {
 		tags["source"] = c.source
 	}


@@ -142,8 +142,7 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
 	wg := sync.WaitGroup{}
 	rLock := sync.Mutex{}
-	results := map[string][]types.MetricDataResult{}
+	results := make(map[string][]types.MetricDataResult)
 	for namespace, namespacedQueries := range queries {
 		var batches [][]types.MetricDataQuery
@@ -373,9 +372,8 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) map[string
 		return c.metricCache.queries
 	}
-	c.queryDimensions = map[string]*map[string]string{}
-	dataQueries := map[string][]types.MetricDataQuery{}
+	c.queryDimensions = make(map[string]*map[string]string)
+	dataQueries := make(map[string][]types.MetricDataQuery)
 	for i, filtered := range filteredMetrics {
 		for j, singleMetric := range filtered.metrics {
 			id := strconv.Itoa(j) + "_" + strconv.Itoa(i)
@@ -460,8 +458,7 @@ func (c *CloudWatch) aggregateMetrics(acc telegraf.Accumulator, metricDataResult
 	namespace = sanitizeMeasurement(namespace)
 	for _, result := range results {
-		tags := map[string]string{}
+		tags := make(map[string]string)
 		if dimensions, ok := c.queryDimensions[*result.Id]; ok {
 			tags = *dimensions
 		}
@@ -507,7 +504,7 @@ func snakeCase(s string) string {
 // ctod converts cloudwatch dimensions to regular dimensions.
 func ctod(cDimensions []types.Dimension) *map[string]string {
-	dimensions := map[string]string{}
+	dimensions := make(map[string]string, len(cDimensions))
 	for i := range cDimensions {
 		dimensions[snakeCase(*cDimensions[i].Name)] = *cDimensions[i].Value
 	}


@@ -149,8 +149,6 @@ func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host stri
 		return fmt.Errorf("failed to decode stats from couchdb: HTTP body %q", response.Body)
 	}
-	fields := map[string]interface{}{}
 	// for couchdb 2.0 API changes
 	requestTime := metaData{
 		Current: stats.Couchdb.RequestTime.Current,
@@ -207,6 +205,7 @@ func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host stri
 		httpdStatusCodesStatus500 = stats.Couchdb.HttpdStatusCodes.Status500
 	}
+	fields := make(map[string]interface{}, 31)
 	// CouchDB meta stats:
 	c.generateFields(fields, "couchdb_auth_cache_misses", stats.Couchdb.AuthCacheMisses)
 	c.generateFields(fields, "couchdb_database_writes", stats.Couchdb.DatabaseWrites)


@@ -100,9 +100,9 @@ func (monitor *DirectoryMonitor) Init() error {
 	tags := map[string]string{
 		"directory": monitor.Directory,
 	}
-	monitor.filesDropped = selfstat.Register("directory_monitor", "files_dropped", map[string]string{})
+	monitor.filesDropped = selfstat.Register("directory_monitor", "files_dropped", make(map[string]string))
 	monitor.filesDroppedDir = selfstat.Register("directory_monitor", "files_dropped_per_dir", tags)
-	monitor.filesProcessed = selfstat.Register("directory_monitor", "files_processed", map[string]string{})
+	monitor.filesProcessed = selfstat.Register("directory_monitor", "files_processed", make(map[string]string))
 	monitor.filesProcessedDir = selfstat.Register("directory_monitor", "files_processed_per_dir", tags)
 	monitor.filesQueuedDir = selfstat.Register("directory_monitor", "files_queue_per_dir", tags)


@@ -81,7 +81,7 @@ func (d *DiskIO) Gather(acc telegraf.Accumulator) error {
 			match = true
 		}
-		tags := map[string]string{}
+		tags := make(map[string]string)
 		var devLinks []string
 		tags["name"], devLinks = d.diskName(io.Name)
@@ -207,7 +207,7 @@ func (d *DiskIO) diskTags(devName string) map[string]string {
 		return nil
 	}
-	tags := map[string]string{}
+	tags := make(map[string]string, len(d.DeviceTags))
 	for _, dt := range d.DeviceTags {
 		if v, ok := di[dt]; ok {
 			tags[dt] = v


@@ -276,9 +276,6 @@ func (d *Docker) gatherSwarmInfo(acc telegraf.Accumulator) error {
 		return err
 	}
-	running := map[string]int{}
-	tasksNoShutdown := map[string]uint64{}
 	activeNodes := make(map[string]struct{})
 	for _, n := range nodes {
 		if n.Status.State != swarm.NodeStateDown {
@@ -286,6 +283,8 @@ func (d *Docker) gatherSwarmInfo(acc telegraf.Accumulator) error {
 		}
 	}
+	tasksNoShutdown := make(map[string]uint64, len(tasks))
+	running := make(map[string]int, len(tasks))
 	for _, task := range tasks {
 		if task.DesiredState != swarm.TaskStateShutdown {
 			tasksNoShutdown[task.ServiceID]++
@@ -297,8 +296,8 @@ func (d *Docker) gatherSwarmInfo(acc telegraf.Accumulator) error {
 	}
 	for _, service := range services {
-		tags := map[string]string{}
-		fields := make(map[string]interface{})
+		tags := make(map[string]string, 3)
+		fields := make(map[string]interface{}, 2)
 		now := time.Now()
 		tags["service_id"] = service.ID
 		tags["service_name"] = service.Spec.Name
@@ -375,7 +374,7 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
 	var (
 		// "docker_devicemapper" measurement fields
 		poolName string
-		deviceMapperFields = map[string]interface{}{}
+		deviceMapperFields = make(map[string]interface{}, len(info.DriverStatus))
 	)
 	for _, rawData := range info.DriverStatus {


@@ -548,11 +548,11 @@ func (e *Elasticsearch) gatherIndicesStats(url string, acc telegraf.Accumulator)
 	now := time.Now()
 	// Total Shards Stats
-	shardsStats := map[string]interface{}{}
+	shardsStats := make(map[string]interface{}, len(indicesStats.Shards))
 	for k, v := range indicesStats.Shards {
 		shardsStats[k] = v
 	}
-	acc.AddFields("elasticsearch_indices_stats_shards_total", shardsStats, map[string]string{}, now)
+	acc.AddFields("elasticsearch_indices_stats_shards_total", shardsStats, make(map[string]string), now)
 	// All Stats
 	for m, s := range indicesStats.All {
@@ -603,7 +603,7 @@ func (e *Elasticsearch) gatherIndividualIndicesStats(indices map[string]indexSta
 }
 func (e *Elasticsearch) categorizeIndices(indices map[string]indexStat) map[string][]string {
-	categorizedIndexNames := map[string][]string{}
+	categorizedIndexNames := make(map[string][]string, len(indices))
 	// If all indices are configured to be gathered, bucket them all together.
 	if len(e.IndicesInclude) == 0 || e.IndicesInclude[0] == "_all" {
@@ -768,8 +768,8 @@ func (e *Elasticsearch) gatherJSONData(url string, v interface{}) error {
 }
 func (e *Elasticsearch) compileIndexMatchers() (map[string]filter.Filter, error) {
-	indexMatchers := map[string]filter.Filter{}
 	var err error
+	indexMatchers := make(map[string]filter.Filter, len(e.IndicesInclude))
 	// Compile each configured index into a glob matcher.
 	for _, configuredIndex := range e.IndicesInclude {


@@ -23,7 +23,7 @@ func parseSimpleResult(acc telegraf.Accumulator, measurement string, searchResul
 }
 func parseAggregationResult(acc telegraf.Accumulator, aggregationQueryList []aggregationQueryData, searchResult *elastic5.SearchResult) error {
-	measurements := map[string]map[string]string{}
+	measurements := make(map[string]map[string]string, len(aggregationQueryList))
 	// organize the aggregation query data by measurement
 	for _, aggregationQuery := range aggregationQueryList {


@@ -289,7 +289,7 @@ func (c *CommandEthtool) Interfaces(includeNamespaces bool) ([]NamespacedInterfa
 	// Handles are only used to create namespaced goroutines. We don't prefill
 	// with the handle for the initial namespace because we've already created
 	// its goroutine in Init().
-	handles := map[string]netns.NsHandle{}
+	handles := make(map[string]netns.NsHandle)
 	if includeNamespaces {
 		namespaces, err := os.ReadDir(namespaceDirectory)


@@ -13,7 +13,7 @@ func Parse(acc telegraf.Accumulator, sectionBytes, roomBytes, deviecsBytes []byt
 		return err
 	}
-	sections := map[uint16]string{}
+	sections := make(map[uint16]string, len(tmpSections))
 	for _, v := range tmpSections {
 		sections[v.ID] = v.Name
 	}
@@ -22,7 +22,7 @@ func Parse(acc telegraf.Accumulator, sectionBytes, roomBytes, deviecsBytes []byt
 	if err := json.Unmarshal(roomBytes, &tmpRooms); err != nil {
 		return err
 	}
-	rooms := map[uint16]LinkRoomsSections{}
+	rooms := make(map[uint16]LinkRoomsSections, len(tmpRooms))
 	for _, v := range tmpRooms {
 		rooms[v.ID] = LinkRoomsSections{Name: v.Name, SectionID: v.SectionID}
 	}


@@ -240,7 +240,7 @@ func (h *Hugepages) gatherStatsFromMeminfo(acc telegraf.Accumulator) error {
 		metrics[metricName] = fieldValue
 	}
-	acc.AddFields("hugepages_"+meminfoHugepages, metrics, map[string]string{})
+	acc.AddFields("hugepages_"+meminfoHugepages, metrics, make(map[string]string))
 	return nil
 }


@@ -202,12 +202,12 @@ func (r *IntelRDT) checkPIDsAssociation(ctx context.Context) error {
 }
 func (r *IntelRDT) associateProcessesWithPIDs(providedProcesses []string) (map[string]string, error) {
-	mapProcessPIDs := map[string]string{}
 	availableProcesses, err := r.Processor.getAllProcesses()
 	if err != nil {
 		return nil, errors.New("cannot gather information of all available processes")
 	}
+	mapProcessPIDs := make(map[string]string, len(availableProcesses))
 	for _, availableProcess := range availableProcesses {
 		if choice.Contains(availableProcess.Name, providedProcesses) {
 			pid := availableProcess.PID


@@ -112,8 +112,8 @@ func (p *Publisher) addToAccumulatorCores(measurement parsedCoresMeasurement) {
 				continue
 			}
 		}
-		tags := map[string]string{}
-		fields := make(map[string]interface{})
+		tags := make(map[string]string, 2)
+		fields := make(map[string]interface{}, 1)
 		tags["cores"] = measurement.cores
 		tags["name"] = pqosMetricOrder[i]
@@ -163,8 +163,8 @@ func (p *Publisher) addToAccumulatorProcesses(measurement parsedProcessMeasureme
 				continue
 			}
 		}
-		tags := map[string]string{}
-		fields := make(map[string]interface{})
+		tags := make(map[string]string, 3)
+		fields := make(map[string]interface{}, 1)
 		tags["process"] = measurement.process
 		tags["cores"] = measurement.cores


@@ -66,7 +66,7 @@ func collectMemStat(acc telegraf.Accumulator) {
 		"heap_objects": m.HeapObjects, // total number of allocated objects
 		"num_gc": m.NumGC,
 	}
-	acc.AddFields("internal_memstats", fields, map[string]string{})
+	acc.AddFields("internal_memstats", fields, make(map[string]string))
 }
 func collectGoStat(acc telegraf.Accumulator) {
@@ -77,7 +77,7 @@ func collectGoStat(acc telegraf.Accumulator) {
 	}
 	metrics.Read(samples)
-	fields := map[string]any{}
+	fields := make(map[string]any, len(samples))
 	for _, sample := range samples {
 		name := sanitizeName(sample.Name)


@@ -146,12 +146,11 @@ func (j *Jenkins) initialize(client *http.Client) error {
 }
 func (j *Jenkins) gatherNodeData(n node, acc telegraf.Accumulator) error {
-	tags := map[string]string{}
 	if n.DisplayName == "" {
 		return errors.New("error empty node name")
 	}
-	tags["node_name"] = n.DisplayName
+	tags := map[string]string{"node_name": n.DisplayName}
 	// filter out excluded or not included node_name
 	if !j.nodeFilter.Match(tags["node_name"]) {

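The Jenkins hunk above goes a step further than swapping in make: once the node name is validated, the declare-then-assign pair collapses into a single literal with its entry, which the rule also accepts. A hedged sketch of the same shape (the node type and names here are stand-ins, not the plugin's code):

package main

import (
	"errors"
	"fmt"
)

type node struct{ DisplayName string }

func gatherNodeTags(n node) (map[string]string, error) {
	if n.DisplayName == "" {
		return nil, errors.New("error empty node name")
	}
	// One literal carrying its entry, instead of an empty map plus a
	// follow-up assignment; this also satisfies enforce-map-style.
	return map[string]string{"node_name": n.DisplayName}, nil
}

func main() {
	tags, err := gatherNodeTags(node{DisplayName: "agent-1"})
	fmt.Println(tags, err)
}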

@@ -155,7 +155,7 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error {
 			fields["ksm_"+f] = m
 		}
 	}
-	acc.AddCounter("kernel", fields, map[string]string{})
+	acc.AddCounter("kernel", fields, make(map[string]string))
 	if k.optCollect["psi"] {
 		if err := k.gatherPressure(acc); err != nil {


@@ -49,7 +49,7 @@ func (k *KernelVmstat) Gather(acc telegraf.Accumulator) error {
 		}
 	}
-	acc.AddFields("kernel_vmstat", fields, map[string]string{})
+	acc.AddFields("kernel_vmstat", fields, make(map[string]string))
 	return nil
 }


@@ -24,13 +24,13 @@ func collectNodes(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesI
 func (ki *KubernetesInventory) gatherNodeCount(count int, acc telegraf.Accumulator) {
 	fields := map[string]interface{}{"node_count": count}
-	tags := map[string]string{}
+	tags := make(map[string]string)
 	acc.AddFields(nodeMeasurement, fields, tags)
 }
 func (ki *KubernetesInventory) gatherNode(n *corev1.Node, acc telegraf.Accumulator) {
-	fields := map[string]interface{}{}
+	fields := make(map[string]interface{}, len(n.Status.Capacity)+len(n.Status.Allocatable)+1)
 	tags := map[string]string{
 		"node_name": n.Name,
 		"cluster_namespace": n.Annotations["cluster.x-k8s.io/cluster-namespace"],
@@ -62,7 +62,6 @@ func (ki *KubernetesInventory) gatherNode(n *corev1.Node, acc telegraf.Accumulat
 	}
 	for _, val := range n.Status.Conditions {
-		conditionfields := map[string]interface{}{}
 		conditiontags := map[string]string{
 			"status": string(val.Status),
 			"condition": string(val.Type),
@@ -83,8 +82,10 @@ func (ki *KubernetesInventory) gatherNode(n *corev1.Node, acc telegraf.Accumulat
 			}
 			running = 2
 		}
-		conditionfields["status_condition"] = running
-		conditionfields["ready"] = nodeready
+		conditionfields := map[string]interface{}{
+			"status_condition": running,
+			"ready": nodeready,
+		}
 		acc.AddFields(nodeMeasurement, conditionfields, conditiontags)
 	}


@@ -35,7 +35,7 @@ func (ki *KubernetesInventory) gatherPod(p *corev1.Pod, acc telegraf.Accumulator
 		return
 	}
-	containerList := map[string]*corev1.ContainerStatus{}
+	containerList := make(map[string]*corev1.ContainerStatus, len(p.Status.ContainerStatuses))
 	for i := range p.Status.ContainerStatuses {
 		containerList[p.Status.ContainerStatuses[i].Name] = &p.Status.ContainerStatuses[i]
 	}
@@ -133,7 +133,6 @@ func (ki *KubernetesInventory) gatherPodContainer(p *corev1.Pod, cs corev1.Conta
 	}
 	for _, val := range p.Status.Conditions {
-		conditionfields := map[string]interface{}{}
 		conditiontags := map[string]string{
 			"container_name": c.Name,
 			"image": splitImage[0],
@@ -159,8 +158,10 @@ func (ki *KubernetesInventory) gatherPodContainer(p *corev1.Pod, cs corev1.Conta
 			}
 			running = 2
 		}
-		conditionfields["status_condition"] = running
-		conditionfields["ready"] = podready
+		conditionfields := map[string]interface{}{
+			"status_condition": running,
+			"ready": podready,
+		}
 		acc.AddFields(podContainerMeasurement, conditionfields, conditiontags)
 	}


@@ -21,7 +21,7 @@ func collectResourceQuotas(ctx context.Context, acc telegraf.Accumulator, ki *Ku
 }
 func (ki *KubernetesInventory) gatherResourceQuota(r corev1.ResourceQuota, acc telegraf.Accumulator) {
-	fields := map[string]interface{}{}
+	fields := make(map[string]interface{}, len(r.Status.Hard)+len(r.Status.Used))
 	tags := map[string]string{
 		"resource": r.Name,
 		"namespace": r.Namespace,


@@ -74,7 +74,7 @@ func (*SysctlFS) SampleConfig() string {
 }
 func (sfs *SysctlFS) Gather(acc telegraf.Accumulator) error {
-	fields := map[string]interface{}{}
+	fields := make(map[string]interface{})
 	for _, n := range []string{"aio-nr", "aio-max-nr", "dquot-nr", "dquot-max", "super-nr", "super-max"} {
 		if err := sfs.gatherOne(n, fields); err != nil {


@@ -745,7 +745,7 @@ func (l *Lustre2) Gather(acc telegraf.Accumulator) error {
 	}
 	for tgs, fields := range l.allFields {
-		tags := map[string]string{}
+		tags := make(map[string]string, 5)
 		if len(tgs.name) > 0 {
 			tags["name"] = tgs.name
 		}


@@ -16,7 +16,7 @@ type Configuration interface {
 }
 func removeDuplicates(elements []uint16) []uint16 {
-	encountered := map[uint16]bool{}
+	encountered := make(map[uint16]bool, len(elements))
 	result := []uint16{}
 	for _, addr := range elements {


@@ -183,7 +183,7 @@ func (c *ConfigurationOriginal) newFieldFromDefinition(def fieldDefinition, type
 }
 func (c *ConfigurationOriginal) validateFieldDefinitions(fieldDefs []fieldDefinition, registerType string) error {
-	nameEncountered := map[string]bool{}
+	nameEncountered := make(map[string]bool, len(fieldDefs))
 	for _, item := range fieldDefs {
 		// check empty name
 		if item.Name == "" {


@@ -212,8 +212,7 @@ func (c *ConfigurationPerRequest) Check() error {
 }
 func (c *ConfigurationPerRequest) Process() (map[byte]requestSet, error) {
-	result := map[byte]requestSet{}
+	result := make(map[byte]requestSet, len(c.Requests))
 	for _, def := range c.Requests {
 		// Set default
 		if def.RegisterType == "" {


@@ -522,7 +522,7 @@ func (m *Modbus) collectFields(acc telegraf.Accumulator, timestamp time.Time, ta
 	for _, request := range requests {
 		for _, field := range request.fields {
 			// Collect tags from global and per-request
-			ftags := map[string]string{}
+			ftags := make(map[string]string, len(tags)+len(field.tags))
 			for k, v := range tags {
 				ftags[k] = v
 			}


@@ -890,7 +890,7 @@ type topStatLine struct {
 }
 func parseLocks(stat serverStatus) map[string]lockUsage {
-	returnVal := map[string]lockUsage{}
+	returnVal := make(map[string]lockUsage, len(stat.Locks))
 	for namespace, lockInfo := range stat.Locks {
 		returnVal[namespace] = lockUsage{
 			namespace,
@@ -1454,7 +1454,7 @@ func NewStatLine(oldMongo, newMongo mongoStatus, key string, all bool, sampleSec
 		returnVal.TotalAvailable = newShardStats.TotalAvailable
 		returnVal.TotalCreated = newShardStats.TotalCreated
 		returnVal.TotalRefreshing = newShardStats.TotalRefreshing
-		returnVal.ShardHostStatsLines = map[string]shardHostStatLine{}
+		returnVal.ShardHostStatsLines = make(map[string]shardHostStatLine, len(newShardStats.Hosts))
 		for host, stats := range newShardStats.Hosts {
 			shardStatLine := &shardHostStatLine{
 				InUse: stats.InUse,


@@ -114,7 +114,7 @@ func (m *MQTTConsumer) Init() error {
 		return err
 	}
 	m.opts = opts
-	m.messages = map[telegraf.TrackingID]mqtt.Message{}
+	m.messages = make(map[telegraf.TrackingID]mqtt.Message)
 	m.topicParsers = make([]*TopicParser, 0, len(m.TopicParserConfig))
 	for _, cfg := range m.TopicParserConfig {
@@ -125,8 +125,8 @@ func (m *MQTTConsumer) Init() error {
 		m.topicParsers = append(m.topicParsers, p)
 	}
-	m.payloadSize = selfstat.Register("mqtt_consumer", "payload_size", map[string]string{})
-	m.messagesRecv = selfstat.Register("mqtt_consumer", "messages_received", map[string]string{})
+	m.payloadSize = selfstat.Register("mqtt_consumer", "payload_size", make(map[string]string))
+	m.messagesRecv = selfstat.Register("mqtt_consumer", "messages_received", make(map[string]string))
 	return nil
 }
 func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error {


@@ -992,7 +992,7 @@ func (m *Mysql) gatherUserStatisticsStatuses(db *sql.DB, servtag string, acc tel
 	}
 	tags := map[string]string{"server": servtag, "user": *read[0].(*string)}
-	fields := map[string]interface{}{}
+	fields := make(map[string]interface{}, len(cols))
 	for i := range cols {
 		if i == 0 {