chore: Fix linter findings for `revive:enforce-repeated-arg-type-style` in `plugins/outputs` and `plugins/parsers` (#15847)

Paweł Żak 2024-09-11 21:57:14 +02:00 committed by GitHub
parent f7ae141484
commit 686ff791ba
29 changed files with 39 additions and 78 deletions
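
For context, revive's `enforce-repeated-arg-type-style` rule flags signatures that spell out the same type for consecutive parameters or results, since Go lets them share one type declaration. A minimal before/after sketch of the shape of change applied throughout this commit (the function and its names are hypothetical, not taken from the diff):

```go
package main

import (
	"fmt"
	"time"
)

// connectVerbose repeats the string type for each parameter; this is the
// form the linter flags.
func connectVerbose(host string, port string, timeout time.Duration) string {
	return fmt.Sprintf("%s:%s (timeout %s)", host, port, timeout)
}

// connect collapses consecutive same-typed parameters into a single
// declaration, matching the style this commit enforces.
func connect(host, port string, timeout time.Duration) string {
	return fmt.Sprintf("%s:%s (timeout %s)", host, port, timeout)
}

func main() {
	fmt.Println(connectVerbose("localhost", "8086", 5*time.Second))
	fmt.Println(connect("localhost", "8086", 5*time.Second))
}
```

The same collapsing applies to named result lists, as in the `mockZabbixSender.SendMetrics` change below.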

View File

@@ -389,13 +389,7 @@ func unfinished() <-chan struct{} {
return unfinished
}
func verifyAggregateTelemetry(
t *testing.T,
m telegraf.Metric,
valueField string,
countField string,
telemetry *appinsights.AggregateMetricTelemetry,
) {
func verifyAggregateTelemetry(t *testing.T, m telegraf.Metric, valueField, countField string, telemetry *appinsights.AggregateMetricTelemetry) {
verifyAggregateField := func(fieldName string, telemetryValue float64) {
metricRawFieldValue, found := m.Fields()[fieldName]
if !found {
@@ -417,13 +411,7 @@ func verifyAggregateTelemetry(
assertMapContains(t, m.Tags(), telemetry.Properties)
}
func verifySimpleTelemetry(
t *testing.T,
m telegraf.Metric,
valueField string,
expectedTelemetryName string,
telemetry *appinsights.MetricTelemetry,
) {
func verifySimpleTelemetry(t *testing.T, m telegraf.Metric, valueField, expectedTelemetryName string, telemetry *appinsights.MetricTelemetry) {
require.Equal(t, expectedTelemetryName, telemetry.Name, "Telemetry name is not what was expected")
require.InDelta(t, m.Fields()[valueField], telemetry.Value, testutil.DefaultDelta, "Telemetry value does not match metric value field")
require.Equal(t, m.Time(), telemetry.Timestamp, "Telemetry and metric timestamps do not match")

View File

@@ -8,7 +8,7 @@ type Transmitter struct {
client appinsights.TelemetryClient
}
func NewTransmitter(ikey string, endpointURL string) *Transmitter {
func NewTransmitter(ikey, endpointURL string) *Transmitter {
if len(endpointURL) == 0 {
return &Transmitter{client: appinsights.NewTelemetryClient(ikey)}
}

View File

@@ -256,7 +256,7 @@ func init() {
}
// For each table create the ingestor
func createIngestorByTable(client *kusto.Client, database string, tableName string, ingestionType string) (ingest.Ingestor, error) {
func createIngestorByTable(client *kusto.Client, database, tableName, ingestionType string) (ingest.Ingestor, error) {
switch strings.ToLower(ingestionType) {
case managedIngestion:
mi, err := ingest.NewManaged(client, database, tableName)

View File

@@ -184,7 +184,7 @@ func (a *AzureMonitor) initHTTPClient() {
}
// vmInstanceMetadata retrieves metadata about the current Azure VM
func vmInstanceMetadata(c *http.Client) (region string, resourceID string, err error) {
func vmInstanceMetadata(c *http.Client) (region, resourceID string, err error) {
req, err := http.NewRequest("GET", vmInstanceMetadataURL, nil)
if err != nil {
return "", "", fmt.Errorf("error creating request: %w", err)

View File

@@ -193,7 +193,7 @@ func verifyRawMetricPublished(t *testing.T, m telegraf.Metric, published map[str
return verifyMetricPublished(t, m, published, false, false)
}
func verifyMetricPublished(t *testing.T, m telegraf.Metric, published map[string]*pubsub.Message, base64Encoded bool, gzipEncoded bool) *pubsub.Message {
func verifyMetricPublished(t *testing.T, m telegraf.Metric, published map[string]*pubsub.Message, base64Encoded, gzipEncoded bool) *pubsub.Message {
p := influx.Parser{}
require.NoError(t, p.Init())

View File

@@ -251,7 +251,7 @@ func PartitionDatums(size int, datums []types.MetricDatum) [][]types.MetricDatum
// BuildMetricDatum makes a MetricDatum from telegraf.Metric. It checks whether all required fields of
// cloudwatch.StatisticSet are available. If so, it builds the MetricDatum from statistic values.
// Otherwise, fields are still built independently.
func BuildMetricDatum(buildStatistic bool, highResolutionMetrics bool, point telegraf.Metric) []types.MetricDatum {
func BuildMetricDatum(buildStatistic, highResolutionMetrics bool, point telegraf.Metric) []types.MetricDatum {
fields := make(map[string]cloudwatchField)
tags := point.Tags()
storageResolution := int64(60)

View File

@@ -103,7 +103,7 @@ func (c *CrateDB) Write(metrics []telegraf.Metric) error {
return nil
}
func insertSQL(table string, keyReplacement string, metrics []telegraf.Metric) (string, error) {
func insertSQL(table, keyReplacement string, metrics []telegraf.Metric) (string, error) {
rows := make([]string, 0, len(metrics))
for _, m := range metrics {
cols := []interface{}{
@@ -213,7 +213,7 @@ func escapeObject(m map[string]interface{}, keyReplacement string) (string, erro
// escapeString wraps s in the given quote string and escapes all occurrences
// of it inside s by doubling them.
func escapeString(s string, quote string) string {
func escapeString(s, quote string) string {
return quote + strings.ReplaceAll(s, quote, quote+quote) + quote
}

View File

@@ -238,7 +238,7 @@ func verifyValue(v interface{}) bool {
return true
}
func isRateable(statsDMetricType string, fieldName string) bool {
func isRateable(statsDMetricType, fieldName string) bool {
switch statsDMetricType {
case
"counter":

View File

@@ -104,7 +104,7 @@ type CommandRunner struct {
}
// Run runs the command.
func (c *CommandRunner) Run(timeout time.Duration, command []string, environments []string, buffer io.Reader) error {
func (c *CommandRunner) Run(timeout time.Duration, command, environments []string, buffer io.Reader) error {
cmd := exec.Command(command[0], command[1:]...)
if len(environments) > 0 {
cmd.Env = append(os.Environ(), environments...)

View File

@@ -25,7 +25,7 @@ type MockRunner struct {
}
// Run runs the command.
func (c *MockRunner) Run(_ time.Duration, _ []string, _ []string, buffer io.Reader) error {
func (c *MockRunner) Run(_ time.Duration, _, _ []string, buffer io.Reader) error {
parser := influxParser.NewStreamParser(buffer)
numMetrics := 0

View File

@@ -155,7 +155,7 @@ func (g *gelfUDP) Close() (err error) {
return err
}
func (g *gelfUDP) createChunkedMessage(index int, chunkCountInt int, id []byte, compressed *bytes.Buffer) (bytes.Buffer, error) {
func (g *gelfUDP) createChunkedMessage(index, chunkCountInt int, id []byte, compressed *bytes.Buffer) (bytes.Buffer, error) {
var packet bytes.Buffer
chunksize := g.getChunksize()

View File

@@ -118,7 +118,7 @@ func (l *Librato) Write(metrics []telegraf.Metric) error {
return nil
}
func (l *Librato) writeBatch(start int, sizeBatch int, metricCounter int, tempGauges []*Gauge) error {
func (l *Librato) writeBatch(start, sizeBatch, metricCounter int, tempGauges []*Gauge) error {
lmetrics := LMetrics{}
end := start + sizeBatch
if end > metricCounter {

View File

@@ -16,7 +16,7 @@ type TopicNameGenerator struct {
template *template.Template
}
func NewTopicNameGenerator(topicPrefix string, topic string) (*TopicNameGenerator, error) {
func NewTopicNameGenerator(topicPrefix, topic string) (*TopicNameGenerator, error) {
tt, err := template.New("topic_name").Parse(topic)
if err != nil {
return nil, err

View File

@@ -275,7 +275,7 @@ func (p *Parquet) createSchema(metrics []telegraf.Metric) (*arrow.Schema, error)
return arrow.NewSchema(fields, nil), nil
}
func (p *Parquet) createWriter(name string, filename string, schema *arrow.Schema) (*pqarrow.FileWriter, error) {
func (p *Parquet) createWriter(name, filename string, schema *arrow.Schema) (*pqarrow.FileWriter, error) {
if _, err := os.Stat(filename); err == nil {
now := time.Now()
rotatedFilename := fmt.Sprintf("%s/%s-%s-%s.parquet", p.Directory, name, now.Format("2006-01-02"), strconv.FormatInt(now.Unix(), 10))

View File

@@ -404,7 +404,7 @@ func (t *Template) UnmarshalText(text []byte) error {
return nil
}
func (t *Template) Render(table *Table, newColumns []utils.Column, metricTable *Table, tagTable *Table) ([]byte, error) {
func (t *Template) Render(table *Table, newColumns []utils.Column, metricTable, tagTable *Table) ([]byte, error) {
tcs := NewColumns(newColumns).Sorted()
data := map[string]interface{}{
"table": table,

View File

@@ -154,10 +154,8 @@ func (tm *TableManager) EnsureStructure(
db dbh,
tbl *tableState,
columns []utils.Column,
createTemplates []*sqltemplate.Template,
addColumnsTemplates []*sqltemplate.Template,
metricsTable *tableState,
tagsTable *tableState,
createTemplates, addColumnsTemplates []*sqltemplate.Template,
metricsTable, tagsTable *tableState,
) ([]utils.Column, error) {
// Sort so that:
// * When we create/alter the table the columns are in a sane order (telegraf gives us the fields in random order)
@@ -353,8 +351,7 @@ func (tm *TableManager) update(ctx context.Context,
state *tableState,
tmpls []*sqltemplate.Template,
missingCols []utils.Column,
metricsTable *tableState,
tagsTable *tableState,
metricsTable, tagsTable *tableState,
) error {
tmplTable := sqltemplate.NewTable(tm.Schema, state.name, colMapToSlice(state.columns))
metricsTmplTable := sqltemplate.NewTable(tm.Schema, metricsTable.name, colMapToSlice(metricsTable.columns))

View File

@@ -62,13 +62,7 @@ type Collector struct {
expireTicker *time.Ticker
}
func NewCollector(
expire time.Duration,
stringsAsLabel bool,
exportTimestamp bool,
typeMapping serializer.MetricTypes,
logger telegraf.Logger,
) *Collector {
func NewCollector(expire time.Duration, stringsAsLabel, exportTimestamp bool, typeMapping serializer.MetricTypes, logger telegraf.Logger) *Collector {
c := &Collector{
ExpirationInterval: expire,
StringAsLabel: stringsAsLabel,

View File

@@ -43,12 +43,7 @@ type Collector struct {
coll *serializer.Collection
}
func NewCollector(
expire time.Duration,
stringsAsLabel bool,
exportTimestamp bool,
typeMapping serializer.MetricTypes,
) *Collector {
func NewCollector(expire time.Duration, stringsAsLabel, exportTimestamp bool, typeMapping serializer.MetricTypes) *Collector {
cfg := serializer.FormatConfig{
StringAsLabel: stringsAsLabel,
ExportTimestamp: exportTimestamp,

View File

@@ -141,7 +141,7 @@ func (r *Riemann) attributes(name string, tags map[string]string) map[string]str
return tags
}
func (r *Riemann) service(name string, field string) string {
func (r *Riemann) service(name, field string) string {
var serviceStrings []string
// if measurement is not enabled as an attribute then prepend it to service name

View File

@@ -199,7 +199,7 @@ func (s *SignalFx) isEventIncluded(name string) bool {
}
// getMetricName combines telegraf fields and tags into a full metric name
func getMetricName(metric string, field string) string {
func getMetricName(metric, field string) string {
name := metric
// Include field in metric name when it adds to the metric name

View File

@@ -474,11 +474,7 @@ func getStackdriverIntervalEndpoints(
return startTime, endTime
}
func getStackdriverTimeInterval(
m metricpb.MetricDescriptor_MetricKind,
startTime *timestamppb.Timestamp,
endTime *timestamppb.Timestamp,
) (*monitoringpb.TimeInterval, error) {
func getStackdriverTimeInterval(m metricpb.MetricDescriptor_MetricKind, startTime, endTime *timestamppb.Timestamp) (*monitoringpb.TimeInterval, error) {
switch m {
case metricpb.MetricDescriptor_GAUGE:
return &monitoringpb.TimeInterval{

View File

@@ -52,7 +52,7 @@ func (sm *SyslogMapper) mapStructuredData(metric telegraf.Metric, msg *rfc5424.S
}
}
func (sm *SyslogMapper) mapStructuredDataItem(key string, value string, msg *rfc5424.SyslogMessage) {
func (sm *SyslogMapper) mapStructuredDataItem(key, value string, msg *rfc5424.SyslogMessage) {
if sm.reservedKeys[key] {
return
}

View File

@@ -452,7 +452,7 @@ func TestBuildMultiMeasuresInSingleAndMultiTableMode(t *testing.T) {
"will contain request: %+v\n\n", result, expectedResultSingleTable)
}
func buildExpectedMultiRecords(multiMeasureName string, tableName string) *timestreamwrite.WriteRecordsInput {
func buildExpectedMultiRecords(multiMeasureName, tableName string) *timestreamwrite.WriteRecordsInput {
var recordsMultiTableMode []types.Record
recordDouble := buildMultiRecords([]SimpleInput{
{

View File

@@ -112,11 +112,7 @@ func (m *mockZabbixSender) Send(packet *zabbix.Packet) (res zabbix.Response, err
return zabbix.Response{}, nil
}
func (m *mockZabbixSender) SendMetrics(metrics []*zabbix.Metric) (
resActive zabbix.Response,
resTrapper zabbix.Response,
err error,
) {
func (m *mockZabbixSender) SendMetrics(metrics []*zabbix.Metric) (resActive, resTrapper zabbix.Response, err error) {
m.sendMetrics = append(m.sendMetrics, metrics...)
return zabbix.Response{}, zabbix.Response{}, nil
}

View File

@@ -760,7 +760,7 @@ func TestAutoRegister(t *testing.T) {
// compareData compares generated data with expected data ignoring slice order if all Clocks are the same.
// This is useful for metrics with several fields that should produce several Zabbix values that
// cannot be sorted by clock.
func compareData(t *testing.T, expected []zabbixRequestData, data []zabbixRequestData) {
func compareData(t *testing.T, expected, data []zabbixRequestData) {
t.Helper()
var clock int64

View File

@@ -29,7 +29,7 @@ type schemaRegistry struct {
const schemaByID = "%s/schemas/ids/%d"
func newSchemaRegistry(addr string, caCertPath string) (*schemaRegistry, error) {
func newSchemaRegistry(addr, caCertPath string) (*schemaRegistry, error) {
var client *http.Client
var tlsCfg *tls.Config
if caCertPath != "" {

View File

@@ -54,13 +54,13 @@ func stringFieldUnescape(b []byte) string {
}
// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt.
func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) {
func parseIntBytes(b []byte, base, bitSize int) (i int64, err error) {
s := unsafeBytesToString(b)
return strconv.ParseInt(s, base, bitSize)
}
// parseUintBytes is a zero-alloc wrapper around strconv.ParseUint.
func parseUintBytes(b []byte, base int, bitSize int) (i uint64, err error) {
func parseUintBytes(b []byte, base, bitSize int) (i uint64, err error) {
s := unsafeBytesToString(b)
return strconv.ParseUint(s, base, bitSize)
}
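
The zero-alloc wrappers above depend on an `unsafeBytesToString` helper that this hunk does not show. A sketch of how such a helper is typically written in modern Go (Go 1.20+; hypothetical, the actual Telegraf helper may be implemented differently):

```go
package influx

import "unsafe"

// unsafeBytesToString reinterprets b as a string without copying.
// The caller must not mutate b while the returned string is in use.
func unsafeBytesToString(b []byte) string {
	return unsafe.String(unsafe.SliceData(b), len(b))
}
```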

View File

@@ -51,14 +51,14 @@ func (h *MetricHandler) SetMeasurement(name []byte) error {
return nil
}
func (h *MetricHandler) AddTag(key []byte, value []byte) error {
func (h *MetricHandler) AddTag(key, value []byte) error {
tk := unescape(key)
tv := unescape(value)
h.metric.AddTag(tk, tv)
return nil
}
func (h *MetricHandler) AddInt(key []byte, value []byte) error {
func (h *MetricHandler) AddInt(key, value []byte) error {
fk := unescape(key)
fv, err := parseIntBytes(bytes.TrimSuffix(value, []byte("i")), 10, 64)
if err != nil {
@@ -72,7 +72,7 @@ func (h *MetricHandler) AddInt(key []byte, value []byte) error {
return nil
}
func (h *MetricHandler) AddUint(key []byte, value []byte) error {
func (h *MetricHandler) AddUint(key, value []byte) error {
fk := unescape(key)
fv, err := parseUintBytes(bytes.TrimSuffix(value, []byte("u")), 10, 64)
if err != nil {
@@ -86,7 +86,7 @@ func (h *MetricHandler) AddUint(key []byte, value []byte) error {
return nil
}
func (h *MetricHandler) AddFloat(key []byte, value []byte) error {
func (h *MetricHandler) AddFloat(key, value []byte) error {
fk := unescape(key)
fv, err := parseFloatBytes(value, 64)
if err != nil {
@@ -100,14 +100,14 @@ func (h *MetricHandler) AddFloat(key []byte, value []byte) error {
return nil
}
func (h *MetricHandler) AddString(key []byte, value []byte) error {
func (h *MetricHandler) AddString(key, value []byte) error {
fk := unescape(key)
fv := stringFieldUnescape(value)
h.metric.AddField(fk, fv)
return nil
}
func (h *MetricHandler) AddBool(key []byte, value []byte) error {
func (h *MetricHandler) AddBool(key, value []byte) error {
fk := unescape(key)
fv, err := parseBoolBytes(value)
if err != nil {

View File

@@ -21,12 +21,7 @@ func (f *JSONFlattener) FlattenJSON(
}
// FullFlattenJSON flattens nested maps/interfaces into a fields map (optionally converting strings and bools)
func (f *JSONFlattener) FullFlattenJSON(
fieldName string,
v interface{},
convertString bool,
convertBool bool,
) error {
func (f *JSONFlattener) FullFlattenJSON(fieldName string, v interface{}, convertString, convertBool bool) error {
if f.Fields == nil {
f.Fields = make(map[string]interface{})
}
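
Telegraf runs revive through golangci-lint. A configuration sketch that would enable this rule; the rule name and the "short" argument follow revive's documented settings, but the layout of the repository's actual `.golangci.yml` may differ:

```yaml
# .golangci.yml (sketch, assuming golangci-lint's revive integration)
linters-settings:
  revive:
    rules:
      # "short" requires `a, b string` instead of `a string, b string`
      # in both parameter lists and named result lists.
      - name: enforce-repeated-arg-type-style
        arguments: ["short"]
```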