chore(linters): Configure `revive:var-naming` and fix its findings (#16817)
commit dba5597530 (parent 9788d53549)
@@ -367,6 +367,9 @@ linters:
       - name: unused-receiver
       - name: var-declaration
       - name: var-naming
+        arguments:
+          - [ ] # AllowList
+          - [ "ID", "DB", "TS" ] # DenyList
       - name: waitgroup-by-value

   staticcheck:
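For context: revive's `var-naming` rule takes two list arguments, an allowlist and a denylist of initialisms. Leaving the allowlist empty and denylisting `ID`, `DB`, and `TS` makes the linter treat those as initialisms that must be fully capitalized in Go identifiers, which is what drives every rename in this commit. A minimal before/after sketch of what the rule now flags (mirroring renames from the hunks below):

```go
package main

import "time"

// Before: flagged by revive var-naming, because "Ts" and "Db" are
// denylisted initialisms that are not fully capitalized:
//
//	var lastTs time.Time
//	func initDb() {}

// After: identifiers with fully capitalized initialisms pass the rule.
var lastTS time.Time

func initDB() {}

func main() {
	lastTS = time.Now()
	initDB()
	_ = lastTS
}
```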
@@ -119,7 +119,7 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error {
 	for alias, encodingPath := range c.Aliases {
 		c.internalAliases[encodingPath] = alias
 	}
-	c.initDb()
+	c.initDB()

 	c.dmesFuncs = make(map[string]string, len(c.Dmes))
 	for dme, dmeKey := range c.Dmes {
@@ -838,7 +838,7 @@ func (c *CiscoTelemetryMDT) initLldp() {
 	c.nxpathMap[key]["id"] = "string"
 }

-func (c *CiscoTelemetryMDT) initDb() {
+func (c *CiscoTelemetryMDT) initDB() {
 	c.nxpathMap = make(map[string]map[string]string, 200)

 	c.initPower()
@@ -369,7 +369,7 @@ func tailStream(

 	r := bufio.NewReaderSize(reader, 64*1024)

-	var lastTs time.Time
+	var lastTS time.Time
 	for {
 		line, err := r.ReadBytes('\n')

@@ -385,14 +385,14 @@ func tailStream(
 		}

 		// Store the last processed timestamp
-		if ts.After(lastTs) {
-			lastTs = ts
+		if ts.After(lastTS) {
+			lastTS = ts
 		}
 	}

 	if err != nil {
 		if err == io.EOF {
-			return lastTs, nil
+			return lastTS, nil
 		}
 		return time.Time{}, err
 	}
@@ -38,8 +38,8 @@ type EventHub struct {
 	UserAgent              string   `toml:"user_agent"`
 	PartitionIDs           []string `toml:"partition_ids"`
 	MaxUndeliveredMessages int      `toml:"max_undelivered_messages"`
-	EnqueuedTimeAsTs       bool     `toml:"enqueued_time_as_ts"`
-	IotHubEnqueuedTimeAsTs bool     `toml:"iot_hub_enqueued_time_as_ts"`
+	EnqueuedTimeAsTS       bool     `toml:"enqueued_time_as_ts"`
+	IotHubEnqueuedTimeAsTS bool     `toml:"iot_hub_enqueued_time_as_ts"`

 	// Metadata
 	ApplicationPropertyFields []string `toml:"application_property_fields"`
@@ -299,7 +299,7 @@ func (e *EventHub) createMetrics(event *eventhub.Event) ([]telegraf.Metric, erro
 		metrics[i].AddField(e.SequenceNumberField, *event.SystemProperties.SequenceNumber)
 	}

-	if e.EnqueuedTimeAsTs {
+	if e.EnqueuedTimeAsTS {
 		metrics[i].SetTime(*event.SystemProperties.EnqueuedTime)
 	} else if e.EnqueuedTimeField != "" {
 		metrics[i].AddField(e.EnqueuedTimeField, (*event.SystemProperties.EnqueuedTime).UnixNano()/int64(time.Millisecond))
@@ -328,7 +328,7 @@ func (e *EventHub) createMetrics(event *eventhub.Event) ([]telegraf.Metric, erro
 		metrics[i].AddTag(e.IoTHubConnectionModuleIDTag, *event.SystemProperties.IoTHubConnectionModuleID)
 	}
 	if event.SystemProperties.IoTHubEnqueuedTime != nil {
-		if e.IotHubEnqueuedTimeAsTs {
+		if e.IotHubEnqueuedTimeAsTS {
 			metrics[i].SetTime(*event.SystemProperties.IoTHubEnqueuedTime)
 		} else if e.IoTHubEnqueuedTimeField != "" {
 			metrics[i].AddField(e.IoTHubEnqueuedTimeField, (*event.SystemProperties.IoTHubEnqueuedTime).UnixNano()/int64(time.Millisecond))
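Worth noting: only the Go identifiers change in this struct; the `toml:"enqueued_time_as_ts"` and `toml:"iot_hub_enqueued_time_as_ts"` tags are untouched, so existing user configurations are unaffected. The same holds for every struct-tag rename in this commit (`gather_perdb_stats`, `col_stats_dbs`, `db_stats_dbs`, and so on): the serialized keys stay stable while the field names gain capitalized initialisms.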
@@ -40,8 +40,8 @@ func (ki *KubernetesInventory) gatherDaemonSet(d *apps.DaemonSet, acc telegraf.A
 		}
 	}

-	creationTs := d.GetCreationTimestamp()
-	if !creationTs.IsZero() {
+	creationTS := d.GetCreationTimestamp()
+	if !creationTS.IsZero() {
 		fields["created"] = d.GetCreationTimestamp().UnixNano()
 	}

@@ -21,8 +21,8 @@ func collectEndpoints(ctx context.Context, acc telegraf.Accumulator, ki *Kuberne
 }

 func gatherEndpoint(e corev1.Endpoints, acc telegraf.Accumulator) {
-	creationTs := e.GetCreationTimestamp()
-	if creationTs.IsZero() {
+	creationTS := e.GetCreationTimestamp()
+	if creationTS.IsZero() {
 		return
 	}

@@ -20,8 +20,8 @@ func collectIngress(ctx context.Context, acc telegraf.Accumulator, ki *Kubernete
 }

 func gatherIngress(i netv1.Ingress, acc telegraf.Accumulator) {
-	creationTs := i.GetCreationTimestamp()
-	if creationTs.IsZero() {
+	creationTS := i.GetCreationTimestamp()
+	if creationTS.IsZero() {
 		return
 	}

@@ -30,8 +30,8 @@ func collectPods(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesIn
 }

 func (ki *KubernetesInventory) gatherPod(p *corev1.Pod, acc telegraf.Accumulator) {
-	creationTs := p.GetCreationTimestamp()
-	if creationTs.IsZero() {
+	creationTS := p.GetCreationTimestamp()
+	if creationTS.IsZero() {
 		return
 	}

@@ -20,8 +20,8 @@ func collectServices(ctx context.Context, acc telegraf.Accumulator, ki *Kubernet
 }

 func (ki *KubernetesInventory) gatherService(s *corev1.Service, acc telegraf.Accumulator) {
-	creationTs := s.GetCreationTimestamp()
-	if creationTs.IsZero() {
+	creationTS := s.GetCreationTimestamp()
+	if creationTS.IsZero() {
 		return
 	}
@@ -31,11 +31,11 @@ var disconnectedServersBehaviors = []string{"error", "skip"}
 type MongoDB struct {
 	Servers                     []string `toml:"servers"`
 	GatherClusterStatus         bool     `toml:"gather_cluster_status"`
-	GatherPerdbStats            bool     `toml:"gather_perdb_stats"`
+	GatherPerDBStats            bool     `toml:"gather_perdb_stats"`
 	GatherColStats              bool     `toml:"gather_col_stats"`
 	GatherTopStat               bool     `toml:"gather_top_stat"`
 	DisconnectedServersBehavior string   `toml:"disconnected_servers_behavior"`
-	ColStatsDbs                 []string `toml:"col_stats_dbs"`
+	ColStatsDBs                 []string `toml:"col_stats_dbs"`
 	common_tls.ClientConfig
 	Ssl ssl

@@ -118,7 +118,7 @@ func (m *MongoDB) Gather(acc telegraf.Accumulator) error {
 		}
 	}

-	err := srv.gatherData(acc, m.GatherClusterStatus, m.GatherPerdbStats, m.GatherColStats, m.GatherTopStat, m.ColStatsDbs)
+	err := srv.gatherData(acc, m.GatherClusterStatus, m.GatherPerDBStats, m.GatherColStats, m.GatherTopStat, m.ColStatsDBs)
 	if err != nil {
 		m.Log.Errorf("Failed to gather data: %s", err)
 	}
@@ -191,10 +191,10 @@ func init() {
 	inputs.Add("mongodb", func() telegraf.Input {
 		return &MongoDB{
 			GatherClusterStatus: true,
-			GatherPerdbStats:    false,
+			GatherPerDBStats:    false,
 			GatherColStats:      false,
 			GatherTopStat:       false,
-			ColStatsDbs:         []string{"local"},
+			ColStatsDBs:         []string{"local"},
 		}
 	})
 }
@@ -8,11 +8,11 @@ import (
 	"github.com/influxdata/telegraf"
 )

-type mongodbData struct {
+type mongoDBData struct {
 	StatLine      *statLine
 	Fields        map[string]interface{}
 	Tags          map[string]string
-	DbData        []bbData
+	DBData        []bbData
 	ColData       []colData
 	ShardHostData []bbData
 	TopStatsData  []bbData
@@ -25,12 +25,12 @@ type bbData struct {

 type colData struct {
 	Name   string
-	DbName string
+	DBName string
 	Fields map[string]interface{}
 }

-func newMongodbData(statLine *statLine, tags map[string]string) *mongodbData {
-	return &mongodbData{
+func newMongodbData(statLine *statLine, tags map[string]string) *mongoDBData {
+	return &mongoDBData{
 		StatLine: statLine,
 		Tags:     tags,
 		Fields:   make(map[string]interface{}),
@@ -297,30 +297,30 @@ var topDataStats = map[string]string{
 	"commands_count": "CommandsCount",
 }

-func (d *mongodbData) addDbStats() {
-	for i := range d.StatLine.DbStatsLines {
-		dbstat := d.StatLine.DbStatsLines[i]
-		dbStatLine := reflect.ValueOf(&dbstat).Elem()
-		newDbData := &bbData{
-			Name:   dbstat.Name,
+func (d *mongoDBData) addDBStats() {
+	for i := range d.StatLine.DBStatsLines {
+		dbStat := d.StatLine.DBStatsLines[i]
+		dbStatLine := reflect.ValueOf(&dbStat).Elem()
+		newDBData := &bbData{
+			Name:   dbStat.Name,
 			Fields: make(map[string]interface{}),
 		}
-		newDbData.Fields["type"] = "db_stat"
+		newDBData.Fields["type"] = "db_stat"
 		for key, value := range dbDataStats {
 			val := dbStatLine.FieldByName(value).Interface()
-			newDbData.Fields[key] = val
+			newDBData.Fields[key] = val
 		}
-		d.DbData = append(d.DbData, *newDbData)
+		d.DBData = append(d.DBData, *newDBData)
 	}
 }

-func (d *mongodbData) addColStats() {
+func (d *mongoDBData) addColStats() {
 	for i := range d.StatLine.ColStatsLines {
 		colstat := d.StatLine.ColStatsLines[i]
 		colStatLine := reflect.ValueOf(&colstat).Elem()
 		newColData := &colData{
 			Name:   colstat.Name,
-			DbName: colstat.DbName,
+			DBName: colstat.DBName,
 			Fields: make(map[string]interface{}),
 		}
 		newColData.Fields["type"] = "col_stat"
@@ -332,24 +332,24 @@ func (d *mongodbData) addColStats() {
 	}
 }

-func (d *mongodbData) addShardHostStats() {
+func (d *mongoDBData) addShardHostStats() {
 	for host := range d.StatLine.ShardHostStatsLines {
 		hostStat := d.StatLine.ShardHostStatsLines[host]
 		hostStatLine := reflect.ValueOf(&hostStat).Elem()
-		newDbData := &bbData{
+		newDBData := &bbData{
 			Name:   host,
 			Fields: make(map[string]interface{}),
 		}
-		newDbData.Fields["type"] = "shard_host_stat"
+		newDBData.Fields["type"] = "shard_host_stat"
 		for k, v := range shardHostStats {
 			val := hostStatLine.FieldByName(v).Interface()
-			newDbData.Fields[k] = val
+			newDBData.Fields[k] = val
 		}
-		d.ShardHostData = append(d.ShardHostData, *newDbData)
+		d.ShardHostData = append(d.ShardHostData, *newDBData)
 	}
 }

-func (d *mongodbData) addTopStats() {
+func (d *mongoDBData) addTopStats() {
 	for i := range d.StatLine.TopStatLines {
 		topStat := d.StatLine.TopStatLines[i]
 		topStatLine := reflect.ValueOf(&topStat).Elem()
@@ -366,7 +366,7 @@ func (d *mongodbData) addTopStats() {
 	}
 }

-func (d *mongodbData) addDefaultStats() {
+func (d *mongoDBData) addDefaultStats() {
 	statLine := reflect.ValueOf(d.StatLine).Elem()
 	d.addStat(statLine, defaultStats)
 	if d.StatLine.NodeType != "" {
@@ -414,18 +414,18 @@ func (d *mongodbData) addDefaultStats() {
 	}
 }

-func (d *mongodbData) addStat(statLine reflect.Value, stats map[string]string) {
+func (d *mongoDBData) addStat(statLine reflect.Value, stats map[string]string) {
 	for key, value := range stats {
 		val := statLine.FieldByName(value).Interface()
 		d.add(key, val)
 	}
 }

-func (d *mongodbData) add(key string, val interface{}) {
+func (d *mongoDBData) add(key string, val interface{}) {
 	d.Fields[key] = val
 }

-func (d *mongodbData) flush(acc telegraf.Accumulator) {
+func (d *mongoDBData) flush(acc telegraf.Accumulator) {
 	acc.AddFields(
 		"mongodb",
 		d.Fields,
@@ -434,7 +434,7 @@ func (d *mongodbData) flush(acc telegraf.Accumulator) {
 	)
 	d.Fields = make(map[string]interface{})

-	for _, db := range d.DbData {
+	for _, db := range d.DBData {
 		d.Tags["db_name"] = db.Name
 		acc.AddFields(
 			"mongodb_db_stats",
@@ -446,7 +446,7 @@ func (d *mongodbData) flush(acc telegraf.Accumulator) {
 	}
 	for _, col := range d.ColData {
 		d.Tags["collection"] = col.Name
-		d.Tags["db_name"] = col.DbName
+		d.Tags["db_name"] = col.DBName
 		acc.AddFields(
 			"mongodb_col_stats",
 			col.Fields,
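The `addDBStats`/`addStat` helpers above copy struct fields into metric maps by reflection, driven by a key-to-field-name map (`dbDataStats`, `shardHostStats`, and friends). A minimal standalone sketch of that pattern, with illustrative type and function names:

```go
package main

import (
	"fmt"
	"reflect"
)

type dbStatLine struct {
	Collections int64
	Objects     int64
}

// statFields maps output field keys to struct field names,
// mirroring maps like dbDataStats in the plugin.
var statFields = map[string]string{
	"collections": "Collections",
	"objects":     "Objects",
}

// collect copies the named struct fields into a generic field map via
// reflection, like mongoDBData.addStat does above.
func collect(line any, stats map[string]string) map[string]interface{} {
	v := reflect.ValueOf(line).Elem()
	fields := make(map[string]interface{})
	for key, name := range stats {
		fields[key] = v.FieldByName(name).Interface()
	}
	return fields
}

func main() {
	fmt.Println(collect(&dbStatLine{Collections: 3, Objects: 42}, statFields))
}
```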
@@ -182,7 +182,7 @@ func (s *server) gatherDBStats(name string) (*db, error) {

 	return &db{
 		Name:        name,
-		DbStatsData: stats,
+		DBStatsData: stats,
 	}, nil
 }

@@ -229,7 +229,7 @@ func (s *server) gatherOplogStats() (*oplogStats, error) {
 	return s.getOplogReplLag("oplog.$main")
 }

-func (s *server) gatherCollectionStats(colStatsDbs []string) (*colStats, error) {
+func (s *server) gatherCollectionStats(colStatsDBs []string) (*colStats, error) {
 	names, err := s.client.ListDatabaseNames(context.Background(), bson.D{})
 	if err != nil {
 		return nil, err
@@ -237,7 +237,7 @@ func (s *server) gatherCollectionStats(colStatsDbs []string) (*colStats, error)

 	results := &colStats{}
 	for _, dbName := range names {
-		if slices.Contains(colStatsDbs, dbName) || len(colStatsDbs) == 0 {
+		if slices.Contains(colStatsDBs, dbName) || len(colStatsDBs) == 0 {
 			// skip views as they fail on collStats below
 			filter := bson.M{"type": bson.M{"$in": bson.A{"collection", "timeseries"}}}

@@ -261,7 +261,7 @@ func (s *server) gatherCollectionStats(colStatsDbs []string) (*colStats, error)
 			}
 			collection := &collection{
 				Name:         colName,
-				DbName:       dbName,
+				DBName:       dbName,
 				ColStatsData: colStatLine,
 			}
 			results.Collections = append(results.Collections, *collection)
@@ -271,7 +271,7 @@ func (s *server) gatherCollectionStats(colStatsDbs []string) (*colStats, error)
 	return results, nil
 }

-func (s *server) gatherData(acc telegraf.Accumulator, gatherClusterStatus, gatherDbStats, gatherColStats, gatherTopStat bool, colStatsDbs []string) error {
+func (s *server) gatherData(acc telegraf.Accumulator, gatherClusterStatus, gatherDBStats, gatherColStats, gatherTopStat bool, colStatsDBs []string) error {
 	serverStatus, err := s.gatherServerStatus()
 	if err != nil {
 		return err
@@ -310,7 +310,7 @@ func (s *server) gatherData(acc telegraf.Accumulator, gatherClusterStatus, gathe

 	var collectionStats *colStats
 	if gatherColStats {
-		stats, err := s.gatherCollectionStats(colStatsDbs)
+		stats, err := s.gatherCollectionStats(colStatsDBs)
 		if err != nil {
 			return err
 		}
@@ -318,7 +318,7 @@ func (s *server) gatherData(acc telegraf.Accumulator, gatherClusterStatus, gathe
 	}

 	dbStats := &dbStats{}
-	if gatherDbStats {
+	if gatherDBStats {
 		names, err := s.client.ListDatabaseNames(context.Background(), bson.D{})
 		if err != nil {
 			return err
@@ -330,7 +330,7 @@ func (s *server) gatherData(acc telegraf.Accumulator, gatherClusterStatus, gathe
 			s.log.Errorf("Error getting db stats from %q: %v", name, err)
 			continue
 		}
-		dbStats.Dbs = append(dbStats.Dbs, *db)
+		dbStats.DBs = append(dbStats.DBs, *db)
 	}
 }

@@ -348,7 +348,7 @@ func (s *server) gatherData(acc telegraf.Accumulator, gatherClusterStatus, gathe
 		ServerStatus:  serverStatus,
 		ReplSetStatus: replSetStatus,
 		ClusterStatus: clusterStatus,
-		DbStats:       dbStats,
+		DBStats:       dbStats,
 		ColStats:      collectionStats,
 		ShardStats:    shardStats,
 		OplogStats:    oplogStats,
@@ -367,7 +367,7 @@ func (s *server) gatherData(acc telegraf.Accumulator, gatherClusterStatus, gathe
 		s.getDefaultTags(),
 	)
 	data.addDefaultStats()
-	data.addDbStats()
+	data.addDBStats()
 	data.addColStats()
 	data.addShardHostStats()
 	data.addTopStats()
@@ -20,7 +20,7 @@ type mongoStatus struct {
 	ServerStatus  *serverStatus
 	ReplSetStatus *replSetStatus
 	ClusterStatus *clusterStatus
-	DbStats       *dbStats
+	DBStats       *dbStats
 	ColStats      *colStats
 	ShardStats    *shardStats
 	OplogStats    *oplogStats
@@ -61,18 +61,18 @@ type serverStatus struct {

 // dbStats stores stats from all dbs
 type dbStats struct {
-	Dbs []db
+	DBs []db
 }

 // db represent a single DB
 type db struct {
 	Name        string
-	DbStatsData *dbStatsData
+	DBStatsData *dbStatsData
 }

 // dbStatsData stores stats from a db
 type dbStatsData struct {
-	Db          string  `bson:"db"`
+	DB          string  `bson:"db"`
 	Collections int64   `bson:"collections"`
 	Objects     int64   `bson:"objects"`
 	AvgObjSize  float64 `bson:"avgObjSize"`
@@ -93,7 +93,7 @@ type colStats struct {

 type collection struct {
 	Name         string
-	DbName       string
+	DBName       string
 	ColStatsData *colStatsData
 }

@@ -751,7 +751,7 @@ type statLine struct {
 	JumboChunksCount int64

 	// DB stats field
-	DbStatsLines []dbStatLine
+	DBStatsLines []dbStatLine

 	// Col Stats field
 	ColStatsLines []colStatLine
@@ -807,7 +807,7 @@ type dbStatLine struct {
 }
 type colStatLine struct {
 	Name       string
-	DbName     string
+	DBName     string
 	Count      int64
 	Size       int64
 	AvgObjSize float64
@@ -1347,16 +1347,16 @@ func newStatLine(oldMongo, newMongo mongoStatus, key string, sampleSecs int64) *
 		returnVal.OplogStats = newMongo.OplogStats
 	}

-	if newMongo.DbStats != nil {
-		newDbStats := *newMongo.DbStats
-		for _, db := range newDbStats.Dbs {
-			dbStatsData := db.DbStatsData
+	if newMongo.DBStats != nil {
+		newDBStats := *newMongo.DBStats
+		for _, db := range newDBStats.DBs {
+			dbStatsData := db.DBStatsData
 			// mongos doesn't have the db key, so setting the db name
-			if dbStatsData.Db == "" {
-				dbStatsData.Db = db.Name
+			if dbStatsData.DB == "" {
+				dbStatsData.DB = db.Name
 			}
 			dbStatLine := &dbStatLine{
-				Name:        dbStatsData.Db,
+				Name:        dbStatsData.DB,
 				Collections: dbStatsData.Collections,
 				Objects:     dbStatsData.Objects,
 				AvgObjSize:  dbStatsData.AvgObjSize,
@@ -1369,7 +1369,7 @@ func newStatLine(oldMongo, newMongo mongoStatus, key string, sampleSecs int64) *
 				FsTotalSize: dbStatsData.FsTotalSize,
 				FsUsedSize:  dbStatsData.FsUsedSize,
 			}
-			returnVal.DbStatsLines = append(returnVal.DbStatsLines, *dbStatLine)
+			returnVal.DBStatsLines = append(returnVal.DBStatsLines, *dbStatLine)
 		}
 	}

@@ -1382,7 +1382,7 @@ func newStatLine(oldMongo, newMongo mongoStatus, key string, sampleSecs int64) *
 		}
 		colStatLine := &colStatLine{
 			Name:       colStatsData.Collection,
-			DbName:     col.DbName,
+			DBName:     col.DBName,
 			Count:      colStatsData.Count,
 			Size:       colStatsData.Size,
 			AvgObjSize: colStatsData.AvgObjSize,
@@ -222,7 +222,7 @@ type memory struct {
 	Plugins   int64 `json:"plugins"`
 	OtherProc int64 `json:"other_proc"`
 	Metrics   int64 `json:"metrics"`
-	MgmtDb    int64 `json:"mgmt_db"`
+	MgmtDB    int64 `json:"mgmt_db"`
 	Mnesia    int64 `json:"mnesia"`
 	OtherEts  int64 `json:"other_ets"`
 	Binary    int64 `json:"binary"`
@@ -505,7 +505,7 @@ func gatherNodes(r *RabbitMQ, acc telegraf.Accumulator) {
 	fields["mem_plugins"] = memory.Memory.Plugins
 	fields["mem_other_proc"] = memory.Memory.OtherProc
 	fields["mem_metrics"] = memory.Memory.Metrics
-	fields["mem_mgmt_db"] = memory.Memory.MgmtDb
+	fields["mem_mgmt_db"] = memory.Memory.MgmtDB
 	fields["mem_mnesia"] = memory.Memory.Mnesia
 	fields["mem_other_ets"] = memory.Memory.OtherEts
 	fields["mem_binary"] = memory.Memory.Binary
@@ -29,7 +29,7 @@ const (
 		FROM mce_record
 		WHERE timestamp > ?
 	`
-	defaultDbPath         = "/var/lib/rasdaemon/ras-mc_event.db"
+	defaultDBPath         = "/var/lib/rasdaemon/ras-mc_event.db"
 	dateLayout            = "2006-01-02 15:04:05 -0700"
 	memoryReadCorrected   = "memory_read_corrected_errors"
 	memoryReadUncorrected = "memory_read_uncorrectable_errors"
@@ -76,7 +76,7 @@ func (*Ras) SampleConfig() string {

 // Start initializes connection to DB, metrics are gathered in Gather
 func (r *Ras) Start(telegraf.Accumulator) error {
-	err := validateDbPath(r.DBPath)
+	err := validateDBPath(r.DBPath)
 	if err != nil {
 		return err
 	}
@@ -177,7 +177,7 @@ func (r *Ras) updateServerCounters(mcError *machineCheckError) {
 	}
 }

-func validateDbPath(dbPath string) error {
+func validateDBPath(dbPath string) error {
 	pathInfo, err := os.Stat(dbPath)
 	if os.IsNotExist(err) {
 		return fmt.Errorf("provided db_path does not exist: [%s]", dbPath)
@@ -321,7 +321,7 @@ func init() {
 	//nolint:errcheck // known timestamp
 	defaultTimestamp, _ := parseDate("1970-01-01 00:00:01 -0700")
 	return &Ras{
-		DBPath:          defaultDbPath,
+		DBPath:          defaultDBPath,
 		latestTimestamp: defaultTimestamp,
 		cpuSocketCounters: map[int]metricCounters{
 			0: *newMetricCounters(),
@@ -136,7 +136,7 @@ func newRas() *Ras {
 	//nolint:errcheck // known timestamp
 	defaultTimestamp, _ := parseDate("1970-01-01 00:00:01 -0700")
 	return &Ras{
-		DBPath:          defaultDbPath,
+		DBPath:          defaultDBPath,
 		latestTimestamp: defaultTimestamp,
 		cpuSocketCounters: map[int]metricCounters{
 			0: *newMetricCounters(),
@@ -33,9 +33,9 @@ type RavenDB struct {
 	Timeout config.Duration `toml:"timeout"`

 	StatsInclude       []string `toml:"stats_include"`
-	DbStatsDbs         []string `toml:"db_stats_dbs"`
-	IndexStatsDbs      []string `toml:"index_stats_dbs"`
-	CollectionStatsDbs []string `toml:"collection_stats_dbs"`
+	DBStatsDBs         []string `toml:"db_stats_dbs"`
+	IndexStatsDBs      []string `toml:"index_stats_dbs"`
+	CollectionStatsDBs []string `toml:"collection_stats_dbs"`

 	tls.ClientConfig

@@ -58,9 +58,9 @@ func (r *RavenDB) Init() error {
 	}

 	r.requestURLServer = r.URL + "/admin/monitoring/v1/server"
-	r.requestURLDatabases = r.URL + "/admin/monitoring/v1/databases" + prepareDBNamesURLPart(r.DbStatsDbs)
-	r.requestURLIndexes = r.URL + "/admin/monitoring/v1/indexes" + prepareDBNamesURLPart(r.IndexStatsDbs)
-	r.requestURLCollection = r.URL + "/admin/monitoring/v1/collections" + prepareDBNamesURLPart(r.IndexStatsDbs)
+	r.requestURLDatabases = r.URL + "/admin/monitoring/v1/databases" + prepareDBNamesURLPart(r.DBStatsDBs)
+	r.requestURLIndexes = r.URL + "/admin/monitoring/v1/indexes" + prepareDBNamesURLPart(r.IndexStatsDBs)
+	r.requestURLCollection = r.URL + "/admin/monitoring/v1/collections" + prepareDBNamesURLPart(r.IndexStatsDBs)

 	err := choice.CheckSlice(r.StatsInclude, []string{"server", "databases", "indexes", "collections"})
 	if err != nil {
@@ -305,10 +305,10 @@ func (r *RavenDB) gatherIndexes(acc telegraf.Accumulator) {
 		return
 	}

-	for _, perDbIndexResponse := range indexesResponse.Results {
-		for _, indexResponse := range perDbIndexResponse.Indexes {
+	for _, perDBIndexResponse := range indexesResponse.Results {
+		for _, indexResponse := range perDBIndexResponse.Indexes {
 			tags := map[string]string{
-				"database_name": perDbIndexResponse.DatabaseName,
+				"database_name": perDBIndexResponse.DatabaseName,
 				"index_name":    indexResponse.IndexName,
 				"node_tag":      indexesResponse.NodeTag,
 				"url":           r.URL,
@@ -346,11 +346,11 @@ func (r *RavenDB) gatherCollections(acc telegraf.Accumulator) {
 		return
 	}

-	for _, perDbCollectionMetrics := range collectionsResponse.Results {
-		for _, collectionMetrics := range perDbCollectionMetrics.Collections {
+	for _, perDBCollectionMetrics := range collectionsResponse.Results {
+		for _, collectionMetrics := range perDBCollectionMetrics.Collections {
 			tags := map[string]string{
 				"collection_name": collectionMetrics.CollectionName,
-				"database_name":   perDbCollectionMetrics.DatabaseName,
+				"database_name":   perDBCollectionMetrics.DatabaseName,
 				"node_tag":        collectionsResponse.NodeTag,
 				"url":             r.URL,
 			}
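One observation carried over unchanged by the rename: `r.requestURLCollection` is built from `r.IndexStatsDBs` rather than `r.CollectionStatsDBs`. That may well be a pre-existing quirk rather than part of this cleanup; the commit only renames identifiers and faithfully preserves the existing behavior.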
@@ -1194,22 +1194,22 @@ func (e *endpoint) alignSamples(info []types.PerfSampleInfo, values []int64, int
 			continue
 		}
 		ts := info[idx].Timestamp
-		roundedTs := ts.Truncate(interval)
+		roundedTS := ts.Truncate(interval)

 		// Are we still working on the same bucket?
-		if roundedTs == lastBucket {
+		if roundedTS == lastBucket {
 			bi++
 			p := len(rValues) - 1
 			rValues[p] = ((bi-1)/bi)*rValues[p] + v/bi
 		} else {
 			rValues = append(rValues, v)
 			roundedInfo := types.PerfSampleInfo{
-				Timestamp: roundedTs,
+				Timestamp: roundedTS,
 				Interval:  info[idx].Interval,
 			}
 			rInfo = append(rInfo, roundedInfo)
 			bi = 1.0
-			lastBucket = roundedTs
+			lastBucket = roundedTS
 		}
 	}
 	return rInfo, rValues
@@ -1318,8 +1318,8 @@ func (e *endpoint) collectChunk(
 		count++

 		// Update hiwater marks
-		adjTs := ts.Add(interval).Truncate(interval).Add(-time.Second)
-		e.hwMarks.put(moid, name, adjTs)
+		adjTS := ts.Add(interval).Truncate(interval).Add(-time.Second)
+		e.hwMarks.put(moid, name, adjTS)
 	}
 	if nValues == 0 {
 		e.log.Debugf("Missing value for: %s, %s", name, objectRef.name)
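The `alignSamples` change above is again a pure rename, but the surrounding logic deserves a gloss: samples are bucketed by truncating each timestamp to the collection interval, and repeated samples in the same bucket are folded into a running mean via `rValues[p] = ((bi-1)/bi)*rValues[p] + v/bi`. A standalone sketch of that bucketing idea, with illustrative names rather than the plugin's API:

```go
package main

import (
	"fmt"
	"time"
)

// bucketAverage folds samples into interval-aligned buckets, keeping a
// running mean per bucket, mirroring the alignSamples logic above.
func bucketAverage(times []time.Time, values []float64, interval time.Duration) map[time.Time]float64 {
	avgs := make(map[time.Time]float64)
	counts := make(map[time.Time]float64)
	for i, ts := range times {
		bucket := ts.Truncate(interval) // same rounding as roundedTS above
		counts[bucket]++
		n := counts[bucket]
		// incremental mean: avg = ((n-1)/n)*avg + v/n
		avgs[bucket] = ((n-1)/n)*avgs[bucket] + values[i]/n
	}
	return avgs
}

func main() {
	base := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
	times := []time.Time{base, base.Add(10 * time.Second), base.Add(70 * time.Second)}
	values := []float64{1, 3, 5}
	// First two samples share a bucket (mean 2); the third starts a new one.
	fmt.Println(bucketAverage(times, values, time.Minute))
}
```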
@@ -22,7 +22,7 @@ import (
 	"github.com/influxdata/telegraf/testutil"
 )

-const tsDbName = "testDb"
+const tsDBName = "testDb"

 const testSingleTableName = "SingleTableName"
 const testSingleTableDim = "namespace"
@@ -77,13 +77,13 @@ func TestConnectValidatesConfigParameters(t *testing.T) {
 	require.ErrorContains(t, noDatabaseName.Connect(), "'database_name' key is required")

 	noMappingMode := Timestream{
-		DatabaseName: tsDbName,
+		DatabaseName: tsDBName,
 		Log:          testutil.Logger{},
 	}
 	require.ErrorContains(t, noMappingMode.Connect(), "'mapping_mode' key is required")

 	incorrectMappingMode := Timestream{
-		DatabaseName: tsDbName,
+		DatabaseName: tsDBName,
 		MappingMode:  "foo",
 		Log:          testutil.Logger{},
 	}
@@ -91,7 +91,7 @@ func TestConnectValidatesConfigParameters(t *testing.T) {

 	// multi-measure config validation multi table mode
 	validConfigMultiMeasureMultiTableMode := Timestream{
-		DatabaseName:                      tsDbName,
+		DatabaseName:                      tsDBName,
 		MappingMode:                       MappingModeMultiTable,
 		UseMultiMeasureRecords:            true,
 		MeasureNameForMultiMeasureRecords: "multi-measure-name",
@@ -100,7 +100,7 @@ func TestConnectValidatesConfigParameters(t *testing.T) {
 	require.NoError(t, validConfigMultiMeasureMultiTableMode.Connect())

 	invalidConfigMultiMeasureMultiTableMode := Timestream{
-		DatabaseName:           tsDbName,
+		DatabaseName:           tsDBName,
 		MappingMode:            MappingModeMultiTable,
 		UseMultiMeasureRecords: true,
 		// without MeasureNameForMultiMeasureRecords set we expect validation failure
@@ -110,7 +110,7 @@ func TestConnectValidatesConfigParameters(t *testing.T) {

 	// multi-measure config validation single table mode
 	validConfigMultiMeasureSingleTableMode := Timestream{
-		DatabaseName:           tsDbName,
+		DatabaseName:           tsDBName,
 		MappingMode:            MappingModeSingleTable,
 		SingleTableName:        testSingleTableName,
 		UseMultiMeasureRecords: true, // MeasureNameForMultiMeasureRecords is not needed as
@@ -120,7 +120,7 @@ func TestConnectValidatesConfigParameters(t *testing.T) {
 	require.NoError(t, validConfigMultiMeasureSingleTableMode.Connect())

 	invalidConfigMultiMeasureSingleTableMode := Timestream{
-		DatabaseName:           tsDbName,
+		DatabaseName:           tsDBName,
 		MappingMode:            MappingModeSingleTable,
 		SingleTableName:        testSingleTableName,
 		UseMultiMeasureRecords: true,
@@ -134,14 +134,14 @@ func TestConnectValidatesConfigParameters(t *testing.T) {

 	// multi-table arguments
 	validMappingModeMultiTable := Timestream{
-		DatabaseName: tsDbName,
+		DatabaseName: tsDBName,
 		MappingMode:  MappingModeMultiTable,
 		Log:          testutil.Logger{},
 	}
 	require.NoError(t, validMappingModeMultiTable.Connect())

 	singleTableNameWithMultiTable := Timestream{
-		DatabaseName:    tsDbName,
+		DatabaseName:    tsDBName,
 		MappingMode:     MappingModeMultiTable,
 		SingleTableName: testSingleTableName,
 		Log:             testutil.Logger{},
@@ -149,7 +149,7 @@ func TestConnectValidatesConfigParameters(t *testing.T) {
 	require.Contains(t, singleTableNameWithMultiTable.Connect().Error(), "SingleTableName")

 	singleTableDimensionWithMultiTable := Timestream{
-		DatabaseName: tsDbName,
+		DatabaseName: tsDBName,
 		MappingMode:  MappingModeMultiTable,
 		SingleTableDimensionNameForTelegrafMeasurementName: testSingleTableDim,
 		Log: testutil.Logger{},
@@ -159,14 +159,14 @@ func TestConnectValidatesConfigParameters(t *testing.T) {

 	// single-table arguments
 	noTableNameMappingModeSingleTable := Timestream{
-		DatabaseName: tsDbName,
+		DatabaseName: tsDBName,
 		MappingMode:  MappingModeSingleTable,
 		Log:          testutil.Logger{},
 	}
 	require.Contains(t, noTableNameMappingModeSingleTable.Connect().Error(), "SingleTableName")

 	noDimensionNameMappingModeSingleTable := Timestream{
-		DatabaseName:    tsDbName,
+		DatabaseName:    tsDBName,
 		MappingMode:     MappingModeSingleTable,
 		SingleTableName: testSingleTableName,
 		Log:             testutil.Logger{},
@@ -175,7 +175,7 @@ func TestConnectValidatesConfigParameters(t *testing.T) {
 		"SingleTableDimensionNameForTelegrafMeasurementName")

 	validConfigurationMappingModeSingleTable := Timestream{
-		DatabaseName:    tsDbName,
+		DatabaseName:    tsDBName,
 		MappingMode:     MappingModeSingleTable,
 		SingleTableName: testSingleTableName,
 		SingleTableDimensionNameForTelegrafMeasurementName: testSingleTableDim,
@@ -185,7 +185,7 @@ func TestConnectValidatesConfigParameters(t *testing.T) {

 	// create table arguments
 	createTableNoMagneticRetention := Timestream{
-		DatabaseName:           tsDbName,
+		DatabaseName:           tsDBName,
 		MappingMode:            MappingModeMultiTable,
 		CreateTableIfNotExists: true,
 		Log:                    testutil.Logger{},
@@ -194,7 +194,7 @@ func TestConnectValidatesConfigParameters(t *testing.T) {
 		"CreateTableMagneticStoreRetentionPeriodInDays")

 	createTableNoMemoryRetention := Timestream{
-		DatabaseName:           tsDbName,
+		DatabaseName:           tsDBName,
 		MappingMode:            MappingModeMultiTable,
 		CreateTableIfNotExists: true,
 		CreateTableMagneticStoreRetentionPeriodInDays: 3,
@@ -204,7 +204,7 @@ func TestConnectValidatesConfigParameters(t *testing.T) {
 		"CreateTableMemoryStoreRetentionPeriodInHours")

 	createTableValid := Timestream{
-		DatabaseName:           tsDbName,
+		DatabaseName:           tsDBName,
 		MappingMode:            MappingModeMultiTable,
 		CreateTableIfNotExists: true,
 		CreateTableMagneticStoreRetentionPeriodInDays: 3,
@@ -215,7 +215,7 @@ func TestConnectValidatesConfigParameters(t *testing.T) {

 	// describe table on start arguments
 	describeTableInvoked := Timestream{
-		DatabaseName:            tsDbName,
+		DatabaseName:            tsDBName,
 		MappingMode:             MappingModeMultiTable,
 		DescribeDatabaseOnStart: true,
 		Log:                     testutil.Logger{},
@@ -254,7 +254,7 @@ func TestWriteMultiMeasuresSingleTableMode(t *testing.T) {
 	plugin := Timestream{
 		MappingMode:            MappingModeSingleTable,
 		SingleTableName:        "test-multi-single-table-mode",
-		DatabaseName:           tsDbName,
+		DatabaseName:           tsDBName,
 		UseMultiMeasureRecords: true, // use multi
 		Log:                    testutil.Logger{},
 	}
@@ -311,7 +311,7 @@ func TestWriteMultiMeasuresMultiTableMode(t *testing.T) {

 	plugin := Timestream{
 		MappingMode:                       MappingModeMultiTable,
-		DatabaseName:                      tsDbName,
+		DatabaseName:                      tsDBName,
 		UseMultiMeasureRecords:            true, // use multi
 		MeasureNameForMultiMeasureRecords: "config-multi-measure-name",
 		Log:                               testutil.Logger{},
@@ -407,7 +407,7 @@ func TestBuildMultiMeasuresInSingleAndMultiTableMode(t *testing.T) {

 	plugin := Timestream{
 		MappingMode:                       MappingModeMultiTable,
-		DatabaseName:                      tsDbName,
+		DatabaseName:                      tsDBName,
 		UseMultiMeasureRecords:            true, // use multi
 		MeasureNameForMultiMeasureRecords: "config-multi-measure-name",
 		Log:                               testutil.Logger{},
@@ -431,7 +431,7 @@ func TestBuildMultiMeasuresInSingleAndMultiTableMode(t *testing.T) {
 	plugin = Timestream{
 		MappingMode:            MappingModeSingleTable,
 		SingleTableName:        "singleTableName",
-		DatabaseName:           tsDbName,
+		DatabaseName:           tsDBName,
 		UseMultiMeasureRecords: true, // use multi
 		Log:                    testutil.Logger{},
 	}
@@ -518,7 +518,7 @@ func buildExpectedMultiRecords(multiMeasureName, tableName string) *timestreamwr
 	recordsMultiTableMode = append(recordsMultiTableMode, recordUint64...)

 	expectedResultMultiTable := &timestreamwrite.WriteRecordsInput{
-		DatabaseName:     aws.String(tsDbName),
+		DatabaseName:     aws.String(tsDBName),
 		TableName:        aws.String(tableName),
 		Records:          recordsMultiTableMode,
 		CommonAttributes: &types.Record{},
@@ -563,7 +563,7 @@ func TestThrottlingErrorIsReturnedToTelegraf(t *testing.T) {

 	plugin := Timestream{
 		MappingMode:  MappingModeMultiTable,
-		DatabaseName: tsDbName,
+		DatabaseName: tsDBName,
 		Log:          testutil.Logger{},
 	}
 	require.NoError(t, plugin.Connect())
@@ -589,7 +589,7 @@ func TestRejectedRecordsErrorResultsInMetricsBeingSkipped(t *testing.T) {

 	plugin := Timestream{
 		MappingMode:  MappingModeMultiTable,
-		DatabaseName: tsDbName,
+		DatabaseName: tsDBName,
 		Log:          testutil.Logger{},
 	}
 	require.NoError(t, plugin.Connect())
@@ -618,7 +618,7 @@ func TestWriteWhenRequestsGreaterThanMaxWriteGoRoutinesCount(t *testing.T) {

 	plugin := Timestream{
 		MappingMode:  MappingModeMultiTable,
-		DatabaseName: tsDbName,
+		DatabaseName: tsDBName,
 		// Spawn only one go routine to serve all 5 write requests
 		MaxWriteGoRoutinesCount: 2,
 		Log:                     testutil.Logger{},
@@ -657,7 +657,7 @@ func TestWriteWhenRequestsLesserThanMaxWriteGoRoutinesCount(t *testing.T) {

 	plugin := Timestream{
 		MappingMode:  MappingModeMultiTable,
-		DatabaseName: tsDbName,
+		DatabaseName: tsDBName,
 		// Spawn 5 parallel go routines to serve 2 write requests
 		// In this case only 2 of the 5 go routines will process the write requests
 		MaxWriteGoRoutinesCount: 5,
@@ -724,7 +724,7 @@ func TestTransformMetricsSkipEmptyMetric(t *testing.T) {
 	})

 	expectedResultSingleTable := &timestreamwrite.WriteRecordsInput{
-		DatabaseName:     aws.String(tsDbName),
+		DatabaseName:     aws.String(tsDBName),
 		TableName:        aws.String(testSingleTableName),
 		Records:          records,
 		CommonAttributes: &types.Record{},
@@ -750,7 +750,7 @@ func TestTransformMetricsSkipEmptyMetric(t *testing.T) {
 	})

 	expectedResultMultiTable := &timestreamwrite.WriteRecordsInput{
-		DatabaseName:     aws.String(tsDbName),
+		DatabaseName:     aws.String(tsDBName),
 		TableName:        aws.String(metricName1),
 		Records:          recordsMulti,
 		CommonAttributes: &types.Record{},
@@ -854,7 +854,7 @@ func TestTransformMetricsRequestsAboveLimitAreSplitSingleTable(t *testing.T) {
 	}

 	expectedResult1SingleTable := &timestreamwrite.WriteRecordsInput{
-		DatabaseName:     aws.String(tsDbName),
+		DatabaseName:     aws.String(tsDBName),
 		TableName:        aws.String(testSingleTableName),
 		Records:          recordsFirstReq,
 		CommonAttributes: &types.Record{},
@@ -872,7 +872,7 @@ func TestTransformMetricsRequestsAboveLimitAreSplitSingleTable(t *testing.T) {
 	})...)

 	expectedResult2SingleTable := &timestreamwrite.WriteRecordsInput{
-		DatabaseName:     aws.String(tsDbName),
+		DatabaseName:     aws.String(tsDBName),
 		TableName:        aws.String(testSingleTableName),
 		Records:          recordsSecondReq,
 		CommonAttributes: &types.Record{},
@@ -918,7 +918,7 @@ func TestTransformMetricsDifferentDimensionsSameTimestampsAreWrittenSeparate(t *
 	})

 	expectedResultSingleTable := &timestreamwrite.WriteRecordsInput{
-		DatabaseName:     aws.String(tsDbName),
+		DatabaseName:     aws.String(tsDBName),
 		TableName:        aws.String(testSingleTableName),
 		Records:          recordsSingle,
 		CommonAttributes: &types.Record{},
@@ -981,7 +981,7 @@ func TestTransformMetricsSameDimensionsDifferentDimensionValuesAreWrittenSeparat
 	})

 	expectedResultSingleTable := &timestreamwrite.WriteRecordsInput{
-		DatabaseName:     aws.String(tsDbName),
+		DatabaseName:     aws.String(tsDBName),
 		TableName:        aws.String(testSingleTableName),
 		Records:          recordsSingle,
 		CommonAttributes: &types.Record{},
@@ -1043,7 +1043,7 @@ func TestTransformMetricsSameDimensionsDifferentTimestampsAreWrittenSeparate(t *
 	})

 	expectedResultSingleTable := &timestreamwrite.WriteRecordsInput{
-		DatabaseName:     aws.String(tsDbName),
+		DatabaseName:     aws.String(tsDBName),
 		TableName:        aws.String(testSingleTableName),
 		Records:          recordsSingle,
 		CommonAttributes: &types.Record{},
@@ -1069,7 +1069,7 @@ func TestTransformMetricsSameDimensionsDifferentTimestampsAreWrittenSeparate(t *
 	})

 	expectedResultMultiTable := &timestreamwrite.WriteRecordsInput{
-		DatabaseName:     aws.String(tsDbName),
+		DatabaseName:     aws.String(tsDBName),
 		TableName:        aws.String(metricName1),
 		Records:          recordsMultiTable,
 		CommonAttributes: &types.Record{},
@@ -1155,7 +1155,7 @@ func TestTransformMetricsDifferentMetricsAreWrittenToDifferentTablesInMultiTable
 	})

 	expectedResultSingleTable := &timestreamwrite.WriteRecordsInput{
-		DatabaseName:     aws.String(tsDbName),
+		DatabaseName:     aws.String(tsDBName),
 		TableName:        aws.String(testSingleTableName),
 		Records:          recordsSingle,
 		CommonAttributes: &types.Record{},
@@ -1219,7 +1219,7 @@ func TestCustomEndpoint(t *testing.T) {
 	customEndpoint := "http://test.custom.endpoint.com"
 	plugin := Timestream{
 		MappingMode:      MappingModeMultiTable,
-		DatabaseName:     tsDbName,
+		DatabaseName:     tsDBName,
 		Log:              testutil.Logger{},
 		CredentialConfig: common_aws.CredentialConfig{EndpointURL: customEndpoint},
 	}
@@ -1241,7 +1241,7 @@ func comparisonTest(t *testing.T,
 	case MappingModeSingleTable:
 		plugin = Timestream{
 			MappingMode:  mappingMode,
-			DatabaseName: tsDbName,
+			DatabaseName: tsDBName,

 			SingleTableName: testSingleTableName,
 			SingleTableDimensionNameForTelegrafMeasurementName: testSingleTableDim,
@@ -1250,7 +1250,7 @@ func comparisonTest(t *testing.T,
 	case MappingModeMultiTable:
 		plugin = Timestream{
 			MappingMode:  mappingMode,
-			DatabaseName: tsDbName,
+			DatabaseName: tsDBName,
 			Log:          testutil.Logger{},
 		}
 	}
@@ -1337,7 +1337,7 @@ func buildExpectedInput(i SimpleInput) *timestreamwrite.WriteRecordsInput {
 	}

 	result := &timestreamwrite.WriteRecordsInput{
-		DatabaseName:     aws.String(tsDbName),
+		DatabaseName:     aws.String(tsDBName),
 		TableName:        aws.String(i.tableName),
 		Records:          tsRecords,
 		CommonAttributes: &types.Record{},
@@ -119,12 +119,12 @@ type Parser struct {
 	// "RESPONSE_CODE": "%{NUMBER:rc:tag}"
 	// }
 	patternsMap map[string]string
-	// foundTsLayouts is a slice of timestamp patterns that have been found
+	// foundTSLayouts is a slice of timestamp patterns that have been found
 	// in the log lines. This slice gets updated if the user uses the generic
 	// 'ts' modifier for timestamps. This slice is checked first for matches,
 	// so that previously-matched layouts get priority over all other timestamp
 	// layouts.
-	foundTsLayouts []string
+	foundTSLayouts []string

 	timeFunc func() time.Time
 	g        *grok.Grok
@@ -329,32 +329,32 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
 			p.Log.Errorf("Error parsing %s to time layout [%s]: %s", v, t, err)
 		}
 	case GenericTimestamp:
-		var foundTs bool
+		var foundTS bool
 		// first try timestamp layouts that we've already found
-		for _, layout := range p.foundTsLayouts {
+		for _, layout := range p.foundTSLayouts {
 			ts, err := internal.ParseTimestamp(layout, v, p.loc)
 			if err == nil {
 				timestamp = ts
-				foundTs = true
+				foundTS = true
 				break
 			}
 		}
 		// if we haven't found a timestamp layout yet, try all timestamp
 		// layouts.
-		if !foundTs {
+		if !foundTS {
 			for _, layout := range timeLayouts {
 				ts, err := internal.ParseTimestamp(layout, v, p.loc)
 				if err == nil {
 					timestamp = ts
-					foundTs = true
-					p.foundTsLayouts = append(p.foundTsLayouts, layout)
+					foundTS = true
+					p.foundTSLayouts = append(p.foundTSLayouts, layout)
 					break
 				}
 			}
 		}
 		// if we still haven't found a timestamp layout, log it and we will
 		// just use time.Now()
-		if !foundTs {
+		if !foundTS {
 			p.Log.Errorf("Error parsing timestamp [%s], could not find any "+
 				"suitable time layouts.", v)
 		}
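The grok parser's own comment above describes the caching trick behind `foundTSLayouts`: layouts that matched earlier lines are tried first, and a newly matched layout is remembered for subsequent lines. A minimal sketch of that pattern using only the standard library (`parseTS` and `foundLayouts` are illustrative names, not the parser's API):

```go
package main

import (
	"fmt"
	"time"
)

// foundLayouts caches layouts that matched a previous line, so later
// lines try the likely layout first instead of scanning the full list.
var foundLayouts []string

func parseTS(value string, allLayouts []string) (time.Time, bool) {
	// First try layouts we have already seen succeed.
	for _, layout := range foundLayouts {
		if ts, err := time.Parse(layout, value); err == nil {
			return ts, true
		}
	}
	// Fall back to the full list and remember whichever layout works.
	for _, layout := range allLayouts {
		if ts, err := time.Parse(layout, value); err == nil {
			foundLayouts = append(foundLayouts, layout)
			return ts, true
		}
	}
	return time.Time{}, false
}

func main() {
	layouts := []string{time.RFC1123, time.RFC3339}
	ts, ok := parseTS("2024-01-02T15:04:05Z", layouts)
	fmt.Println(ts, ok, foundLayouts) // a second call would hit the cache
}
```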
@@ -25,19 +25,19 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
 	}

 	for _, ts := range req.Timeseries {
-		var metricsFromTs []telegraf.Metric
+		var metricsFromTS []telegraf.Metric
 		switch p.MetricVersion {
 		case 0, 2:
-			metricsFromTs, err = p.extractMetricsV2(&ts)
+			metricsFromTS, err = p.extractMetricsV2(&ts)
 		case 1:
-			metricsFromTs, err = p.extractMetricsV1(&ts)
+			metricsFromTS, err = p.extractMetricsV1(&ts)
 		default:
 			return nil, fmt.Errorf("unknown prometheus metric version %d", p.MetricVersion)
 		}
 		if err != nil {
 			return nil, err
 		}
-		metrics = append(metrics, metricsFromTs...)
+		metrics = append(metrics, metricsFromTS...)
 	}

 	return metrics, err