Add the Timestream Output Plugin (#8239)

Co-authored-by: Piotr Westfalewicz <westpiot@amazon.com>
piotrwest 2020-10-15 12:51:17 -05:00 committed by GitHub
parent 796b3b8d41
commit 7c2c2c5d8b
8 changed files with 1609 additions and 8 deletions


@@ -30,7 +30,9 @@ func (c *CredentialConfig) Credentials() client.ConfigProvider {
 func (c *CredentialConfig) rootCredentials() client.ConfigProvider {
 	config := &aws.Config{
 		Region: aws.String(c.Region),
-		Endpoint: &c.EndpointURL,
 	}
+	if c.EndpointURL != "" {
+		config.Endpoint = &c.EndpointURL
+	}
 	if c.AccessKey != "" || c.SecretKey != "" {
 		config.Credentials = credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token)

go.mod

@@ -26,7 +26,7 @@ require (
 	github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect
 	github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740
 	github.com/armon/go-metrics v0.3.0 // indirect
-	github.com/aws/aws-sdk-go v1.33.12
+	github.com/aws/aws-sdk-go v1.34.34
 	github.com/benbjohnson/clock v1.0.3
 	github.com/bitly/go-hostpool v0.1.0 // indirect
 	github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869
@@ -151,7 +151,7 @@ require (
 	gopkg.in/ldap.v3 v3.1.0
 	gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce
 	gopkg.in/olivere/elastic.v5 v5.0.70
-	gopkg.in/yaml.v2 v2.2.5
+	gopkg.in/yaml.v2 v2.2.8
 	gotest.tools v2.2.0+incompatible // indirect
 	honnef.co/go/tools v0.0.1-2020.1.3 // indirect
 	k8s.io/apimachinery v0.17.1 // indirect

go.sum

@@ -114,8 +114,8 @@ github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740/go.mod h1:
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
 github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU=
 github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs=
-github.com/aws/aws-sdk-go v1.33.12 h1:eydMoSwfrSTD9PWKUJOiDL7+/UwDW8AjInUGVE5Llh4=
-github.com/aws/aws-sdk-go v1.33.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
+github.com/aws/aws-sdk-go v1.34.34 h1:5dC0ZU0xy25+UavGNEkQ/5MOQwxXDA2YXtjCL1HfYKI=
+github.com/aws/aws-sdk-go v1.34.34/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
 github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg=
 github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
@@ -354,8 +354,10 @@ github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGk
 github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
 github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
 github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
-github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
-github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
 github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
 github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
 github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7 h1:K//n/AqR5HjG3qxbrBCL4vJPW0MVFSs9CPK1OOJdRME=
@@ -893,6 +895,8 @@ gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
 gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
 gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
 honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=


@@ -37,6 +37,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/outputs/stackdriver"
 	_ "github.com/influxdata/telegraf/plugins/outputs/sumologic"
 	_ "github.com/influxdata/telegraf/plugins/outputs/syslog"
+	_ "github.com/influxdata/telegraf/plugins/outputs/timestream"
 	_ "github.com/influxdata/telegraf/plugins/outputs/warp10"
 	_ "github.com/influxdata/telegraf/plugins/outputs/wavefront"
 )


@@ -0,0 +1,152 @@
# Timestream Output Plugin
The Timestream output plugin writes metrics to the [Amazon Timestream] service.
### Configuration
```toml
# Configuration for sending metrics to Amazon Timestream.
[[outputs.timestream]]
## Amazon Region
region = "us-east-1"
## Amazon Credentials
## Credentials are loaded in the following order:
## 1) Assumed credentials via STS if role_arn is specified
## 2) Explicit credentials from 'access_key' and 'secret_key'
## 3) Shared profile from 'profile'
## 4) Environment variables
## 5) Shared credentials file
## 6) EC2 Instance Profile
#access_key = ""
#secret_key = ""
#token = ""
#role_arn = ""
#profile = ""
#shared_credential_file = ""
## Endpoint to make requests against; the correct endpoint is automatically
## determined, and this option should only be set if you wish to override the
## default.
## ex: endpoint_url = "http://localhost:8000"
# endpoint_url = ""
## Timestream database where the metrics will be inserted.
## The database must exist prior to starting Telegraf.
database_name = "yourDatabaseNameHere"
## Specifies whether the plugin should describe the Timestream database upon starting,
## to validate that it has the necessary permissions, connectivity, etc., as a safety check.
## If the describe operation fails, the plugin will not start,
## and therefore the Telegraf agent will not start.
describe_database_on_start = false
## The mapping mode specifies how Telegraf records are represented in Timestream.
## Valid values are: single-table, multi-table.
## For example, consider the following data in line protocol format:
## weather,location=us-midwest,season=summer temperature=82,humidity=71 1465839830100400200
## airquality,location=us-west no2=5,pm25=16 1465839830100400200
## where weather and airquality are the measurement names, location and season are tags,
## and temperature, humidity, no2, pm25 are fields.
## In multi-table mode:
##  - the first line will be ingested into a table named weather
##  - the second line will be ingested into a table named airquality
##  - the tags will be represented as dimensions
##  - the first table (weather) will have two records:
##      one with measure_name equal to temperature,
##      another with measure_name equal to humidity
##  - the second table (airquality) will have two records:
##      one with measure_name equal to no2,
##      another with measure_name equal to pm25
## - the Timestream tables from the example will look like this:
## TABLE "weather":
## time | location | season | measure_name | measure_value::bigint
## 2016-06-13 17:43:50 | us-midwest | summer | temperature | 82
## 2016-06-13 17:43:50 | us-midwest | summer | humidity | 71
## TABLE "airquality":
## time | location | measure_name | measure_value::bigint
## 2016-06-13 17:43:50 | us-west | no2 | 5
## 2016-06-13 17:43:50 | us-west | pm25 | 16
## In single-table mode:
##  - the data will be ingested into a single table, whose name will be valueOf(single_table_name)
##  - the measurement name will be stored in a dimension named valueOf(single_table_dimension_name_for_telegraf_measurement_name)
##  - location and season will be represented as dimensions
##  - temperature, humidity, no2, pm25 will be represented as measure names
## - the Timestream table from the example will look like this:
## Assuming:
## - single_table_name = "my_readings"
## - single_table_dimension_name_for_telegraf_measurement_name = "namespace"
## TABLE "my_readings":
## time | location | season | namespace | measure_name | measure_value::bigint
## 2016-06-13 17:43:50 | us-midwest | summer | weather | temperature | 82
## 2016-06-13 17:43:50 | us-midwest | summer | weather | humidity | 71
## 2016-06-13 17:43:50 | us-west | NULL | airquality | no2 | 5
## 2016-06-13 17:43:50 | us-west | NULL | airquality | pm25 | 16
## In most cases, using the multi-table mapping mode is recommended.
## However, consider using single-table mode when you have thousands of measurement names.
mapping_mode = "multi-table"
## Only valid and required for mapping_mode = "single-table"
## Specifies the Timestream table where the metrics will be uploaded.
# single_table_name = "yourTableNameHere"
## Only valid and required for mapping_mode = "single-table"
## Describes what will be the Timestream dimension name for the Telegraf
## measurement name.
# single_table_dimension_name_for_telegraf_measurement_name = "namespace"
## Specifies whether the plugin should create the table if it does not exist.
## The plugin writes the data without first checking whether the table exists.
## When the table does not exist, the error returned from Timestream will cause
## the plugin to create the table, if this parameter is set to true.
create_table_if_not_exists = true
## Only valid and required if create_table_if_not_exists = true
## Specifies the Timestream table magnetic store retention period in days.
## Check Timestream documentation for more details.
create_table_magnetic_store_retention_period_in_days = 365
## Only valid and required if create_table_if_not_exists = true
## Specifies the Timestream table memory store retention period in hours.
## Check Timestream documentation for more details.
create_table_memory_store_retention_period_in_hours = 24
## Only valid and optional if create_table_if_not_exists = true
## Specifies the Timestream table tags.
## Check Timestream documentation for more details.
# create_table_tags = { "foo" = "bar", "environment" = "dev" }
```
### Batching
Timestream `WriteRecordsInput.CommonAttributes` is used to write data to Timestream efficiently: attributes shared by all records in a request (the dimensions and the timestamp) are sent only once.
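For illustration, here is a minimal, hand-written sketch (not the plugin's actual code; values are taken from the `weather` example above) of the kind of request the plugin assembles using the aws-sdk-go `timestreamwrite` types:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/timestreamwrite"
)

// exampleWriteRequest shows how attributes shared by every record in a batch
// (the dimensions and the time) are lifted into CommonAttributes, so that each
// Record carries only its own measure name, value, and type.
func exampleWriteRequest() *timestreamwrite.WriteRecordsInput {
	return &timestreamwrite.WriteRecordsInput{
		DatabaseName: aws.String("yourDatabaseNameHere"),
		TableName:    aws.String("weather"),
		CommonAttributes: &timestreamwrite.Record{
			Dimensions: []*timestreamwrite.Dimension{
				{Name: aws.String("location"), Value: aws.String("us-midwest")},
				{Name: aws.String("season"), Value: aws.String("summer")},
			},
			Time:     aws.String("1465839830100400200"),
			TimeUnit: aws.String("NANOSECONDS"),
		},
		Records: []*timestreamwrite.Record{
			{
				MeasureName:      aws.String("temperature"),
				MeasureValueType: aws.String("DOUBLE"),
				MeasureValue:     aws.String("82"),
			},
			{
				MeasureName:      aws.String("humidity"),
				MeasureValueType: aws.String("DOUBLE"),
				MeasureValue:     aws.String("71"),
			},
		},
	}
}
```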
### Multithreading
A single thread is used to write the data to Timestream, following the general plugin design pattern.
### Errors
When an attempt is made to write a Telegraf field of a type that Timestream does not support, the field is dropped and an error is emitted to the logs.
When a ThrottlingException or an InternalServerException is received from Timestream, the error is returned to Telegraf, in which case Telegraf will keep the metrics in its buffer and retry writing them on the next flush.
When a ResourceNotFoundException is received:
- If `create_table_if_not_exists` is set to `true`, the plugin will try to create the appropriate table and, if the table creation succeeds, write the records again.
- If `create_table_if_not_exists` is set to `false`, the records are dropped and an error is emitted to the logs.
When any other AWS error is received from Timestream, the records are dropped and an error is emitted to the logs, as retrying such requests isn't likely to succeed.
### Logging
Turn on the debug flag in Telegraf to enable detailed logging, including the records being written to Timestream.
### Testing
Execute unit tests with:
```sh
go test -v ./plugins/outputs/timestream/...
```
[Amazon Timestream]: https://aws.amazon.com/timestream/


@@ -0,0 +1,608 @@
package timestream
import (
"encoding/binary"
"fmt"
"hash/fnv"
"reflect"
"strconv"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/timestreamwrite"
internalaws "github.com/influxdata/telegraf/config/aws"
)
type (
Timestream struct {
Region string `toml:"region"`
AccessKey string `toml:"access_key"`
SecretKey string `toml:"secret_key"`
RoleARN string `toml:"role_arn"`
Profile string `toml:"profile"`
Filename string `toml:"shared_credential_file"`
Token string `toml:"token"`
EndpointURL string `toml:"endpoint_url"`
MappingMode string `toml:"mapping_mode"`
DescribeDatabaseOnStart bool `toml:"describe_database_on_start"`
DatabaseName string `toml:"database_name"`
SingleTableName string `toml:"single_table_name"`
SingleTableDimensionNameForTelegrafMeasurementName string `toml:"single_table_dimension_name_for_telegraf_measurement_name"`
CreateTableIfNotExists bool `toml:"create_table_if_not_exists"`
CreateTableMagneticStoreRetentionPeriodInDays int64 `toml:"create_table_magnetic_store_retention_period_in_days"`
CreateTableMemoryStoreRetentionPeriodInHours int64 `toml:"create_table_memory_store_retention_period_in_hours"`
CreateTableTags map[string]string `toml:"create_table_tags"`
Log telegraf.Logger
svc WriteClient
}
WriteClient interface {
CreateTable(*timestreamwrite.CreateTableInput) (*timestreamwrite.CreateTableOutput, error)
WriteRecords(*timestreamwrite.WriteRecordsInput) (*timestreamwrite.WriteRecordsOutput, error)
DescribeDatabase(*timestreamwrite.DescribeDatabaseInput) (*timestreamwrite.DescribeDatabaseOutput, error)
}
)
// Mapping modes specify how the Telegraf data model should be represented in the Timestream data model.
// See the sample config for more details.
const (
MappingModeSingleTable = "single-table"
MappingModeMultiTable = "multi-table"
)
// MaxRecordsPerCall reflects the Timestream limit on the number of records in a single WriteRecords API call.
const MaxRecordsPerCall = 100
var sampleConfig = `
## Amazon Region
region = "us-east-1"
## Amazon Credentials
## Credentials are loaded in the following order:
## 1) Assumed credentials via STS if role_arn is specified
## 2) Explicit credentials from 'access_key' and 'secret_key'
## 3) Shared profile from 'profile'
## 4) Environment variables
## 5) Shared credentials file
## 6) EC2 Instance Profile
#access_key = ""
#secret_key = ""
#token = ""
#role_arn = ""
#profile = ""
#shared_credential_file = ""
## Endpoint to make requests against; the correct endpoint is automatically
## determined, and this option should only be set if you wish to override the
## default.
## ex: endpoint_url = "http://localhost:8000"
# endpoint_url = ""
## Timestream database where the metrics will be inserted.
## The database must exist prior to starting Telegraf.
database_name = "yourDatabaseNameHere"
## Specifies whether the plugin should describe the Timestream database upon starting,
## to validate that it has the necessary permissions, connectivity, etc., as a safety check.
## If the describe operation fails, the plugin will not start,
## and therefore the Telegraf agent will not start.
describe_database_on_start = false
## The mapping mode specifies how Telegraf records are represented in Timestream.
## Valid values are: single-table, multi-table.
## For example, consider the following data in line protocol format:
## weather,location=us-midwest,season=summer temperature=82,humidity=71 1465839830100400200
## airquality,location=us-west no2=5,pm25=16 1465839830100400200
## where weather and airquality are the measurement names, location and season are tags,
## and temperature, humidity, no2, pm25 are fields.
## In multi-table mode:
##  - the first line will be ingested into a table named weather
##  - the second line will be ingested into a table named airquality
##  - the tags will be represented as dimensions
##  - the first table (weather) will have two records:
##      one with measure_name equal to temperature,
##      another with measure_name equal to humidity
##  - the second table (airquality) will have two records:
##      one with measure_name equal to no2,
##      another with measure_name equal to pm25
## - the Timestream tables from the example will look like this:
## TABLE "weather":
## time | location | season | measure_name | measure_value::bigint
## 2016-06-13 17:43:50 | us-midwest | summer | temperature | 82
## 2016-06-13 17:43:50 | us-midwest | summer | humidity | 71
## TABLE "airquality":
## time | location | measure_name | measure_value::bigint
## 2016-06-13 17:43:50 | us-west | no2 | 5
## 2016-06-13 17:43:50 | us-west | pm25 | 16
## In single-table mode:
##  - the data will be ingested into a single table, whose name will be valueOf(single_table_name)
##  - the measurement name will be stored in a dimension named valueOf(single_table_dimension_name_for_telegraf_measurement_name)
##  - location and season will be represented as dimensions
##  - temperature, humidity, no2, pm25 will be represented as measure names
## - the Timestream table from the example will look like this:
## Assuming:
## - single_table_name = "my_readings"
## - single_table_dimension_name_for_telegraf_measurement_name = "namespace"
## TABLE "my_readings":
## time | location | season | namespace | measure_name | measure_value::bigint
## 2016-06-13 17:43:50 | us-midwest | summer | weather | temperature | 82
## 2016-06-13 17:43:50 | us-midwest | summer | weather | humidity | 71
## 2016-06-13 17:43:50 | us-west | NULL | airquality | no2 | 5
## 2016-06-13 17:43:50 | us-west | NULL | airquality | pm25 | 16
## In most cases, using the multi-table mapping mode is recommended.
## However, consider using single-table mode when you have thousands of measurement names.
mapping_mode = "multi-table"
## Only valid and required for mapping_mode = "single-table"
## Specifies the Timestream table where the metrics will be uploaded.
# single_table_name = "yourTableNameHere"
## Only valid and required for mapping_mode = "single-table"
## Describes what will be the Timestream dimension name for the Telegraf
## measurement name.
# single_table_dimension_name_for_telegraf_measurement_name = "namespace"
## Specifies whether the plugin should create the table if it does not exist.
## The plugin writes the data without first checking whether the table exists.
## When the table does not exist, the error returned from Timestream will cause
## the plugin to create the table, if this parameter is set to true.
create_table_if_not_exists = true
## Only valid and required if create_table_if_not_exists = true
## Specifies the Timestream table magnetic store retention period in days.
## Check Timestream documentation for more details.
create_table_magnetic_store_retention_period_in_days = 365
## Only valid and required if create_table_if_not_exists = true
## Specifies the Timestream table memory store retention period in hours.
## Check Timestream documentation for more details.
create_table_memory_store_retention_period_in_hours = 24
## Only valid and optional if create_table_if_not_exists = true
## Specifies the Timestream table tags.
## Check Timestream documentation for more details.
# create_table_tags = { "foo" = "bar", "environment" = "dev" }
`
// WriteFactory function provides a way to mock the client instantiation for testing purposes.
var WriteFactory = func(credentialConfig *internalaws.CredentialConfig) WriteClient {
configProvider := credentialConfig.Credentials()
return timestreamwrite.New(configProvider)
}
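// Connect validates the plugin configuration, constructs the Timestream write client,
// and optionally describes the configured database as a startup safety check.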
func (t *Timestream) Connect() error {
if t.DatabaseName == "" {
return fmt.Errorf("DatabaseName key is required")
}
if t.MappingMode == "" {
return fmt.Errorf("MappingMode key is required")
}
if t.MappingMode != MappingModeSingleTable && t.MappingMode != MappingModeMultiTable {
return fmt.Errorf("correct MappingMode key values are: '%s', '%s'",
MappingModeSingleTable, MappingModeMultiTable)
}
if t.MappingMode == MappingModeSingleTable {
if t.SingleTableName == "" {
return fmt.Errorf("in '%s' mapping mode, SingleTableName key is required", MappingModeSingleTable)
}
if t.SingleTableDimensionNameForTelegrafMeasurementName == "" {
return fmt.Errorf("in '%s' mapping mode, SingleTableDimensionNameForTelegrafMeasurementName key is required",
MappingModeSingleTable)
}
}
if t.MappingMode == MappingModeMultiTable {
if t.SingleTableName != "" {
return fmt.Errorf("in '%s' mapping mode, do not specify SingleTableName key", MappingModeMultiTable)
}
if t.SingleTableDimensionNameForTelegrafMeasurementName != "" {
return fmt.Errorf("in '%s' mapping mode, do not specify SingleTableDimensionNameForTelegrafMeasurementName key", MappingModeMultiTable)
}
}
if t.CreateTableIfNotExists {
if t.CreateTableMagneticStoreRetentionPeriodInDays < 1 {
return fmt.Errorf("if Telegraf should create tables, CreateTableMagneticStoreRetentionPeriodInDays key should have a value greater than 0")
}
if t.CreateTableMemoryStoreRetentionPeriodInHours < 1 {
return fmt.Errorf("if Telegraf should create tables, CreateTableMemoryStoreRetentionPeriodInHours key should have a value greater than 0")
}
}
t.Log.Infof("Constructing Timestream client for '%s' mode", t.MappingMode)
credentialConfig := &internalaws.CredentialConfig{
Region: t.Region,
AccessKey: t.AccessKey,
SecretKey: t.SecretKey,
RoleARN: t.RoleARN,
Profile: t.Profile,
Filename: t.Filename,
Token: t.Token,
EndpointURL: t.EndpointURL,
}
svc := WriteFactory(credentialConfig)
if t.DescribeDatabaseOnStart {
t.Log.Infof("Describing database '%s' in region '%s'", t.DatabaseName, t.Region)
describeDatabaseInput := &timestreamwrite.DescribeDatabaseInput{
DatabaseName: aws.String(t.DatabaseName),
}
describeDatabaseOutput, err := svc.DescribeDatabase(describeDatabaseInput)
if err != nil {
t.Log.Errorf("Couldn't describe database '%s'. Check error, fix permissions, connectivity, create database.", t.DatabaseName)
return err
}
t.Log.Infof("Describe database '%s' returned: '%s'.", t.DatabaseName, describeDatabaseOutput)
}
t.svc = svc
return nil
}
func (t *Timestream) Close() error {
return nil
}
func (t *Timestream) SampleConfig() string {
return sampleConfig
}
func (t *Timestream) Description() string {
return "Configuration for Amazon Timestream output."
}
func init() {
outputs.Add("timestream", func() telegraf.Output {
return &Timestream{}
})
}
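// Write transforms the given metrics into Timestream write requests and sends them,
// returning an error only for failures that Telegraf should retry.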
func (t *Timestream) Write(metrics []telegraf.Metric) error {
writeRecordsInputs := t.TransformMetrics(metrics)
for _, writeRecordsInput := range writeRecordsInputs {
if err := t.writeToTimestream(writeRecordsInput, true); err != nil {
return err
}
}
return nil
}
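// writeToTimestream performs a single WriteRecords call. Throttling and internal
// server errors are returned to Telegraf for retry; on ResourceNotFoundException the
// target table may be created and the write retried once; other AWS errors drop the records.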
func (t *Timestream) writeToTimestream(writeRecordsInput *timestreamwrite.WriteRecordsInput, resourceNotFoundRetry bool) error {
t.Log.Debugf("Writing to Timestream: '%v' with ResourceNotFoundRetry: '%t'", writeRecordsInput, resourceNotFoundRetry)
_, err := t.svc.WriteRecords(writeRecordsInput)
if err != nil {
// Telegraf will retry ingesting the metrics if an error is returned from the plugin.
// Therefore, return error only for retryable exceptions: ThrottlingException and 5xx exceptions.
if e, ok := err.(awserr.Error); ok {
switch e.Code() {
case timestreamwrite.ErrCodeResourceNotFoundException:
if resourceNotFoundRetry {
t.Log.Warnf("Failed to write to Timestream database '%s' table '%s'. Error: '%s'",
t.DatabaseName, *writeRecordsInput.TableName, e)
return t.createTableAndRetry(writeRecordsInput)
}
t.logWriteToTimestreamError(err, writeRecordsInput.TableName)
case timestreamwrite.ErrCodeThrottlingException:
return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. Error: %s",
t.DatabaseName, *writeRecordsInput.TableName, err)
case timestreamwrite.ErrCodeInternalServerException:
return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. Error: %s",
t.DatabaseName, *writeRecordsInput.TableName, err)
default:
t.logWriteToTimestreamError(err, writeRecordsInput.TableName)
}
} else {
// Retry other, non-aws errors.
return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. Error: %s",
t.DatabaseName, *writeRecordsInput.TableName, err)
}
}
return nil
}
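// logWriteToTimestreamError logs a non-retryable write failure; the affected records are dropped.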
func (t *Timestream) logWriteToTimestreamError(err error, tableName *string) {
t.Log.Errorf("Failed to write to Timestream database '%s' table '%s'. Skipping metric! Error: '%s'",
t.DatabaseName, *tableName, err)
}
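// createTableAndRetry creates the missing target table (when CreateTableIfNotExists is set)
// and retries the write once; otherwise the records are dropped and an error is logged.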
func (t *Timestream) createTableAndRetry(writeRecordsInput *timestreamwrite.WriteRecordsInput) error {
if t.CreateTableIfNotExists {
t.Log.Infof("Trying to create table '%s' in database '%s', as 'CreateTableIfNotExists' config key is 'true'.", *writeRecordsInput.TableName, t.DatabaseName)
if err := t.createTable(writeRecordsInput.TableName); err != nil {
t.Log.Errorf("Failed to create table '%s' in database '%s': %s. Skipping metric!", *writeRecordsInput.TableName, t.DatabaseName, err)
} else {
t.Log.Infof("Table '%s' in database '%s' created. Retrying writing.", *writeRecordsInput.TableName, t.DatabaseName)
return t.writeToTimestream(writeRecordsInput, false)
}
} else {
t.Log.Errorf("Not trying to create table '%s' in database '%s', as 'CreateTableIfNotExists' config key is 'false'. Skipping metric!", *writeRecordsInput.TableName, t.DatabaseName)
}
return nil
}
// createTable creates a Timestream table according to the configuration.
func (t *Timestream) createTable(tableName *string) error {
createTableInput := &timestreamwrite.CreateTableInput{
DatabaseName: aws.String(t.DatabaseName),
TableName: aws.String(*tableName),
RetentionProperties: &timestreamwrite.RetentionProperties{
MagneticStoreRetentionPeriodInDays: aws.Int64(t.CreateTableMagneticStoreRetentionPeriodInDays),
MemoryStoreRetentionPeriodInHours: aws.Int64(t.CreateTableMemoryStoreRetentionPeriodInHours),
},
}
var tags []*timestreamwrite.Tag
for key, val := range t.CreateTableTags {
tags = append(tags, &timestreamwrite.Tag{
Key: aws.String(key),
Value: aws.String(val),
})
}
createTableInput.SetTags(tags)
_, err := t.svc.CreateTable(createTableInput)
if err != nil {
if e, ok := err.(awserr.Error); ok {
// if the table was created in the meantime, it's ok.
if e.Code() == timestreamwrite.ErrCodeConflictException {
return nil
}
}
return err
}
return nil
}
// TransformMetrics transforms a collection of Telegraf Metrics into write requests to Timestream.
// Telegraf Metrics are grouped by name, tags (keys and values), and time to use Timestream CommonAttributes.
// Returns a collection of write requests to be performed against Timestream.
func (t *Timestream) TransformMetrics(metrics []telegraf.Metric) []*timestreamwrite.WriteRecordsInput {
writeRequests := make(map[uint64]*timestreamwrite.WriteRecordsInput, len(metrics))
for _, m := range metrics {
// build MeasureName, MeasureValue, MeasureValueType
records := t.buildWriteRecords(m)
if len(records) == 0 {
continue
}
id := hashFromMetricTimeNameTagKeys(m)
if curr, ok := writeRequests[id]; !ok {
// No current CommonAttributes/WriteRecordsInput found for current Telegraf Metric
dimensions := t.buildDimensions(m)
timeUnit, timeValue := getTimestreamTime(m.Time())
newWriteRecord := &timestreamwrite.WriteRecordsInput{
DatabaseName: aws.String(t.DatabaseName),
Records: records,
CommonAttributes: &timestreamwrite.Record{
Dimensions: dimensions,
Time: aws.String(timeValue),
TimeUnit: aws.String(timeUnit),
},
}
if t.MappingMode == MappingModeSingleTable {
newWriteRecord.SetTableName(t.SingleTableName)
}
if t.MappingMode == MappingModeMultiTable {
newWriteRecord.SetTableName(m.Name())
}
writeRequests[id] = newWriteRecord
} else {
curr.Records = append(curr.Records, records...)
}
}
// Create result as array of WriteRecordsInput. Split requests over records count limit to smaller requests.
var result []*timestreamwrite.WriteRecordsInput
for _, writeRequest := range writeRequests {
if len(writeRequest.Records) > MaxRecordsPerCall {
for _, recordsPartition := range partitionRecords(MaxRecordsPerCall, writeRequest.Records) {
newWriteRecord := &timestreamwrite.WriteRecordsInput{
DatabaseName: writeRequest.DatabaseName,
TableName: writeRequest.TableName,
Records: recordsPartition,
CommonAttributes: writeRequest.CommonAttributes,
}
result = append(result, newWriteRecord)
}
} else {
result = append(result, writeRequest)
}
}
return result
}
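// hashFromMetricTimeNameTagKeys computes an FNV-1a hash over the metric's name,
// tag key/value pairs, and timestamp; metrics with equal hashes share one
// WriteRecordsInput via its CommonAttributes.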
func hashFromMetricTimeNameTagKeys(m telegraf.Metric) uint64 {
h := fnv.New64a()
h.Write([]byte(m.Name()))
h.Write([]byte("\n"))
for _, tag := range m.TagList() {
if tag.Key == "" {
continue
}
h.Write([]byte(tag.Key))
h.Write([]byte("\n"))
h.Write([]byte(tag.Value))
h.Write([]byte("\n"))
}
b := make([]byte, binary.MaxVarintLen64)
n := binary.PutUvarint(b, uint64(m.Time().UnixNano()))
h.Write(b[:n])
h.Write([]byte("\n"))
return h.Sum64()
}
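// buildDimensions builds the Timestream dimensions from the metric's tags.
// In single-table mode, the measurement name is added as an extra dimension.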
func (t *Timestream) buildDimensions(point telegraf.Metric) []*timestreamwrite.Dimension {
var dimensions []*timestreamwrite.Dimension
for tagName, tagValue := range point.Tags() {
dimension := &timestreamwrite.Dimension{
Name: aws.String(tagName),
Value: aws.String(tagValue),
}
dimensions = append(dimensions, dimension)
}
if t.MappingMode == MappingModeSingleTable {
dimension := &timestreamwrite.Dimension{
Name: aws.String(t.SingleTableDimensionNameForTelegrafMeasurementName),
Value: aws.String(point.Name()),
}
dimensions = append(dimensions, dimension)
}
return dimensions
}
// buildWriteRecords builds the Timestream write records from the Metric fields only.
// Tags and time are not included - common attributes are built separately.
// Records with an unsupported field type are skipped.
// It returns an array of Timestream write records.
func (t *Timestream) buildWriteRecords(point telegraf.Metric) []*timestreamwrite.Record {
var records []*timestreamwrite.Record
for fieldName, fieldValue := range point.Fields() {
stringFieldValue, stringFieldValueType, ok := convertValue(fieldValue)
if !ok {
t.Log.Errorf("Skipping field '%s'. The type '%s' is not supported in Timestream as MeasureValue. "+
"Supported values are: [int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool]",
fieldName, reflect.TypeOf(fieldValue))
continue
}
record := &timestreamwrite.Record{
MeasureName: aws.String(fieldName),
MeasureValueType: aws.String(stringFieldValueType),
MeasureValue: aws.String(stringFieldValue),
}
records = append(records, record)
}
return records
}
// partitionRecords splits the Timestream records into smaller slices of at most the given size,
// so that each slice is under the record-count limit for a Timestream WriteRecords API call.
// It returns a slice of record slices.
func partitionRecords(size int, records []*timestreamwrite.Record) [][]*timestreamwrite.Record {
numberOfPartitions := len(records) / size
if len(records)%size != 0 {
numberOfPartitions++
}
partitions := make([][]*timestreamwrite.Record, numberOfPartitions)
for i := 0; i < numberOfPartitions; i++ {
start := size * i
end := size * (i + 1)
if end > len(records) {
end = len(records)
}
partitions[i] = records[start:end]
}
return partitions
}
// getTimestreamTime produces Timestream TimeUnit and TimeValue with minimum possible granularity
// while maintaining the same information.
func getTimestreamTime(time time.Time) (timeUnit string, timeValue string) {
const (
TimeUnitS = "SECONDS"
TimeUnitMS = "MILLISECONDS"
TimeUnitUS = "MICROSECONDS"
TimeUnitNS = "NANOSECONDS"
)
nanosTime := time.UnixNano()
if nanosTime%1e9 == 0 {
timeUnit = TimeUnitS
timeValue = strconv.FormatInt(nanosTime/1e9, 10)
} else if nanosTime%1e6 == 0 {
timeUnit = TimeUnitMS
timeValue = strconv.FormatInt(nanosTime/1e6, 10)
} else if nanosTime%1e3 == 0 {
timeUnit = TimeUnitUS
timeValue = strconv.FormatInt(nanosTime/1e3, 10)
} else {
timeUnit = TimeUnitNS
timeValue = strconv.FormatInt(nanosTime, 10)
}
return
}
// convertValue converts single Field value from Telegraf Metric and produces
// value, valueType Timestream representation.
func convertValue(v interface{}) (value string, valueType string, ok bool) {
const (
TypeBigInt = "BIGINT"
TypeDouble = "DOUBLE"
TypeBoolean = "BOOLEAN"
TypeVarchar = "VARCHAR"
)
ok = true
switch t := v.(type) {
case int:
valueType = TypeBigInt
value = strconv.FormatInt(int64(t), 10)
case int8:
valueType = TypeBigInt
value = strconv.FormatInt(int64(t), 10)
case int16:
valueType = TypeBigInt
value = strconv.FormatInt(int64(t), 10)
case int32:
valueType = TypeBigInt
value = strconv.FormatInt(int64(t), 10)
case int64:
valueType = TypeBigInt
value = strconv.FormatInt(t, 10)
case uint:
valueType = TypeBigInt
value = strconv.FormatUint(uint64(t), 10)
case uint8:
valueType = TypeBigInt
value = strconv.FormatUint(uint64(t), 10)
case uint16:
valueType = TypeBigInt
value = strconv.FormatUint(uint64(t), 10)
case uint32:
valueType = TypeBigInt
value = strconv.FormatUint(uint64(t), 10)
case uint64:
valueType = TypeBigInt
value = strconv.FormatUint(t, 10)
case float32:
valueType = TypeDouble
value = strconv.FormatFloat(float64(t), 'f', -1, 32)
case float64:
valueType = TypeDouble
value = strconv.FormatFloat(t, 'f', -1, 64)
case bool:
valueType = TypeBoolean
if t {
value = "true"
} else {
value = "false"
}
case string:
valueType = TypeVarchar
value = t
default:
// Skip unsupported type.
ok = false
return
}
return
}


@@ -0,0 +1,92 @@
package timestream
import (
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/timestreamwrite"
"github.com/stretchr/testify/assert"
)
func TestGetTimestreamTime(t *testing.T) {
assertions := assert.New(t)
tWithNanos := time.Date(2020, time.November, 10, 23, 44, 20, 123, time.UTC)
tWithMicros := time.Date(2020, time.November, 10, 23, 44, 20, 123000, time.UTC)
tWithMillis := time.Date(2020, time.November, 10, 23, 44, 20, 123000000, time.UTC)
tOnlySeconds := time.Date(2020, time.November, 10, 23, 44, 20, 0, time.UTC)
tUnitNanos, tValueNanos := getTimestreamTime(tWithNanos)
assertions.Equal("NANOSECONDS", tUnitNanos)
assertions.Equal("1605051860000000123", tValueNanos)
tUnitMicros, tValueMicros := getTimestreamTime(tWithMicros)
assertions.Equal("MICROSECONDS", tUnitMicros)
assertions.Equal("1605051860000123", tValueMicros)
tUnitMillis, tValueMillis := getTimestreamTime(tWithMillis)
assertions.Equal("MILLISECONDS", tUnitMillis)
assertions.Equal("1605051860123", tValueMillis)
tUnitSeconds, tValueSeconds := getTimestreamTime(tOnlySeconds)
assertions.Equal("SECONDS", tUnitSeconds)
assertions.Equal("1605051860", tValueSeconds)
}
func TestPartitionRecords(t *testing.T) {
assertions := assert.New(t)
testDatum := timestreamwrite.Record{
MeasureName: aws.String("Foo"),
MeasureValueType: aws.String("DOUBLE"),
MeasureValue: aws.String("123"),
}
var zeroDatum []*timestreamwrite.Record
oneDatum := []*timestreamwrite.Record{&testDatum}
twoDatum := []*timestreamwrite.Record{&testDatum, &testDatum}
threeDatum := []*timestreamwrite.Record{&testDatum, &testDatum, &testDatum}
assertions.Equal([][]*timestreamwrite.Record{}, partitionRecords(2, zeroDatum))
assertions.Equal([][]*timestreamwrite.Record{oneDatum}, partitionRecords(2, oneDatum))
assertions.Equal([][]*timestreamwrite.Record{twoDatum}, partitionRecords(2, twoDatum))
assertions.Equal([][]*timestreamwrite.Record{twoDatum, oneDatum}, partitionRecords(2, threeDatum))
}
func TestConvertValueSupported(t *testing.T) {
intInputValues := []interface{}{-1, int8(-2), int16(-3), int32(-4), int64(-5)}
intOutputValues := []string{"-1", "-2", "-3", "-4", "-5"}
intOutputValueTypes := []string{"BIGINT", "BIGINT", "BIGINT", "BIGINT", "BIGINT"}
testConvertValueSupportedCases(t, intInputValues, intOutputValues, intOutputValueTypes)
uintInputValues := []interface{}{uint(1), uint8(2), uint16(3), uint32(4), uint64(5)}
uintOutputValues := []string{"1", "2", "3", "4", "5"}
uintOutputValueTypes := []string{"BIGINT", "BIGINT", "BIGINT", "BIGINT", "BIGINT"}
testConvertValueSupportedCases(t, uintInputValues, uintOutputValues, uintOutputValueTypes)
otherInputValues := []interface{}{"foo", float32(22.123), 22.1234, true}
otherOutputValues := []string{"foo", "22.123", "22.1234", "true"}
otherOutputValueTypes := []string{"VARCHAR", "DOUBLE", "DOUBLE", "BOOLEAN"}
testConvertValueSupportedCases(t, otherInputValues, otherOutputValues, otherOutputValueTypes)
}
func TestConvertValueUnsupported(t *testing.T) {
assertions := assert.New(t)
_, _, ok := convertValue(time.Date(2020, time.November, 10, 23, 44, 20, 0, time.UTC))
assertions.False(ok, "Expected unsuccessful conversion")
}
func testConvertValueSupportedCases(t *testing.T,
inputValues []interface{}, outputValues []string, outputValueTypes []string) {
assertions := assert.New(t)
for i, inputValue := range inputValues {
v, vt, ok := convertValue(inputValue)
assertions.Equal(true, ok, "Expected successful conversion")
assertions.Equal(outputValues[i], v, "Expected different string representation of converted value")
assertions.Equal(outputValueTypes[i], vt, "Expected different value type of converted value")
}
}


@@ -0,0 +1,742 @@
package timestream_test
import (
"fmt"
"github.com/aws/aws-sdk-go/aws/awserr"
"reflect"
"sort"
"strconv"
"strings"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/timestreamwrite"
"github.com/influxdata/telegraf"
internalaws "github.com/influxdata/telegraf/config/aws"
ts "github.com/influxdata/telegraf/plugins/outputs/timestream"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
const tsDbName = "testDb"
const testSingleTableName = "SingleTableName"
const testSingleTableDim = "namespace"
var time1 = time.Date(2009, time.November, 10, 22, 0, 0, 0, time.UTC)
const time1Epoch = "1257890400"
var time2 = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
const time2Epoch = "1257894000"
const timeUnit = "SECONDS"
const metricName1 = "metricName1"
const metricName2 = "metricName2"
type mockTimestreamClient struct {
}
func (m *mockTimestreamClient) CreateTable(*timestreamwrite.CreateTableInput) (*timestreamwrite.CreateTableOutput, error) {
return nil, nil
}
func (m *mockTimestreamClient) WriteRecords(*timestreamwrite.WriteRecordsInput) (*timestreamwrite.WriteRecordsOutput, error) {
return nil, nil
}
func (m *mockTimestreamClient) DescribeDatabase(*timestreamwrite.DescribeDatabaseInput) (*timestreamwrite.DescribeDatabaseOutput, error) {
return nil, fmt.Errorf("hello from DescribeDatabase")
}
func TestConnectValidatesConfigParameters(t *testing.T) {
assertions := assert.New(t)
ts.WriteFactory = func(credentialConfig *internalaws.CredentialConfig) ts.WriteClient {
return &mockTimestreamClient{}
}
// checking base arguments
noDatabaseName := ts.Timestream{Log: testutil.Logger{}}
assertions.Contains(noDatabaseName.Connect().Error(), "DatabaseName")
noMappingMode := ts.Timestream{
DatabaseName: tsDbName,
Log: testutil.Logger{},
}
assertions.Contains(noMappingMode.Connect().Error(), "MappingMode")
incorrectMappingMode := ts.Timestream{
DatabaseName: tsDbName,
MappingMode: "foo",
Log: testutil.Logger{},
}
assertions.Contains(incorrectMappingMode.Connect().Error(), "single-table")
// multi-table arguments
validMappingModeMultiTable := ts.Timestream{
DatabaseName: tsDbName,
MappingMode: ts.MappingModeMultiTable,
Log: testutil.Logger{},
}
assertions.Nil(validMappingModeMultiTable.Connect())
singleTableNameWithMultiTable := ts.Timestream{
DatabaseName: tsDbName,
MappingMode: ts.MappingModeMultiTable,
SingleTableName: testSingleTableName,
Log: testutil.Logger{},
}
assertions.Contains(singleTableNameWithMultiTable.Connect().Error(), "SingleTableName")
singleTableDimensionWithMultiTable := ts.Timestream{
DatabaseName: tsDbName,
MappingMode: ts.MappingModeMultiTable,
SingleTableDimensionNameForTelegrafMeasurementName: testSingleTableDim,
Log: testutil.Logger{},
}
assertions.Contains(singleTableDimensionWithMultiTable.Connect().Error(),
"SingleTableDimensionNameForTelegrafMeasurementName")
// single-table arguments
noTableNameMappingModeSingleTable := ts.Timestream{
DatabaseName: tsDbName,
MappingMode: ts.MappingModeSingleTable,
Log: testutil.Logger{},
}
assertions.Contains(noTableNameMappingModeSingleTable.Connect().Error(), "SingleTableName")
noDimensionNameMappingModeSingleTable := ts.Timestream{
DatabaseName: tsDbName,
MappingMode: ts.MappingModeSingleTable,
SingleTableName: testSingleTableName,
Log: testutil.Logger{},
}
assertions.Contains(noDimensionNameMappingModeSingleTable.Connect().Error(),
"SingleTableDimensionNameForTelegrafMeasurementName")
validConfigurationMappingModeSingleTable := ts.Timestream{
DatabaseName: tsDbName,
MappingMode: ts.MappingModeSingleTable,
SingleTableName: testSingleTableName,
SingleTableDimensionNameForTelegrafMeasurementName: testSingleTableDim,
Log: testutil.Logger{},
}
assertions.Nil(validConfigurationMappingModeSingleTable.Connect())
// create table arguments
createTableNoMagneticRetention := ts.Timestream{
DatabaseName: tsDbName,
MappingMode: ts.MappingModeMultiTable,
CreateTableIfNotExists: true,
Log: testutil.Logger{},
}
assertions.Contains(createTableNoMagneticRetention.Connect().Error(),
"CreateTableMagneticStoreRetentionPeriodInDays")
createTableNoMemoryRetention := ts.Timestream{
DatabaseName: tsDbName,
MappingMode: ts.MappingModeMultiTable,
CreateTableIfNotExists: true,
CreateTableMagneticStoreRetentionPeriodInDays: 3,
Log: testutil.Logger{},
}
assertions.Contains(createTableNoMemoryRetention.Connect().Error(),
"CreateTableMemoryStoreRetentionPeriodInHours")
createTableValid := ts.Timestream{
DatabaseName: tsDbName,
MappingMode: ts.MappingModeMultiTable,
CreateTableIfNotExists: true,
CreateTableMagneticStoreRetentionPeriodInDays: 3,
CreateTableMemoryStoreRetentionPeriodInHours: 3,
Log: testutil.Logger{},
}
assertions.Nil(createTableValid.Connect())
// describe table on start arguments
describeTableInvoked := ts.Timestream{
DatabaseName: tsDbName,
MappingMode: ts.MappingModeMultiTable,
DescribeDatabaseOnStart: true,
Log: testutil.Logger{},
}
assertions.Contains(describeTableInvoked.Connect().Error(), "hello from DescribeDatabase")
}
type mockTimestreamErrorClient struct {
ErrorToReturnOnWriteRecords error
}
func (m *mockTimestreamErrorClient) CreateTable(*timestreamwrite.CreateTableInput) (*timestreamwrite.CreateTableOutput, error) {
return nil, nil
}
func (m *mockTimestreamErrorClient) WriteRecords(*timestreamwrite.WriteRecordsInput) (*timestreamwrite.WriteRecordsOutput, error) {
return nil, m.ErrorToReturnOnWriteRecords
}
func (m *mockTimestreamErrorClient) DescribeDatabase(*timestreamwrite.DescribeDatabaseInput) (*timestreamwrite.DescribeDatabaseOutput, error) {
return nil, nil
}
func TestThrottlingErrorIsReturnedToTelegraf(t *testing.T) {
assertions := assert.New(t)
ts.WriteFactory = func(credentialConfig *internalaws.CredentialConfig) ts.WriteClient {
return &mockTimestreamErrorClient{
awserr.New(timestreamwrite.ErrCodeThrottlingException,
"Throttling Test", nil),
}
}
plugin := ts.Timestream{
MappingMode: ts.MappingModeMultiTable,
DatabaseName: tsDbName,
Log: testutil.Logger{},
}
plugin.Connect()
input := testutil.MustMetric(
metricName1,
map[string]string{"tag1": "value1"},
map[string]interface{}{"value": float64(1)},
time1,
)
err := plugin.Write([]telegraf.Metric{input})
assertions.NotNil(err, "Expected an error to be returned to Telegraf, "+
"so that the write will be retried by Telegraf later.")
}
func TestRejectedRecordsErrorResultsInMetricsBeingSkipped(t *testing.T) {
assertions := assert.New(t)
ts.WriteFactory = func(credentialConfig *internalaws.CredentialConfig) ts.WriteClient {
return &mockTimestreamErrorClient{
awserr.New(timestreamwrite.ErrCodeRejectedRecordsException,
"RejectedRecords Test", nil),
}
}
plugin := ts.Timestream{
MappingMode: ts.MappingModeMultiTable,
DatabaseName: tsDbName,
Log: testutil.Logger{},
}
plugin.Connect()
input := testutil.MustMetric(
metricName1,
map[string]string{"tag1": "value1"},
map[string]interface{}{"value": float64(1)},
time1,
)
err := plugin.Write([]telegraf.Metric{input})
assertions.Nil(err, "Expected to silently swallow the RejectedRecordsException, "+
"as retrying this error doesn't make sense.")
}
func TestTransformMetricsSkipEmptyMetric(t *testing.T) {
input1 := testutil.MustMetric(
metricName1,
map[string]string{"tag1": "value1"},
map[string]interface{}{}, //no fields here
time1,
)
input2 := testutil.MustMetric(
metricName1,
map[string]string{"tag2": "value2"},
map[string]interface{}{
"value": float64(10),
},
time1,
)
input3 := testutil.MustMetric(
metricName1,
map[string]string{}, //record with no dimensions should appear in the results
map[string]interface{}{
"value": float64(20),
},
time1,
)
expectedResult1SingleTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: testSingleTableName,
dimensions: map[string]string{"tag2": "value2", testSingleTableDim: metricName1},
measureValues: map[string]string{"value": "10"},
})
expectedResult2SingleTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: testSingleTableName,
dimensions: map[string]string{testSingleTableDim: metricName1},
measureValues: map[string]string{"value": "20"},
})
comparisonTest(t, ts.MappingModeSingleTable,
[]telegraf.Metric{input1, input2, input3},
[]*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable})
expectedResult1MultiTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: metricName1,
dimensions: map[string]string{"tag2": "value2"},
measureValues: map[string]string{"value": "10"},
})
expectedResult2MultiTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: metricName1,
dimensions: map[string]string{},
measureValues: map[string]string{"value": "20"},
})
comparisonTest(t, ts.MappingModeMultiTable,
[]telegraf.Metric{input1, input2, input3},
[]*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable})
}
func TestTransformMetricsRequestsAboveLimitAreSplit(t *testing.T) {
const maxRecordsInWriteRecordsCall = 100
var inputs []telegraf.Metric
for i := 1; i <= maxRecordsInWriteRecordsCall+1; i++ {
fieldName := "value_supported" + strconv.Itoa(i)
inputs = append(inputs, testutil.MustMetric(
metricName1,
map[string]string{"tag1": "value1"},
map[string]interface{}{
fieldName: float64(10),
},
time1,
))
}
resultFields := make(map[string]string)
for i := 1; i <= maxRecordsInWriteRecordsCall; i++ {
fieldName := "value_supported" + strconv.Itoa(i)
resultFields[fieldName] = "10"
}
expectedResult1SingleTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: testSingleTableName,
dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1},
measureValues: resultFields,
})
expectedResult2SingleTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: testSingleTableName,
dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1},
measureValues: map[string]string{"value_supported" + strconv.Itoa(maxRecordsInWriteRecordsCall+1): "10"},
})
comparisonTest(t, ts.MappingModeSingleTable,
inputs,
[]*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable})
expectedResult1MultiTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: metricName1,
dimensions: map[string]string{"tag1": "value1"},
measureValues: resultFields,
})
expectedResult2MultiTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: metricName1,
dimensions: map[string]string{"tag1": "value1"},
measureValues: map[string]string{"value_supported" + strconv.Itoa(maxRecordsInWriteRecordsCall+1): "10"},
})
comparisonTest(t, ts.MappingModeMultiTable,
inputs,
[]*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable})
}
func TestTransformMetricsDifferentDimensionsSameTimestampsAreWrittenSeparate(t *testing.T) {
input1 := testutil.MustMetric(
metricName1,
map[string]string{"tag1": "value1"},
map[string]interface{}{
"value_supported1": float64(10), "value_supported2": float64(20),
},
time1,
)
input2 := testutil.MustMetric(
metricName1,
map[string]string{"tag2": "value2"},
map[string]interface{}{
"value_supported3": float64(30),
},
time1,
)
expectedResult1SingleTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: testSingleTableName,
dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1},
measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"},
})
expectedResult2SingleTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: testSingleTableName,
dimensions: map[string]string{"tag2": "value2", testSingleTableDim: metricName1},
measureValues: map[string]string{"value_supported3": "30"},
})
comparisonTest(t, ts.MappingModeSingleTable,
[]telegraf.Metric{input1, input2},
[]*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable})
expectedResult1MultiTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: metricName1,
dimensions: map[string]string{"tag1": "value1"},
measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"},
})
expectedResult2MultiTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: metricName1,
dimensions: map[string]string{"tag2": "value2"},
measureValues: map[string]string{"value_supported3": "30"},
})
comparisonTest(t, ts.MappingModeMultiTable,
[]telegraf.Metric{input1, input2},
[]*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable})
}
func TestTransformMetricsSameDimensionsDifferentDimensionValuesAreWrittenSeparate(t *testing.T) {
input1 := testutil.MustMetric(
metricName1,
map[string]string{"tag1": "value1"},
map[string]interface{}{
"value_supported1": float64(10),
},
time1,
)
input2 := testutil.MustMetric(
metricName1,
map[string]string{"tag1": "value2"},
map[string]interface{}{
"value_supported1": float64(20),
},
time1,
)
expectedResult1SingleTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: testSingleTableName,
dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1},
measureValues: map[string]string{"value_supported1": "10"},
})
expectedResult2SingleTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: testSingleTableName,
dimensions: map[string]string{"tag1": "value2", testSingleTableDim: metricName1},
measureValues: map[string]string{"value_supported1": "20"},
})
comparisonTest(t, ts.MappingModeSingleTable,
[]telegraf.Metric{input1, input2},
[]*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable})
expectedResult1MultiTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: metricName1,
dimensions: map[string]string{"tag1": "value1"},
measureValues: map[string]string{"value_supported1": "10"},
})
expectedResult2MultiTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: metricName1,
dimensions: map[string]string{"tag1": "value2"},
measureValues: map[string]string{"value_supported1": "20"},
})
comparisonTest(t, ts.MappingModeMultiTable,
[]telegraf.Metric{input1, input2},
[]*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable})
}
func TestTransformMetricsSameDimensionsDifferentTimestampsAreWrittenSeparate(t *testing.T) {
input1 := testutil.MustMetric(
metricName1,
map[string]string{"tag1": "value1"},
map[string]interface{}{
"value_supported1": float64(10), "value_supported2": float64(20),
},
time1,
)
input2 := testutil.MustMetric(
metricName1,
map[string]string{"tag1": "value1"},
map[string]interface{}{
"value_supported3": float64(30),
},
time2,
)
expectedResult1SingleTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: testSingleTableName,
dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1},
measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"},
})
expectedResult2SingleTable := buildExpectedRecords(SimpleInput{
t: time2Epoch,
tableName: testSingleTableName,
dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1},
measureValues: map[string]string{"value_supported3": "30"},
})
comparisonTest(t, ts.MappingModeSingleTable,
[]telegraf.Metric{input1, input2},
[]*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable})
expectedResult1MultiTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: metricName1,
dimensions: map[string]string{"tag1": "value1"},
measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"},
})
expectedResult2MultiTable := buildExpectedRecords(SimpleInput{
t: time2Epoch,
tableName: metricName1,
dimensions: map[string]string{"tag1": "value1"},
measureValues: map[string]string{"value_supported3": "30"},
})
comparisonTest(t, ts.MappingModeMultiTable,
[]telegraf.Metric{input1, input2},
[]*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable})
}
func TestTransformMetricsSameDimensionsSameTimestampsAreWrittenTogether(t *testing.T) {
input1 := testutil.MustMetric(
metricName1,
map[string]string{"tag1": "value1"},
map[string]interface{}{
"value_supported1": float64(10), "value_supported2": float64(20),
},
time1,
)
input2 := testutil.MustMetric(
metricName1,
map[string]string{"tag1": "value1"},
map[string]interface{}{
"value_supported3": float64(30),
},
time1,
)
expectedResultSingleTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: testSingleTableName,
dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1},
measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20", "value_supported3": "30"},
})
comparisonTest(t, ts.MappingModeSingleTable,
[]telegraf.Metric{input1, input2},
[]*timestreamwrite.WriteRecordsInput{expectedResultSingleTable})
expectedResultMultiTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: metricName1,
dimensions: map[string]string{"tag1": "value1"},
measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20", "value_supported3": "30"},
})
comparisonTest(t, ts.MappingModeMultiTable,
[]telegraf.Metric{input1, input2},
[]*timestreamwrite.WriteRecordsInput{expectedResultMultiTable})
}
func TestTransformMetricsDifferentMetricsAreWrittenToDifferentTablesInMultiTableMapping(t *testing.T) {
input1 := testutil.MustMetric(
metricName1,
map[string]string{"tag1": "value1"},
map[string]interface{}{
"value_supported1": float64(10), "value_supported2": float64(20),
},
time1,
)
input2 := testutil.MustMetric(
metricName2,
map[string]string{"tag1": "value1"},
map[string]interface{}{
"value_supported3": float64(30),
},
time1,
)
expectedResult1SingleTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: testSingleTableName,
dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1},
measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"},
})
expectedResult2SingleTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: testSingleTableName,
dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName2},
measureValues: map[string]string{"value_supported3": "30"},
})
comparisonTest(t, ts.MappingModeSingleTable,
[]telegraf.Metric{input1, input2},
[]*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable})
expectedResult1MultiTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: metricName1,
dimensions: map[string]string{"tag1": "value1"},
measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"},
})
expectedResult2MultiTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: metricName2,
dimensions: map[string]string{"tag1": "value1"},
measureValues: map[string]string{"value_supported3": "30"},
})
comparisonTest(t, ts.MappingModeMultiTable,
[]telegraf.Metric{input1, input2},
[]*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable})
}
func TestTransformMetricsUnsupportedFieldsAreSkipped(t *testing.T) {
metricWithUnsupportedField := testutil.MustMetric(
metricName1,
map[string]string{"tag1": "value1"},
map[string]interface{}{
"value_supported1": float64(10), "value_unsupported": time.Now(),
},
time1,
)
expectedResultSingleTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: testSingleTableName,
dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1},
measureValues: map[string]string{"value_supported1": "10"},
})
comparisonTest(t, ts.MappingModeSingleTable,
[]telegraf.Metric{metricWithUnsupportedField},
[]*timestreamwrite.WriteRecordsInput{expectedResultSingleTable})
expectedResultMultiTable := buildExpectedRecords(SimpleInput{
t: time1Epoch,
tableName: metricName1,
dimensions: map[string]string{"tag1": "value1"},
measureValues: map[string]string{"value_supported1": "10"},
})
comparisonTest(t, ts.MappingModeMultiTable,
[]telegraf.Metric{metricWithUnsupportedField},
[]*timestreamwrite.WriteRecordsInput{expectedResultMultiTable})
}
func comparisonTest(t *testing.T,
mappingMode string,
telegrafMetrics []telegraf.Metric,
timestreamRecords []*timestreamwrite.WriteRecordsInput) {
var plugin ts.Timestream
switch mappingMode {
case ts.MappingModeSingleTable:
plugin = ts.Timestream{
MappingMode: mappingMode,
DatabaseName: tsDbName,
SingleTableName: testSingleTableName,
SingleTableDimensionNameForTelegrafMeasurementName: testSingleTableDim,
Log: testutil.Logger{},
}
case ts.MappingModeMultiTable:
plugin = ts.Timestream{
MappingMode: mappingMode,
DatabaseName: tsDbName,
Log: testutil.Logger{},
}
}
assertions := assert.New(t)
result := plugin.TransformMetrics(telegrafMetrics)
assertions.Equal(len(timestreamRecords), len(result), "The number of transformed records was expected to be different")
for _, tsRecord := range timestreamRecords {
assertions.True(arrayContains(result, tsRecord), "Expected that the list of requests to Timestream: \n%s\n\n "+
"will contain request: \n%s\n\nUsed MappingMode: %s", result, tsRecord, mappingMode)
}
}
func arrayContains(
array []*timestreamwrite.WriteRecordsInput,
element *timestreamwrite.WriteRecordsInput) bool {
sortWriteInputForComparison(*element)
for _, a := range array {
sortWriteInputForComparison(*a)
if reflect.DeepEqual(a, element) {
return true
}
}
return false
}
func sortWriteInputForComparison(element timestreamwrite.WriteRecordsInput) {
// sort the records by MeasureName, as they are kept in an array, but the order of records doesn't matter
sort.Slice(element.Records, func(i, j int) bool {
return strings.Compare(*element.Records[i].MeasureName, *element.Records[j].MeasureName) < 0
})
// sort the dimensions in CommonAttributes
if element.CommonAttributes != nil {
sort.Slice(element.CommonAttributes.Dimensions, func(i, j int) bool {
return strings.Compare(*element.CommonAttributes.Dimensions[i].Name,
*element.CommonAttributes.Dimensions[j].Name) < 0
})
}
// sort the dimensions in Records
for _, r := range element.Records {
sort.Slice(r.Dimensions, func(i, j int) bool {
return strings.Compare(*r.Dimensions[i].Name, *r.Dimensions[j].Name) < 0
})
}
}
type SimpleInput struct {
t string
tableName string
dimensions map[string]string
measureValues map[string]string
}
func buildExpectedRecords(i SimpleInput) *timestreamwrite.WriteRecordsInput {
var tsDimensions []*timestreamwrite.Dimension
for k, v := range i.dimensions {
tsDimensions = append(tsDimensions, &timestreamwrite.Dimension{
Name: aws.String(k),
Value: aws.String(v),
})
}
var tsRecords []*timestreamwrite.Record
for k, v := range i.measureValues {
tsRecords = append(tsRecords, &timestreamwrite.Record{
MeasureName: aws.String(k),
MeasureValue: aws.String(v),
MeasureValueType: aws.String("DOUBLE"),
})
}
result := &timestreamwrite.WriteRecordsInput{
DatabaseName: aws.String(tsDbName),
TableName: aws.String(i.tableName),
Records: tsRecords,
CommonAttributes: &timestreamwrite.Record{
Dimensions: tsDimensions,
Time: aws.String(i.t),
TimeUnit: aws.String(timeUnit),
},
}
return result
}