feat: migrate output plugins to new sample config format (#10910)

Sebastian Spaink 2022-04-07 16:55:03 -05:00 committed by GitHub
parent be0008f9e2
commit a7df6c6aa6
153 changed files with 759 additions and 2630 deletions

View File

@ -7,3 +7,18 @@ for the account.
If the point value being sent cannot be converted to a float64, the metric is skipped.
Metrics are grouped by converting any `_` characters to `.` in the Point Name.
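For example, a point named `system_load1` is reported as `system.load1`.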
## Configuration
```toml
# Configuration for Amon Server to send metrics to.
[[outputs.amon]]
## Amon Server Key
server_key = "my-server-key" # required.
## Amon Instance URL
amon_instance = "https://youramoninstance" # required
## Connection timeout.
# timeout = "5s"
```

View File

@ -22,17 +22,6 @@ type Amon struct {
client *http.Client
}
var sampleConfig = `
## Amon Server Key
server_key = "my-server-key" # required.
## Amon Instance URL
amon_instance = "https://youramoninstance" # required
## Connection timeout.
# timeout = "5s"
`
type TimeSeries struct {
Series []*Metric `json:"series"`
}
@ -106,14 +95,6 @@ func (a *Amon) Write(metrics []telegraf.Metric) error {
return nil
}
func (a *Amon) SampleConfig() string {
return sampleConfig
}
func (a *Amon) Description() string {
return "Configuration for Amon Server to send metrics to."
}
func (a *Amon) authenticatedURL() string {
return fmt.Sprintf("%s/api/system/%s", a.AmonInstance, a.ServerKey)
}

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package amon
func (a *Amon) SampleConfig() string {
return `{{ .SampleConfig }}`
}
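
Every plugin below gains an identical three-line template file. The generator itself (tools/generate_plugindata) is not part of this diff; the following is only a minimal sketch of what it presumably does — pull the first `toml` code fence out of the plugin's README.md and render the *_sample_config.go file as a text/template — with file names and error handling purely illustrative:

```go
package main

import (
	"os"
	"regexp"
	"text/template"
)

func main() {
	readme, err := os.ReadFile("README.md")
	if err != nil {
		panic(err)
	}
	// Capture the body of the first toml code fence, i.e. the README's
	// "## Configuration" block that now serves as the sample config.
	m := regexp.MustCompile("(?s)`{3}toml\n(.*?)`{3}").FindSubmatch(readme)
	if m == nil {
		panic("no toml code fence found in README.md")
	}

	// The *_sample_config.go file shown above is itself a text/template:
	// rendering it swaps {{ .SampleConfig }} for the README block.
	src, err := os.ReadFile("amon_sample_config.go") // assumed file name
	if err != nil {
		panic(err)
	}
	tmpl := template.Must(template.New("plugin").Parse(string(src)))
	if err := tmpl.Execute(os.Stdout, struct{ SampleConfig string }{string(m[1])}); err != nil {
		panic(err)
	}
}
```

The `--clean` go:generate directive suggests the tool can also restore the placeholder form, so the template stays checked in while built artifacts carry the rendered config.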

View File

@ -71,98 +71,6 @@ type Client interface {
Close() error
}
var sampleConfig = `
## Brokers to publish to. If multiple brokers are specified a random broker
## will be selected anytime a connection is established. This can be
## helpful for load balancing when not using a dedicated load balancer.
brokers = ["amqp://localhost:5672/influxdb"]
## Maximum messages to send over a connection. Once this is reached, the
## connection is closed and a new connection is made. This can be helpful for
## load balancing when not using a dedicated load balancer.
# max_messages = 0
## Exchange to declare and publish to.
exchange = "telegraf"
## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
# exchange_type = "topic"
## If true, exchange will be passively declared.
# exchange_passive = false
## Exchange durability can be either "transient" or "durable".
# exchange_durability = "durable"
## Additional exchange arguments.
# exchange_arguments = { }
# exchange_arguments = {"hash_property" = "timestamp"}
## Authentication credentials for the PLAIN auth_method.
# username = ""
# password = ""
## Auth method. PLAIN and EXTERNAL are supported
## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
## described here: https://www.rabbitmq.com/plugins.html
# auth_method = "PLAIN"
## Metric tag to use as a routing key.
## ie, if this tag exists, its value will be used as the routing key
# routing_tag = "host"
## Static routing key. Used when no routing_tag is set or as a fallback
## when the tag specified in routing tag is not found.
# routing_key = ""
# routing_key = "telegraf"
## Delivery Mode controls if a published message is persistent.
## One of "transient" or "persistent".
# delivery_mode = "transient"
## Static headers added to each published message.
# headers = { }
# headers = {"database" = "telegraf", "retention_policy" = "default"}
## Connection timeout. If not provided, will default to 5s. 0s means no
## timeout (not recommended).
# timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## If true use batch serialization format instead of line based delimiting.
## Only applies to data formats which are not line based such as JSON.
## Recommended to set to true.
# use_batch_format = false
## Content encoding for message payloads, can be set to "gzip" to or
## "identity" to apply no encoding.
##
## Please note that when use_batch_format = false each amqp message contains only
## a single metric, it is recommended to use compression with batch format
## for best results.
# content_encoding = "identity"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
`
func (q *AMQP) SampleConfig() string {
return sampleConfig
}
func (q *AMQP) Description() string {
return "Publishes metrics to an AMQP broker"
}
func (q *AMQP) SetSerializer(serializer serializers.Serializer) {
q.serializer = serializer
}
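
One reading of the routing options above (illustrative, not from the diff): with `routing_tag = "host"` and `routing_key = "telegraf"`, a metric carrying `host=web01` is published with routing key `web01`, while a metric without that tag falls back to the static key `telegraf`.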

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package amqp
func (q *AMQP) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -5,6 +5,7 @@ This plugin writes telegraf metrics to [Azure Application Insights](https://azur
## Configuration
```toml
# Send metrics to Azure Application Insights
[[outputs.application_insights]]
## Instrumentation key of the Application Insights resource.
instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx"

View File

@ -35,39 +35,10 @@ type ApplicationInsights struct {
}
var (
sampleConfig = `
## Instrumentation key of the Application Insights resource.
instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx"
## Regions that require endpoint modification https://docs.microsoft.com/en-us/azure/azure-monitor/app/custom-endpoints
# endpoint_url = "https://dc.services.visualstudio.com/v2/track"
## Timeout for closing (default: 5s).
# timeout = "5s"
## Enable additional diagnostic logging.
# enable_diagnostic_logging = false
## Context Tag Sources add Application Insights context tags to a tag value.
##
## For list of allowed context tag keys see:
## https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go
# [outputs.application_insights.context_tag_sources]
# "ai.cloud.role" = "kubernetes_container_name"
# "ai.cloud.roleInstance" = "kubernetes_pod_name"
`
is32Bit bool
is32BitChecked bool
)
func (a *ApplicationInsights) SampleConfig() string {
return sampleConfig
}
func (a *ApplicationInsights) Description() string {
return "Send metrics to Azure Application Insights"
}
func (a *ApplicationInsights) Connect() error {
if a.InstrumentationKey == "" {
return fmt.Errorf("instrumentation key is required")

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package application_insights
func (a *ApplicationInsights) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -11,15 +11,16 @@ Azure Data Explorer is a distributed, columnar store, purpose built for any type
## Configuration
```toml
# Sends metrics to Azure Data Explorer
[[outputs.azure_data_explorer]]
## The URI property of the Azure Data Explorer resource on Azure
## ex: https://myadxresource.australiasoutheast.kusto.windows.net
# endpoint_url = ""
## ex: endpoint_url = https://myadxresource.australiasoutheast.kusto.windows.net
endpoint_url = ""
## The Azure Data Explorer database that the metrics will be ingested into.
## The plugin will NOT generate this database automatically; it is expected that this database already exists before ingestion.
## ex: "exampledatabase"
# database = ""
database = ""
## Timeout for Azure Data Explorer operations
# timeout = "20s"

View File

@ -55,38 +55,6 @@ type ingestorFactory func(localClient, string, string) (localIngestor, error)
const createTableCommand = `.create-merge table ['%s'] (['fields']:dynamic, ['name']:string, ['tags']:dynamic, ['timestamp']:datetime);`
const createTableMappingCommand = `.create-or-alter table ['%s'] ingestion json mapping '%s_mapping' '[{"column":"fields", "Properties":{"Path":"$[\'fields\']"}},{"column":"name", "Properties":{"Path":"$[\'name\']"}},{"column":"tags", "Properties":{"Path":"$[\'tags\']"}},{"column":"timestamp", "Properties":{"Path":"$[\'timestamp\']"}}]'`
func (adx *AzureDataExplorer) Description() string {
return "Sends metrics to Azure Data Explorer"
}
func (adx *AzureDataExplorer) SampleConfig() string {
return `
## Azure Data Explorer cluster endpoint
## ex: endpoint_url = "https://clustername.australiasoutheast.kusto.windows.net"
endpoint_url = ""
## The Azure Data Explorer database that the metrics will be ingested into.
## The plugin will NOT generate this database automatically; it is expected that this database already exists before ingestion.
## ex: "exampledatabase"
database = ""
## Timeout for Azure Data Explorer operations
# timeout = "20s"
## Type of metrics grouping used when pushing to Azure Data Explorer.
## Default is "TablePerMetric" for one table per different metric.
## For more information, please check the plugin README.
# metrics_grouping_type = "TablePerMetric"
## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable").
# table_name = ""
## Creates tables and relevant mapping if set to true (default).
## Skips table and mapping creation if set to false; this is useful for running Telegraf with the lowest possible permissions, i.e. the table ingestor role.
# create_tables = true
`
}
func (adx *AzureDataExplorer) Connect() error {
authorizer, err := auth.NewAuthorizerFromEnvironmentWithResource(adx.Endpoint)
if err != nil {

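The two Kusto DDL constants above are plain format strings keyed by table name. As a sketch, assuming the plugin simply applies fmt.Sprintf (which the %s verbs suggest; the actual call site is outside this hunk), the commands issued for a metric named `cpu` would be:

```go
package main

import "fmt"

// Constants copied verbatim from the plugin source above; the %s verbs take
// the destination table name.
const createTableCommand = `.create-merge table ['%s'] (['fields']:dynamic, ['name']:string, ['tags']:dynamic, ['timestamp']:datetime);`
const createTableMappingCommand = `.create-or-alter table ['%s'] ingestion json mapping '%s_mapping' '[{"column":"fields", "Properties":{"Path":"$[\'fields\']"}},{"column":"name", "Properties":{"Path":"$[\'name\']"}},{"column":"tags", "Properties":{"Path":"$[\'tags\']"}},{"column":"timestamp", "Properties":{"Path":"$[\'timestamp\']"}}]'`

func main() {
	// With metrics_grouping_type = "TablePerMetric" each metric name gets its
	// own table; render the DDL the plugin would issue for a "cpu" metric.
	table := "cpu"
	fmt.Printf(createTableCommand+"\n", table)
	fmt.Printf(createTableMappingCommand+"\n", table, table)
}
```
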
View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package azure_data_explorer
func (adx *AzureDataExplorer) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -17,6 +17,7 @@ written as a dimension on each Azure Monitor metric.
## Configuration
```toml
# Send aggregate metrics to Azure Monitor
[[outputs.azure_monitor]]
## Timeout for HTTP writes.
# timeout = "20s"
@ -43,7 +44,7 @@ written as a dimension on each Azure Monitor metric.
## Optionally, if in Azure US Government, China, or other sovereign
## cloud environment, set the appropriate REST endpoint for receiving
## metrics. (Note: region may be unused in this context)
# endpoint_url = "https://monitoring.core.usgovcloudapi.net"
```

View File

@ -101,46 +101,6 @@ const (
maxRequestBodySize = 4000000
)
var sampleConfig = `
## Timeout for HTTP writes.
# timeout = "20s"
## Set the namespace prefix, defaults to "Telegraf/<input-name>".
# namespace_prefix = "Telegraf/"
## Azure Monitor doesn't have a string value type, so convert string
## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows
## a maximum of 10 dimensions so Telegraf will only send the first 10
## alphanumeric dimensions.
# strings_as_dimensions = false
## Both region and resource_id must be set or be available via the
## Instance Metadata service on Azure Virtual Machines.
#
## Azure Region to publish metrics against.
## ex: region = "southcentralus"
# region = ""
#
## The Azure Resource ID against which metric will be logged, e.g.
## ex: resource_id = "/subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.Compute/virtualMachines/<vm_name>"
# resource_id = ""
## Optionally, if in Azure US Government, China or other sovereign
## cloud environment, set appropriate REST endpoint for receiving
## metrics. (Note: region may be unused in this context)
# endpoint_url = "https://monitoring.core.usgovcloudapi.net"
`
// Description provides a description of the plugin
func (a *AzureMonitor) Description() string {
return "Send aggregate metrics to Azure Monitor"
}
// SampleConfig provides a sample configuration for the plugin
func (a *AzureMonitor) SampleConfig() string {
return sampleConfig
}
// Connect initializes the plugin and validates connectivity
func (a *AzureMonitor) Connect() error {
a.cache = make(map[time.Time]map[uint64]*aggregate, 36)

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package azure_monitor
func (a *AzureMonitor) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -8,11 +8,15 @@ Be aware that this plugin accesses APIs that are [chargeable](https://cloud.goog
## Configuration
```toml
# Configuration for Google Cloud BigQuery to send entries
[[outputs.bigquery]]
## GCP Project
project = "erudite-bloom-151019"
## Credentials File
credentials_file = "/path/to/service/account/key.json"
## The BigQuery dataset
## Google Cloud Platform Project
project = "my-gcp-project"
## The BigQuery dataset to write metrics to
dataset = "telegraf"
## Timeout for BigQuery operations.

View File

@ -21,23 +21,6 @@ const timeStampFieldName = "timestamp"
var defaultTimeout = config.Duration(5 * time.Second)
const sampleConfig = `
## Credentials File
credentials_file = "/path/to/service/account/key.json"
## Google Cloud Platform Project
project = "my-gcp-project"
## The namespace for the metric descriptor
dataset = "telegraf"
## Timeout for BigQuery operations.
# timeout = "5s"
## Character to replace hyphens on Metric name
# replace_hyphen_to = "_"
`
type BigQuery struct {
CredentialsFile string `toml:"credentials_file"`
Project string `toml:"project"`
@ -53,16 +36,6 @@ type BigQuery struct {
warnedOnHyphens map[string]bool
}
// SampleConfig returns the formatted sample configuration for the plugin.
func (s *BigQuery) SampleConfig() string {
return sampleConfig
}
// Description returns the human-readable function definition of the plugin.
func (s *BigQuery) Description() string {
return "Configuration for Google Cloud BigQuery to send entries"
}
func (s *BigQuery) Connect() error {
if s.Project == "" {
return fmt.Errorf("Project is a required field for BigQuery output")

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package bigquery
func (s *BigQuery) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -9,6 +9,7 @@ This section contains the default TOML to configure the plugin. You can
generate it using `telegraf --usage cloud_pubsub`.
```toml
# Publish Telegraf metrics to a Google Cloud PubSub topic
[[outputs.cloud_pubsub]]
## Required. Name of Google Cloud Platform (GCP) Project that owns
## the given PubSub topic.

View File

@ -17,56 +17,6 @@ import (
"google.golang.org/api/option"
)
const sampleConfig = `
## Required. Name of Google Cloud Platform (GCP) Project that owns
## the given PubSub topic.
project = "my-project"
## Required. Name of PubSub topic to publish metrics to.
topic = "my-topic"
## Required. Data format to consume.
## Each data format has its own unique set of configuration options.
## Read more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
## Optional. Filepath for GCP credentials JSON file to authorize calls to
## PubSub APIs. If not set explicitly, Telegraf will attempt to use
## Application Default Credentials, which is preferred.
# credentials_file = "path/to/my/creds.json"
## Optional. If true, will send all metrics per write in one PubSub message.
# send_batched = true
## The following publish_* parameters specifically configures batching
## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read
## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings
## Optional. Send a request to PubSub (i.e. actually publish a batch)
## when it has this many PubSub messages. If send_batched is true,
## this is ignored and treated as if it were 1.
# publish_count_threshold = 1000
## Optional. Send a request to PubSub (i.e. actually publish a batch)
## when it has this many outstanding bytes. If send_batched is true,
## this is ignored and treated as if it were 1
# publish_byte_threshold = 1000000
## Optional. Specifically configures requests made to the PubSub API.
# publish_num_go_routines = 2
## Optional. Specifies a timeout for requests to the PubSub API.
# publish_timeout = "30s"
## Optional. If true, published PubSub message data will be base64-encoded.
# base64_data = false
## Optional. PubSub attributes to add to metrics.
# [outputs.cloud_pubsub.attributes]
# my_attr = "tag_value"
`
type PubSub struct {
CredentialsFile string `toml:"credentials_file"`
Project string `toml:"project"`
@ -91,14 +41,6 @@ type PubSub struct {
publishResults []publishResult
}
func (ps *PubSub) Description() string {
return "Publish Telegraf metrics to a Google Cloud PubSub topic"
}
func (ps *PubSub) SampleConfig() string {
return sampleConfig
}
func (ps *PubSub) SetSerializer(serializer serializers.Serializer) {
ps.serializer = serializer
}

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package cloud_pubsub
func (ps *PubSub) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -20,7 +20,52 @@ left empty, the current timestamp will be used.
The IAM user needs only the `cloudwatch:PutMetricData` permission.
## Config
## Configuration
```toml
# Configuration for AWS CloudWatch output.
[[outputs.cloudwatch]]
## Amazon REGION
region = "us-east-1"
## Amazon Credentials
## Credentials are loaded in the following order
## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
## 2) Assumed credentials via STS if role_arn is specified
## 3) explicit credentials from 'access_key' and 'secret_key'
## 4) shared profile from 'profile'
## 5) environment variables
## 6) shared credentials file
## 7) EC2 Instance Profile
#access_key = ""
#secret_key = ""
#token = ""
#role_arn = ""
#web_identity_token_file = ""
#role_session_name = ""
#profile = ""
#shared_credential_file = ""
## Endpoint to make request against, the correct endpoint is automatically
## determined and this option should only be set if you wish to override the
## default.
## ex: endpoint_url = "http://localhost:8000"
# endpoint_url = ""
## Namespace for the CloudWatch MetricDatums
namespace = "InfluxData/Telegraf"
## If you have a large number of metrics, consider sending statistic values
## instead of raw metrics, which can both improve performance and reduce AWS
## API cost. If this flag is enabled, the plugin parses the required CloudWatch
## statistic fields (count, min, max, and sum) and sends them to CloudWatch.
## You can use the basicstats aggregator to calculate those fields; a companion
## aggregator sketch follows this config block. If not all statistic fields are
## available, all fields are still sent as raw metrics.
# write_statistics = false
## Enable high resolution metrics of 1 second (if not enabled, standard resolution metrics of 60 second precision are sent)
# high_resolution_metrics = false
```
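
The basicstats pairing mentioned above would look roughly like this on the aggregator side — a sketch only, with period and drop_original as illustrative choices:

```toml
# Pre-aggregate raw metrics into the count/min/max/sum fields
# that write_statistics expects (values illustrative).
[[aggregators.basicstats]]
  period = "30s"
  drop_original = true
  stats = ["count", "min", "max", "sum"]
```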
For this output plugin to function correctly the following variables
must be configured.

View File

@ -148,57 +148,6 @@ func (f *valueField) buildDatum() []types.MetricDatum {
}
}
var sampleConfig = `
## Amazon REGION
region = "us-east-1"
## Amazon Credentials
## Credentials are loaded in the following order
## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
## 2) Assumed credentials via STS if role_arn is specified
## 3) explicit credentials from 'access_key' and 'secret_key'
## 4) shared profile from 'profile'
## 5) environment variables
## 6) shared credentials file
## 7) EC2 Instance Profile
#access_key = ""
#secret_key = ""
#token = ""
#role_arn = ""
#web_identity_token_file = ""
#role_session_name = ""
#profile = ""
#shared_credential_file = ""
## Endpoint to make request against, the correct endpoint is automatically
## determined and this option should only be set if you wish to override the
## default.
## ex: endpoint_url = "http://localhost:8000"
# endpoint_url = ""
## Namespace for the CloudWatch MetricDatums
namespace = "InfluxData/Telegraf"
## If you have a large number of metrics, consider sending statistic values
## instead of raw metrics, which can both improve performance and reduce AWS
## API cost. If this flag is enabled, the plugin parses the required CloudWatch
## statistic fields (count, min, max, and sum) and sends them to CloudWatch.
## You can use the basicstats aggregator to calculate those fields. If not all
## statistic fields are available, all fields are still sent as raw metrics.
# write_statistics = false
## Enable high resolution metrics of 1 second (if not enabled, standard resolution metrics of 60 second precision are sent)
# high_resolution_metrics = false
`
func (c *CloudWatch) SampleConfig() string {
return sampleConfig
}
func (c *CloudWatch) Description() string {
return "Configuration for AWS CloudWatch output."
}
func (c *CloudWatch) Connect() error {
cfg, err := c.CredentialConfig.Credentials()
if err != nil {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package cloudwatch
func (c *CloudWatch) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -22,9 +22,10 @@ The IAM user needs the following permissions (see this [reference](https://docs.
- `logs:CreateLogStream` - required to create a new log stream in a log group.
- `logs:PutLogEvents` - required to upload a batch of log events into log stream.
## Config
## Configuration
```toml
# Configuration for AWS CloudWatchLogs output.
[[outputs.cloudwatch_logs]]
## The region is the Amazon region that you wish to connect to.
## Examples include but are not limited to:

View File

@ -73,75 +73,6 @@ const (
// Otherwise, the operation fails.
)
var sampleConfig = `
## The region is the Amazon region that you wish to connect to.
## Examples include but are not limited to:
## - us-west-1
## - us-west-2
## - us-east-1
## - ap-southeast-1
## - ap-southeast-2
## ...
region = "us-east-1"
## Amazon Credentials
## Credentials are loaded in the following order
## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
## 2) Assumed credentials via STS if role_arn is specified
## 3) explicit credentials from 'access_key' and 'secret_key'
## 4) shared profile from 'profile'
## 5) environment variables
## 6) shared credentials file
## 7) EC2 Instance Profile
#access_key = ""
#secret_key = ""
#token = ""
#role_arn = ""
#web_identity_token_file = ""
#role_session_name = ""
#profile = ""
#shared_credential_file = ""
## Endpoint to make request against, the correct endpoint is automatically
## determined and this option should only be set if you wish to override the
## default.
## ex: endpoint_url = "http://localhost:8000"
# endpoint_url = ""
## CloudWatch log group. Must be created in AWS CloudWatch Logs upfront!
## For example, you can specify the name of the k8s cluster here to group logs from all clusters in one place
log_group = "my-group-name"
## Log stream in log group
## Either a log stream name or a reference to a metric attribute from which it can be parsed:
## tag:<TAG_NAME> or field:<FIELD_NAME>. If the log stream does not exist, it will be created.
## Since AWS does not automatically delete log streams whose log entries have expired (i.e. empty log streams),
## you need to put appropriate house-keeping in place (https://forums.aws.amazon.com/thread.jspa?threadID=178855)
log_stream = "tag:location"
## Source of log data - metric name
## specify the name of the metric, from which the log data should be retrieved.
## I.e., if you are using docker_log plugin to stream logs from container, then
## specify log_data_metric_name = "docker_log"
log_data_metric_name = "docker_log"
## Specify from which metric attribute the log data should be retrieved:
## tag:<TAG_NAME> or field:<FIELD_NAME>.
## I.e., if you are using docker_log plugin to stream logs from container, then
## specify log_data_source = "field:message"
log_data_source = "field:message"
`
// SampleConfig returns sample config description for plugin
func (c *CloudWatchLogs) SampleConfig() string {
return sampleConfig
}
// Description returns one-liner description for plugin
func (c *CloudWatchLogs) Description() string {
return "Configuration for AWS CloudWatchLogs output."
}
// Init initialize plugin with checking configuration parameters
func (c *CloudWatchLogs) Init() error {
if c.LogGroup == "" {
@ -276,7 +207,10 @@ func (c *CloudWatchLogs) Write(metrics []telegraf.Metric) error {
lsContainer := &logStreamContainer{
currentBatchSizeBytes: 0,
currentBatchIndex: 0,
messageBatches: []messageBatch{{}}}
messageBatches: []messageBatch{
{},
},
}
switch c.lsKey {
case "tag":

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package cloudwatch_logs
func (c *CloudWatchLogs) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -29,20 +29,6 @@ type CrateDB struct {
DB *sql.DB
}
var sampleConfig = `
# A github.com/jackc/pgx/v4 connection string.
# See https://pkg.go.dev/github.com/jackc/pgx/v4#ParseConfig
url = "postgres://user:password@localhost/schema?sslmode=disable"
# Timeout for all CrateDB queries.
timeout = "5s"
# Name of the table to store metrics in.
table = "metrics"
# If true, and the metrics table does not exist, create it automatically.
table_create = true
# The character(s) to replace any '.' in an object key with
key_separator = "_"
`
func (c *CrateDB) Connect() error {
db, err := sql.Open("pgx", c.URL)
if err != nil {
@ -231,14 +217,6 @@ func hashID(m telegraf.Metric) int64 {
return int64(binary.LittleEndian.Uint64(sum))
}
func (c *CrateDB) SampleConfig() string {
return sampleConfig
}
func (c *CrateDB) Description() string {
return "Configuration for CrateDB to send metrics to."
}
func (c *CrateDB) Close() error {
return c.DB.Close()
}

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package cratedb
func (c *CrateDB) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -6,6 +6,7 @@ This plugin writes to the [Datadog Metrics API][metrics] and requires an
## Configuration
```toml
# Configuration for DataDog API to send metrics to.
[[outputs.datadog]]
## Datadog API key
apikey = "my-secret-key"

View File

@ -28,24 +28,6 @@ type Datadog struct {
proxy.HTTPProxy
}
var sampleConfig = `
## Datadog API key
apikey = "my-secret-key"
## Connection timeout.
# timeout = "5s"
## Write URL override; useful for debugging.
# url = "https://app.datadoghq.com/api/v1/series"
## Set http_proxy (telegraf uses the system wide proxy settings if it isn't set)
# http_proxy_url = "http://localhost:8888"
## Override the default (none) compression used to send data.
## Supports: "zlib", "none"
# compression = "none"
`
type TimeSeries struct {
Series []*Metric `json:"series"`
}
@ -170,14 +152,6 @@ func (d *Datadog) Write(metrics []telegraf.Metric) error {
return nil
}
func (d *Datadog) SampleConfig() string {
return sampleConfig
}
func (d *Datadog) Description() string {
return "Configuration for DataDog API to send metrics to."
}
func (d *Datadog) authenticatedURL() string {
q := url.Values{
"api_key": []string{d.Apikey},

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package datadog
func (d *Datadog) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -7,10 +7,8 @@ import (
type Discard struct{}
func (d *Discard) Connect() error { return nil }
func (d *Discard) Close() error { return nil }
func (d *Discard) SampleConfig() string { return "" }
func (d *Discard) Description() string { return "Send metrics to nowhere at all" }
func (d *Discard) Write(_ []telegraf.Metric) error {
return nil
}

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package discard
func (d *Discard) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -58,21 +58,42 @@ You can learn more about how to use the Dynatrace API [here](https://www.dynatra
## Configuration
```toml
# Send telegraf metrics to a Dynatrace environment
[[outputs.dynatrace]]
## Leave empty or use the local ingest endpoint of your OneAgent monitored host (e.g.: http://127.0.0.1:14499/metrics/ingest).
## Set Dynatrace environment URL (e.g.: https://YOUR_DOMAIN/api/v2/metrics/ingest) if you do not use a OneAgent
## For usage with the Dynatrace OneAgent you can omit any configuration;
## the only requirement is that the OneAgent is running on the same host.
## Only set up the environment URL and token if you want to monitor a host without the OneAgent present.
##
## Your Dynatrace environment URL.
## For Dynatrace OneAgent you can leave this empty or set it to "http://127.0.0.1:14499/metrics/ingest" (default)
## For Dynatrace SaaS environments the URL scheme is "https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest"
## For Dynatrace Managed environments the URL scheme is "https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest"
url = ""
## Your Dynatrace API token.
## Create an API token within your Dynatrace environment, by navigating to Settings > Integration > Dynatrace API
## The API token needs data ingest scope permission. When using OneAgent, no API token is required.
api_token = ""
## Optional prefix for metric names (e.g.: "telegraf")
prefix = "telegraf"
## Flag for skipping the tls certificate check, just for testing purposes, should be false by default
insecure_skip_verify = false
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Optional flag for ignoring tls certificate check
# insecure_skip_verify = false
## Connection timeout, defaults to "5s" if not set.
timeout = "5s"
## If you want metrics to be treated and reported as delta counters, add the metric names here
additional_counters = [ ]
## Optional dimensions to be added to every metric
[outputs.dynatrace.default_dimensions]
default_key = "default value"
# [outputs.dynatrace.default_dimensions]
# default_key = "default value"
```
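
As an illustration (not part of this diff): a metric name listed in `additional_counters`, e.g. `additional_counters = ["requests"]`, is reported to Dynatrace as a delta counter rather than an absolute gauge value.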
### `url`

View File

@ -38,45 +38,6 @@ type Dynatrace struct {
loggedMetrics map[string]bool // New empty set
}
const sampleConfig = `
## For usage with the Dynatrace OneAgent you can omit any configuration;
## the only requirement is that the OneAgent is running on the same host.
## Only set up the environment URL and token if you want to monitor a host without the OneAgent present.
##
## Your Dynatrace environment URL.
## For Dynatrace OneAgent you can leave this empty or set it to "http://127.0.0.1:14499/metrics/ingest" (default)
## For Dynatrace SaaS environments the URL scheme is "https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest"
## For Dynatrace Managed environments the URL scheme is "https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest"
url = ""
## Your Dynatrace API token.
## Create an API token within your Dynatrace environment, by navigating to Settings > Integration > Dynatrace API
## The API token needs data ingest scope permission. When using OneAgent, no API token is required.
api_token = ""
## Optional prefix for metric names (e.g.: "telegraf")
prefix = "telegraf"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Optional flag for ignoring tls certificate check
# insecure_skip_verify = false
## Connection timeout, defaults to "5s" if not set.
timeout = "5s"
## If you want metrics to be treated and reported as delta counters, add the metric names here
additional_counters = [ ]
## Optional dimensions to be added to every metric
# [outputs.dynatrace.default_dimensions]
# default_key = "default value"
`
// Connect Connects the Dynatrace output plugin to the Telegraf stream
func (d *Dynatrace) Connect() error {
return nil
@ -88,16 +49,6 @@ func (d *Dynatrace) Close() error {
return nil
}
// SampleConfig Returns a sample configuration for the Dynatrace output plugin
func (d *Dynatrace) SampleConfig() string {
return sampleConfig
}
// Description returns the description for the Dynatrace output plugin
func (d *Dynatrace) Description() string {
return "Send telegraf metrics to a Dynatrace environment"
}
func (d *Dynatrace) Write(metrics []telegraf.Metric) error {
if len(metrics) == 0 {
return nil

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package dynatrace
func (d *Dynatrace) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -184,22 +184,26 @@ POST https://es.us-east-1.amazonaws.com/2021-01-01/opensearch/upgradeDomain
## Configuration
```toml
# Configuration for Elasticsearch to send metrics to.
[[outputs.elasticsearch]]
## The full HTTP endpoint URL for your Elasticsearch instance
## Multiple urls can be specified as part of the same cluster,
## this means that only ONE of the urls will be written to each interval.
## this means that only ONE of the urls will be written to each interval
urls = [ "http://node1.es.example.com:9200" ] # required.
## Elasticsearch client timeout, defaults to "5s" if not set.
timeout = "5s"
## Set to true to ask Elasticsearch a list of all cluster nodes,
## thus it is not necessary to list all nodes in the urls config option
enable_sniffer = false
## Set to true to enable gzip compression
enable_gzip = false
## Set the interval to check if the Elasticsearch nodes are available
## Setting to "0s" will disable the health check (not recommended in production)
health_check_interval = "10s"
## Set the timeout for periodic health checks.
# health_check_timeout = "1s"
## HTTP basic authentication details.
## HTTP basic authentication details
# username = "telegraf"
# password = "mypassword"
## HTTP bearer token authentication details
@ -249,7 +253,7 @@ POST https://es.us-east-1.amazonaws.com/2021-01-01/opensearch/upgradeDomain
## NaNs and inf will be replaced with the given number, -inf with the negative of that number
# float_handling = "none"
# float_replacement_value = 0.0
## Pipeline Config
## To use an ingest pipeline, set this to the name of the pipeline you want to use.
# use_pipeline = "my_pipeline"

View File

@ -52,85 +52,6 @@ type Elasticsearch struct {
Client *elastic.Client
}
var sampleConfig = `
## The full HTTP endpoint URL for your Elasticsearch instance
## Multiple urls can be specified as part of the same cluster,
## this means that only ONE of the urls will be written to each interval.
urls = [ "http://node1.es.example.com:9200" ] # required.
## Elasticsearch client timeout, defaults to "5s" if not set.
timeout = "5s"
## Set to true to ask Elasticsearch a list of all cluster nodes,
## thus it is not necessary to list all nodes in the urls config option.
enable_sniffer = false
## Set to true to enable gzip compression
enable_gzip = false
## Set the interval to check if the Elasticsearch nodes are available
## Setting to "0s" will disable the health check (not recommended in production)
health_check_interval = "10s"
## Set the timeout for periodic health checks.
# health_check_timeout = "1s"
## HTTP basic authentication details
# username = "telegraf"
# password = "mypassword"
## HTTP bearer token authentication details
# auth_bearer_token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9"
## Index Config
## The target index for metrics (Elasticsearch will create it if it does not exist).
## You can use the date specifiers below to create indexes per time frame.
## The metric timestamp will be used to decide the destination index name
# %Y - year (2016)
# %y - last two digits of year (00..99)
# %m - month (01..12)
# %d - day of month (e.g., 01)
# %H - hour (00..23)
# %V - week of the year (ISO week) (01..53)
## Additionally, you can specify a tag name using the notation {{tag_name}}
## which will be used as part of the index name. If the tag does not exist,
## the default tag value will be used.
# index_name = "telegraf-{{host}}-%Y.%m.%d"
# default_tag_value = "none"
index_name = "telegraf-%Y.%m.%d" # required.
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Template Config
## Set to true if you want telegraf to manage its index template.
## If enabled it will create a recommended index template for telegraf indexes
manage_template = true
## The template name used for telegraf indexes
template_name = "telegraf"
## Set to true if you want telegraf to overwrite an existing template
overwrite_template = false
## If set to true a unique ID hash will be sent as a sha256(concat(timestamp,measurement,series-hash)) string;
## this enables data resend and updating metric points, avoiding duplicated metrics with different IDs
force_document_id = false
## Specifies the handling of NaN and Inf values.
## This option can have the following values:
## none -- do not modify field-values (default); will produce an error if NaNs or infs are encountered
## drop -- drop fields containing NaNs or infs
## replace -- replace with the value in "float_replacement_value" (default: 0.0)
## NaNs and inf will be replaced with the given number, -inf with the negative of that number
# float_handling = "none"
# float_replacement_value = 0.0
## Pipeline Config
## To use an ingest pipeline, set this to the name of the pipeline you want to use.
# use_pipeline = "my_pipeline"
## Additionally, you can specify a tag name using the notation {{tag_name}}
## which will be used as part of the pipeline name. If the tag does not exist,
## the default pipeline will be used as the pipeline. If no default pipeline is set,
## no pipeline is used for the metric.
# use_pipeline = "{{es_pipeline}}"
# default_pipeline = "my_pipeline"
`
const telegrafTemplate = `
{
{{ if (lt .Version 6) }}
@ -530,14 +451,6 @@ func getISOWeek(eventTime time.Time) string {
return strconv.Itoa(week)
}
func (a *Elasticsearch) SampleConfig() string {
return sampleConfig
}
func (a *Elasticsearch) Description() string {
return "Configuration for Elasticsearch to send metrics to."
}
func (a *Elasticsearch) Close() error {
a.Client = nil
return nil

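The date specifiers documented in the removed block map naturally onto Go time formatting. A hedged sketch of the expansion — a hypothetical helper, not the plugin's actual implementation (note the real getISOWeek above):

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

// expandIndexName sketches how the %Y/%y/%m/%d/%H/%V specifiers from the
// sample config could be expanded against a metric's timestamp.
func expandIndexName(pattern string, t time.Time) string {
	_, week := t.ISOWeek()
	return strings.NewReplacer(
		"%Y", t.Format("2006"),
		"%y", t.Format("06"),
		"%m", t.Format("01"),
		"%d", t.Format("02"),
		"%H", t.Format("15"),
		"%V", fmt.Sprintf("%02d", week),
	).Replace(pattern)
}

func main() {
	ts := time.Date(2022, time.April, 7, 16, 55, 0, 0, time.UTC)
	fmt.Println(expandIndexName("telegraf-%Y.%m.%d", ts)) // telegraf-2022.04.07
}
```
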
View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package elasticsearch
func (a *Elasticsearch) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -9,17 +9,16 @@ The plugin uses the Telegraf serializers to format the metric data sent in the m
## Configuration
```toml
[[ outputs.event_hubs ]]
## The full connection string to the Event Hub (required)
## The shared access key must have "Send" permissions on the target Event Hub.
connection_string = "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName"
## Client timeout (defaults to 30s)
# timeout = "30s"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "json"
# Configuration for Event Hubs output plugin
[[outputs.event_hubs]]
## The full connection string to the Event Hub (required)
## The shared access key must have "Send" permissions on the target Event Hub.
connection_string = "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName"
## Client timeout (defaults to 30s)
# timeout = "30s"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "json"
```

View File

@ -60,27 +60,6 @@ const (
defaultRequestTimeout = time.Second * 30
)
func (e *EventHubs) Description() string {
return "Configuration for Event Hubs output plugin"
}
func (e *EventHubs) SampleConfig() string {
return `
## The full connection string to the Event Hub (required)
## The shared access key must have "Send" permissions on the target Event Hub.
connection_string = "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName"
## Client timeout (defaults to 30s)
# timeout = "30s"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "json"
`
}
func (e *EventHubs) Init() error {
err := e.Hub.GetHub(e.ConnectionString)

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package event_hubs
func (e *EventHubs) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -15,6 +15,7 @@ For better performance, consider execd, which runs continuously.
## Configuration
```toml
# Send metrics to command as input over stdin
[[outputs.exec]]
## Command to ingest metrics via stdin.
command = ["tee", "-a", "/dev/null"]

View File

@ -27,20 +27,6 @@ type Exec struct {
serializer serializers.Serializer
}
var sampleConfig = `
## Command to ingest metrics via stdin.
command = ["tee", "-a", "/dev/null"]
## Timeout for command to complete.
# timeout = "5s"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
`
func (e *Exec) Init() error {
e.runner = &CommandRunner{log: e.Log}
@ -62,16 +48,6 @@ func (e *Exec) Close() error {
return nil
}
// Description describes the plugin.
func (e *Exec) Description() string {
return "Send metrics to command as input over stdin"
}
// SampleConfig returns a sample configuration.
func (e *Exec) SampleConfig() string {
return sampleConfig
}
// Write writes the metrics to the configured command.
func (e *Exec) Write(metrics []telegraf.Metric) error {
var buffer bytes.Buffer

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package exec
func (e *Exec) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -94,7 +94,6 @@ func TestTruncate(t *testing.T) {
func TestExecDocs(t *testing.T) {
e := &Exec{}
e.Description()
e.SampleConfig()
require.NoError(t, e.Close())

View File

@ -7,6 +7,7 @@ Telegraf minimum version: Telegraf 1.15.0
## Configuration
```toml
# Run executable as long-running output plugin
[[outputs.execd]]
## One program to run as daemon.
## NOTE: process and each argument should each be their own string

View File

@ -14,20 +14,6 @@ import (
"github.com/influxdata/telegraf/plugins/serializers"
)
const sampleConfig = `
## Program to run as daemon
command = ["my-telegraf-output", "--some-flag", "value"]
## Delay before the process is restarted after an unexpected termination
restart_delay = "10s"
## Data format to export.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
`
type Execd struct {
Command []string `toml:"command"`
RestartDelay config.Duration `toml:"restart_delay"`
@ -37,14 +23,6 @@ type Execd struct {
serializer serializers.Serializer
}
func (e *Execd) SampleConfig() string {
return sampleConfig
}
func (e *Execd) Description() string {
return "Run executable as long-running output plugin"
}
func (e *Execd) SetSerializer(s serializers.Serializer) {
e.serializer = s
}

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package execd
func (e *Execd) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -5,6 +5,7 @@ This plugin writes telegraf metrics to files
## Configuration
```toml
# Send telegraf metrics to file(s)
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]

View File

@ -26,34 +26,6 @@ type File struct {
serializer serializers.Serializer
}
var sampleConfig = `
## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]
## Use batch serialization format instead of line based delimiting. The
## batch format allows for the production of non line based output formats and
## may more efficiently encode metric groups.
# use_batch_format = false
## The file will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed.
# rotation_interval = "0d"
## The logfile will be rotated when it becomes larger than the specified
## size. When set to 0 no size based rotation is performed.
# rotation_max_size = "0MB"
## Maximum number of rotated archives to keep, any older logs are deleted.
## If set to -1, no archives are removed.
# rotation_max_archives = 5
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
`
func (f *File) SetSerializer(serializer serializers.Serializer) {
f.serializer = serializer
}
@ -94,14 +66,6 @@ func (f *File) Close() error {
return err
}
func (f *File) SampleConfig() string {
return sampleConfig
}
func (f *File) Description() string {
return "Send telegraf metrics to file(s)"
}
func (f *File) Write(metrics []telegraf.Metric) error {
var writeErr error

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package file
func (f *File) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -21,6 +21,17 @@ see the [Graphite Data Format](../../../docs/DATA_FORMATS_OUTPUT.md)
## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
template = "host.tags.measurement.field"
## Enable Graphite tags support
# graphite_tag_support = false
## Define how metric names and tags are sanitized; options are "strict", or "compatible"
## strict - Default method, and backwards compatible with previous versions of Telegraf
## compatible - More relaxed sanitizing when using tags, and compatible with the graphite spec
# graphite_tag_sanitize_mode = "strict"
## Character for separating metric name and field for Graphite tags
# graphite_separator = "."
## Graphite templates patterns
## 1. Template for cpu
## 2. Template for disk*
@ -31,12 +42,6 @@ see the [Graphite Data Format](../../../docs/DATA_FORMATS_OUTPUT.md)
# "host.measurement.tags.field"
#]
## Enable Graphite tags support
# graphite_tag_support = false
## Character for separating metric name and field for Graphite tags
# graphite_separator = "."
## timeout in seconds for the write connection to graphite
timeout = 2
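
As a worked example (illustrative, not from the diff): with `template = "host.tags.measurement.field"`, a metric `cpu,host=web01,region=us usage_idle=99` would typically serialize to a path like `web01.us.cpu.usage_idle`, with any remaining tags ordered alphabetically.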

View File

@ -30,49 +30,6 @@ type Graphite struct {
tlsint.ClientConfig
}
var sampleConfig = `
## TCP endpoint for your graphite instance.
## If multiple endpoints are configured, output will be load balanced.
## Only one of the endpoints will be written to with each iteration.
servers = ["localhost:2003"]
## Prefix metrics name
prefix = ""
## Graphite output template
## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
template = "host.tags.measurement.field"
## Enable Graphite tags support
# graphite_tag_support = false
## Define how metric names and tags are sanitized; options are "strict", or "compatible"
## strict - Default method, and backwards compatible with previous versions of Telegraf
## compatible - More relaxed sanitizing when using tags, and compatible with the graphite spec
# graphite_tag_sanitize_mode = "strict"
## Character for separating metric name and field for Graphite tags
# graphite_separator = "."
## Graphite templates patterns
## 1. Template for cpu
## 2. Template for disk*
## 3. Default template
# templates = [
# "cpu tags.measurement.host.field",
# "disk* measurement.field",
# "host.measurement.tags.field"
#]
## timeout in seconds for the write connection to graphite
timeout = 2
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
`
func (g *Graphite) Connect() error {
// Set default values
if g.Timeout <= 0 {
@ -118,14 +75,6 @@ func (g *Graphite) Close() error {
return nil
}
func (g *Graphite) SampleConfig() string {
return sampleConfig
}
func (g *Graphite) Description() string {
return "Configuration for Graphite server to send metrics to"
}
// We need check eof as we can write to nothing without noticing anything is wrong
// the connection stays in a close_wait
// We can detect that by finding an eof

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package graphite
func (g *Graphite) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -24,6 +24,7 @@ to the field name.
## Configuration
```toml
# Send telegraf metrics to graylog
[[outputs.graylog]]
## Endpoints for your graylog instances.
servers = ["udp://127.0.0.1:12201"]

View File

@ -317,31 +317,6 @@ type Graylog struct {
closers []io.WriteCloser
}
var sampleConfig = `
## Endpoints for your graylog instances.
servers = ["udp://127.0.0.1:12201"]
## Connection timeout.
# timeout = "5s"
## The field to use as the GELF short_message, if unset the static string
## "telegraf" will be used.
## example: short_message_field = "message"
# short_message_field = ""
## According to the GELF payload specification, additional field names must be prefixed
## with an underscore. Previous versions did not prefix custom field 'name' with underscore.
## Set to true for backward compatibility.
# name_field_no_prefix = false
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
`
func (g *Graylog) Connect() error {
var writers []io.Writer
dialer := &net.Dialer{Timeout: time.Duration(g.Timeout)}
@ -376,14 +351,6 @@ func (g *Graylog) Close() error {
return nil
}
func (g *Graylog) SampleConfig() string {
return sampleConfig
}
func (g *Graylog) Description() string {
return "Send telegraf metrics to graylog"
}
func (g *Graylog) Write(metrics []telegraf.Metric) error {
for _, metric := range metrics {
values, err := g.serialize(metric)

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package graylog
func (g *Graylog) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -7,6 +7,7 @@ This plugin writes to a [GroundWork Monitor][1] instance. Plugin only supports G
## Configuration
```toml
# Send telegraf metrics to GroundWork Monitor
[[outputs.groundwork]]
## URL of your groundwork instance.
url = "https://groundwork.example.com"
@ -17,7 +18,7 @@ This plugin writes to a [GroundWork Monitor][1] instance. Plugin only supports G
## Username and password to access GroundWork API.
username = ""
password = ""
## Default display name for the host with services(metrics).
# default_host = "telegraf"

View File

@ -17,30 +17,6 @@ import (
"github.com/influxdata/telegraf/plugins/outputs"
)
const sampleConfig = `
## URL of your groundwork instance.
url = "https://groundwork.example.com"
## Agent uuid for GroundWork API Server.
agent_id = ""
## Username and password to access GroundWork API.
username = ""
password = ""
## Default display name for the host with services(metrics).
# default_host = "telegraf"
## Default service state.
# default_service_state = "SERVICE_OK"
## The name of the tag that contains the hostname.
# resource_tag = "host"
## The name of the tag that contains the host group name.
# group_tag = "group"
`
type metricMeta struct {
group string
resource string
@ -59,10 +35,6 @@ type Groundwork struct {
client clients.GWClient
}
func (g *Groundwork) SampleConfig() string {
return sampleConfig
}
func (g *Groundwork) Init() error {
if g.Server == "" {
return errors.New("no 'url' provided")
@ -212,10 +184,6 @@ func (g *Groundwork) Write(metrics []telegraf.Metric) error {
return nil
}
func (g *Groundwork) Description() string {
return "Send telegraf metrics to GroundWork Monitor"
}
func init() {
outputs.Add("groundwork", func() telegraf.Output {
return &Groundwork{

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package groundwork
func (g *Groundwork) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -10,6 +10,7 @@ must fail in order for the resource to enter the failed state.
## Configuration
```toml
# Configurable HTTP health check resource based on metrics
[[outputs.health]]
## Address and port to listen on.
## ex: service_address = "http://localhost:8080"

View File

@ -23,45 +23,6 @@ const (
defaultWriteTimeout = 5 * time.Second
)
var sampleConfig = `
## Address and port to listen on.
## ex: service_address = "http://localhost:8080"
## service_address = "unix:///var/run/telegraf-health.sock"
# service_address = "http://:8080"
## The maximum duration for reading the entire request.
# read_timeout = "5s"
## The maximum duration for writing the entire response.
# write_timeout = "5s"
## Username and password to accept for HTTP basic authentication.
# basic_username = "user1"
# basic_password = "secret"
## Allowed CA certificates for client certificates.
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
## TLS server certificate and private key.
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## One or more check sub-tables should be defined, it is also recommended to
## use metric filtering to limit the metrics that flow into this output.
##
## When using the default buffer sizes, this example will fail when the
## metric buffer is half full.
##
## namepass = ["internal_write"]
## tagpass = { output = ["influxdb"] }
##
## [[outputs.health.compares]]
## field = "buffer_size"
## lt = 5000.0
##
## [[outputs.health.contains]]
## field = "buffer_size"
`
type Checker interface {
// Check returns true if the metrics meet its criteria.
Check(metrics []telegraf.Metric) bool
@ -91,14 +52,6 @@ type Health struct {
healthy bool
}
func (h *Health) SampleConfig() string {
return sampleConfig
}
func (h *Health) Description() string {
return "Configurable HTTP health check resource based on metrics"
}
func (h *Health) Init() error {
u, err := url.Parse(h.ServiceAddress)
if err != nil {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package health
func (h *Health) SampleConfig() string {
return `{{ .SampleConfig }}`
}
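
The `Checker` interface shown above reduces a batch of metrics to a single boolean, and the removed sample config sketches two kinds of checks (`compares` and `contains`). A hedged sketch of a `compares`-style check with only the `lt` bound; the real plugin's numeric conversion is more thorough:

```go
// Hedged sketch of a "compares" style check satisfying the Checker
// interface; only the lt bound is modeled and numeric handling is
// simplified relative to the actual plugin.
package health

import "github.com/influxdata/telegraf"

type Compares struct {
	Field string
	Lt    *float64
}

func (c *Compares) Check(metrics []telegraf.Metric) bool {
	for _, m := range metrics {
		value, ok := m.GetField(c.Field)
		if !ok {
			continue
		}
		f, ok := value.(float64)
		if !ok {
			// Non-float fields fail the check in this sketch.
			return false
		}
		if c.Lt != nil && f >= *c.Lt {
			return false
		}
	}
	return true
}
```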

View File

@ -12,7 +12,7 @@ import (
"time"
awsV2 "github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/influxdata/telegraf"
internalaws "github.com/influxdata/telegraf/config/aws"
"github.com/influxdata/telegraf/internal"
@ -26,90 +26,6 @@ const (
defaultURL = "http://127.0.0.1:8080/telegraf"
)
var sampleConfig = `
## URL is the address to send metrics to
url = "http://127.0.0.1:8080/telegraf"
## Timeout for HTTP message
# timeout = "5s"
## HTTP method, one of: "POST" or "PUT"
# method = "POST"
## HTTP Basic Auth credentials
# username = "username"
# password = "pa$$word"
## OAuth2 Client Credentials Grant
# client_id = "clientid"
# client_secret = "secret"
# token_url = "https://indentityprovider/oauth2/v1/token"
# scopes = ["urn:opc:idm:__myscopes__"]
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Optional Cookie authentication
# cookie_auth_url = "https://localhost/authMe"
# cookie_auth_method = "POST"
# cookie_auth_username = "username"
# cookie_auth_password = "pa$$word"
# cookie_auth_headers = '{"Content-Type": "application/json", "X-MY-HEADER":"hello"}'
# cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}'
## When cookie_auth_renewal is unset or set to "0", authentication happens once and the cookie is never renewed
# cookie_auth_renewal = "5m"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
## Use batch serialization format (default) instead of line based format.
## Batch format is more efficient and should be used unless line based
## format is really needed.
# use_batch_format = true
## HTTP Content-Encoding for write request body, can be set to "gzip" to
## compress body or "identity" to apply no encoding.
# content_encoding = "identity"
## Additional HTTP headers
# [outputs.http.headers]
# # Should be set manually to "application/json" for json data_format
# Content-Type = "text/plain; charset=utf-8"
## Idle (keep-alive) connection timeout.
## Maximum amount of time before idle connection is closed.
## Zero means no limit.
# idle_conn_timeout = 0
## Amazon Region
#region = "us-east-1"
## Amazon Credentials
## Credentials are loaded in the following order
## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
## 2) Assumed credentials via STS if role_arn is specified
## 3) explicit credentials from 'access_key' and 'secret_key'
## 4) shared profile from 'profile'
## 5) environment variables
## 6) shared credentials file
## 7) EC2 Instance Profile
#access_key = ""
#secret_key = ""
#token = ""
#role_arn = ""
#web_identity_token_file = ""
#role_session_name = ""
#profile = ""
#shared_credential_file = ""
`
const (
defaultContentType = "text/plain; charset=utf-8"
defaultMethod = http.MethodPost
@ -171,14 +87,6 @@ func (h *HTTP) Close() error {
return nil
}
func (h *HTTP) Description() string {
return "A plugin that can transmit metrics over HTTP"
}
func (h *HTTP) SampleConfig() string {
return sampleConfig
}
func (h *HTTP) Write(metrics []telegraf.Metric) error {
var reqBody []byte

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package http
func (h *HTTP) SampleConfig() string {
return `{{ .SampleConfig }}`
}
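
Among the removed options, `content_encoding` is worth a closer look: with "gzip" the serialized payload is compressed and the matching request header set. A minimal sketch under that assumption; the function names are illustrative, not the plugin's API:

```go
// Hedged sketch of the content_encoding behavior from the removed sample
// config: gzip-compress the serialized payload and set the matching header.
package httpout

import (
	"bytes"
	"compress/gzip"
	"io"
	"net/http"
)

func encodeBody(payload []byte, encoding string) (io.Reader, string, error) {
	if encoding != "gzip" {
		return bytes.NewReader(payload), "identity", nil
	}
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(payload); err != nil {
		return nil, "", err
	}
	if err := zw.Close(); err != nil {
		return nil, "", err
	}
	return &buf, "gzip", nil
}

func newWriteRequest(url string, payload []byte, encoding string) (*http.Request, error) {
	body, enc, err := encodeBody(payload, encoding)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest(http.MethodPost, url, body)
	if err != nil {
		return nil, err
	}
	if enc == "gzip" {
		req.Header.Set("Content-Encoding", "gzip")
	}
	return req, nil
}
```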

View File

@ -63,84 +63,6 @@ type InfluxDB struct {
Log telegraf.Logger
}
var sampleConfig = `
## The full HTTP or UDP URL for your InfluxDB instance.
##
## Multiple URLs can be specified for a single cluster, only ONE of the
## urls will be written to each interval.
# urls = ["unix:///var/run/influxdb.sock"]
# urls = ["udp://127.0.0.1:8089"]
# urls = ["http://127.0.0.1:8086"]
## The target database for metrics; will be created as needed.
## For UDP url endpoint database needs to be configured on server side.
# database = "telegraf"
## The value of this tag will be used to determine the database. If this
## tag is not set the 'database' option is used as the default.
# database_tag = ""
## If true, the 'database_tag' will not be included in the written metric.
# exclude_database_tag = false
## If true, no CREATE DATABASE queries will be sent. Set to true when using
## Telegraf with a user without permissions to create databases or when the
## database already exists.
# skip_database_creation = false
## Name of existing retention policy to write to. Empty string writes to
## the default retention policy. Only takes effect when using HTTP.
# retention_policy = ""
## The value of this tag will be used to determine the retention policy. If this
## tag is not set the 'retention_policy' option is used as the default.
# retention_policy_tag = ""
## If true, the 'retention_policy_tag' will not be included in the written metric.
# exclude_retention_policy_tag = false
## Write consistency (clusters only), can be: "any", "one", "quorum", "all".
## Only takes effect when using HTTP.
# write_consistency = "any"
## Timeout for HTTP messages.
# timeout = "5s"
## HTTP Basic Auth
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
## HTTP User-Agent
# user_agent = "telegraf"
## UDP payload size is the maximum packet size to send.
# udp_payload = "512B"
## Optional TLS Config for use on HTTP connections.
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## HTTP Proxy override. If unset, the standard proxy environment
## variables are consulted to determine which proxy, if any, should be used.
# http_proxy = "http://corporate.proxy:3128"
## Additional HTTP headers
# http_headers = {"X-Special-Header" = "Special-Value"}
## HTTP Content-Encoding for write request body, can be set to "gzip" to
## compress body or "identity" to apply no encoding.
# content_encoding = "gzip"
## When true, Telegraf will output unsigned integers as unsigned values,
## i.e.: "42u". You will need a version of InfluxDB supporting unsigned
## integer values. Enabling this option will result in field type errors if
## existing data has been written.
# influx_uint_support = false
`
func (i *InfluxDB) Connect() error {
ctx := context.Background()
@ -198,14 +120,6 @@ func (i *InfluxDB) Close() error {
return nil
}
func (i *InfluxDB) Description() string {
return "Configuration for sending metrics to InfluxDB"
}
func (i *InfluxDB) SampleConfig() string {
return sampleConfig
}
// Write sends metrics to one of the configured servers, logging each
// unsuccessful. If all servers fail, return an error.
func (i *InfluxDB) Write(metrics []telegraf.Metric) error {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package influxdb
func (i *InfluxDB) SampleConfig() string {
return `{{ .SampleConfig }}`
}
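
The `database_tag` / `exclude_database_tag` pair in the removed sample config routes each metric to a per-metric database, optionally stripping the routing tag before the write. A hedged sketch of that lookup; the helper name is an invention for illustration:

```go
// Hedged sketch of database routing via database_tag; falls back to the
// configured database when the tag is absent.
package influxdb

import "github.com/influxdata/telegraf"

func targetDatabase(m telegraf.Metric, defaultDB, databaseTag string, excludeTag bool) string {
	if databaseTag == "" {
		return defaultDB
	}
	db, ok := m.GetTag(databaseTag)
	if !ok {
		return defaultDB
	}
	if excludeTag {
		// Drop the routing tag so it is not written with the metric.
		m.RemoveTag(databaseTag)
	}
	return db
}
```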

View File

@ -21,58 +21,6 @@ var (
ErrMissingURL = errors.New("missing URL")
)
var sampleConfig = `
## The URLs of the InfluxDB cluster nodes.
##
## Multiple URLs can be specified for a single cluster, only ONE of the
## urls will be written to each interval.
## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"]
urls = ["http://127.0.0.1:8086"]
## Token for authentication.
token = ""
## Organization is the name of the organization you wish to write to; must exist.
organization = ""
## Destination bucket to write into.
bucket = ""
## The value of this tag will be used to determine the bucket. If this
## tag is not set the 'bucket' option is used as the default.
# bucket_tag = ""
## If true, the bucket tag will not be added to the metric.
# exclude_bucket_tag = false
## Timeout for HTTP messages.
# timeout = "5s"
## Additional HTTP headers
# http_headers = {"X-Special-Header" = "Special-Value"}
## HTTP Proxy override. If unset, the standard proxy environment
## variables are consulted to determine which proxy, if any, should be used.
# http_proxy = "http://corporate.proxy:3128"
## HTTP User-Agent
# user_agent = "telegraf"
## Content-Encoding for write request body, can be set to "gzip" to
## compress body or "identity" to apply no encoding.
# content_encoding = "gzip"
## Enable or disable uint support for writing uints to InfluxDB 2.0.
# influx_uint_support = false
## Optional TLS Config for use on HTTP connections.
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
`
type Client interface {
Write(context.Context, []telegraf.Metric) error
@ -142,14 +90,6 @@ func (i *InfluxDB) Close() error {
return nil
}
func (i *InfluxDB) Description() string {
return "Configuration for sending metrics to InfluxDB"
}
func (i *InfluxDB) SampleConfig() string {
return sampleConfig
}
// Write sends metrics to one of the configured servers, logging each
// unsuccessful. If all servers fail, return an error.
func (i *InfluxDB) Write(metrics []telegraf.Metric) error {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package influxdb_v2
func (i *InfluxDB) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -97,7 +97,6 @@ func TestConnect(t *testing.T) {
func TestUnused(_ *testing.T) {
thing := influxdb.InfluxDB{}
thing.Close()
thing.Description()
thing.SampleConfig()
outputs.Outputs["influxdb_v2"]()
}

View File

@ -10,6 +10,7 @@ by whitespace. The `increment` type is only used if the metric comes in as a cou
## Configuration
```toml
# Configuration for sending metrics to an Instrumental project
[[outputs.instrumental]]
## Project API Token (required)
api_token = "API Token" # required

View File

@ -43,20 +43,6 @@ const (
HandshakeFormat = HelloMessage + AuthFormat
)
var sampleConfig = `
## Project API Token (required)
api_token = "API Token" # required
## Prefix the metrics with a given name
prefix = ""
## Stats output template (Graphite formatting)
## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
template = "host.tags.measurement.field"
## Timeout in seconds to connect
timeout = "2s"
## Display Communication to Instrumental
debug = false
`
func (i *Instrumental) Connect() error {
connection, err := net.DialTimeout("tcp", i.Host+":8000", time.Duration(i.Timeout))
@ -168,14 +154,6 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error {
return nil
}
func (i *Instrumental) Description() string {
return "Configuration for sending metrics to an Instrumental project"
}
func (i *Instrumental) SampleConfig() string {
return sampleConfig
}
func (i *Instrumental) authenticate(conn net.Conn) error {
_, err := fmt.Fprintf(conn, HandshakeFormat, i.APIToken)
if err != nil {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package instrumental
func (i *Instrumental) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -5,6 +5,7 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm
## Configuration
```toml
# Configuration for the Kafka server to send metrics to
[[outputs.kafka]]
## URLs of kafka brokers
brokers = ["localhost:9092"]
@ -106,6 +107,10 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm
## until the next flush.
# max_retry = 3
## The maximum permitted size of a message. Should be set equal to or
## smaller than the broker's 'message.max.bytes'.
# max_message_bytes = 1000000
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"

View File

@ -80,158 +80,6 @@ func (*DebugLogger) Println(v ...interface{}) {
log.Println(args...)
}
var sampleConfig = `
## URLs of kafka brokers
brokers = ["localhost:9092"]
## Kafka topic for producer messages
topic = "telegraf"
## The value of this tag will be used as the topic. If not set the 'topic'
## option is used.
# topic_tag = ""
## If true, the 'topic_tag' will be removed from the metric.
# exclude_topic_tag = false
## Optional Client id
# client_id = "Telegraf"
## Set the minimal supported Kafka version. Setting this enables the use of new
## Kafka features and APIs. Of particular interest, lz4 compression
## requires at least version 0.10.0.0.
## ex: version = "1.1.0"
# version = ""
## Optional topic suffix configuration.
## If the section is omitted, no suffix is used.
## The following topic suffix methods are supported:
## measurement - suffix equals the separator + the measurement's name
## tags - suffix equals the separator + the specified tags' values
## interleaved with the separator
## Suffix equals "_" + the measurement name
# [outputs.kafka.topic_suffix]
# method = "measurement"
# separator = "_"
## Suffix equals to "__" + measurement's "foo" tag value.
## If there's no such a tag, suffix equals to an empty string
# [outputs.kafka.topic_suffix]
# method = "tags"
# keys = ["foo"]
# separator = "__"
## Suffix equals to "_" + measurement's "foo" and "bar"
## tag values, separated by "_". If there is no such tags,
## their values treated as empty strings.
# [outputs.kafka.topic_suffix]
# method = "tags"
# keys = ["foo", "bar"]
# separator = "_"
## The routing tag specifies a tag key on the metric whose value is used as
## the message key. The message key is used to determine which partition to
## send the message to. This tag is preferred over the routing_key option.
routing_tag = "host"
## The routing key is set as the message key and used to determine which
## partition to send the message to. This value is only used when no
## routing_tag is set or as a fallback when the tag specified in routing tag
## is not found.
##
## If set to "random", a random value will be generated for each message.
##
## When unset, no message key is added and each message is routed to a random
## partition.
##
## ex: routing_key = "random"
## routing_key = "telegraf"
# routing_key = ""
## Compression codec represents the various compression codecs recognized by
## Kafka in messages.
## 0 : None
## 1 : Gzip
## 2 : Snappy
## 3 : LZ4
## 4 : ZSTD
# compression_codec = 0
## Idempotent Writes
## If enabled, exactly one copy of each message is written.
# idempotent_writes = false
## RequiredAcks is used in Produce Requests to tell the broker how many
## replica acknowledgements it must see before responding
## 0 : the producer never waits for an acknowledgement from the broker.
## This option provides the lowest latency but the weakest durability
## guarantees (some data will be lost when a server fails).
## 1 : the producer gets an acknowledgement after the leader replica has
## received the data. This option provides better durability as the
## client waits until the server acknowledges the request as successful
## (only messages that were written to the now-dead leader but not yet
## replicated will be lost).
## -1: the producer gets an acknowledgement after all in-sync replicas have
## received the data. This option provides the best durability: no
## messages will be lost as long as at least one in-sync replica remains.
# required_acks = -1
## The maximum number of times to retry sending a metric before failing
## until the next flush.
# max_retry = 3
## The maximum permitted size of a message. Should be set equal to or
## smaller than the broker's 'message.max.bytes'.
# max_message_bytes = 1000000
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Optional SOCKS5 proxy to use when connecting to brokers
# socks5_enabled = true
# socks5_address = "127.0.0.1:1080"
# socks5_username = "alice"
# socks5_password = "pass123"
## Optional SASL Config
# sasl_username = "kafka"
# sasl_password = "secret"
## Optional SASL:
## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI
## (defaults to PLAIN)
# sasl_mechanism = ""
## used if sasl_mechanism is GSSAPI (experimental)
# sasl_gssapi_service_name = ""
# ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH
# sasl_gssapi_auth_type = "KRB5_USER_AUTH"
# sasl_gssapi_kerberos_config_path = "/"
# sasl_gssapi_realm = "realm"
# sasl_gssapi_key_tab_path = ""
# sasl_gssapi_disable_pafxfast = false
## used if sasl_mechanism is OAUTHBEARER (experimental)
# sasl_access_token = ""
## SASL protocol version. When connecting to Azure EventHub set to 0.
# sasl_version = 1
## Disable full fetch of Kafka metadata
# metadata_full = false
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
`
func ValidateTopicSuffixMethod(method string) error {
for _, validMethod := range ValidTopicSuffixMethods {
if method == validMethod {
@ -327,14 +175,6 @@ func (k *Kafka) Close() error {
return k.producer.Close()
}
func (k *Kafka) SampleConfig() string {
return sampleConfig
}
func (k *Kafka) Description() string {
return "Configuration for the Kafka server to send metrics to"
}
func (k *Kafka) routingKey(metric telegraf.Metric) (string, error) {
if k.RoutingTag != "" {
key, ok := metric.GetTag(k.RoutingTag)

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package kafka
func (k *Kafka) SampleConfig() string {
return `{{ .SampleConfig }}`
}
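
The routing section of the removed sample config describes a fallback chain: use the `routing_tag` value when present, otherwise `routing_key`, generating a fresh key per message when it is "random". A hedged sketch of that chain; the uuid dependency is an assumption:

```go
// Hedged sketch of message-key selection: prefer the routing_tag value,
// then routing_key, generating a random key for routing_key = "random".
package kafka

import (
	"github.com/gofrs/uuid"
	"github.com/influxdata/telegraf"
)

func messageKey(m telegraf.Metric, routingTag, routingKey string) (string, error) {
	if routingTag != "" {
		if key, ok := m.GetTag(routingTag); ok {
			return key, nil
		}
	}
	if routingKey == "random" {
		u, err := uuid.NewV4()
		if err != nil {
			return "", err
		}
		return u.String(), nil
	}
	return routingKey, nil
}
```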

View File

@ -25,7 +25,73 @@ will attempt to authenticate.
If you are using credentials from a web identity provider, you can specify the session name using `role_session_name`. If
left empty, the current timestamp will be used.
## Config
## Configuration
```toml
# Configuration for the AWS Kinesis output.
[[outputs.kinesis]]
## Amazon REGION of kinesis endpoint.
region = "ap-southeast-2"
## Amazon Credentials
## Credentials are loaded in the following order
## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
## 2) Assumed credentials via STS if role_arn is specified
## 3) explicit credentials from 'access_key' and 'secret_key'
## 4) shared profile from 'profile'
## 5) environment variables
## 6) shared credentials file
## 7) EC2 Instance Profile
#access_key = ""
#secret_key = ""
#token = ""
#role_arn = ""
#web_identity_token_file = ""
#role_session_name = ""
#profile = ""
#shared_credential_file = ""
## Endpoint to make requests against; the correct endpoint is automatically
## determined, and this option should only be set if you wish to override the
## default.
## ex: endpoint_url = "http://localhost:8000"
# endpoint_url = ""
## Kinesis StreamName must exist prior to starting telegraf.
streamname = "StreamName"
## The partition key can be calculated using one of several methods:
##
## Use a static value for all writes:
# [outputs.kinesis.partition]
# method = "static"
# key = "howdy"
#
## Use a random partition key on each write:
# [outputs.kinesis.partition]
# method = "random"
#
## Use the measurement name as the partition key:
# [outputs.kinesis.partition]
# method = "measurement"
#
## Use the value of a tag for all writes; if the tag is not set, the
## 'default' option is used. When no default is given, it defaults to "telegraf"
# [outputs.kinesis.partition]
# method = "tag"
# key = "host"
# default = "mykey"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
## debug will show upstream aws messages.
debug = false
```
For this output plugin to function correctly the following variables must be configured.

View File

@ -43,78 +43,6 @@ type kinesisClient interface {
PutRecords(context.Context, *kinesis.PutRecordsInput, ...func(*kinesis.Options)) (*kinesis.PutRecordsOutput, error)
}
var sampleConfig = `
## Amazon REGION of kinesis endpoint.
region = "ap-southeast-2"
## Amazon Credentials
## Credentials are loaded in the following order
## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
## 2) Assumed credentials via STS if role_arn is specified
## 3) explicit credentials from 'access_key' and 'secret_key'
## 4) shared profile from 'profile'
## 5) environment variables
## 6) shared credentials file
## 7) EC2 Instance Profile
#access_key = ""
#secret_key = ""
#token = ""
#role_arn = ""
#web_identity_token_file = ""
#role_session_name = ""
#profile = ""
#shared_credential_file = ""
## Endpoint to make requests against; the correct endpoint is automatically
## determined, and this option should only be set if you wish to override the
## default.
## ex: endpoint_url = "http://localhost:8000"
# endpoint_url = ""
## Kinesis StreamName must exist prior to starting telegraf.
streamname = "StreamName"
## The partition key can be calculated using one of several methods:
##
## Use a static value for all writes:
# [outputs.kinesis.partition]
# method = "static"
# key = "howdy"
#
## Use a random partition key on each write:
# [outputs.kinesis.partition]
# method = "random"
#
## Use the measurement name as the partition key:
# [outputs.kinesis.partition]
# method = "measurement"
#
## Use the value of a tag for all writes; if the tag is not set, the
## 'default' option is used. When no default is given, it defaults to "telegraf"
# [outputs.kinesis.partition]
# method = "tag"
# key = "host"
# default = "mykey"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
## debug will show upstream aws messages.
debug = false
`
func (k *KinesisOutput) SampleConfig() string {
return sampleConfig
}
func (k *KinesisOutput) Description() string {
return "Configuration for the AWS Kinesis output."
}
func (k *KinesisOutput) Connect() error {
if k.Partition == nil {
k.Log.Error("Deprecated partitionkey configuration in use, please consider using outputs.kinesis.partition")

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package kinesis
func (k *KinesisOutput) SampleConfig() string {
return `{{ .SampleConfig }}`
}
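
The four partition methods documented above map naturally onto a switch statement. A hedged sketch with the same uuid assumption as before; the actual plugin's default handling may differ:

```go
// Hedged sketch of partition key selection for the four documented
// methods; defaults handling may differ from the actual plugin.
package kinesis

import (
	"github.com/gofrs/uuid"
	"github.com/influxdata/telegraf"
)

func partitionKey(m telegraf.Metric, method, key, fallback string) string {
	switch method {
	case "static":
		return key
	case "random":
		if u, err := uuid.NewV4(); err == nil {
			return u.String()
		}
	case "measurement":
		return m.Name()
	case "tag":
		if v, ok := m.GetTag(key); ok {
			return v
		}
		if fallback != "" {
			return fallback
		}
	}
	// Documented fallback when nothing else applies.
	return "telegraf"
}
```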

View File

@ -10,3 +10,24 @@ Point Tags to the API.
If the point value being sent cannot be converted to a float64, the metric is skipped.
Currently, the plugin does not send any associated Point Tags.
## Configuration
```toml
# Configuration for Librato API to send metrics to.
[[outputs.librato]]
## Librato API Docs
## http://dev.librato.com/v1/metrics-authentication
## Librato API user
api_user = "telegraf@influxdb.com" # required.
## Librato API token
api_token = "my-secret-token" # required.
## Debug
# debug = false
## Connection timeout.
# timeout = "5s"
## Output source Template (same as graphite buckets)
## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
## This template is used for Librato's source (not the metric's name)
template = "host"
```

View File

@ -32,24 +32,6 @@ type Librato struct {
// https://www.librato.com/docs/kb/faq/best_practices/naming_convention_metrics_sources.html#naming-limitations-for-sources-and-metrics
var reUnacceptedChar = regexp.MustCompile("[^.a-zA-Z0-9_-]")
var sampleConfig = `
## Librato API Docs
## http://dev.librato.com/v1/metrics-authentication
## Librato API user
api_user = "telegraf@influxdb.com" # required.
## Librato API token
api_token = "my-secret-token" # required.
## Debug
# debug = false
## Connection timeout.
# timeout = "5s"
## Output source Template (same as graphite buckets)
## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
## This template is used for Librato's source (not the metric's name)
template = "host"
`
// LMetrics is the default struct for Librato's API format
type LMetrics struct {
Gauges []*Gauge `json:"gauges"`
@ -176,17 +158,6 @@ func (l *Librato) writeBatch(start int, sizeBatch int, metricCounter int, tempGa
return nil
}
// SampleConfig returns the default configuration for this output
func (l *Librato) SampleConfig() string {
return sampleConfig
}
// Description returns the description of this output
func (l *Librato) Description() string {
return "Configuration for Librato API to send metrics to."
}
func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) {
gauges := []*Gauge{}
if m.Time().Unix() == 0 {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package librato
func (l *Librato) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -22,22 +22,6 @@ const (
logzioType = "telegraf"
)
var sampleConfig = `
## Connection timeout, defaults to "5s" if not set.
timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Logz.io account token
token = "your logz.io token" # required
## Use your listener URL for your Logz.io account region.
# url = "https://listener.logz.io:8071"
`
type Logzio struct {
Log telegraf.Logger `toml:"-"`
Timeout config.Duration `toml:"timeout"`
@ -89,16 +73,6 @@ func (l *Logzio) Close() error {
return nil
}
// Description returns a one-sentence description of the output
func (l *Logzio) Description() string {
return logzioDescription
}
// SampleConfig returns the default configuration of the Output
func (l *Logzio) SampleConfig() string {
return sampleConfig
}
// Write takes in a group of points to be written to the output
func (l *Logzio) Write(metrics []telegraf.Metric) error {
if len(metrics) == 0 {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package logzio
func (l *Logzio) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -26,32 +26,6 @@ const (
defaultClientTimeout = 5 * time.Second
)
var sampleConfig = `
## The domain of Loki
domain = "https://loki.domain.tld"
## Endpoint to write api
# endpoint = "/loki/api/v1/push"
## Connection timeout, defaults to "5s" if not set.
# timeout = "5s"
## Basic auth credential
# username = "loki"
# password = "pass"
## Additional HTTP headers
# http_headers = {"X-Scope-OrgID" = "1"}
## If the request must be gzip encoded
# gzip_request = false
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
`
type Loki struct {
Domain string `toml:"domain"`
Endpoint string `toml:"endpoint"`
@ -70,14 +44,6 @@ type Loki struct {
tls.ClientConfig
}
func (l *Loki) SampleConfig() string {
return sampleConfig
}
func (l *Loki) Description() string {
return "Send logs to Loki"
}
func (l *Loki) createClient(ctx context.Context) (*http.Client, error) {
tlsCfg, err := l.ClientConfig.TLSConfig()
if err != nil {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package loki
func (l *Loki) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -61,49 +61,6 @@ type MongoDB struct {
tls.ClientConfig
}
func (s *MongoDB) Description() string {
return "Sends metrics to MongoDB"
}
var sampleConfig = `
# connection string examples for mongodb
dsn = "mongodb://localhost:27017"
# dsn = "mongodb://mongod1:27017,mongod2:27017,mongod3:27017/admin&replicaSet=myReplSet&w=1"
# overrides serverSelectionTimeoutMS in dsn if set
# timeout = "30s"
# default authentication, optional
# authentication = "NONE"
# for SCRAM-SHA-256 authentication
# authentication = "SCRAM"
# username = "root"
# password = "***"
# for x509 certificate authentication
# authentication = "X509"
# tls_ca = "ca.pem"
# tls_key = "client.pem"
# # tls_key_pwd = "changeme" # required for encrypted tls_key
# insecure_skip_verify = false
# database to store measurements and time series collections
# database = "telegraf"
# granularity can be seconds, minutes, or hours.
# configure this value based on your input collection frequency.
# see https://docs.mongodb.com/manual/core/timeseries-collections/#create-a-time-series-collection
# granularity = "seconds"
# optionally set a TTL to automatically expire documents from the measurement collections.
# ttl = "360h"
`
func (s *MongoDB) SampleConfig() string {
return sampleConfig
}
func (s *MongoDB) Init() error {
if s.MetricDatabase == "" {
s.MetricDatabase = "telegraf"

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package mongodb
func (s *MongoDB) SampleConfig() string {
return `{{ .SampleConfig }}`
}
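
The `granularity` and `ttl` options correspond to MongoDB's time series collection options. A sketch of creating such a collection with the official Go driver, assuming `timestamp` and `tags` as the time and meta fields (the plugin's actual field names may differ):

```go
// Hedged sketch of creating a time series collection with granularity
// and a TTL, per go.mongodb.org/mongo-driver; field names are assumptions.
package mongodb

import (
	"context"
	"time"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func createTimeSeries(ctx context.Context, db *mongo.Database, name, granularity string, ttl time.Duration) error {
	tso := options.TimeSeries().
		SetTimeField("timestamp").
		SetMetaField("tags").
		SetGranularity(granularity)
	opts := options.CreateCollection().
		SetTimeSeriesOptions(tso).
		SetExpireAfterSeconds(int64(ttl.Seconds()))
	return db.CreateCollection(ctx, name, opts)
}
```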

View File

@ -14,6 +14,7 @@ As a reference `eclipse/paho.mqtt.golang` sets the `keep_alive` to 30.
## Configuration
```toml
# Configuration for MQTT server to send metrics to
[[outputs.mqtt]]
## MQTT Brokers
## The list of brokers should only include the hostname or IP address and the

View File

@ -20,69 +20,6 @@ const (
defaultKeepAlive = 0
)
var sampleConfig = `
## MQTT Brokers
## The list of brokers should only include the hostname or IP address and the
## port to the broker. This should follow the format '{host}:{port}'. For
## example, "localhost:1883" or "127.0.0.1:8883".
servers = ["localhost:1883"]
## MQTT Topic for Producer Messages
## MQTT outputs send metrics to this topic format:
## <topic_prefix>/<hostname>/<pluginname>/ (e.g. prefix/web01.example.com/mem)
topic_prefix = "telegraf"
## QoS policy for messages
## The mqtt QoS policy for sending messages.
## See https://www.ibm.com/support/knowledgecenter/en/SSFKSJ_9.0.0/com.ibm.mq.dev.doc/q029090_.htm
## 0 = at most once
## 1 = at least once
## 2 = exactly once
# qos = 2
## Keep Alive
## Defines the maximum length of time that the broker and client may not
## communicate. Defaults to 0 which turns the feature off.
##
## For mosquitto version v2.0.12 and later there is a bug
## (see https://github.com/eclipse/mosquitto/issues/2117), which requires
## this to be non-zero. As a reference eclipse/paho.mqtt.golang defaults to 30.
# keep_alive = 0
## Username and password used to connect to the MQTT server.
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
## client ID
## The unique client ID used to connect to the MQTT server. If this parameter is not set
## then a random ID is generated.
# client_id = ""
## Timeout for write operations. default: 5s
# timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## When true, metrics will be sent in one MQTT message per flush. Otherwise,
## metrics are written one metric per MQTT message.
# batch = false
## When true, metrics will have the RETAIN flag set, making the broker cache
## entries until someone actually reads them
# retain = false
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
`
type MQTT struct {
Servers []string `toml:"servers"`
Username string
@ -138,14 +75,6 @@ func (m *MQTT) Close() error {
return nil
}
func (m *MQTT) SampleConfig() string {
return sampleConfig
}
func (m *MQTT) Description() string {
return "Configuration for MQTT server to send metrics to"
}
func (m *MQTT) Write(metrics []telegraf.Metric) error {
m.Lock()
defer m.Unlock()

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package mqtt
func (m *MQTT) SampleConfig() string {
return `{{ .SampleConfig }}`
}
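
The removed sample config documents the topic layout as `<topic_prefix>/<hostname>/<pluginname>`. A minimal sketch of assembling that topic from a metric, assuming the hostname travels in the metric's `host` tag:

```go
// Hedged sketch of the documented topic layout:
// <topic_prefix>/<hostname>/<pluginname>.
package mqtt

import (
	"strings"

	"github.com/influxdata/telegraf"
)

func topicFor(prefix string, m telegraf.Metric) string {
	hostname, _ := m.GetTag("host")
	return strings.Join([]string{prefix, hostname, m.Name()}, "/")
}
```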

View File

@ -2,7 +2,10 @@
This plugin writes to a (list of) specified NATS instance(s).
## Configuration
```toml
# Send telegraf measurements to NATS
[[outputs.nats]]
## URLs of NATS servers
servers = ["nats://localhost:4222"]

View File

@ -29,40 +29,6 @@ type NATS struct {
serializer serializers.Serializer
}
var sampleConfig = `
## URLs of NATS servers
servers = ["nats://localhost:4222"]
## Optional client name
# name = ""
## Optional credentials
# username = ""
# password = ""
## Optional NATS 2.0 and NATS NGS compatible user credentials
# credentials = "/etc/telegraf/nats.creds"
## NATS subject for producer messages
subject = "telegraf"
## Use Transport Layer Security
# secure = false
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
`
func (n *NATS) SetSerializer(serializer serializers.Serializer) {
n.serializer = serializer
}
@ -107,14 +73,6 @@ func (n *NATS) Close() error {
return nil
}
func (n *NATS) SampleConfig() string {
return sampleConfig
}
func (n *NATS) Description() string {
return "Send telegraf measurements to NATS"
}
func (n *NATS) Write(metrics []telegraf.Metric) error {
if len(metrics) == 0 {
return nil

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package nats
func (n *NATS) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -9,12 +9,13 @@ Telegraf minimum version: Telegraf 1.15.0
## Configuration
```toml
# Send metrics to New Relic metrics endpoint
[[outputs.newrelic]]
## The 'insights_key' parameter requires a NR license key.
## New Relic recommends you create one
## with a convenient name such as TELEGRAF_INSERT_KEY.
## reference: https://docs.newrelic.com/docs/apis/intro-apis/new-relic-api-keys/#ingest-license-key
insights_key = "New Relic License Key Here"
# insights_key = "New Relic License Key Here"
## Prefix to add to the metric name for easy identification.
## This is very useful if your metric names are ambiguous.

View File

@ -31,37 +31,6 @@ type NewRelic struct {
client http.Client
}
// Description returns a one-sentence description of the output
func (nr *NewRelic) Description() string {
return "Send metrics to New Relic metrics endpoint"
}
// SampleConfig returns the default configuration of the output
func (nr *NewRelic) SampleConfig() string {
return `
## The 'insights_key' parameter requires a NR license key.
## New Relic recommends you create one
## with a convenient name such as TELEGRAF_INSERT_KEY.
## reference: https://docs.newrelic.com/docs/apis/intro-apis/new-relic-api-keys/#ingest-license-key
# insights_key = "New Relic License Key Here"
## Prefix to add to the metric name for easy identification.
## This is very useful if your metric names are ambiguous.
# metric_prefix = ""
## Timeout for writes to the New Relic API.
# timeout = "15s"
## HTTP Proxy override. If unset use values from the standard
## proxy environment variables to determine proxy, if any.
# http_proxy = "http://corporate.proxy:3128"
## Metric URL override to enable geographic location endpoints.
# If not set, the standard endpoint is used:
# metric_url = "https://metric-api.newrelic.com/metric/v1"
`
}
// Connect to the Output
func (nr *NewRelic) Connect() error {
if nr.InsightsKey == "" {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package newrelic
func (nr *NewRelic) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -2,3 +2,20 @@
This plugin writes to a specified NSQD instance, usually local to the producer. It requires
a `server` name and a `topic` name.
## Configuration
```toml
# Send telegraf measurements to NSQD
[[outputs.nsq]]
## Location of nsqd instance listening on TCP
server = "localhost:4150"
## NSQ topic for producer messages
topic = "telegraf"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
```
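
For reference, publishing a serialized payload to NSQD takes only a few lines with the community client; a hedged sketch assuming `github.com/nsqio/go-nsq`, which may differ from the plugin's exact usage:

```go
// Hedged sketch of publishing one serialized payload to an NSQD topic
// using github.com/nsqio/go-nsq.
package main

import (
	"log"

	nsq "github.com/nsqio/go-nsq"
)

func main() {
	producer, err := nsq.NewProducer("localhost:4150", nsq.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Stop()

	// In the plugin, the body would be the serializer's output for a metric.
	if err := producer.Publish("telegraf", []byte("cpu usage=0.5")); err != nil {
		log.Fatal(err)
	}
}
```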

Some files were not shown because too many files have changed in this diff.