diff --git a/plugins/outputs/amon/README.md b/plugins/outputs/amon/README.md index 57ecf2e18..9ae26f509 100644 --- a/plugins/outputs/amon/README.md +++ b/plugins/outputs/amon/README.md @@ -7,3 +7,18 @@ for the account. If the point value being sent cannot be converted to a float64, the metric is skipped. Metrics are grouped by converting any `_` characters to `.` in the Point Name. + +## Configuration + +```toml +# Configuration for Amon Server to send metrics to. +[[outputs.amon]] + ## Amon Server Key + server_key = "my-server-key" # required. + + ## Amon Instance URL + amon_instance = "https://youramoninstance" # required + + ## Connection timeout. + # timeout = "5s" +``` diff --git a/plugins/outputs/amon/amon.go b/plugins/outputs/amon/amon.go index 952d3b023..79dcca290 100644 --- a/plugins/outputs/amon/amon.go +++ b/plugins/outputs/amon/amon.go @@ -22,17 +22,6 @@ type Amon struct { client *http.Client } -var sampleConfig = ` - ## Amon Server Key - server_key = "my-server-key" # required. - - ## Amon Instance URL - amon_instance = "https://youramoninstance" # required - - ## Connection timeout. - # timeout = "5s" -` - type TimeSeries struct { Series []*Metric `json:"series"` } @@ -106,14 +95,6 @@ func (a *Amon) Write(metrics []telegraf.Metric) error { return nil } -func (a *Amon) SampleConfig() string { - return sampleConfig -} - -func (a *Amon) Description() string { - return "Configuration for Amon Server to send metrics to." 
-} - func (a *Amon) authenticatedURL() string { return fmt.Sprintf("%s/api/system/%s", a.AmonInstance, a.ServerKey) } diff --git a/plugins/outputs/amon/amon_sample_config.go b/plugins/outputs/amon/amon_sample_config.go new file mode 100644 index 000000000..f363d2efa --- /dev/null +++ b/plugins/outputs/amon/amon_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package amon + +func (a *Amon) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index f1c9e05cd..2643d2685 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -71,98 +71,6 @@ type Client interface { Close() error } -var sampleConfig = ` - ## Brokers to publish to. If multiple brokers are specified a random broker - ## will be selected anytime a connection is established. This can be - ## helpful for load balancing when not using a dedicated load balancer. - brokers = ["amqp://localhost:5672/influxdb"] - - ## Maximum messages to send over a connection. Once this is reached, the - ## connection is closed and a new connection is made. This can be helpful for - ## load balancing when not using a dedicated load balancer. - # max_messages = 0 - - ## Exchange to declare and publish to. - exchange = "telegraf" - - ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". - # exchange_type = "topic" - - ## If true, exchange will be passively declared. - # exchange_passive = false - - ## Exchange durability can be either "transient" or "durable". - # exchange_durability = "durable" - - ## Additional exchange arguments. - # exchange_arguments = { } - # exchange_arguments = {"hash_property" = "timestamp"} - - ## Authentication credentials for the PLAIN auth_method. 
- # username = "" - # password = "" - - ## Auth method. PLAIN and EXTERNAL are supported - ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as - ## described here: https://www.rabbitmq.com/plugins.html - # auth_method = "PLAIN" - - ## Metric tag to use as a routing key. - ## ie, if this tag exists, its value will be used as the routing key - # routing_tag = "host" - - ## Static routing key. Used when no routing_tag is set or as a fallback - ## when the tag specified in routing tag is not found. - # routing_key = "" - # routing_key = "telegraf" - - ## Delivery Mode controls if a published message is persistent. - ## One of "transient" or "persistent". - # delivery_mode = "transient" - - ## Static headers added to each published message. - # headers = { } - # headers = {"database" = "telegraf", "retention_policy" = "default"} - - ## Connection timeout. If not provided, will default to 5s. 0s means no - ## timeout (not recommended). - # timeout = "5s" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## If true use batch serialization format instead of line based delimiting. - ## Only applies to data formats which are not line based such as JSON. - ## Recommended to set to true. - # use_batch_format = false - - ## Content encoding for message payloads, can be set to "gzip" to or - ## "identity" to apply no encoding. - ## - ## Please note that when use_batch_format = false each amqp message contains only - ## a single metric, it is recommended to use compression with batch format - ## for best results. - # content_encoding = "identity" - - ## Data format to output. 
- ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - # data_format = "influx" -` - -func (q *AMQP) SampleConfig() string { - return sampleConfig -} - -func (q *AMQP) Description() string { - return "Publishes metrics to an AMQP broker" -} - func (q *AMQP) SetSerializer(serializer serializers.Serializer) { q.serializer = serializer } diff --git a/plugins/outputs/amqp/amqp_sample_config.go b/plugins/outputs/amqp/amqp_sample_config.go new file mode 100644 index 000000000..1d0227c94 --- /dev/null +++ b/plugins/outputs/amqp/amqp_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package amqp + +func (q *AMQP) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/application_insights/README.md b/plugins/outputs/application_insights/README.md index 4beeb1ec8..d2f492ded 100644 --- a/plugins/outputs/application_insights/README.md +++ b/plugins/outputs/application_insights/README.md @@ -5,6 +5,7 @@ This plugin writes telegraf metrics to [Azure Application Insights](https://azur ## Configuration ```toml +# Send metrics to Azure Application Insights [[outputs.application_insights]] ## Instrumentation key of the Application Insights resource. 
instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx" diff --git a/plugins/outputs/application_insights/application_insights.go b/plugins/outputs/application_insights/application_insights.go index 54635ee7d..507bf7f8e 100644 --- a/plugins/outputs/application_insights/application_insights.go +++ b/plugins/outputs/application_insights/application_insights.go @@ -35,39 +35,10 @@ type ApplicationInsights struct { } var ( - sampleConfig = ` - ## Instrumentation key of the Application Insights resource. - instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx" - - ## Regions that require endpoint modification https://docs.microsoft.com/en-us/azure/azure-monitor/app/custom-endpoints - # endpoint_url = "https://dc.services.visualstudio.com/v2/track" - - ## Timeout for closing (default: 5s). - # timeout = "5s" - - ## Enable additional diagnostic logging. - # enable_diagnostic_logging = false - - ## Context Tag Sources add Application Insights context tags to a tag value. - ## - ## For list of allowed context tag keys see: - ## https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go - # [outputs.application_insights.context_tag_sources] - # "ai.cloud.role" = "kubernetes_container_name" - # "ai.cloud.roleInstance" = "kubernetes_pod_name" -` is32Bit bool is32BitChecked bool ) -func (a *ApplicationInsights) SampleConfig() string { - return sampleConfig -} - -func (a *ApplicationInsights) Description() string { - return "Send metrics to Azure Application Insights" -} - func (a *ApplicationInsights) Connect() error { if a.InstrumentationKey == "" { return fmt.Errorf("instrumentation key is required") diff --git a/plugins/outputs/application_insights/application_insights_sample_config.go b/plugins/outputs/application_insights/application_insights_sample_config.go new file mode 100644 index 000000000..d9a6b427b --- /dev/null +++ b/plugins/outputs/application_insights/application_insights_sample_config.go @@ -0,0 +1,8 @@ 
+//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package application_insights + +func (a *ApplicationInsights) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/azure_data_explorer/README.md b/plugins/outputs/azure_data_explorer/README.md index 96193f7fc..865186801 100644 --- a/plugins/outputs/azure_data_explorer/README.md +++ b/plugins/outputs/azure_data_explorer/README.md @@ -11,15 +11,16 @@ Azure Data Explorer is a distributed, columnar store, purpose built for any type ## Configuration ```toml +# Sends metrics to Azure Data Explorer [[outputs.azure_data_explorer]] ## The URI property of the Azure Data Explorer resource on Azure - ## ex: https://myadxresource.australiasoutheast.kusto.windows.net - # endpoint_url = "" + ## ex: endpoint_url = https://myadxresource.australiasoutheast.kusto.windows.net + endpoint_url = "" ## The Azure Data Explorer database that the metrics will be ingested into. ## The plugin will NOT generate this database automatically, it's expected that this database already exists before ingestion. 
## ex: "exampledatabase" - # database = "" + database = "" ## Timeout for Azure Data Explorer operations # timeout = "20s" diff --git a/plugins/outputs/azure_data_explorer/azure_data_explorer.go b/plugins/outputs/azure_data_explorer/azure_data_explorer.go index 3befd1211..58c50c2a1 100644 --- a/plugins/outputs/azure_data_explorer/azure_data_explorer.go +++ b/plugins/outputs/azure_data_explorer/azure_data_explorer.go @@ -55,38 +55,6 @@ type ingestorFactory func(localClient, string, string) (localIngestor, error) const createTableCommand = `.create-merge table ['%s'] (['fields']:dynamic, ['name']:string, ['tags']:dynamic, ['timestamp']:datetime);` const createTableMappingCommand = `.create-or-alter table ['%s'] ingestion json mapping '%s_mapping' '[{"column":"fields", "Properties":{"Path":"$[\'fields\']"}},{"column":"name", "Properties":{"Path":"$[\'name\']"}},{"column":"tags", "Properties":{"Path":"$[\'tags\']"}},{"column":"timestamp", "Properties":{"Path":"$[\'timestamp\']"}}]'` -func (adx *AzureDataExplorer) Description() string { - return "Sends metrics to Azure Data Explorer" -} - -func (adx *AzureDataExplorer) SampleConfig() string { - return ` - ## Azure Data Explorer cluster endpoint - ## ex: endpoint_url = "https://clustername.australiasoutheast.kusto.windows.net" - endpoint_url = "" - - ## The Azure Data Explorer database that the metrics will be ingested into. - ## The plugin will NOT generate this database automatically, it's expected that this database already exists before ingestion. - ## ex: "exampledatabase" - database = "" - - ## Timeout for Azure Data Explorer operations - # timeout = "20s" - - ## Type of metrics grouping used when pushing to Azure Data Explorer. - ## Default is "TablePerMetric" for one table per different metric. - ## For more information, please check the plugin README. - # metrics_grouping_type = "TablePerMetric" - - ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable"). 
- # table_name = "" - - ## Creates tables and relevant mapping if set to true(default). - ## Skips table and mapping creation if set to false, this is useful for running Telegraf with the lowest possible permissions i.e. table ingestor role. - # create_tables = true -` -} - func (adx *AzureDataExplorer) Connect() error { authorizer, err := auth.NewAuthorizerFromEnvironmentWithResource(adx.Endpoint) if err != nil { diff --git a/plugins/outputs/azure_data_explorer/azure_data_explorer_sample_config.go b/plugins/outputs/azure_data_explorer/azure_data_explorer_sample_config.go new file mode 100644 index 000000000..0b92bcfef --- /dev/null +++ b/plugins/outputs/azure_data_explorer/azure_data_explorer_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package azure_data_explorer + +func (adx *AzureDataExplorer) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/azure_monitor/README.md b/plugins/outputs/azure_monitor/README.md index 8f7bbb9cb..f3d04c3cf 100644 --- a/plugins/outputs/azure_monitor/README.md +++ b/plugins/outputs/azure_monitor/README.md @@ -17,6 +17,7 @@ written as a dimension on each Azure Monitor metric. ## Configuration ```toml +# Send aggregate metrics to Azure Monitor [[outputs.azure_monitor]] ## Timeout for HTTP writes. # timeout = "20s" @@ -43,7 +44,7 @@ written as a dimension on each Azure Monitor metric. ## Optionally, if in Azure US Government, China, or other sovereign ## cloud environment, set the appropriate REST endpoint for receiving - ## metrics. (Note: region may be unused in this context) + ## metrics. 
(Note: region may be unused in this context) # endpoint_url = "https://monitoring.core.usgovcloudapi.net" ``` diff --git a/plugins/outputs/azure_monitor/azure_monitor.go b/plugins/outputs/azure_monitor/azure_monitor.go index 89291b2e6..6b0637cac 100644 --- a/plugins/outputs/azure_monitor/azure_monitor.go +++ b/plugins/outputs/azure_monitor/azure_monitor.go @@ -101,46 +101,6 @@ const ( maxRequestBodySize = 4000000 ) -var sampleConfig = ` - ## Timeout for HTTP writes. - # timeout = "20s" - - ## Set the namespace prefix, defaults to "Telegraf/". - # namespace_prefix = "Telegraf/" - - ## Azure Monitor doesn't have a string value type, so convert string - ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows - ## a maximum of 10 dimensions so Telegraf will only send the first 10 - ## alphanumeric dimensions. - # strings_as_dimensions = false - - ## Both region and resource_id must be set or be available via the - ## Instance Metadata service on Azure Virtual Machines. - # - ## Azure Region to publish metrics against. - ## ex: region = "southcentralus" - # region = "" - # - ## The Azure Resource ID against which metric will be logged, e.g. - ## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/" - # resource_id = "" - - ## Optionally, if in Azure US Government, China or other sovereign - ## cloud environment, set appropriate REST endpoint for receiving - ## metrics. 
(Note: region may be unused in this context) - # endpoint_url = "https://monitoring.core.usgovcloudapi.net" -` - -// Description provides a description of the plugin -func (a *AzureMonitor) Description() string { - return "Send aggregate metrics to Azure Monitor" -} - -// SampleConfig provides a sample configuration for the plugin -func (a *AzureMonitor) SampleConfig() string { - return sampleConfig -} - // Connect initializes the plugin and validates connectivity func (a *AzureMonitor) Connect() error { a.cache = make(map[time.Time]map[uint64]*aggregate, 36) diff --git a/plugins/outputs/azure_monitor/azure_monitor_sample_config.go b/plugins/outputs/azure_monitor/azure_monitor_sample_config.go new file mode 100644 index 000000000..aa978eaec --- /dev/null +++ b/plugins/outputs/azure_monitor/azure_monitor_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package azure_monitor + +func (a *AzureMonitor) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/bigquery/README.md b/plugins/outputs/bigquery/README.md index 8ca265cc0..fa2778fcb 100644 --- a/plugins/outputs/bigquery/README.md +++ b/plugins/outputs/bigquery/README.md @@ -8,11 +8,15 @@ Be aware that this plugin accesses APIs that are [chargeable](https://cloud.goog ## Configuration ```toml +# Configuration for Google Cloud BigQuery to send entries [[outputs.bigquery]] - ## GCP Project - project = "erudite-bloom-151019" + ## Credentials File + credentials_file = "/path/to/service/account/key.json" - ## The BigQuery dataset + ## Google Cloud Platform Project + project = "my-gcp-project" + + ## The namespace for the metric descriptor dataset = "telegraf" ## Timeout for BigQuery operations. 
diff --git a/plugins/outputs/bigquery/bigquery.go b/plugins/outputs/bigquery/bigquery.go index 41af19d38..9fbc2ce5e 100644 --- a/plugins/outputs/bigquery/bigquery.go +++ b/plugins/outputs/bigquery/bigquery.go @@ -21,23 +21,6 @@ const timeStampFieldName = "timestamp" var defaultTimeout = config.Duration(5 * time.Second) -const sampleConfig = ` - ## Credentials File - credentials_file = "/path/to/service/account/key.json" - - ## Google Cloud Platform Project - project = "my-gcp-project" - - ## The namespace for the metric descriptor - dataset = "telegraf" - - ## Timeout for BigQuery operations. - # timeout = "5s" - - ## Character to replace hyphens on Metric name - # replace_hyphen_to = "_" -` - type BigQuery struct { CredentialsFile string `toml:"credentials_file"` Project string `toml:"project"` @@ -53,16 +36,6 @@ type BigQuery struct { warnedOnHyphens map[string]bool } -// SampleConfig returns the formatted sample configuration for the plugin. -func (s *BigQuery) SampleConfig() string { - return sampleConfig -} - -// Description returns the human-readable function definition of the plugin. 
-func (s *BigQuery) Description() string { - return "Configuration for Google Cloud BigQuery to send entries" -} - func (s *BigQuery) Connect() error { if s.Project == "" { return fmt.Errorf("Project is a required field for BigQuery output") diff --git a/plugins/outputs/bigquery/bigquery_sample_config.go b/plugins/outputs/bigquery/bigquery_sample_config.go new file mode 100644 index 000000000..acd42201e --- /dev/null +++ b/plugins/outputs/bigquery/bigquery_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package bigquery + +func (s *BigQuery) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/cloud_pubsub/README.md b/plugins/outputs/cloud_pubsub/README.md index 6274e1dac..3869534ae 100644 --- a/plugins/outputs/cloud_pubsub/README.md +++ b/plugins/outputs/cloud_pubsub/README.md @@ -9,6 +9,7 @@ This section contains the default TOML to configure the plugin. You can generate it using `telegraf --usage cloud_pubsub`. ```toml +# Publish Telegraf metrics to a Google Cloud PubSub topic [[outputs.cloud_pubsub]] ## Required. Name of Google Cloud Platform (GCP) Project that owns ## the given PubSub topic. diff --git a/plugins/outputs/cloud_pubsub/pubsub.go b/plugins/outputs/cloud_pubsub/cloud_pubsub.go similarity index 71% rename from plugins/outputs/cloud_pubsub/pubsub.go rename to plugins/outputs/cloud_pubsub/cloud_pubsub.go index 38f037dd1..e147eb1b0 100644 --- a/plugins/outputs/cloud_pubsub/pubsub.go +++ b/plugins/outputs/cloud_pubsub/cloud_pubsub.go @@ -17,56 +17,6 @@ import ( "google.golang.org/api/option" ) -const sampleConfig = ` - ## Required. Name of Google Cloud Platform (GCP) Project that owns - ## the given PubSub topic. - project = "my-project" - - ## Required. Name of PubSub topic to publish metrics to. 
- topic = "my-topic" - - ## Required. Data format to consume. - ## Each data format has its own unique set of configuration options. - ## Read more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" - - ## Optional. Filepath for GCP credentials JSON file to authorize calls to - ## PubSub APIs. If not set explicitly, Telegraf will attempt to use - ## Application Default Credentials, which is preferred. - # credentials_file = "path/to/my/creds.json" - - ## Optional. If true, will send all metrics per write in one PubSub message. - # send_batched = true - - ## The following publish_* parameters specifically configures batching - ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read - ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings - - ## Optional. Send a request to PubSub (i.e. actually publish a batch) - ## when it has this many PubSub messages. If send_batched is true, - ## this is ignored and treated as if it were 1. - # publish_count_threshold = 1000 - - ## Optional. Send a request to PubSub (i.e. actually publish a batch) - ## when it has this many PubSub messages. If send_batched is true, - ## this is ignored and treated as if it were 1 - # publish_byte_threshold = 1000000 - - ## Optional. Specifically configures requests made to the PubSub API. - # publish_num_go_routines = 2 - - ## Optional. Specifies a timeout for requests to the PubSub API. - # publish_timeout = "30s" - - ## Optional. If true, published PubSub message data will be base64-encoded. - # base64_data = false - - ## Optional. PubSub attributes to add to metrics. 
- # [outputs.cloud_pubsub.attributes] - # my_attr = "tag_value" -` - type PubSub struct { CredentialsFile string `toml:"credentials_file"` Project string `toml:"project"` @@ -91,14 +41,6 @@ type PubSub struct { publishResults []publishResult } -func (ps *PubSub) Description() string { - return "Publish Telegraf metrics to a Google Cloud PubSub topic" -} - -func (ps *PubSub) SampleConfig() string { - return sampleConfig -} - func (ps *PubSub) SetSerializer(serializer serializers.Serializer) { ps.serializer = serializer } diff --git a/plugins/outputs/cloud_pubsub/cloud_pubsub_sample_config.go b/plugins/outputs/cloud_pubsub/cloud_pubsub_sample_config.go new file mode 100644 index 000000000..c07b8e983 --- /dev/null +++ b/plugins/outputs/cloud_pubsub/cloud_pubsub_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package cloud_pubsub + +func (ps *PubSub) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/cloud_pubsub/pubsub_test.go b/plugins/outputs/cloud_pubsub/cloud_pubsub_test.go similarity index 100% rename from plugins/outputs/cloud_pubsub/pubsub_test.go rename to plugins/outputs/cloud_pubsub/cloud_pubsub_test.go diff --git a/plugins/outputs/cloudwatch/README.md b/plugins/outputs/cloudwatch/README.md index ff62726de..8c0a90ad7 100644 --- a/plugins/outputs/cloudwatch/README.md +++ b/plugins/outputs/cloudwatch/README.md @@ -20,7 +20,52 @@ left empty, the current timestamp will be used. The IAM user needs only the `cloudwatch:PutMetricData` permission. -## Config +## Configuration + +```toml +# Configuration for AWS CloudWatch output. 
+[[outputs.cloudwatch]] + ## Amazon REGION + region = "us-east-1" + + ## Amazon Credentials + ## Credentials are loaded in the following order + ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified + ## 2) Assumed credentials via STS if role_arn is specified + ## 3) explicit credentials from 'access_key' and 'secret_key' + ## 4) shared profile from 'profile' + ## 5) environment variables + ## 6) shared credentials file + ## 7) EC2 Instance Profile + #access_key = "" + #secret_key = "" + #token = "" + #role_arn = "" + #web_identity_token_file = "" + #role_session_name = "" + #profile = "" + #shared_credential_file = "" + + ## Endpoint to make request against, the correct endpoint is automatically + ## determined and this option should only be set if you wish to override the + ## default. + ## ex: endpoint_url = "http://localhost:8000" + # endpoint_url = "" + + ## Namespace for the CloudWatch MetricDatums + namespace = "InfluxData/Telegraf" + + ## If you have a large amount of metrics, you should consider to send statistic + ## values instead of raw metrics which could not only improve performance but + ## also save AWS API cost. If enable this flag, this plugin would parse the required + ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch. + ## You could use basicstats aggregator to calculate those fields. If not all statistic + ## fields are available, all fields would still be sent as raw metrics. + # write_statistics = false + + ## Enable high resolution metrics of 1 second (if not enabled, standard resolution are of 60 seconds precision) + # high_resolution_metrics = false +``` For this output plugin to function correctly the following variables must be configured. 
diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index a48a3ee54..288864a59 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -148,57 +148,6 @@ func (f *valueField) buildDatum() []types.MetricDatum { } } -var sampleConfig = ` - ## Amazon REGION - region = "us-east-1" - - ## Amazon Credentials - ## Credentials are loaded in the following order - ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified - ## 2) Assumed credentials via STS if role_arn is specified - ## 3) explicit credentials from 'access_key' and 'secret_key' - ## 4) shared profile from 'profile' - ## 5) environment variables - ## 6) shared credentials file - ## 7) EC2 Instance Profile - #access_key = "" - #secret_key = "" - #token = "" - #role_arn = "" - #web_identity_token_file = "" - #role_session_name = "" - #profile = "" - #shared_credential_file = "" - - ## Endpoint to make request against, the correct endpoint is automatically - ## determined and this option should only be set if you wish to override the - ## default. - ## ex: endpoint_url = "http://localhost:8000" - # endpoint_url = "" - - ## Namespace for the CloudWatch MetricDatums - namespace = "InfluxData/Telegraf" - - ## If you have a large amount of metrics, you should consider to send statistic - ## values instead of raw metrics which could not only improve performance but - ## also save AWS API cost. If enable this flag, this plugin would parse the required - ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch. - ## You could use basicstats aggregator to calculate those fields. If not all statistic - ## fields are available, all fields would still be sent as raw metrics. 
- # write_statistics = false - - ## Enable high resolution metrics of 1 second (if not enabled, standard resolution are of 60 seconds precision) - # high_resolution_metrics = false -` - -func (c *CloudWatch) SampleConfig() string { - return sampleConfig -} - -func (c *CloudWatch) Description() string { - return "Configuration for AWS CloudWatch output." -} - func (c *CloudWatch) Connect() error { cfg, err := c.CredentialConfig.Credentials() if err != nil { diff --git a/plugins/outputs/cloudwatch/cloudwatch_sample_config.go b/plugins/outputs/cloudwatch/cloudwatch_sample_config.go new file mode 100644 index 000000000..2e9c84c46 --- /dev/null +++ b/plugins/outputs/cloudwatch/cloudwatch_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package cloudwatch + +func (c *CloudWatch) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/cloudwatch_logs/README.md b/plugins/outputs/cloudwatch_logs/README.md index 9898f9e84..08028d24f 100644 --- a/plugins/outputs/cloudwatch_logs/README.md +++ b/plugins/outputs/cloudwatch_logs/README.md @@ -22,9 +22,10 @@ The IAM user needs the following permissions (see this [reference](https://docs. - `logs:CreateLogStream` - required to create a new log stream in a log group.) - `logs:PutLogEvents` - required to upload a batch of log events into log stream. -## Config +## Configuration ```toml +# Configuration for AWS CloudWatchLogs output. [[outputs.cloudwatch_logs]] ## The region is the Amazon region that you wish to connect to. 
## Examples include but are not limited to: diff --git a/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go b/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go index 952fea4b2..9bac6138c 100644 --- a/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go +++ b/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go @@ -73,75 +73,6 @@ const ( // Otherwise, the operation fails. ) -var sampleConfig = ` -## The region is the Amazon region that you wish to connect to. -## Examples include but are not limited to: -## - us-west-1 -## - us-west-2 -## - us-east-1 -## - ap-southeast-1 -## - ap-southeast-2 -## ... -region = "us-east-1" - -## Amazon Credentials -## Credentials are loaded in the following order -## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified -## 2) Assumed credentials via STS if role_arn is specified -## 3) explicit credentials from 'access_key' and 'secret_key' -## 4) shared profile from 'profile' -## 5) environment variables -## 6) shared credentials file -## 7) EC2 Instance Profile -#access_key = "" -#secret_key = "" -#token = "" -#role_arn = "" -#web_identity_token_file = "" -#role_session_name = "" -#profile = "" -#shared_credential_file = "" - -## Endpoint to make request against, the correct endpoint is automatically -## determined and this option should only be set if you wish to override the -## default. -## ex: endpoint_url = "http://localhost:8000" -# endpoint_url = "" - -## Cloud watch log group. Must be created in AWS cloudwatch logs upfront! -## For example, you can specify the name of the k8s cluster here to group logs from all cluster in oine place -log_group = "my-group-name" - -## Log stream in log group -## Either log group name or reference to metric attribute, from which it can be parsed: -## tag: or field:. If log stream is not exist, it will be created. -## Since AWS is not automatically delete logs streams with expired logs entries (i.e. 
empty log stream) -## you need to put in place appropriate house-keeping (https://forums.aws.amazon.com/thread.jspa?threadID=178855) -log_stream = "tag:location" - -## Source of log data - metric name -## specify the name of the metric, from which the log data should be retrieved. -## I.e., if you are using docker_log plugin to stream logs from container, then -## specify log_data_metric_name = "docker_log" -log_data_metric_name = "docker_log" - -## Specify from which metric attribute the log data should be retrieved: -## tag: or field:. -## I.e., if you are using docker_log plugin to stream logs from container, then -## specify log_data_source = "field:message" -log_data_source = "field:message" -` - -// SampleConfig returns sample config description for plugin -func (c *CloudWatchLogs) SampleConfig() string { - return sampleConfig -} - -// Description returns one-liner description for plugin -func (c *CloudWatchLogs) Description() string { - return "Configuration for AWS CloudWatchLogs output." 
-} - // Init initialize plugin with checking configuration parameters func (c *CloudWatchLogs) Init() error { if c.LogGroup == "" { @@ -276,7 +207,10 @@ func (c *CloudWatchLogs) Write(metrics []telegraf.Metric) error { lsContainer := &logStreamContainer{ currentBatchSizeBytes: 0, currentBatchIndex: 0, - messageBatches: []messageBatch{{}}} + messageBatches: []messageBatch{ + {}, + }, + } switch c.lsKey { case "tag": diff --git a/plugins/outputs/cloudwatch_logs/cloudwatch_logs_sample_config.go b/plugins/outputs/cloudwatch_logs/cloudwatch_logs_sample_config.go new file mode 100644 index 000000000..fb60595d1 --- /dev/null +++ b/plugins/outputs/cloudwatch_logs/cloudwatch_logs_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package cloudwatch_logs + +func (c *CloudWatchLogs) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/cratedb/cratedb.go b/plugins/outputs/cratedb/cratedb.go index 40c8c2728..60b41a69b 100644 --- a/plugins/outputs/cratedb/cratedb.go +++ b/plugins/outputs/cratedb/cratedb.go @@ -29,20 +29,6 @@ type CrateDB struct { DB *sql.DB } -var sampleConfig = ` - # A github.com/jackc/pgx/v4 connection string. - # See https://pkg.go.dev/github.com/jackc/pgx/v4#ParseConfig - url = "postgres://user:password@localhost/schema?sslmode=disable" - # Timeout for all CrateDB queries. - timeout = "5s" - # Name of the table to store metrics in. - table = "metrics" - # If true, and the metrics table does not exist, create it automatically. - table_create = true - # The character(s) to replace any '.' 
in an object key with - key_separator = "_" -` - func (c *CrateDB) Connect() error { db, err := sql.Open("pgx", c.URL) if err != nil { @@ -231,14 +217,6 @@ func hashID(m telegraf.Metric) int64 { return int64(binary.LittleEndian.Uint64(sum)) } -func (c *CrateDB) SampleConfig() string { - return sampleConfig -} - -func (c *CrateDB) Description() string { - return "Configuration for CrateDB to send metrics to." -} - func (c *CrateDB) Close() error { return c.DB.Close() } diff --git a/plugins/outputs/cratedb/cratedb_sample_config.go b/plugins/outputs/cratedb/cratedb_sample_config.go new file mode 100644 index 000000000..7dea8cf49 --- /dev/null +++ b/plugins/outputs/cratedb/cratedb_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package cratedb + +func (c *CrateDB) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/datadog/README.md b/plugins/outputs/datadog/README.md index 2414ccfac..ec20f74cb 100644 --- a/plugins/outputs/datadog/README.md +++ b/plugins/outputs/datadog/README.md @@ -6,6 +6,7 @@ This plugin writes to the [Datadog Metrics API][metrics] and requires an ## Configuration ```toml +# Configuration for DataDog API to send metrics to. [[outputs.datadog]] ## Datadog API key apikey = "my-secret-key" diff --git a/plugins/outputs/datadog/datadog.go b/plugins/outputs/datadog/datadog.go index ecc707cb9..c97929eae 100644 --- a/plugins/outputs/datadog/datadog.go +++ b/plugins/outputs/datadog/datadog.go @@ -28,24 +28,6 @@ type Datadog struct { proxy.HTTPProxy } -var sampleConfig = ` - ## Datadog API key - apikey = "my-secret-key" - - ## Connection timeout. - # timeout = "5s" - - ## Write URL override; useful for debugging. 
- # url = "https://app.datadoghq.com/api/v1/series" - - ## Set http_proxy (telegraf uses the system wide proxy settings if it isn't set) - # http_proxy_url = "http://localhost:8888" - - ## Override the default (none) compression used to send data. - ## Supports: "zlib", "none" - # compression = "none" -` - type TimeSeries struct { Series []*Metric `json:"series"` } @@ -170,14 +152,6 @@ func (d *Datadog) Write(metrics []telegraf.Metric) error { return nil } -func (d *Datadog) SampleConfig() string { - return sampleConfig -} - -func (d *Datadog) Description() string { - return "Configuration for DataDog API to send metrics to." -} - func (d *Datadog) authenticatedURL() string { q := url.Values{ "api_key": []string{d.Apikey}, diff --git a/plugins/outputs/datadog/datadog_sample_config.go b/plugins/outputs/datadog/datadog_sample_config.go new file mode 100644 index 000000000..89e09ae73 --- /dev/null +++ b/plugins/outputs/datadog/datadog_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package datadog + +func (d *Datadog) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/discard/discard.go b/plugins/outputs/discard/discard.go index de3696c3e..617c616c1 100644 --- a/plugins/outputs/discard/discard.go +++ b/plugins/outputs/discard/discard.go @@ -7,10 +7,8 @@ import ( type Discard struct{} -func (d *Discard) Connect() error { return nil } -func (d *Discard) Close() error { return nil } -func (d *Discard) SampleConfig() string { return "" } -func (d *Discard) Description() string { return "Send metrics to nowhere at all" } +func (d *Discard) Connect() error { return nil } +func (d *Discard) Close() error { return nil } func (d *Discard) Write(_ []telegraf.Metric) error { return nil } diff --git 
a/plugins/outputs/discard/discard_sample_config.go b/plugins/outputs/discard/discard_sample_config.go new file mode 100644 index 000000000..00da98589 --- /dev/null +++ b/plugins/outputs/discard/discard_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package discard + +func (d *Discard) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/dynatrace/README.md b/plugins/outputs/dynatrace/README.md index 2776fa23e..a6d25eaa4 100644 --- a/plugins/outputs/dynatrace/README.md +++ b/plugins/outputs/dynatrace/README.md @@ -58,21 +58,42 @@ You can learn more about how to use the Dynatrace API [here](https://www.dynatra ## Configuration ```toml +# Send telegraf metrics to a Dynatrace environment [[outputs.dynatrace]] - ## Leave empty or use the local ingest endpoint of your OneAgent monitored host (e.g.: http://127.0.0.1:14499/metrics/ingest). - ## Set Dynatrace environment URL (e.g.: https://YOUR_DOMAIN/api/v2/metrics/ingest) if you do not use a OneAgent + ## For usage with the Dynatrace OneAgent you can omit any configuration, + ## the only requirement is that the OneAgent is running on the same host. + ## Only setup environment url and token if you want to monitor a Host without the OneAgent present. + ## + ## Your Dynatrace environment URL. + ## For Dynatrace OneAgent you can leave this empty or set it to "http://127.0.0.1:14499/metrics/ingest" (default) + ## For Dynatrace SaaS environments the URL scheme is "https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest" + ## For Dynatrace Managed environments the URL scheme is "https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest" url = "" + + ## Your Dynatrace API token. 
+ ## Create an API token within your Dynatrace environment, by navigating to Settings > Integration > Dynatrace API + ## The API token needs data ingest scope permission. When using OneAgent, no API token is required. api_token = "" + ## Optional prefix for metric names (e.g.: "telegraf") prefix = "telegraf" - ## Flag for skipping the tls certificate check, just for testing purposes, should be false by default - insecure_skip_verify = false + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Optional flag for ignoring tls certificate check + # insecure_skip_verify = false + + ## Connection timeout, defaults to "5s" if not set. + timeout = "5s" + ## If you want metrics to be treated and reported as delta counters, add the metric names here additional_counters = [ ] ## Optional dimensions to be added to every metric - [outputs.dynatrace.default_dimensions] - default_key = "default value" + # [outputs.dynatrace.default_dimensions] + # default_key = "default value" ``` ### `url` diff --git a/plugins/outputs/dynatrace/dynatrace.go b/plugins/outputs/dynatrace/dynatrace.go index adf74ea48..c0feda909 100644 --- a/plugins/outputs/dynatrace/dynatrace.go +++ b/plugins/outputs/dynatrace/dynatrace.go @@ -38,45 +38,6 @@ type Dynatrace struct { loggedMetrics map[string]bool // New empty set } -const sampleConfig = ` - ## For usage with the Dynatrace OneAgent you can omit any configuration, - ## the only requirement is that the OneAgent is running on the same host. - ## Only setup environment url and token if you want to monitor a Host without the OneAgent present. - ## - ## Your Dynatrace environment URL. 
- ## For Dynatrace OneAgent you can leave this empty or set it to "http://127.0.0.1:14499/metrics/ingest" (default) - ## For Dynatrace SaaS environments the URL scheme is "https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest" - ## For Dynatrace Managed environments the URL scheme is "https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest" - url = "" - - ## Your Dynatrace API token. - ## Create an API token within your Dynatrace environment, by navigating to Settings > Integration > Dynatrace API - ## The API token needs data ingest scope permission. When using OneAgent, no API token is required. - api_token = "" - - ## Optional prefix for metric names (e.g.: "telegraf") - prefix = "telegraf" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - - ## Optional flag for ignoring tls certificate check - # insecure_skip_verify = false - - - ## Connection timeout, defaults to "5s" if not set. 
- timeout = "5s" - - ## If you want metrics to be treated and reported as delta counters, add the metric names here - additional_counters = [ ] - - ## Optional dimensions to be added to every metric - # [outputs.dynatrace.default_dimensions] - # default_key = "default value" -` - // Connect Connects the Dynatrace output plugin to the Telegraf stream func (d *Dynatrace) Connect() error { return nil @@ -88,16 +49,6 @@ func (d *Dynatrace) Close() error { return nil } -// SampleConfig Returns a sample configuration for the Dynatrace output plugin -func (d *Dynatrace) SampleConfig() string { - return sampleConfig -} - -// Description returns the description for the Dynatrace output plugin -func (d *Dynatrace) Description() string { - return "Send telegraf metrics to a Dynatrace environment" -} - func (d *Dynatrace) Write(metrics []telegraf.Metric) error { if len(metrics) == 0 { return nil diff --git a/plugins/outputs/dynatrace/dynatrace_sample_config.go b/plugins/outputs/dynatrace/dynatrace_sample_config.go new file mode 100644 index 000000000..ba020ea1c --- /dev/null +++ b/plugins/outputs/dynatrace/dynatrace_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package dynatrace + +func (d *Dynatrace) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/elasticsearch/README.md b/plugins/outputs/elasticsearch/README.md index 072a285ed..025e05553 100644 --- a/plugins/outputs/elasticsearch/README.md +++ b/plugins/outputs/elasticsearch/README.md @@ -184,22 +184,26 @@ POST https://es.us-east-1.amazonaws.com/2021-01-01/opensearch/upgradeDomain ## Configuration ```toml +# Configuration for Elasticsearch to send metrics to. 
[[outputs.elasticsearch]] ## The full HTTP endpoint URL for your Elasticsearch instance ## Multiple urls can be specified as part of the same cluster, - ## this means that only ONE of the urls will be written to each interval. + ## this means that only ONE of the urls will be written to each interval urls = [ "http://node1.es.example.com:9200" ] # required. ## Elasticsearch client timeout, defaults to "5s" if not set. timeout = "5s" ## Set to true to ask Elasticsearch a list of all cluster nodes, ## thus it is not necessary to list all nodes in the urls config option enable_sniffer = false + ## Set to true to enable gzip compression + enable_gzip = false ## Set the interval to check if the Elasticsearch nodes are available ## Setting to "0s" will disable the health check (not recommended in production) health_check_interval = "10s" ## Set the timeout for periodic health checks. # health_check_timeout = "1s" ## HTTP basic authentication details. + ## HTTP basic authentication details # username = "telegraf" # password = "mypassword" ## HTTP bearer token authentication details @@ -249,7 +253,7 @@ POST https://es.us-east-1.amazonaws.com/2021-01-01/opensearch/upgradeDomain ## NaNs and inf will be replaced with the given number, -inf with the negative of that number # float_handling = "none" # float_replacement_value = 0.0 - + ## Pipeline Config ## To use a ingest pipeline, set this to the name of the pipeline you want to use. 
# use_pipeline = "my_pipeline" diff --git a/plugins/outputs/elasticsearch/elasticsearch.go b/plugins/outputs/elasticsearch/elasticsearch.go index 7b58a1c53..80a448a96 100644 --- a/plugins/outputs/elasticsearch/elasticsearch.go +++ b/plugins/outputs/elasticsearch/elasticsearch.go @@ -52,85 +52,6 @@ type Elasticsearch struct { Client *elastic.Client } -var sampleConfig = ` - ## The full HTTP endpoint URL for your Elasticsearch instance - ## Multiple urls can be specified as part of the same cluster, - ## this means that only ONE of the urls will be written to each interval. - urls = [ "http://node1.es.example.com:9200" ] # required. - ## Elasticsearch client timeout, defaults to "5s" if not set. - timeout = "5s" - ## Set to true to ask Elasticsearch a list of all cluster nodes, - ## thus it is not necessary to list all nodes in the urls config option. - enable_sniffer = false - ## Set to true to enable gzip compression - enable_gzip = false - ## Set the interval to check if the Elasticsearch nodes are available - ## Setting to "0s" will disable the health check (not recommended in production) - health_check_interval = "10s" - ## Set the timeout for periodic health checks. - # health_check_timeout = "1s" - ## HTTP basic authentication details - # username = "telegraf" - # password = "mypassword" - ## HTTP bearer token authentication details - # auth_bearer_token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9" - - ## Index Config - ## The target index for metrics (Elasticsearch will create if it not exists). - ## You can use the date specifiers below to create indexes per time frame. - ## The metric timestamp will be used to decide the destination index name - # %Y - year (2016) - # %y - last two digits of year (00..99) - # %m - month (01..12) - # %d - day of month (e.g., 01) - # %H - hour (00..23) - # %V - week of the year (ISO week) (01..53) - ## Additionally, you can specify a tag name using the notation {{tag_name}} - ## which will be used as part of the index name. 
If the tag does not exist, - ## the default tag value will be used. - # index_name = "telegraf-{{host}}-%Y.%m.%d" - # default_tag_value = "none" - index_name = "telegraf-%Y.%m.%d" # required. - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## Template Config - ## Set to true if you want telegraf to manage its index template. - ## If enabled it will create a recommended index template for telegraf indexes - manage_template = true - ## The template name used for telegraf indexes - template_name = "telegraf" - ## Set to true if you want telegraf to overwrite an existing template - overwrite_template = false - ## If set to true a unique ID hash will be sent as sha256(concat(timestamp,measurement,series-hash)) string - ## it will enable data resend and update metric points avoiding duplicated metrics with diferent id's - force_document_id = false - - ## Specifies the handling of NaN and Inf values. - ## This option can have the following values: - ## none -- do not modify field-values (default); will produce an error if NaNs or infs are encountered - ## drop -- drop fields containing NaNs or infs - ## replace -- replace with the value in "float_replacement_value" (default: 0.0) - ## NaNs and inf will be replaced with the given number, -inf with the negative of that number - # float_handling = "none" - # float_replacement_value = 0.0 - - ## Pipeline Config - ## To use a ingest pipeline, set this to the name of the pipeline you want to use. - # use_pipeline = "my_pipeline" - ## Additionally, you can specify a tag name using the notation {{tag_name}} - ## which will be used as part of the pipeline name. If the tag does not exist, - ## the default pipeline will be used as the pipeline. If no default pipeline is set, - ## no pipeline is used for the metric. 
- # use_pipeline = "{{es_pipeline}}" - # default_pipeline = "my_pipeline" -` - const telegrafTemplate = ` { {{ if (lt .Version 6) }} @@ -530,14 +451,6 @@ func getISOWeek(eventTime time.Time) string { return strconv.Itoa(week) } -func (a *Elasticsearch) SampleConfig() string { - return sampleConfig -} - -func (a *Elasticsearch) Description() string { - return "Configuration for Elasticsearch to send metrics to." -} - func (a *Elasticsearch) Close() error { a.Client = nil return nil diff --git a/plugins/outputs/elasticsearch/elasticsearch_sample_config.go b/plugins/outputs/elasticsearch/elasticsearch_sample_config.go new file mode 100644 index 000000000..f0ea010f8 --- /dev/null +++ b/plugins/outputs/elasticsearch/elasticsearch_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package elasticsearch + +func (a *Elasticsearch) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/event_hubs/README.md b/plugins/outputs/event_hubs/README.md index c71c06f99..405726de8 100644 --- a/plugins/outputs/event_hubs/README.md +++ b/plugins/outputs/event_hubs/README.md @@ -9,17 +9,16 @@ The plugin uses the Telegraf serializers to format the metric data sent in the m ## Configuration ```toml -[[ outputs.event_hubs ]] -## The full connection string to the Event Hub (required) -## The shared access key must have "Send" permissions on the target Event Hub. -connection_string = "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName" - -## Client timeout (defaults to 30s) -# timeout = "30s" - -## Data format to output. 
-## Each data format has its own unique set of configuration options, read -## more about them here: -## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md -data_format = "json" +# Configuration for Event Hubs output plugin +[[outputs.event_hubs]] + ## The full connection string to the Event Hub (required) + ## The shared access key must have "Send" permissions on the target Event Hub. + connection_string = "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName" + ## Client timeout (defaults to 30s) + # timeout = "30s" + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "json" ``` diff --git a/plugins/outputs/event_hubs/event_hubs.go b/plugins/outputs/event_hubs/event_hubs.go index 3c87a84fb..e6a4c1da5 100644 --- a/plugins/outputs/event_hubs/event_hubs.go +++ b/plugins/outputs/event_hubs/event_hubs.go @@ -60,27 +60,6 @@ const ( defaultRequestTimeout = time.Second * 30 ) -func (e *EventHubs) Description() string { - return "Configuration for Event Hubs output plugin" -} - -func (e *EventHubs) SampleConfig() string { - return ` - ## The full connection string to the Event Hub (required) - ## The shared access key must have "Send" permissions on the target Event Hub. - connection_string = "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName" - - ## Client timeout (defaults to 30s) - # timeout = "30s" - - ## Data format to output. 
- ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - data_format = "json" -` -} - func (e *EventHubs) Init() error { err := e.Hub.GetHub(e.ConnectionString) diff --git a/plugins/outputs/event_hubs/event_hubs_sample_config.go b/plugins/outputs/event_hubs/event_hubs_sample_config.go new file mode 100644 index 000000000..a9ab0c2bd --- /dev/null +++ b/plugins/outputs/event_hubs/event_hubs_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package event_hubs + +func (e *EventHubs) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/exec/README.md b/plugins/outputs/exec/README.md index 60b4ac385..1a08d884e 100644 --- a/plugins/outputs/exec/README.md +++ b/plugins/outputs/exec/README.md @@ -15,6 +15,7 @@ For better performance, consider execd, which runs continuously. ## Configuration ```toml +# Send metrics to command as input over stdin [[outputs.exec]] ## Command to ingest metrics via stdin. command = ["tee", "-a", "/dev/null"] diff --git a/plugins/outputs/exec/exec.go b/plugins/outputs/exec/exec.go index 68c61e1ca..395e49346 100644 --- a/plugins/outputs/exec/exec.go +++ b/plugins/outputs/exec/exec.go @@ -27,20 +27,6 @@ type Exec struct { serializer serializers.Serializer } -var sampleConfig = ` - ## Command to ingest metrics via stdin. - command = ["tee", "-a", "/dev/null"] - - ## Timeout for command to complete. - # timeout = "5s" - - ## Data format to output. 
- ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - # data_format = "influx" -` - func (e *Exec) Init() error { e.runner = &CommandRunner{log: e.Log} @@ -62,16 +48,6 @@ func (e *Exec) Close() error { return nil } -// Description describes the plugin. -func (e *Exec) Description() string { - return "Send metrics to command as input over stdin" -} - -// SampleConfig returns a sample configuration. -func (e *Exec) SampleConfig() string { - return sampleConfig -} - // Write writes the metrics to the configured command. func (e *Exec) Write(metrics []telegraf.Metric) error { var buffer bytes.Buffer diff --git a/plugins/outputs/exec/exec_sample_config.go b/plugins/outputs/exec/exec_sample_config.go new file mode 100644 index 000000000..9d0d8bbf7 --- /dev/null +++ b/plugins/outputs/exec/exec_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package exec + +func (e *Exec) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/exec/exec_test.go b/plugins/outputs/exec/exec_test.go index 40fac6327..a83b3c1d2 100644 --- a/plugins/outputs/exec/exec_test.go +++ b/plugins/outputs/exec/exec_test.go @@ -94,7 +94,6 @@ func TestTruncate(t *testing.T) { func TestExecDocs(t *testing.T) { e := &Exec{} - e.Description() e.SampleConfig() require.NoError(t, e.Close()) diff --git a/plugins/outputs/execd/README.md b/plugins/outputs/execd/README.md index 5b2124625..46890340a 100644 --- a/plugins/outputs/execd/README.md +++ b/plugins/outputs/execd/README.md @@ -7,6 +7,7 @@ Telegraf minimum version: Telegraf 1.15.0 ## Configuration ```toml +# Run executable as long-running output plugin [[outputs.execd]] ## One program to run as 
daemon. ## NOTE: process and each argument should each be their own string diff --git a/plugins/outputs/execd/execd.go b/plugins/outputs/execd/execd.go index acace77ad..4f9f52da2 100644 --- a/plugins/outputs/execd/execd.go +++ b/plugins/outputs/execd/execd.go @@ -14,20 +14,6 @@ import ( "github.com/influxdata/telegraf/plugins/serializers" ) -const sampleConfig = ` - ## Program to run as daemon - command = ["my-telegraf-output", "--some-flag", "value"] - - ## Delay before the process is restarted after an unexpected termination - restart_delay = "10s" - - ## Data format to export. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - data_format = "influx" -` - type Execd struct { Command []string `toml:"command"` RestartDelay config.Duration `toml:"restart_delay"` @@ -37,14 +23,6 @@ type Execd struct { serializer serializers.Serializer } -func (e *Execd) SampleConfig() string { - return sampleConfig -} - -func (e *Execd) Description() string { - return "Run executable as long-running output plugin" -} - func (e *Execd) SetSerializer(s serializers.Serializer) { e.serializer = s } diff --git a/plugins/outputs/execd/execd_sample_config.go b/plugins/outputs/execd/execd_sample_config.go new file mode 100644 index 000000000..bc59d8ef3 --- /dev/null +++ b/plugins/outputs/execd/execd_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package execd + +func (e *Execd) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/file/README.md b/plugins/outputs/file/README.md index 2e6a12d97..13e22ae40 100644 --- a/plugins/outputs/file/README.md +++ b/plugins/outputs/file/README.md @@ -5,6 +5,7 @@ This plugin writes 
telegraf metrics to files ## Configuration ```toml +# Send telegraf metrics to file(s) [[outputs.file]] ## Files to write to, "stdout" is a specially handled file. files = ["stdout", "/tmp/metrics.out"] diff --git a/plugins/outputs/file/file.go b/plugins/outputs/file/file.go index 0c8ff903e..5f377ec47 100644 --- a/plugins/outputs/file/file.go +++ b/plugins/outputs/file/file.go @@ -26,34 +26,6 @@ type File struct { serializer serializers.Serializer } -var sampleConfig = ` - ## Files to write to, "stdout" is a specially handled file. - files = ["stdout", "/tmp/metrics.out"] - - ## Use batch serialization format instead of line based delimiting. The - ## batch format allows for the production of non line based output formats and - ## may more efficiently encode metric groups. - # use_batch_format = false - - ## The file will be rotated after the time interval specified. When set - ## to 0 no time based rotation is performed. - # rotation_interval = "0d" - - ## The logfile will be rotated when it becomes larger than the specified - ## size. When set to 0 no size based rotation is performed. - # rotation_max_size = "0MB" - - ## Maximum number of rotated archives to keep, any older logs are deleted. - ## If set to -1, no archives are removed. - # rotation_max_archives = 5 - - ## Data format to output. 
- ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - data_format = "influx" -` - func (f *File) SetSerializer(serializer serializers.Serializer) { f.serializer = serializer } @@ -94,14 +66,6 @@ func (f *File) Close() error { return err } -func (f *File) SampleConfig() string { - return sampleConfig -} - -func (f *File) Description() string { - return "Send telegraf metrics to file(s)" -} - func (f *File) Write(metrics []telegraf.Metric) error { var writeErr error diff --git a/plugins/outputs/file/file_sample_config.go b/plugins/outputs/file/file_sample_config.go new file mode 100644 index 000000000..aae05dce6 --- /dev/null +++ b/plugins/outputs/file/file_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package file + +func (f *File) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/graphite/README.md b/plugins/outputs/graphite/README.md index ddd85278f..cb50102fe 100644 --- a/plugins/outputs/graphite/README.md +++ b/plugins/outputs/graphite/README.md @@ -21,6 +21,17 @@ see the [Graphite Data Format](../../../docs/DATA_FORMATS_OUTPUT.md) ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md template = "host.tags.measurement.field" + ## Enable Graphite tags support + # graphite_tag_support = false + + ## Define how metric names and tags are sanitized; options are "strict", or "compatible" + ## strict - Default method, and backwards compatible with previous versions of Telegraf + ## compatible - More relaxed sanitizing when using tags, and compatible with the graphite spec + # graphite_tag_sanitize_mode = "strict" + + ## Character for separating metric name and field for 
Graphite tags + # graphite_separator = "." + ## Graphite templates patterns ## 1. Template for cpu ## 2. Template for disk* @@ -31,12 +42,6 @@ see the [Graphite Data Format](../../../docs/DATA_FORMATS_OUTPUT.md) # "host.measurement.tags.field" #] - ## Enable Graphite tags support - # graphite_tag_support = false - - ## Character for separating metric name and field for Graphite tags - # graphite_separator = "." - ## timeout in seconds for the write connection to graphite timeout = 2 diff --git a/plugins/outputs/graphite/graphite.go b/plugins/outputs/graphite/graphite.go index 11a712c36..cd7f36033 100644 --- a/plugins/outputs/graphite/graphite.go +++ b/plugins/outputs/graphite/graphite.go @@ -30,49 +30,6 @@ type Graphite struct { tlsint.ClientConfig } -var sampleConfig = ` - ## TCP endpoint for your graphite instance. - ## If multiple endpoints are configured, output will be load balanced. - ## Only one of the endpoints will be written to with each iteration. - servers = ["localhost:2003"] - ## Prefix metrics name - prefix = "" - ## Graphite output template - ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - template = "host.tags.measurement.field" - - ## Enable Graphite tags support - # graphite_tag_support = false - - ## Define how metric names and tags are sanitized; options are "strict", or "compatible" - ## strict - Default method, and backwards compatible with previous versionf of Telegraf - ## compatible - More relaxed sanitizing when using tags, and compatible with the graphite spec - # graphite_tag_sanitize_mode = "strict" - - ## Character for separating metric name and field for Graphite tags - # graphite_separator = "." - - ## Graphite templates patterns - ## 1. Template for cpu - ## 2. Template for disk* - ## 3. 
Default template - # templates = [ - # "cpu tags.measurement.host.field", - # "disk* measurement.field", - # "host.measurement.tags.field" - #] - - ## timeout in seconds for the write connection to graphite - timeout = 2 - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - func (g *Graphite) Connect() error { // Set default values if g.Timeout <= 0 { @@ -118,14 +75,6 @@ func (g *Graphite) Close() error { return nil } -func (g *Graphite) SampleConfig() string { - return sampleConfig -} - -func (g *Graphite) Description() string { - return "Configuration for Graphite server to send metrics to" -} - // We need check eof as we can write to nothing without noticing anything is wrong // the connection stays in a close_wait // We can detect that by finding an eof diff --git a/plugins/outputs/graphite/graphite_sample_config.go b/plugins/outputs/graphite/graphite_sample_config.go new file mode 100644 index 000000000..183dec344 --- /dev/null +++ b/plugins/outputs/graphite/graphite_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package graphite + +func (g *Graphite) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/graylog/README.md b/plugins/outputs/graylog/README.md index 2b0475263..7490b17c1 100644 --- a/plugins/outputs/graylog/README.md +++ b/plugins/outputs/graylog/README.md @@ -24,6 +24,7 @@ to the field name. ## Configuration ```toml +# Send telegraf metrics to graylog [[outputs.graylog]] ## Endpoints for your graylog instances. 
servers = ["udp://127.0.0.1:12201"] diff --git a/plugins/outputs/graylog/graylog.go b/plugins/outputs/graylog/graylog.go index 03762aeb1..0afef5358 100644 --- a/plugins/outputs/graylog/graylog.go +++ b/plugins/outputs/graylog/graylog.go @@ -317,31 +317,6 @@ type Graylog struct { closers []io.WriteCloser } -var sampleConfig = ` - ## Endpoints for your graylog instances. - servers = ["udp://127.0.0.1:12201"] - - ## Connection timeout. - # timeout = "5s" - - ## The field to use as the GELF short_message, if unset the static string - ## "telegraf" will be used. - ## example: short_message_field = "message" - # short_message_field = "" - - ## According to GELF payload specification, additional fields names must be prefixed - ## with an underscore. Previous versions did not prefix custom field 'name' with underscore. - ## Set to true for backward compatibility. - # name_field_no_prefix = false - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - func (g *Graylog) Connect() error { var writers []io.Writer dialer := &net.Dialer{Timeout: time.Duration(g.Timeout)} @@ -376,14 +351,6 @@ func (g *Graylog) Close() error { return nil } -func (g *Graylog) SampleConfig() string { - return sampleConfig -} - -func (g *Graylog) Description() string { - return "Send telegraf metrics to graylog" -} - func (g *Graylog) Write(metrics []telegraf.Metric) error { for _, metric := range metrics { values, err := g.serialize(metric) diff --git a/plugins/outputs/graylog/graylog_sample_config.go b/plugins/outputs/graylog/graylog_sample_config.go new file mode 100644 index 000000000..a8bfa00e3 --- /dev/null +++ b/plugins/outputs/graylog/graylog_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T 
EDIT; This file is used as a template by tools/generate_plugindata +package graylog + +func (g *Graylog) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/groundwork/README.md b/plugins/outputs/groundwork/README.md index 9d7d443aa..f3af05696 100644 --- a/plugins/outputs/groundwork/README.md +++ b/plugins/outputs/groundwork/README.md @@ -7,6 +7,7 @@ This plugin writes to a [GroundWork Monitor][1] instance. Plugin only supports G ## Configuration ```toml +# Send telegraf metrics to GroundWork Monitor [[outputs.groundwork]] ## URL of your groundwork instance. url = "https://groundwork.example.com" @@ -17,7 +18,7 @@ This plugin writes to a [GroundWork Monitor][1] instance. Plugin only supports G ## Username and password to access GroundWork API. username = "" password = "" - + ## Default display name for the host with services(metrics). # default_host = "telegraf" diff --git a/plugins/outputs/groundwork/groundwork.go b/plugins/outputs/groundwork/groundwork.go index 739cdf941..2c398a511 100644 --- a/plugins/outputs/groundwork/groundwork.go +++ b/plugins/outputs/groundwork/groundwork.go @@ -17,30 +17,6 @@ import ( "github.com/influxdata/telegraf/plugins/outputs" ) -const sampleConfig = ` - ## URL of your groundwork instance. - url = "https://groundwork.example.com" - - ## Agent uuid for GroundWork API Server. - agent_id = "" - - ## Username and password to access GroundWork API. - username = "" - password = "" - - ## Default display name for the host with services(metrics). - # default_host = "telegraf" - - ## Default service state. - # default_service_state = "SERVICE_OK" - - ## The name of the tag that contains the hostname. - # resource_tag = "host" - - ## The name of the tag that contains the host group name. 
- # group_tag = "group" -` - type metricMeta struct { group string resource string @@ -59,10 +35,6 @@ type Groundwork struct { client clients.GWClient } -func (g *Groundwork) SampleConfig() string { - return sampleConfig -} - func (g *Groundwork) Init() error { if g.Server == "" { return errors.New("no 'url' provided") @@ -212,10 +184,6 @@ func (g *Groundwork) Write(metrics []telegraf.Metric) error { return nil } -func (g *Groundwork) Description() string { - return "Send telegraf metrics to GroundWork Monitor" -} - func init() { outputs.Add("groundwork", func() telegraf.Output { return &Groundwork{ diff --git a/plugins/outputs/groundwork/groundwork_sample_config.go b/plugins/outputs/groundwork/groundwork_sample_config.go new file mode 100644 index 000000000..25d0fa5d9 --- /dev/null +++ b/plugins/outputs/groundwork/groundwork_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package groundwork + +func (g *Groundwork) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/health/README.md b/plugins/outputs/health/README.md index a88417f63..fd842a75d 100644 --- a/plugins/outputs/health/README.md +++ b/plugins/outputs/health/README.md @@ -10,6 +10,7 @@ must fail in order for the resource to enter the failed state. ## Configuration ```toml +# Configurable HTTP health check resource based on metrics [[outputs.health]] ## Address and port to listen on. ## ex: service_address = "http://localhost:8080" diff --git a/plugins/outputs/health/health.go b/plugins/outputs/health/health.go index 0782f7be2..00731127c 100644 --- a/plugins/outputs/health/health.go +++ b/plugins/outputs/health/health.go @@ -23,45 +23,6 @@ const ( defaultWriteTimeout = 5 * time.Second ) -var sampleConfig = ` - ## Address and port to listen on. 
- ## ex: service_address = "http://localhost:8080" - ## service_address = "unix:///var/run/telegraf-health.sock" - # service_address = "http://:8080" - - ## The maximum duration for reading the entire request. - # read_timeout = "5s" - ## The maximum duration for writing the entire response. - # write_timeout = "5s" - - ## Username and password to accept for HTTP basic authentication. - # basic_username = "user1" - # basic_password = "secret" - - ## Allowed CA certificates for client certificates. - # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] - - ## TLS server certificate and private key. - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - - ## One or more check sub-tables should be defined, it is also recommended to - ## use metric filtering to limit the metrics that flow into this output. - ## - ## When using the default buffer sizes, this example will fail when the - ## metric buffer is half full. - ## - ## namepass = ["internal_write"] - ## tagpass = { output = ["influxdb"] } - ## - ## [[outputs.health.compares]] - ## field = "buffer_size" - ## lt = 5000.0 - ## - ## [[outputs.health.contains]] - ## field = "buffer_size" -` - type Checker interface { // Check returns true if the metrics meet its criteria. 
Check(metrics []telegraf.Metric) bool @@ -91,14 +52,6 @@ type Health struct { healthy bool } -func (h *Health) SampleConfig() string { - return sampleConfig -} - -func (h *Health) Description() string { - return "Configurable HTTP health check resource based on metrics" -} - func (h *Health) Init() error { u, err := url.Parse(h.ServiceAddress) if err != nil { diff --git a/plugins/outputs/health/health_sample_config.go b/plugins/outputs/health/health_sample_config.go new file mode 100644 index 000000000..5254a21a5 --- /dev/null +++ b/plugins/outputs/health/health_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package health + +func (h *Health) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index 2a13258ae..1c6a7b29a 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -12,7 +12,7 @@ import ( "time" awsV2 "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" "github.com/influxdata/telegraf" internalaws "github.com/influxdata/telegraf/config/aws" "github.com/influxdata/telegraf/internal" @@ -26,90 +26,6 @@ const ( defaultURL = "http://127.0.0.1:8080/telegraf" ) -var sampleConfig = ` - ## URL is the address to send metrics to - url = "http://127.0.0.1:8080/telegraf" - - ## Timeout for HTTP message - # timeout = "5s" - - ## HTTP method, one of: "POST" or "PUT" - # method = "POST" - - ## HTTP Basic Auth credentials - # username = "username" - # password = "pa$$word" - - ## OAuth2 Client Credentials Grant - # client_id = "clientid" - # client_secret = "secret" - # token_url = "https://indentityprovider/oauth2/v1/token" - # scopes = ["urn:opc:idm:__myscopes__"] - - ## Optional TLS Config - 
# tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## Optional Cookie authentication - # cookie_auth_url = "https://localhost/authMe" - # cookie_auth_method = "POST" - # cookie_auth_username = "username" - # cookie_auth_password = "pa$$word" - # cookie_auth_headers = '{"Content-Type": "application/json", "X-MY-HEADER":"hello"}' - # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' - ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie - # cookie_auth_renewal = "5m" - - ## Data format to output. - ## Each data format has it's own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - # data_format = "influx" - - ## Use batch serialization format (default) instead of line based format. - ## Batch format is more efficient and should be used unless line based - ## format is really needed. - # use_batch_format = true - - ## HTTP Content-Encoding for write request body, can be set to "gzip" to - ## compress body or "identity" to apply no encoding. - # content_encoding = "identity" - - ## Additional HTTP headers - # [outputs.http.headers] - # # Should be set manually to "application/json" for json data_format - # Content-Type = "text/plain; charset=utf-8" - - ## Idle (keep-alive) connection timeout. - ## Maximum amount of time before idle connection is closed. - ## Zero means no limit. 
- # idle_conn_timeout = 0 - - ## Amazon Region - #region = "us-east-1" - - ## Amazon Credentials - ## Credentials are loaded in the following order - ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified - ## 2) Assumed credentials via STS if role_arn is specified - ## 3) explicit credentials from 'access_key' and 'secret_key' - ## 4) shared profile from 'profile' - ## 5) environment variables - ## 6) shared credentials file - ## 7) EC2 Instance Profile - #access_key = "" - #secret_key = "" - #token = "" - #role_arn = "" - #web_identity_token_file = "" - #role_session_name = "" - #profile = "" - #shared_credential_file = "" -` - const ( defaultContentType = "text/plain; charset=utf-8" defaultMethod = http.MethodPost @@ -171,14 +87,6 @@ func (h *HTTP) Close() error { return nil } -func (h *HTTP) Description() string { - return "A plugin that can transmit metrics over HTTP" -} - -func (h *HTTP) SampleConfig() string { - return sampleConfig -} - func (h *HTTP) Write(metrics []telegraf.Metric) error { var reqBody []byte diff --git a/plugins/outputs/http/http_sample_config.go b/plugins/outputs/http/http_sample_config.go new file mode 100644 index 000000000..a2ad3e928 --- /dev/null +++ b/plugins/outputs/http/http_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package http + +func (h *HTTP) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index 59b70c74f..1d9ba9cac 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -63,84 +63,6 @@ type InfluxDB struct { Log telegraf.Logger } -var sampleConfig = ` - ## The full HTTP or UDP URL for your InfluxDB instance. 
- ## - ## Multiple URLs can be specified for a single cluster, only ONE of the - ## urls will be written to each interval. - # urls = ["unix:///var/run/influxdb.sock"] - # urls = ["udp://127.0.0.1:8089"] - # urls = ["http://127.0.0.1:8086"] - - ## The target database for metrics; will be created as needed. - ## For UDP url endpoint database needs to be configured on server side. - # database = "telegraf" - - ## The value of this tag will be used to determine the database. If this - ## tag is not set the 'database' option is used as the default. - # database_tag = "" - - ## If true, the 'database_tag' will not be included in the written metric. - # exclude_database_tag = false - - ## If true, no CREATE DATABASE queries will be sent. Set to true when using - ## Telegraf with a user without permissions to create databases or when the - ## database already exists. - # skip_database_creation = false - - ## Name of existing retention policy to write to. Empty string writes to - ## the default retention policy. Only takes effect when using HTTP. - # retention_policy = "" - - ## The value of this tag will be used to determine the retention policy. If this - ## tag is not set the 'retention_policy' option is used as the default. - # retention_policy_tag = "" - - ## If true, the 'retention_policy_tag' will not be included in the written metric. - # exclude_retention_policy_tag = false - - ## Write consistency (clusters only), can be: "any", "one", "quorum", "all". - ## Only takes effect when using HTTP. - # write_consistency = "any" - - ## Timeout for HTTP messages. - # timeout = "5s" - - ## HTTP Basic Auth - # username = "telegraf" - # password = "metricsmetricsmetricsmetrics" - - ## HTTP User-Agent - # user_agent = "telegraf" - - ## UDP payload size is the maximum packet size to send. - # udp_payload = "512B" - - ## Optional TLS Config for use on HTTP connections. 
- # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## HTTP Proxy override, if unset values the standard proxy environment - ## variables are consulted to determine which proxy, if any, should be used. - # http_proxy = "http://corporate.proxy:3128" - - ## Additional HTTP headers - # http_headers = {"X-Special-Header" = "Special-Value"} - - ## HTTP Content-Encoding for write request body, can be set to "gzip" to - ## compress body or "identity" to apply no encoding. - # content_encoding = "gzip" - - ## When true, Telegraf will output unsigned integers as unsigned values, - ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned - ## integer values. Enabling this option will result in field type errors if - ## existing data has been written. - # influx_uint_support = false -` - func (i *InfluxDB) Connect() error { ctx := context.Background() @@ -198,14 +120,6 @@ func (i *InfluxDB) Close() error { return nil } -func (i *InfluxDB) Description() string { - return "Configuration for sending metrics to InfluxDB" -} - -func (i *InfluxDB) SampleConfig() string { - return sampleConfig -} - // Write sends metrics to one of the configured servers, logging each // unsuccessful. If all servers fail, return an error. 
func (i *InfluxDB) Write(metrics []telegraf.Metric) error { diff --git a/plugins/outputs/influxdb/influxdb_sample_config.go b/plugins/outputs/influxdb/influxdb_sample_config.go new file mode 100644 index 000000000..0c92a7a50 --- /dev/null +++ b/plugins/outputs/influxdb/influxdb_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package influxdb + +func (i *InfluxDB) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/influxdb_v2/influxdb.go b/plugins/outputs/influxdb_v2/influxdb_v2.go similarity index 67% rename from plugins/outputs/influxdb_v2/influxdb.go rename to plugins/outputs/influxdb_v2/influxdb_v2.go index cdaefc41d..cff59f88e 100644 --- a/plugins/outputs/influxdb_v2/influxdb.go +++ b/plugins/outputs/influxdb_v2/influxdb_v2.go @@ -21,58 +21,6 @@ var ( ErrMissingURL = errors.New("missing URL") ) -var sampleConfig = ` - ## The URLs of the InfluxDB cluster nodes. - ## - ## Multiple URLs can be specified for a single cluster, only ONE of the - ## urls will be written to each interval. - ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] - urls = ["http://127.0.0.1:8086"] - - ## Token for authentication. - token = "" - - ## Organization is the name of the organization you wish to write to; must exist. - organization = "" - - ## Destination bucket to write into. - bucket = "" - - ## The value of this tag will be used to determine the bucket. If this - ## tag is not set the 'bucket' option is used as the default. - # bucket_tag = "" - - ## If true, the bucket tag will not be added to the metric. - # exclude_bucket_tag = false - - ## Timeout for HTTP messages. 
- # timeout = "5s" - - ## Additional HTTP headers - # http_headers = {"X-Special-Header" = "Special-Value"} - - ## HTTP Proxy override, if unset values the standard proxy environment - ## variables are consulted to determine which proxy, if any, should be used. - # http_proxy = "http://corporate.proxy:3128" - - ## HTTP User-Agent - # user_agent = "telegraf" - - ## Content-Encoding for write request body, can be set to "gzip" to - ## compress body or "identity" to apply no encoding. - # content_encoding = "gzip" - - ## Enable or disable uint support for writing uints influxdb 2.0. - # influx_uint_support = false - - ## Optional TLS Config for use on HTTP connections. - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - type Client interface { Write(context.Context, []telegraf.Metric) error @@ -142,14 +90,6 @@ func (i *InfluxDB) Close() error { return nil } -func (i *InfluxDB) Description() string { - return "Configuration for sending metrics to InfluxDB" -} - -func (i *InfluxDB) SampleConfig() string { - return sampleConfig -} - // Write sends metrics to one of the configured servers, logging each // unsuccessful. If all servers fail, return an error. 
func (i *InfluxDB) Write(metrics []telegraf.Metric) error { diff --git a/plugins/outputs/influxdb_v2/influxdb_v2_sample_config.go b/plugins/outputs/influxdb_v2/influxdb_v2_sample_config.go new file mode 100644 index 000000000..2ce7b4f2c --- /dev/null +++ b/plugins/outputs/influxdb_v2/influxdb_v2_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package influxdb_v2 + +func (i *InfluxDB) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/influxdb_v2/influxdb_test.go b/plugins/outputs/influxdb_v2/influxdb_v2_test.go similarity index 98% rename from plugins/outputs/influxdb_v2/influxdb_test.go rename to plugins/outputs/influxdb_v2/influxdb_v2_test.go index b16fd944d..51a810e0c 100644 --- a/plugins/outputs/influxdb_v2/influxdb_test.go +++ b/plugins/outputs/influxdb_v2/influxdb_v2_test.go @@ -97,7 +97,6 @@ func TestConnect(t *testing.T) { func TestUnused(_ *testing.T) { thing := influxdb.InfluxDB{} thing.Close() - thing.Description() thing.SampleConfig() outputs.Outputs["influxdb_v2"]() } diff --git a/plugins/outputs/instrumental/README.md b/plugins/outputs/instrumental/README.md index 65113aecc..9cc059eef 100644 --- a/plugins/outputs/instrumental/README.md +++ b/plugins/outputs/instrumental/README.md @@ -10,6 +10,7 @@ by whitespace. 
The `increment` type is only used if the metric comes in as a cou ## Configuration ```toml +# Configuration for sending metrics to an Instrumental project [[outputs.instrumental]] ## Project API Token (required) api_token = "API Token" # required diff --git a/plugins/outputs/instrumental/instrumental.go b/plugins/outputs/instrumental/instrumental.go index b0b52a921..1fa7f8098 100644 --- a/plugins/outputs/instrumental/instrumental.go +++ b/plugins/outputs/instrumental/instrumental.go @@ -43,20 +43,6 @@ const ( HandshakeFormat = HelloMessage + AuthFormat ) -var sampleConfig = ` - ## Project API Token (required) - api_token = "API Token" # required - ## Prefix the metrics with a given name - prefix = "" - ## Stats output template (Graphite formatting) - ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite - template = "host.tags.measurement.field" - ## Timeout in seconds to connect - timeout = "2s" - ## Display Communication to Instrumental - debug = false -` - func (i *Instrumental) Connect() error { connection, err := net.DialTimeout("tcp", i.Host+":8000", time.Duration(i.Timeout)) @@ -168,14 +154,6 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error { return nil } -func (i *Instrumental) Description() string { - return "Configuration for sending metrics to an Instrumental project" -} - -func (i *Instrumental) SampleConfig() string { - return sampleConfig -} - func (i *Instrumental) authenticate(conn net.Conn) error { _, err := fmt.Fprintf(conn, HandshakeFormat, i.APIToken) if err != nil { diff --git a/plugins/outputs/instrumental/instrumental_sample_config.go b/plugins/outputs/instrumental/instrumental_sample_config.go new file mode 100644 index 000000000..504f42202 --- /dev/null +++ b/plugins/outputs/instrumental/instrumental_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; 
This file is used as a template by tools/generate_plugindata +package instrumental + +func (i *Instrumental) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/kafka/README.md b/plugins/outputs/kafka/README.md index 6186eeebd..33cb435e0 100644 --- a/plugins/outputs/kafka/README.md +++ b/plugins/outputs/kafka/README.md @@ -5,6 +5,7 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm ## Configuration ```toml +# Configuration for the Kafka server to send metrics to [[outputs.kafka]] ## URLs of kafka brokers brokers = ["localhost:9092"] @@ -106,6 +107,10 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm ## until the next flush. # max_retry = 3 + ## The maximum permitted size of a message. Should be set equal to or + ## smaller than the broker's 'message.max.bytes'. + # max_message_bytes = 1000000 + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 118af9868..269591f57 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -80,158 +80,6 @@ func (*DebugLogger) Println(v ...interface{}) { log.Println(args...) } -var sampleConfig = ` - ## URLs of kafka brokers - brokers = ["localhost:9092"] - ## Kafka topic for producer messages - topic = "telegraf" - - ## The value of this tag will be used as the topic. If not set the 'topic' - ## option is used. - # topic_tag = "" - - ## If true, the 'topic_tag' will be removed from to the metric. - # exclude_topic_tag = false - - ## Optional Client id - # client_id = "Telegraf" - - ## Set the minimal supported Kafka version. Setting this enables the use of new - ## Kafka features and APIs. Of particular interest, lz4 compression - ## requires at least version 0.10.0.0. - ## ex: version = "1.1.0" - # version = "" - - ## Optional topic suffix configuration. 
- ## If the section is omitted, no suffix is used. - ## Following topic suffix methods are supported: - ## measurement - suffix equals to separator + measurement's name - ## tags - suffix equals to separator + specified tags' values - ## interleaved with separator - - ## Suffix equals to "_" + measurement name - # [outputs.kafka.topic_suffix] - # method = "measurement" - # separator = "_" - - ## Suffix equals to "__" + measurement's "foo" tag value. - ## If there's no such a tag, suffix equals to an empty string - # [outputs.kafka.topic_suffix] - # method = "tags" - # keys = ["foo"] - # separator = "__" - - ## Suffix equals to "_" + measurement's "foo" and "bar" - ## tag values, separated by "_". If there is no such tags, - ## their values treated as empty strings. - # [outputs.kafka.topic_suffix] - # method = "tags" - # keys = ["foo", "bar"] - # separator = "_" - - ## The routing tag specifies a tagkey on the metric whose value is used as - ## the message key. The message key is used to determine which partition to - ## send the message to. This tag is prefered over the routing_key option. - routing_tag = "host" - - ## The routing key is set as the message key and used to determine which - ## partition to send the message to. This value is only used when no - ## routing_tag is set or as a fallback when the tag specified in routing tag - ## is not found. - ## - ## If set to "random", a random value will be generated for each message. - ## - ## When unset, no message key is added and each message is routed to a random - ## partition. - ## - ## ex: routing_key = "random" - ## routing_key = "telegraf" - # routing_key = "" - - ## Compression codec represents the various compression codecs recognized by - ## Kafka in messages. - ## 0 : None - ## 1 : Gzip - ## 2 : Snappy - ## 3 : LZ4 - ## 4 : ZSTD - # compression_codec = 0 - - ## Idempotent Writes - ## If enabled, exactly one copy of each message is written. 
- # idempotent_writes = false - - ## RequiredAcks is used in Produce Requests to tell the broker how many - ## replica acknowledgements it must see before responding - ## 0 : the producer never waits for an acknowledgement from the broker. - ## This option provides the lowest latency but the weakest durability - ## guarantees (some data will be lost when a server fails). - ## 1 : the producer gets an acknowledgement after the leader replica has - ## received the data. This option provides better durability as the - ## client waits until the server acknowledges the request as successful - ## (only messages that were written to the now-dead leader but not yet - ## replicated will be lost). - ## -1: the producer gets an acknowledgement after all in-sync replicas have - ## received the data. This option provides the best durability, we - ## guarantee that no messages will be lost as long as at least one in - ## sync replica remains. - # required_acks = -1 - - ## The maximum number of times to retry sending a metric before failing - ## until the next flush. - # max_retry = 3 - - ## The maximum permitted size of a message. Should be set equal to or - ## smaller than the broker's 'message.max.bytes'. 
- # max_message_bytes = 1000000 - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## Optional SOCKS5 proxy to use when connecting to brokers - # socks5_enabled = true - # socks5_address = "127.0.0.1:1080" - # socks5_username = "alice" - # socks5_password = "pass123" - - ## Optional SASL Config - # sasl_username = "kafka" - # sasl_password = "secret" - - ## Optional SASL: - ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI - ## (defaults to PLAIN) - # sasl_mechanism = "" - - ## used if sasl_mechanism is GSSAPI (experimental) - # sasl_gssapi_service_name = "" - # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH - # sasl_gssapi_auth_type = "KRB5_USER_AUTH" - # sasl_gssapi_kerberos_config_path = "/" - # sasl_gssapi_realm = "realm" - # sasl_gssapi_key_tab_path = "" - # sasl_gssapi_disable_pafxfast = false - - ## used if sasl_mechanism is OAUTHBEARER (experimental) - # sasl_access_token = "" - - ## SASL protocol version. When connecting to Azure EventHub set to 0. - # sasl_version = 1 - - # Disable Kafka metadata full fetch - # metadata_full = false - - ## Data format to output. 
- ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - # data_format = "influx" -` - func ValidateTopicSuffixMethod(method string) error { for _, validMethod := range ValidTopicSuffixMethods { if method == validMethod { @@ -327,14 +175,6 @@ func (k *Kafka) Close() error { return k.producer.Close() } -func (k *Kafka) SampleConfig() string { - return sampleConfig -} - -func (k *Kafka) Description() string { - return "Configuration for the Kafka server to send metrics to" -} - func (k *Kafka) routingKey(metric telegraf.Metric) (string, error) { if k.RoutingTag != "" { key, ok := metric.GetTag(k.RoutingTag) diff --git a/plugins/outputs/kafka/kafka_sample_config.go b/plugins/outputs/kafka/kafka_sample_config.go new file mode 100644 index 000000000..291b05bcd --- /dev/null +++ b/plugins/outputs/kafka/kafka_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package kafka + +func (k *Kafka) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/kinesis/README.md b/plugins/outputs/kinesis/README.md index b5f9422f8..fde49bff2 100644 --- a/plugins/outputs/kinesis/README.md +++ b/plugins/outputs/kinesis/README.md @@ -25,7 +25,73 @@ will attempt to authenticate. If you are using credentials from a web identity provider, you can specify the session name using `role_session_name`. If left empty, the current timestamp will be used. -## Config +## Configuration + +```toml +# Configuration for the AWS Kinesis output. +[[outputs.kinesis]] + ## Amazon REGION of kinesis endpoint. 
+ region = "ap-southeast-2" + + ## Amazon Credentials + ## Credentials are loaded in the following order + ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified + ## 2) Assumed credentials via STS if role_arn is specified + ## 3) explicit credentials from 'access_key' and 'secret_key' + ## 4) shared profile from 'profile' + ## 5) environment variables + ## 6) shared credentials file + ## 7) EC2 Instance Profile + #access_key = "" + #secret_key = "" + #token = "" + #role_arn = "" + #web_identity_token_file = "" + #role_session_name = "" + #profile = "" + #shared_credential_file = "" + + ## Endpoint to make request against, the correct endpoint is automatically + ## determined and this option should only be set if you wish to override the + ## default. + ## ex: endpoint_url = "http://localhost:8000" + # endpoint_url = "" + + ## Kinesis StreamName must exist prior to starting telegraf. + streamname = "StreamName" + + ## The partition key can be calculated using one of several methods: + ## + ## Use a static value for all writes: + # [outputs.kinesis.partition] + # method = "static" + # key = "howdy" + # + ## Use a random partition key on each write: + # [outputs.kinesis.partition] + # method = "random" + # + ## Use the measurement name as the partition key: + # [outputs.kinesis.partition] + # method = "measurement" + # + ## Use the value of a tag for all writes, if the tag is not set the empty + ## default option will be used. When no default, defaults to "telegraf" + # [outputs.kinesis.partition] + # method = "tag" + # key = "host" + # default = "mykey" + + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "influx" + + ## debug will show upstream aws messages. 
+ debug = false +``` For this output plugin to function correctly the following variables must be configured. diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index 579527990..e3a900e1e 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -43,78 +43,6 @@ type kinesisClient interface { PutRecords(context.Context, *kinesis.PutRecordsInput, ...func(*kinesis.Options)) (*kinesis.PutRecordsOutput, error) } -var sampleConfig = ` - ## Amazon REGION of kinesis endpoint. - region = "ap-southeast-2" - - ## Amazon Credentials - ## Credentials are loaded in the following order - ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified - ## 2) Assumed credentials via STS if role_arn is specified - ## 3) explicit credentials from 'access_key' and 'secret_key' - ## 4) shared profile from 'profile' - ## 5) environment variables - ## 6) shared credentials file - ## 7) EC2 Instance Profile - #access_key = "" - #secret_key = "" - #token = "" - #role_arn = "" - #web_identity_token_file = "" - #role_session_name = "" - #profile = "" - #shared_credential_file = "" - - ## Endpoint to make request against, the correct endpoint is automatically - ## determined and this option should only be set if you wish to override the - ## default. - ## ex: endpoint_url = "http://localhost:8000" - # endpoint_url = "" - - ## Kinesis StreamName must exist prior to starting telegraf. 
- streamname = "StreamName" - - ## The partition key can be calculated using one of several methods: - ## - ## Use a static value for all writes: - # [outputs.kinesis.partition] - # method = "static" - # key = "howdy" - # - ## Use a random partition key on each write: - # [outputs.kinesis.partition] - # method = "random" - # - ## Use the measurement name as the partition key: - # [outputs.kinesis.partition] - # method = "measurement" - # - ## Use the value of a tag for all writes, if the tag is not set the empty - ## default option will be used. When no default, defaults to "telegraf" - # [outputs.kinesis.partition] - # method = "tag" - # key = "host" - # default = "mykey" - - - ## Data format to output. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - data_format = "influx" - - ## debug will show upstream aws messages. - debug = false -` - -func (k *KinesisOutput) SampleConfig() string { - return sampleConfig -} - -func (k *KinesisOutput) Description() string { - return "Configuration for the AWS Kinesis output." 
-} - func (k *KinesisOutput) Connect() error { if k.Partition == nil { k.Log.Error("Deprecated partitionkey configuration in use, please consider using outputs.kinesis.partition") diff --git a/plugins/outputs/kinesis/kinesis_sample_config.go b/plugins/outputs/kinesis/kinesis_sample_config.go new file mode 100644 index 000000000..cdefa070e --- /dev/null +++ b/plugins/outputs/kinesis/kinesis_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package kinesis + +func (k *KinesisOutput) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/librato/README.md b/plugins/outputs/librato/README.md index 685c36432..5676d1fb0 100644 --- a/plugins/outputs/librato/README.md +++ b/plugins/outputs/librato/README.md @@ -10,3 +10,24 @@ Point Tags to the API. If the point value being sent cannot be converted to a float64, the metric is skipped. Currently, the plugin does not send any associated Point Tags. + +## Configuration + +```toml +# Configuration for Librato API to send metrics to. +[[outputs.librato]] + ## Librato API Docs + ## http://dev.librato.com/v1/metrics-authentication + ## Librato API user + api_user = "telegraf@influxdb.com" # required. + ## Librato API token + api_token = "my-secret-token" # required. + ## Debug + # debug = false + ## Connection timeout. 
+ # timeout = "5s" + ## Output source Template (same as graphite buckets) + ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite + ## This template is used in librato's source (not metric's name) + template = "host" +``` diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go index 4c471e5a6..721ee264d 100644 --- a/plugins/outputs/librato/librato.go +++ b/plugins/outputs/librato/librato.go @@ -32,24 +32,6 @@ type Librato struct { // https://www.librato.com/docs/kb/faq/best_practices/naming_convention_metrics_sources.html#naming-limitations-for-sources-and-metrics var reUnacceptedChar = regexp.MustCompile("[^.a-zA-Z0-9_-]") -var sampleConfig = ` - ## Librato API Docs - ## http://dev.librato.com/v1/metrics-authentication - ## Librato API user - api_user = "telegraf@influxdb.com" # required. - ## Librato API token - api_token = "my-secret-token" # required. - ## Debug - # debug = false - ## Connection timeout. - # timeout = "5s" - ## Output source Template (same as graphite buckets) - ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite - ## This template is used in librato's source (not metric's name) - template = "host" - -` - // LMetrics is the default struct for Librato's API fromat type LMetrics struct { Gauges []*Gauge `json:"gauges"` @@ -176,17 +158,6 @@ func (l *Librato) writeBatch(start int, sizeBatch int, metricCounter int, tempGa return nil } -// SampleConfig is function who return the default configuration for this -// output -func (l *Librato) SampleConfig() string { - return sampleConfig -} - -// Description is function who return the Description of this output -func (l *Librato) Description() string { - return "Configuration for Librato API to send metrics to." 
-} - func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) { gauges := []*Gauge{} if m.Time().Unix() == 0 { diff --git a/plugins/outputs/librato/librato_sample_config.go b/plugins/outputs/librato/librato_sample_config.go new file mode 100644 index 000000000..fa09650c4 --- /dev/null +++ b/plugins/outputs/librato/librato_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package librato + +func (l *Librato) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/logzio/logzio.go b/plugins/outputs/logzio/logzio.go index caec293b1..db4e39931 100644 --- a/plugins/outputs/logzio/logzio.go +++ b/plugins/outputs/logzio/logzio.go @@ -22,22 +22,6 @@ const ( logzioType = "telegraf" ) -var sampleConfig = ` - ## Connection timeout, defaults to "5s" if not set. - timeout = "5s" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - - ## Logz.io account token - token = "your logz.io token" # required - - ## Use your listener URL for your Logz.io account region. 
- # url = "https://listener.logz.io:8071" -` - type Logzio struct { Log telegraf.Logger `toml:"-"` Timeout config.Duration `toml:"timeout"` @@ -89,16 +73,6 @@ func (l *Logzio) Close() error { return nil } -// Description returns a one-sentence description on the Output -func (l *Logzio) Description() string { - return logzioDescription -} - -// SampleConfig returns the default configuration of the Output -func (l *Logzio) SampleConfig() string { - return sampleConfig -} - // Write takes in group of points to be written to the Output func (l *Logzio) Write(metrics []telegraf.Metric) error { if len(metrics) == 0 { diff --git a/plugins/outputs/logzio/logzio_sample_config.go b/plugins/outputs/logzio/logzio_sample_config.go new file mode 100644 index 000000000..42cadabbb --- /dev/null +++ b/plugins/outputs/logzio/logzio_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package logzio + +func (l *Logzio) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/loki/loki.go b/plugins/outputs/loki/loki.go index c3787e952..52a2c3ecd 100644 --- a/plugins/outputs/loki/loki.go +++ b/plugins/outputs/loki/loki.go @@ -26,32 +26,6 @@ const ( defaultClientTimeout = 5 * time.Second ) -var sampleConfig = ` - ## The domain of Loki - domain = "https://loki.domain.tld" - - ## Endpoint to write api - # endpoint = "/loki/api/v1/push" - - ## Connection timeout, defaults to "5s" if not set. 
- # timeout = "5s" - - ## Basic auth credential - # username = "loki" - # password = "pass" - - ## Additional HTTP headers - # http_headers = {"X-Scope-OrgID" = "1"} - - ## If the request must be gzip encoded - # gzip_request = false - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" -` - type Loki struct { Domain string `toml:"domain"` Endpoint string `toml:"endpoint"` @@ -70,14 +44,6 @@ type Loki struct { tls.ClientConfig } -func (l *Loki) SampleConfig() string { - return sampleConfig -} - -func (l *Loki) Description() string { - return "Send logs to Loki" -} - func (l *Loki) createClient(ctx context.Context) (*http.Client, error) { tlsCfg, err := l.ClientConfig.TLSConfig() if err != nil { diff --git a/plugins/outputs/loki/loki_sample_config.go b/plugins/outputs/loki/loki_sample_config.go new file mode 100644 index 000000000..f0ab4151c --- /dev/null +++ b/plugins/outputs/loki/loki_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package loki + +func (l *Loki) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/mongodb/mongodb.go b/plugins/outputs/mongodb/mongodb.go index 0540e7d10..153a9526d 100644 --- a/plugins/outputs/mongodb/mongodb.go +++ b/plugins/outputs/mongodb/mongodb.go @@ -61,49 +61,6 @@ type MongoDB struct { tls.ClientConfig } -func (s *MongoDB) Description() string { - return "Sends metrics to MongoDB" -} - -var sampleConfig = ` - # connection string examples for mongodb - dsn = "mongodb://localhost:27017" - # dsn = "mongodb://mongod1:27017,mongod2:27017,mongod3:27017/admin&replicaSet=myReplSet&w=1" - - # overrides serverSelectionTimeoutMS in dsn if set - # timeout = "30s" - - # default authentication, optional - # authentication = 
"NONE" - - # for SCRAM-SHA-256 authentication - # authentication = "SCRAM" - # username = "root" - # password = "***" - - # for x509 certificate authentication - # authentication = "X509" - # tls_ca = "ca.pem" - # tls_key = "client.pem" - # # tls_key_pwd = "changeme" # required for encrypted tls_key - # insecure_skip_verify = false - - # database to store measurements and time series collections - # database = "telegraf" - - # granularity can be seconds, minutes, or hours. - # configuring this value will be based on your input collection frequency. - # see https://docs.mongodb.com/manual/core/timeseries-collections/#create-a-time-series-collection - # granularity = "seconds" - - # optionally set a TTL to automatically expire documents from the measurement collections. - # ttl = "360h" -` - -func (s *MongoDB) SampleConfig() string { - return sampleConfig -} - func (s *MongoDB) Init() error { if s.MetricDatabase == "" { s.MetricDatabase = "telegraf" diff --git a/plugins/outputs/mongodb/mongodb_sample_config.go b/plugins/outputs/mongodb/mongodb_sample_config.go new file mode 100644 index 000000000..feea34f2a --- /dev/null +++ b/plugins/outputs/mongodb/mongodb_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package mongodb + +func (s *MongoDB) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/mqtt/README.md b/plugins/outputs/mqtt/README.md index 426d61349..d67a14a1e 100644 --- a/plugins/outputs/mqtt/README.md +++ b/plugins/outputs/mqtt/README.md @@ -14,6 +14,7 @@ As a reference `eclipse/paho.mqtt.golang` sets the `keep_alive` to 30. 
## Configuration ```toml +# Configuration for MQTT server to send metrics to [[outputs.mqtt]] ## MQTT Brokers ## The list of brokers should only include the hostname or IP address and the diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index f3f4d17a0..35ede5e30 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -20,69 +20,6 @@ const ( defaultKeepAlive = 0 ) -var sampleConfig = ` - ## MQTT Brokers - ## The list of brokers should only include the hostname or IP address and the - ## port to the broker. This should follow the format '{host}:{port}'. For - ## example, "localhost:1883" or "127.0.0.1:8883". - servers = ["localhost:1883"] - - ## MQTT Topic for Producer Messages - ## MQTT outputs send metrics to this topic format: - ## /// (e.g. prefix/web01.example.com/mem) - topic_prefix = "telegraf" - - ## QoS policy for messages - ## The mqtt QoS policy for sending messages. - ## See https://www.ibm.com/support/knowledgecenter/en/SSFKSJ_9.0.0/com.ibm.mq.dev.doc/q029090_.htm - ## 0 = at most once - ## 1 = at least once - ## 2 = exactly once - # qos = 2 - - ## Keep Alive - ## Defines the maximum length of time that the broker and client may not - ## communicate. Defaults to 0 which turns the feature off. - ## - ## For version v2.0.12 and later mosquitto there is a bug - ## (see https://github.com/eclipse/mosquitto/issues/2117), which requires - ## this to be non-zero. As a reference eclipse/paho.mqtt.golang defaults to 30. - # keep_alive = 0 - - ## username and password to connect MQTT server. - # username = "telegraf" - # password = "metricsmetricsmetricsmetrics" - - ## client ID - ## The unique client id to connect MQTT server. If this parameter is not set - ## then a random ID is generated. - # client_id = "" - - ## Timeout for write operations. 
default: 5s - # timeout = "5s" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## When true, metrics will be sent in one MQTT message per flush. Otherwise, - ## metrics are written one metric per MQTT message. - # batch = false - - ## When true, metric will have RETAIN flag set, making broker cache entries until someone - ## actually reads it - # retain = false - - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - data_format = "influx" -` - type MQTT struct { Servers []string `toml:"servers"` Username string @@ -138,14 +75,6 @@ func (m *MQTT) Close() error { return nil } -func (m *MQTT) SampleConfig() string { - return sampleConfig -} - -func (m *MQTT) Description() string { - return "Configuration for MQTT server to send metrics to" -} - func (m *MQTT) Write(metrics []telegraf.Metric) error { m.Lock() defer m.Unlock() diff --git a/plugins/outputs/mqtt/mqtt_sample_config.go b/plugins/outputs/mqtt/mqtt_sample_config.go new file mode 100644 index 000000000..04fbfa6ae --- /dev/null +++ b/plugins/outputs/mqtt/mqtt_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package mqtt + +func (m *MQTT) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/nats/README.md b/plugins/outputs/nats/README.md index 1fb1a2b4b..af918442d 100644 --- a/plugins/outputs/nats/README.md +++ b/plugins/outputs/nats/README.md @@ -2,7 +2,10 @@ This plugin writes to a (list of) specified NATS instance(s). 
+## Configuration + ```toml +# Send telegraf measurements to NATS [[outputs.nats]] ## URLs of NATS servers servers = ["nats://localhost:4222"] diff --git a/plugins/outputs/nats/nats.go b/plugins/outputs/nats/nats.go index 9f7780eea..7b71ab2ae 100644 --- a/plugins/outputs/nats/nats.go +++ b/plugins/outputs/nats/nats.go @@ -29,40 +29,6 @@ type NATS struct { serializer serializers.Serializer } -var sampleConfig = ` - ## URLs of NATS servers - servers = ["nats://localhost:4222"] - - ## Optional client name - # name = "" - - ## Optional credentials - # username = "" - # password = "" - - ## Optional NATS 2.0 and NATS NGS compatible user credentials - # credentials = "/etc/telegraf/nats.creds" - - ## NATS subject for producer messages - subject = "telegraf" - - ## Use Transport Layer Security - # secure = false - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## Data format to output. 
- ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - data_format = "influx" -` - func (n *NATS) SetSerializer(serializer serializers.Serializer) { n.serializer = serializer } @@ -107,14 +73,6 @@ func (n *NATS) Close() error { return nil } -func (n *NATS) SampleConfig() string { - return sampleConfig -} - -func (n *NATS) Description() string { - return "Send telegraf measurements to NATS" -} - func (n *NATS) Write(metrics []telegraf.Metric) error { if len(metrics) == 0 { return nil diff --git a/plugins/outputs/nats/nats_sample_config.go b/plugins/outputs/nats/nats_sample_config.go new file mode 100644 index 000000000..4b404d97b --- /dev/null +++ b/plugins/outputs/nats/nats_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package nats + +func (n *NATS) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/newrelic/README.md b/plugins/outputs/newrelic/README.md index 800b12e7a..a61020891 100644 --- a/plugins/outputs/newrelic/README.md +++ b/plugins/outputs/newrelic/README.md @@ -9,12 +9,13 @@ Telegraf minimum version: Telegraf 1.15.0 ## Configuration ```toml +# Send metrics to New Relic metrics endpoint [[outputs.newrelic]] ## The 'insights_key' parameter requires a NR license key. ## New Relic recommends you create one ## with a convenient name such as TELEGRAF_INSERT_KEY. ## reference: https://docs.newrelic.com/docs/apis/intro-apis/new-relic-api-keys/#ingest-license-key - insights_key = "New Relic License Key Here" + # insights_key = "New Relic License Key Here" ## Prefix to add to add to metric name for easy identification. ## This is very useful if your metric names are ambiguous. 
diff --git a/plugins/outputs/newrelic/newrelic.go b/plugins/outputs/newrelic/newrelic.go index 5845b4b8d..0b0125327 100644 --- a/plugins/outputs/newrelic/newrelic.go +++ b/plugins/outputs/newrelic/newrelic.go @@ -31,37 +31,6 @@ type NewRelic struct { client http.Client } -// Description returns a one-sentence description on the Output -func (nr *NewRelic) Description() string { - return "Send metrics to New Relic metrics endpoint" -} - -// SampleConfig : return default configuration of the Output -func (nr *NewRelic) SampleConfig() string { - return ` - ## The 'insights_key' parameter requires a NR license key. - ## New Relic recommends you create one - ## with a convenient name such as TELEGRAF_INSERT_KEY. - ## reference: https://docs.newrelic.com/docs/apis/intro-apis/new-relic-api-keys/#ingest-license-key - # insights_key = "New Relic License Key Here" - - ## Prefix to add to add to metric name for easy identification. - ## This is very useful if your metric names are ambiguous. - # metric_prefix = "" - - ## Timeout for writes to the New Relic API. - # timeout = "15s" - - ## HTTP Proxy override. If unset use values from the standard - ## proxy environment variables to determine proxy, if any. - # http_proxy = "http://corporate.proxy:3128" - - ## Metric URL override to enable geographic location endpoints. 
- # If not set use values from the standard - # metric_url = "https://metric-api.newrelic.com/metric/v1" -` -} - // Connect to the Output func (nr *NewRelic) Connect() error { if nr.InsightsKey == "" { diff --git a/plugins/outputs/newrelic/newrelic_sample_config.go b/plugins/outputs/newrelic/newrelic_sample_config.go new file mode 100644 index 000000000..cbfb83833 --- /dev/null +++ b/plugins/outputs/newrelic/newrelic_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package newrelic + +func (nr *NewRelic) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/nsq/README.md b/plugins/outputs/nsq/README.md index bf5958d32..923f3f3c3 100644 --- a/plugins/outputs/nsq/README.md +++ b/plugins/outputs/nsq/README.md @@ -2,3 +2,20 @@ This plugin writes to a specified NSQD instance, usually local to the producer. It requires a `server` name and a `topic` name. + +## Configuration + +```toml +# Send telegraf measurements to NSQD +[[outputs.nsq]] + ## Location of nsqd instance listening on TCP + server = "localhost:4150" + ## NSQ topic for producer messages + topic = "telegraf" + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "influx" +``` diff --git a/plugins/outputs/nsq/nsq.go b/plugins/outputs/nsq/nsq.go index 6d719d0a0..3b188f0e3 100644 --- a/plugins/outputs/nsq/nsq.go +++ b/plugins/outputs/nsq/nsq.go @@ -19,19 +19,6 @@ type NSQ struct { serializer serializers.Serializer } -var sampleConfig = ` - ## Location of nsqd instance listening on TCP - server = "localhost:4150" - ## NSQ topic for producer messages - topic = "telegraf" - - ## Data format to output. 
- ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - data_format = "influx" -` - func (n *NSQ) SetSerializer(serializer serializers.Serializer) { n.serializer = serializer } @@ -53,14 +40,6 @@ func (n *NSQ) Close() error { return nil } -func (n *NSQ) SampleConfig() string { - return sampleConfig -} - -func (n *NSQ) Description() string { - return "Send telegraf measurements to NSQD" -} - func (n *NSQ) Write(metrics []telegraf.Metric) error { if len(metrics) == 0 { return nil diff --git a/plugins/outputs/nsq/nsq_sample_config.go b/plugins/outputs/nsq/nsq_sample_config.go new file mode 100644 index 000000000..75efaba5a --- /dev/null +++ b/plugins/outputs/nsq/nsq_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package nsq + +func (n *NSQ) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/opentelemetry/README.md b/plugins/outputs/opentelemetry/README.md index 135540190..d7c88b7b3 100644 --- a/plugins/outputs/opentelemetry/README.md +++ b/plugins/outputs/opentelemetry/README.md @@ -5,6 +5,7 @@ This plugin sends metrics to [OpenTelemetry](https://opentelemetry.io) servers a ## Configuration ```toml +# Send OpenTelemetry metrics over gRPC [[outputs.opentelemetry]] ## Override the default (localhost:4317) OpenTelemetry gRPC service ## address:port diff --git a/plugins/outputs/opentelemetry/opentelemetry.go b/plugins/outputs/opentelemetry/opentelemetry.go index 7cfe1341b..0f8f29ee8 100644 --- a/plugins/outputs/opentelemetry/opentelemetry.go +++ b/plugins/outputs/opentelemetry/opentelemetry.go @@ -13,6 +13,7 @@ import ( "go.opentelemetry.io/collector/model/otlpgrpc" "google.golang.org/grpc" 
"google.golang.org/grpc/credentials" + // This causes the gRPC library to register gzip compression. _ "google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc/metadata" @@ -35,48 +36,6 @@ type OpenTelemetry struct { callOptions []grpc.CallOption } -const sampleConfig = ` - ## Override the default (localhost:4317) OpenTelemetry gRPC service - ## address:port - # service_address = "localhost:4317" - - ## Override the default (5s) request timeout - # timeout = "5s" - - ## Optional TLS Config. - ## - ## Root certificates for verifying server certificates encoded in PEM format. - # tls_ca = "/etc/telegraf/ca.pem" - ## The public and private keypairs for the client encoded in PEM format. - ## May contain intermediate certificates. - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS, but skip TLS chain and host verification. - # insecure_skip_verify = false - ## Send the specified TLS server name via SNI. - # tls_server_name = "foo.example.com" - - ## Override the default (gzip) compression used to send data. 
- ## Supports: "gzip", "none" - # compression = "gzip" - - ## Additional OpenTelemetry resource attributes - # [outputs.opentelemetry.attributes] - # "service.name" = "demo" - - ## Additional gRPC request metadata - # [outputs.opentelemetry.headers] - # key1 = "value1" -` - -func (o *OpenTelemetry) SampleConfig() string { - return sampleConfig -} - -func (o *OpenTelemetry) Description() string { - return "Send OpenTelemetry metrics over gRPC" -} - func (o *OpenTelemetry) Connect() error { logger := &otelLogger{o.Log} diff --git a/plugins/outputs/opentelemetry/opentelemetry_sample_config.go b/plugins/outputs/opentelemetry/opentelemetry_sample_config.go new file mode 100644 index 000000000..82380c71d --- /dev/null +++ b/plugins/outputs/opentelemetry/opentelemetry_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package opentelemetry + +func (o *OpenTelemetry) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/opentsdb/README.md b/plugins/outputs/opentsdb/README.md index b89c6c8a5..4c1bf0e89 100644 --- a/plugins/outputs/opentsdb/README.md +++ b/plugins/outputs/opentsdb/README.md @@ -8,6 +8,37 @@ metrics is sent in each http request by setting batchSize in config. See [the docs](http://opentsdb.net/docs/build/html/api_http/put.html) for details. +## Configuration + +```toml +# Configuration for OpenTSDB server to send metrics to +[[outputs.opentsdb]] + ## prefix for metrics keys + prefix = "my.specific.prefix." + + ## DNS name of the OpenTSDB server + ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the + ## telnet API. "http://opentsdb.example.com" will use the Http API. 
+ host = "opentsdb.example.com" + + ## Port of the OpenTSDB server + port = 4242 + + ## Number of data points to send to OpenTSDB in Http requests. + ## Not used with telnet API. + http_batch_size = 50 + + ## URI Path for Http requests to OpenTSDB. + ## Used in cases where OpenTSDB is located behind a reverse proxy. + http_path = "/api/put" + + ## Debug true - Prints OpenTSDB communication + debug = false + + ## Separator separates measurement name from field + separator = "_" +``` + ## Transfer "Protocol" in the telnet mode The expected input from OpenTSDB is specified in the following way: diff --git a/plugins/outputs/opentsdb/opentsdb.go b/plugins/outputs/opentsdb/opentsdb.go index b73a6480b..efde7a6a1 100644 --- a/plugins/outputs/opentsdb/opentsdb.go +++ b/plugins/outputs/opentsdb/opentsdb.go @@ -42,33 +42,6 @@ type OpenTSDB struct { Log telegraf.Logger `toml:"-"` } -var sampleConfig = ` - ## prefix for metrics keys - prefix = "my.specific.prefix." - - ## DNS name of the OpenTSDB server - ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the - ## telnet API. "http://opentsdb.example.com" will use the Http API. - host = "opentsdb.example.com" - - ## Port of the OpenTSDB server - port = 4242 - - ## Number of data points to send to OpenTSDB in Http requests. - ## Not used with telnet API. - http_batch_size = 50 - - ## URI Path for Http requests to OpenTSDB. - ## Used in cases where OpenTSDB is located behind a reverse proxy. 
- http_path = "/api/put" - - ## Debug true - Prints OpenTSDB communication - debug = false - - ## Separator separates measurement name from field - separator = "_" -` - func ToLineFormat(tags map[string]string) string { tagsArray := make([]string, len(tags)) index := 0 @@ -254,14 +227,6 @@ func FloatToString(inputNum float64) string { return strconv.FormatFloat(inputNum, 'f', 6, 64) } -func (o *OpenTSDB) SampleConfig() string { - return sampleConfig -} - -func (o *OpenTSDB) Description() string { - return "Configuration for OpenTSDB server to send metrics to" -} - func (o *OpenTSDB) Close() error { return nil } diff --git a/plugins/outputs/opentsdb/opentsdb_sample_config.go b/plugins/outputs/opentsdb/opentsdb_sample_config.go new file mode 100644 index 000000000..1f185ea10 --- /dev/null +++ b/plugins/outputs/opentsdb/opentsdb_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package opentsdb + +func (o *OpenTSDB) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/prometheus_client/README.md b/plugins/outputs/prometheus_client/README.md index 085fc4649..594f5a359 100644 --- a/plugins/outputs/prometheus_client/README.md +++ b/plugins/outputs/prometheus_client/README.md @@ -6,6 +6,7 @@ all metrics on `/metrics` (default) to be polled by a Prometheus server. ## Configuration ```toml +# Configuration for the Prometheus client to spawn [[outputs.prometheus_client]] ## Address to listen on. 
listen = ":9273" diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index 795163b4f..257049001 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -1,4 +1,4 @@ -package prometheus +package prometheus_client import ( "context" @@ -19,8 +19,8 @@ import ( "github.com/influxdata/telegraf/internal" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/outputs/prometheus_client/v1" - "github.com/influxdata/telegraf/plugins/outputs/prometheus_client/v2" + v1 "github.com/influxdata/telegraf/plugins/outputs/prometheus_client/v1" + v2 "github.com/influxdata/telegraf/plugins/outputs/prometheus_client/v2" ) var ( @@ -29,52 +29,6 @@ var ( defaultExpirationInterval = config.Duration(60 * time.Second) ) -var sampleConfig = ` - ## Address to listen on - listen = ":9273" - - ## Metric version controls the mapping from Telegraf metrics into - ## Prometheus format. When using the prometheus input, use the same value in - ## both plugins to ensure metrics are round-tripped without modification. - ## - ## example: metric_version = 1; - ## metric_version = 2; recommended version - # metric_version = 1 - - ## Use HTTP Basic Authentication. - # basic_username = "Foo" - # basic_password = "Bar" - - ## If set, the IP Ranges which are allowed to access metrics. - ## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"] - # ip_range = [] - - ## Path to publish the metrics on. - # path = "/metrics" - - ## Expiration interval for each metric. 0 == no expiration - # expiration_interval = "60s" - - ## Collectors to enable, valid entries are "gocollector" and "process". - ## If unset, both are enabled. - # collectors_exclude = ["gocollector", "process"] - - ## Send string metrics as Prometheus labels. 
- ## Unless set to false all string metrics will be sent as labels. - # string_as_label = true - - ## If set, enable TLS with the given certificate. - # tls_cert = "/etc/ssl/telegraf.crt" - # tls_key = "/etc/ssl/telegraf.key" - - ## Set one or more allowed client CA certificate file names to - ## enable mutually authenticated TLS connections - # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] - - ## Export metric collection time. - # export_timestamp = false -` - type Collector interface { Describe(ch chan<- *prometheus.Desc) Collect(ch chan<- prometheus.Metric) @@ -102,14 +56,6 @@ type PrometheusClient struct { wg sync.WaitGroup } -func (p *PrometheusClient) Description() string { - return "Configuration for the Prometheus client to spawn" -} - -func (p *PrometheusClient) SampleConfig() string { - return sampleConfig -} - func (p *PrometheusClient) Init() error { defaultCollectors := map[string]bool{ "gocollector": true, diff --git a/plugins/outputs/prometheus_client/prometheus_client_sample_config.go b/plugins/outputs/prometheus_client/prometheus_client_sample_config.go new file mode 100644 index 000000000..6786d7b21 --- /dev/null +++ b/plugins/outputs/prometheus_client/prometheus_client_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package prometheus_client + +func (p *PrometheusClient) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/prometheus_client/prometheus_client_v1_test.go b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go index 7a93a9bc5..5bc795fcd 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_v1_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go @@ -1,4 +1,4 @@ -package prometheus +package prometheus_client import ( "fmt" diff --git 
a/plugins/outputs/prometheus_client/prometheus_client_v2_test.go b/plugins/outputs/prometheus_client/prometheus_client_v2_test.go index 2096caf6d..16b2ada69 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_v2_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_v2_test.go @@ -1,4 +1,4 @@ -package prometheus +package prometheus_client import ( "fmt" diff --git a/plugins/outputs/riemann/riemann.go b/plugins/outputs/riemann/riemann.go index bfcc1e337..ed384aaaa 100644 --- a/plugins/outputs/riemann/riemann.go +++ b/plugins/outputs/riemann/riemann.go @@ -30,39 +30,6 @@ type Riemann struct { client *raidman.Client } -var sampleConfig = ` - ## The full TCP or UDP URL of the Riemann server - url = "tcp://localhost:5555" - - ## Riemann event TTL, floating-point time in seconds. - ## Defines how long that an event is considered valid for in Riemann - # ttl = 30.0 - - ## Separator to use between measurement and field name in Riemann service name - ## This does not have any effect if 'measurement_as_attribute' is set to 'true' - separator = "/" - - ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name - # measurement_as_attribute = false - - ## Send string metrics as Riemann event states. - ## Unless enabled all string metrics will be ignored - # string_as_state = false - - ## A list of tag keys whose values get sent as Riemann tags. - ## If empty, all Telegraf tag values will be sent as tags - # tag_keys = ["telegraf","custom_tag"] - - ## Additional Riemann tags to send. - # tags = ["telegraf-output"] - - ## Description for Riemann event - # description_text = "metrics collected from telegraf" - - ## Riemann client write timeout, defaults to "5s" if not set. 
- # timeout = "5s" -` - func (r *Riemann) Connect() error { parsedURL, err := url.Parse(r.URL) if err != nil { @@ -87,14 +54,6 @@ func (r *Riemann) Close() (err error) { return err } -func (r *Riemann) SampleConfig() string { - return sampleConfig -} - -func (r *Riemann) Description() string { - return "Configuration for the Riemann server to send metrics to" -} - func (r *Riemann) Write(metrics []telegraf.Metric) error { if len(metrics) == 0 { return nil diff --git a/plugins/outputs/riemann/riemann_sample_config.go b/plugins/outputs/riemann/riemann_sample_config.go new file mode 100644 index 000000000..834fa0312 --- /dev/null +++ b/plugins/outputs/riemann/riemann_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package riemann + +func (r *Riemann) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/riemann_legacy/README.md b/plugins/outputs/riemann_legacy/README.md new file mode 100644 index 000000000..7a239a11b --- /dev/null +++ b/plugins/outputs/riemann_legacy/README.md @@ -0,0 +1,16 @@ +# Riemann Legacy + +This is a deprecated plugin + +## Configuration + +```toml +# Configuration for the Riemann server to send metrics to +[[outputs.riemann_legacy]] + ## URL of server + url = "localhost:5555" + ## transport protocol to use either tcp or udp + transport = "tcp" + ## separator to use between input name and field name in Riemann service name + separator = " " +``` diff --git a/plugins/outputs/riemann_legacy/riemann.go b/plugins/outputs/riemann_legacy/riemann_legacy.go similarity index 87% rename from plugins/outputs/riemann_legacy/riemann.go rename to plugins/outputs/riemann_legacy/riemann_legacy.go index 0bd0f6b87..13cf34dd8 100644 --- a/plugins/outputs/riemann_legacy/riemann.go +++ b/plugins/outputs/riemann_legacy/riemann_legacy.go 
@@ -23,15 +23,6 @@ type Riemann struct { client *raidman.Client } -var sampleConfig = ` - ## URL of server - url = "localhost:5555" - ## transport protocol to use either tcp or udp - transport = "tcp" - ## separator to use between input name and field name in Riemann service name - separator = " " -` - func (r *Riemann) Connect() error { r.Log.Error(deprecationMsg) c, err := raidman.Dial(r.Transport, r.URL) @@ -54,14 +45,6 @@ func (r *Riemann) Close() error { return err } -func (r *Riemann) SampleConfig() string { - return sampleConfig -} - -func (r *Riemann) Description() string { - return "Configuration for the Riemann server to send metrics to" -} - func (r *Riemann) Write(metrics []telegraf.Metric) error { r.Log.Error(deprecationMsg) if len(metrics) == 0 { diff --git a/plugins/outputs/riemann_legacy/riemann_legacy_sample_config.go b/plugins/outputs/riemann_legacy/riemann_legacy_sample_config.go new file mode 100644 index 000000000..ea6ab9e93 --- /dev/null +++ b/plugins/outputs/riemann_legacy/riemann_legacy_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package riemann_legacy + +func (r *Riemann) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/riemann_legacy/riemann_test.go b/plugins/outputs/riemann_legacy/riemann_legacy_test.go similarity index 100% rename from plugins/outputs/riemann_legacy/riemann_test.go rename to plugins/outputs/riemann_legacy/riemann_legacy_test.go diff --git a/plugins/outputs/sensu/README.md b/plugins/outputs/sensu/README.md index 3d6c7d53d..7376b440e 100644 --- a/plugins/outputs/sensu/README.md +++ b/plugins/outputs/sensu/README.md @@ -6,6 +6,7 @@ HTTP events API. 
## Configuration ```toml +# Send aggregate metrics to Sensu Monitor [[outputs.sensu]] ## BACKEND API URL is the Sensu Backend API root URL to send metrics to ## (protocol, host, and port only). The output plugin will automatically diff --git a/plugins/outputs/sensu/sensu.go b/plugins/outputs/sensu/sensu.go index b1a937209..ab14aac84 100644 --- a/plugins/outputs/sensu/sensu.go +++ b/plugins/outputs/sensu/sensu.go @@ -101,106 +101,6 @@ type Sensu struct { client *http.Client } -var sampleConfig = ` - ## BACKEND API URL is the Sensu Backend API root URL to send metrics to - ## (protocol, host, and port only). The output plugin will automatically - ## append the corresponding backend API path - ## /api/core/v2/namespaces/:entity_namespace/events/:entity_name/:check_name). - ## - ## Backend Events API reference: - ## https://docs.sensu.io/sensu-go/latest/api/events/ - ## - ## AGENT API URL is the Sensu Agent API root URL to send metrics to - ## (protocol, host, and port only). The output plugin will automatically - ## append the correspeonding agent API path (/events). - ## - ## Agent API Events API reference: - ## https://docs.sensu.io/sensu-go/latest/api/events/ - ## - ## NOTE: if backend_api_url and agent_api_url and api_key are set, the output - ## plugin will use backend_api_url. 
If backend_api_url and agent_api_url are - ## not provided, the output plugin will default to use an agent_api_url of - ## http://127.0.0.1:3031 - ## - # backend_api_url = "http://127.0.0.1:8080" - # agent_api_url = "http://127.0.0.1:3031" - - ## API KEY is the Sensu Backend API token - ## Generate a new API token via: - ## - ## $ sensuctl cluster-role create telegraf --verb create --resource events,entities - ## $ sensuctl cluster-role-binding create telegraf --cluster-role telegraf --group telegraf - ## $ sensuctl user create telegraf --group telegraf --password REDACTED - ## $ sensuctl api-key grant telegraf - ## - ## For more information on Sensu RBAC profiles & API tokens, please visit: - ## - https://docs.sensu.io/sensu-go/latest/reference/rbac/ - ## - https://docs.sensu.io/sensu-go/latest/reference/apikeys/ - ## - # api_key = "${SENSU_API_KEY}" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## Timeout for HTTP message - # timeout = "5s" - - ## HTTP Content-Encoding for write request body, can be set to "gzip" to - ## compress body or "identity" to apply no encoding. - # content_encoding = "identity" - - ## Sensu Event details - ## - ## Below are the event details to be sent to Sensu. The main portions of the - ## event are the check, entity, and metrics specifications. For more information - ## on Sensu events and its components, please visit: - ## - Events - https://docs.sensu.io/sensu-go/latest/reference/events - ## - Checks - https://docs.sensu.io/sensu-go/latest/reference/checks - ## - Entities - https://docs.sensu.io/sensu-go/latest/reference/entities - ## - Metrics - https://docs.sensu.io/sensu-go/latest/reference/events#metrics - ## - ## Check specification - ## The check name is the name to give the Sensu check associated with the event - ## created. 
This maps to check.metatadata.name in the event. - [outputs.sensu.check] - name = "telegraf" - - ## Entity specification - ## Configure the entity name and namespace, if necessary. This will be part of - ## the entity.metadata in the event. - ## - ## NOTE: if the output plugin is configured to send events to a - ## backend_api_url and entity_name is not set, the value returned by - ## os.Hostname() will be used; if the output plugin is configured to send - ## events to an agent_api_url, entity_name and entity_namespace are not used. - # [outputs.sensu.entity] - # name = "server-01" - # namespace = "default" - - ## Metrics specification - ## Configure the tags for the metrics that are sent as part of the Sensu event - # [outputs.sensu.tags] - # source = "telegraf" - - ## Configure the handler(s) for processing the provided metrics - # [outputs.sensu.metrics] - # handlers = ["influxdb","elasticsearch"] -` - -// Description provides a description of the plugin -func (s *Sensu) Description() string { - return "Send aggregate metrics to Sensu Monitor" -} - -// SampleConfig provides a sample configuration for the plugin -func (s *Sensu) SampleConfig() string { - return sampleConfig -} - func (s *Sensu) createClient() (*http.Client, error) { tlsCfg, err := s.ClientConfig.TLSConfig() if err != nil { diff --git a/plugins/outputs/sensu/sensu_sample_config.go b/plugins/outputs/sensu/sensu_sample_config.go new file mode 100644 index 000000000..bf91158d0 --- /dev/null +++ b/plugins/outputs/sensu/sensu_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package sensu + +func (s *Sensu) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/signalfx/README.md b/plugins/outputs/signalfx/README.md index 09b7f41db..2406d5ce9 100644 --- 
a/plugins/outputs/signalfx/README.md +++ b/plugins/outputs/signalfx/README.md @@ -5,6 +5,7 @@ The SignalFx output plugin sends metrics to [SignalFx](https://docs.signalfx.com ## Configuration ```toml +# Send metrics and events to SignalFx [[outputs.signalfx]] ## SignalFx Org Access Token access_token = "my-secret-token" diff --git a/plugins/outputs/signalfx/signalfx.go b/plugins/outputs/signalfx/signalfx.go index b7550ae5b..b5ff8d8cd 100644 --- a/plugins/outputs/signalfx/signalfx.go +++ b/plugins/outputs/signalfx/signalfx.go @@ -38,24 +38,6 @@ type SignalFx struct { cancel context.CancelFunc } -var sampleConfig = ` - ## SignalFx Org Access Token - access_token = "my-secret-token" - - ## The SignalFx realm that your organization resides in - signalfx_realm = "us9" # Required if ingest_url is not set - - ## You can optionally provide a custom ingest url instead of the - ## signalfx_realm option above if you are using a gateway or proxy - ## instance. This option takes precident over signalfx_realm. - ingest_url = "https://my-custom-ingest/" - - ## Event typed metrics are omitted by default, - ## If you require an event typed metric you must specify the - ## metric name in the following list. 
- included_event_names = ["plugin.metric_name"] -` - // GetMetricType returns the equivalent telegraf ValueType for a signalfx metric type func GetMetricType(mtype telegraf.ValueType) (metricType datapoint.MetricType) { switch mtype { @@ -89,16 +71,6 @@ func NewSignalFx() *SignalFx { } } -// Description returns a description for the plugin -func (s *SignalFx) Description() string { - return "Send metrics and events to SignalFx" -} - -// SampleConfig returns the sample configuration for the plugin -func (s *SignalFx) SampleConfig() string { - return sampleConfig -} - // Connect establishes a connection to SignalFx func (s *SignalFx) Connect() error { client := s.client.(*sfxclient.HTTPSink) diff --git a/plugins/outputs/signalfx/signalfx_sample_config.go b/plugins/outputs/signalfx/signalfx_sample_config.go new file mode 100644 index 000000000..34a09107e --- /dev/null +++ b/plugins/outputs/signalfx/signalfx_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package signalfx + +func (s *SignalFx) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/signalfx/signalfx_test.go b/plugins/outputs/signalfx/signalfx_test.go index 8d69aace1..936de9b26 100644 --- a/plugins/outputs/signalfx/signalfx_test.go +++ b/plugins/outputs/signalfx/signalfx_test.go @@ -614,48 +614,6 @@ func TestSignalFx_Errors(t *testing.T) { } } -// this is really just for complete code coverage -func TestSignalFx_Description(t *testing.T) { - tests := []struct { - name string - want string - }{ - { - name: "verify description is correct", - want: "Send metrics and events to SignalFx", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &SignalFx{} - if got := s.Description(); got != tt.want { - t.Errorf("SignalFx.Description() = %v, want %v", 
got, tt.want) - } - }) - } -} - -// this is also just for complete code coverage -func TestSignalFx_SampleConfig(t *testing.T) { - tests := []struct { - name string - want string - }{ - { - name: "verify sample config is returned", - want: sampleConfig, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &SignalFx{} - if got := s.SampleConfig(); got != tt.want { - t.Errorf("SignalFx.SampleConfig() = %v, want %v", got, tt.want) - } - }) - } -} - func TestGetMetricName(t *testing.T) { type args struct { metric string diff --git a/plugins/outputs/socket_writer/README.md b/plugins/outputs/socket_writer/README.md index 5dc9d0246..05a1884e6 100644 --- a/plugins/outputs/socket_writer/README.md +++ b/plugins/outputs/socket_writer/README.md @@ -4,6 +4,8 @@ The socket_writer plugin can write to a UDP, TCP, or unix socket. It can output data in any of the [supported output formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md). +## Configuration + ```toml # Generic socket writer capable of handling multiple socket types. [[outputs.socket_writer]] diff --git a/plugins/outputs/socket_writer/socket_writer.go b/plugins/outputs/socket_writer/socket_writer.go index 130a0f738..016b9f327 100644 --- a/plugins/outputs/socket_writer/socket_writer.go +++ b/plugins/outputs/socket_writer/socket_writer.go @@ -29,50 +29,6 @@ type SocketWriter struct { net.Conn } -func (sw *SocketWriter) Description() string { - return "Generic socket writer capable of handling multiple socket types." 
-} - -func (sw *SocketWriter) SampleConfig() string { - return ` - ## URL to connect to - # address = "tcp://127.0.0.1:8094" - # address = "tcp://example.com:http" - # address = "tcp4://127.0.0.1:8094" - # address = "tcp6://127.0.0.1:8094" - # address = "tcp6://[2001:db8::1]:8094" - # address = "udp://127.0.0.1:8094" - # address = "udp4://127.0.0.1:8094" - # address = "udp6://127.0.0.1:8094" - # address = "unix:///tmp/telegraf.sock" - # address = "unixgram:///tmp/telegraf.sock" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## Period between keep alive probes. - ## Only applies to TCP sockets. - ## 0 disables keep alive probes. - ## Defaults to the OS configuration. - # keep_alive_period = "5m" - - ## Content encoding for packet-based connections (i.e. UDP, unixgram). - ## Can be set to "gzip" or to "identity" to apply no encoding. - ## - # content_encoding = "identity" - - ## Data format to generate. 
- ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - # data_format = "influx" -` -} - func (sw *SocketWriter) SetSerializer(s serializers.Serializer) { sw.Serializer = s } diff --git a/plugins/outputs/socket_writer/socket_writer_sample_config.go b/plugins/outputs/socket_writer/socket_writer_sample_config.go new file mode 100644 index 000000000..cfd503a0c --- /dev/null +++ b/plugins/outputs/socket_writer/socket_writer_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package socket_writer + +func (sw *SocketWriter) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/sql/sql.go b/plugins/outputs/sql/sql.go index 6f4f9b36c..4340f04c2 100644 --- a/plugins/outputs/sql/sql.go +++ b/plugins/outputs/sql/sql.go @@ -121,63 +121,6 @@ func (p *SQL) deriveDatatype(value interface{}) string { return datatype } -var sampleConfig = ` - ## Database driver - ## Valid options: mssql (Microsoft SQL Server), mysql (MySQL), pgx (Postgres), - ## sqlite (SQLite3), snowflake (snowflake.com) clickhouse (ClickHouse) - # driver = "" - - ## Data source name - ## The format of the data source name is different for each database driver. - ## See the plugin readme for details. 
- # data_source_name = "" - - ## Timestamp column name - # timestamp_column = "timestamp" - - ## Table creation template - ## Available template variables: - ## {TABLE} - table name as a quoted identifier - ## {TABLELITERAL} - table name as a quoted string literal - ## {COLUMNS} - column definitions (list of quoted identifiers and types) - # table_template = "CREATE TABLE {TABLE}({COLUMNS})" - - ## Table existence check template - ## Available template variables: - ## {TABLE} - tablename as a quoted identifier - # table_exists_template = "SELECT 1 FROM {TABLE} LIMIT 1" - - ## Initialization SQL - # init_sql = "" - - ## Metric type to SQL type conversion - ## The values on the left are the data types Telegraf has and the values on - ## the right are the data types Telegraf will use when sending to a database. - ## - ## The database values used must be data types the destination database - ## understands. It is up to the user to ensure that the selected data type is - ## available in the database they are using. Refer to your database - ## documentation for what data types are available and supported. - #[outputs.sql.convert] - # integer = "INT" - # real = "DOUBLE" - # text = "TEXT" - # timestamp = "TIMESTAMP" - # defaultvalue = "TEXT" - # unsigned = "UNSIGNED" - # bool = "BOOL" - - ## This setting controls the behavior of the unsigned value. By default the - ## setting will take the integer value and append the unsigned value to it. The other - ## option is "literal", which will use the actual value the user provides to - ## the unsigned option. This is useful for a database like ClickHouse where - ## the unsigned value should use a value like "uint64". 
- # conversion_style = "unsigned_suffix" -` - -func (p *SQL) SampleConfig() string { return sampleConfig } -func (p *SQL) Description() string { return "Send metrics to SQL Database" } - func (p *SQL) generateCreateTable(metric telegraf.Metric) string { var columns []string // ## {KEY_COLUMNS} is a comma-separated list of key columns (timestamp and tags) diff --git a/plugins/outputs/sql/sql_sample_config.go b/plugins/outputs/sql/sql_sample_config.go new file mode 100644 index 000000000..533a058fd --- /dev/null +++ b/plugins/outputs/sql/sql_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package sql + +func (p *SQL) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/stackdriver/README.md b/plugins/outputs/stackdriver/README.md index 1b074751e..e7b18a953 100644 --- a/plugins/outputs/stackdriver/README.md +++ b/plugins/outputs/stackdriver/README.md @@ -18,6 +18,7 @@ Additional resource labels can be configured by `resource_labels`. 
By default th ## Configuration ```toml +# Configuration for Google Cloud Stackdriver to send metrics to [[outputs.stackdriver]] ## GCP Project project = "erudite-bloom-151019" diff --git a/plugins/outputs/stackdriver/stackdriver.go b/plugins/outputs/stackdriver/stackdriver.go index 0c4a7f958..da5b9a31d 100644 --- a/plugins/outputs/stackdriver/stackdriver.go +++ b/plugins/outputs/stackdriver/stackdriver.go @@ -51,23 +51,6 @@ const ( errStringPointsTooFrequent = "one or more points were written more frequently than the maximum sampling period configured for the metric" ) -var sampleConfig = ` - ## GCP Project - project = "erudite-bloom-151019" - - ## The namespace for the metric descriptor - namespace = "telegraf" - - ## Custom resource type - # resource_type = "generic_node" - - ## Additional resource labels - # [outputs.stackdriver.resource_labels] - # node_id = "$HOSTNAME" - # namespace = "myapp" - # location = "eu-north0" -` - // Connect initiates the primary connection to the GCP project. func (s *Stackdriver) Connect() error { if s.Project == "" { @@ -378,16 +361,6 @@ func (s *Stackdriver) Close() error { return s.client.Close() } -// SampleConfig returns the formatted sample configuration for the plugin. -func (s *Stackdriver) SampleConfig() string { - return sampleConfig -} - -// Description returns the human-readable function definition of the plugin. 
-func (s *Stackdriver) Description() string { - return "Configuration for Google Cloud Stackdriver to send metrics to" -} - func newStackdriver() *Stackdriver { return &Stackdriver{} } diff --git a/plugins/outputs/stackdriver/stackdriver_sample_config.go b/plugins/outputs/stackdriver/stackdriver_sample_config.go new file mode 100644 index 000000000..5d3123e34 --- /dev/null +++ b/plugins/outputs/stackdriver/stackdriver_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package stackdriver + +func (s *Stackdriver) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/sumologic/sumologic.go b/plugins/outputs/sumologic/sumologic.go index 889a28bc2..1ac198787 100644 --- a/plugins/outputs/sumologic/sumologic.go +++ b/plugins/outputs/sumologic/sumologic.go @@ -19,58 +19,6 @@ import ( ) const ( - sampleConfig = ` - ## Unique URL generated for your HTTP Metrics Source. - ## This is the address to send metrics to. - # url = "https://events.sumologic.net/receiver/v1/http/" - - ## Data format to be used for sending metrics. - ## This will set the "Content-Type" header accordingly. - ## Currently supported formats: - ## * graphite - for Content-Type of application/vnd.sumologic.graphite - ## * carbon2 - for Content-Type of application/vnd.sumologic.carbon2 - ## * prometheus - for Content-Type of application/vnd.sumologic.prometheus - ## - ## More information can be found at: - ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#content-type-headers-for-metrics - ## - ## NOTE: - ## When unset, telegraf will by default use the influx serializer which is currently unsupported - ## in HTTP Source. 
- data_format = "carbon2" - - ## Timeout used for HTTP request - # timeout = "5s" - - ## Max HTTP request body size in bytes before compression (if applied). - ## By default 1MB is recommended. - ## NOTE: - ## Bear in mind that in some serializer a metric even though serialized to multiple - ## lines cannot be split any further so setting this very low might not work - ## as expected. - # max_request_body_size = 1000000 - - ## Additional, Sumo specific options. - ## Full list can be found here: - ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#supported-http-headers - - ## Desired source name. - ## Useful if you want to override the source name configured for the source. - # source_name = "" - - ## Desired host name. - ## Useful if you want to override the source host configured for the source. - # source_host = "" - - ## Desired source category. - ## Useful if you want to override the source category configured for the source. - # source_category = "" - - ## Comma-separated key=value list of dimensions to apply to every metric. - ## Custom dimensions will allow you to query your metrics at a more granular level. 
- # dimensions = "" -` - defaultClientTimeout = 5 * time.Second defaultMethod = http.MethodPost defaultMaxRequestBodySize = 1000000 @@ -164,14 +112,6 @@ func (s *SumoLogic) Close() error { return s.err } -func (s *SumoLogic) Description() string { - return "A plugin that can transmit metrics to Sumo Logic HTTP Source" -} - -func (s *SumoLogic) SampleConfig() string { - return sampleConfig -} - func (s *SumoLogic) Write(metrics []telegraf.Metric) error { if s.err != nil { return errors.Wrap(s.err, "sumologic: incorrect configuration") diff --git a/plugins/outputs/sumologic/sumologic_sample_config.go b/plugins/outputs/sumologic/sumologic_sample_config.go new file mode 100644 index 000000000..675e53b1f --- /dev/null +++ b/plugins/outputs/sumologic/sumologic_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package sumologic + +func (s *SumoLogic) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/syslog/README.md b/plugins/outputs/syslog/README.md index 7b2c480f3..b6f25b2a0 100644 --- a/plugins/outputs/syslog/README.md +++ b/plugins/outputs/syslog/README.md @@ -11,6 +11,7 @@ Syslog messages are formatted according to ## Configuration ```toml +# Configuration for Syslog server to send metrics to [[outputs.syslog]] ## URL to connect to ## ex: address = "tcp://127.0.0.1:8094" diff --git a/plugins/outputs/syslog/syslog.go b/plugins/outputs/syslog/syslog.go index 78308b6b0..ce2564b5e 100644 --- a/plugins/outputs/syslog/syslog.go +++ b/plugins/outputs/syslog/syslog.go @@ -35,83 +35,6 @@ type Syslog struct { mapper *SyslogMapper } -var sampleConfig = ` - ## URL to connect to - ## ex: address = "tcp://127.0.0.1:8094" - ## ex: address = "tcp4://127.0.0.1:8094" - ## ex: address = "tcp6://127.0.0.1:8094" - ## ex: address = 
"tcp6://[2001:db8::1]:8094" - ## ex: address = "udp://127.0.0.1:8094" - ## ex: address = "udp4://127.0.0.1:8094" - ## ex: address = "udp6://127.0.0.1:8094" - address = "tcp://127.0.0.1:8094" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## Period between keep alive probes. - ## Only applies to TCP sockets. - ## 0 disables keep alive probes. - ## Defaults to the OS configuration. - # keep_alive_period = "5m" - - ## The framing technique with which it is expected that messages are - ## transported (default = "octet-counting"). Whether the messages come - ## using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), - ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must - ## be one of "octet-counting", "non-transparent". - # framing = "octet-counting" - - ## The trailer to be expected in case of non-transparent framing (default = "LF"). - ## Must be one of "LF", or "NUL". - # trailer = "LF" - - ## SD-PARAMs settings - ## Syslog messages can contain key/value pairs within zero or more - ## structured data sections. For each unrecognized metric tag/field a - ## SD-PARAMS is created. - ## - ## Example: - ## [[outputs.syslog]] - ## sdparam_separator = "_" - ## default_sdid = "default@32473" - ## sdids = ["foo@123", "bar@456"] - ## - ## input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1 - ## output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y] - - ## SD-PARAMs separator between the sdid and tag/field key (default = "_") - # sdparam_separator = "_" - - ## Default sdid used for tags/fields that don't contain a prefix defined in - ## the explicit sdids setting below If no default is specified, no SD-PARAMs - ## will be used for unrecognized field. 
- # default_sdid = "default@32473" - - ## List of explicit prefixes to extract from tag/field keys and use as the - ## SDID, if they match (see above example for more details): - # sdids = ["foo@123", "bar@456"] - - ## Default severity value. Severity and Facility are used to calculate the - ## message PRI value (RFC5424#section-6.2.1). Used when no metric field - ## with key "severity_code" is defined. If unset, 5 (notice) is the default - # default_severity_code = 5 - - ## Default facility value. Facility and Severity are used to calculate the - ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with - ## key "facility_code" is defined. If unset, 1 (user-level) is the default - # default_facility_code = 1 - - ## Default APP-NAME value (RFC5424#section-6.2.5) - ## Used when no metric tag with key "appname" is defined. - ## If unset, "Telegraf" is the default - # default_appname = "Telegraf" -` - func (s *Syslog) Connect() error { s.initializeSyslogMapper() @@ -169,14 +92,6 @@ func (s *Syslog) Close() error { return err } -func (s *Syslog) SampleConfig() string { - return sampleConfig -} - -func (s *Syslog) Description() string { - return "Configuration for Syslog server to send metrics to" -} - func (s *Syslog) Write(metrics []telegraf.Metric) (err error) { if s.Conn == nil { // previous write failed with permanent error and socket was closed. 
diff --git a/plugins/outputs/syslog/syslog_sample_config.go b/plugins/outputs/syslog/syslog_sample_config.go new file mode 100644 index 000000000..002c39f0e --- /dev/null +++ b/plugins/outputs/syslog/syslog_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package syslog + +func (s *Syslog) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/timestream/README.md b/plugins/outputs/timestream/README.md index 6761ad4da..5f66664ea 100644 --- a/plugins/outputs/timestream/README.md +++ b/plugins/outputs/timestream/README.md @@ -118,6 +118,10 @@ The Timestream output plugin writes metrics to the [Amazon Timestream] service. ## Specifies the Timestream table tags. ## Check Timestream documentation for more details # create_table_tags = { "foo" = "bar", "environment" = "dev"} + + ## Specify the maximum number of parallel go routines to ingest/write data + ## If not specified, defaulted to 1 go routines + max_write_go_routines = 25 ``` ### Batching diff --git a/plugins/outputs/timestream/timestream.go b/plugins/outputs/timestream/timestream.go index 3f87bb1ee..267177418 100644 --- a/plugins/outputs/timestream/timestream.go +++ b/plugins/outputs/timestream/timestream.go @@ -61,124 +61,6 @@ const MaxRecordsPerCall = 100 // when max_write_go_routines is not specified in the config const MaxWriteRoutinesDefault = 1 -var sampleConfig = ` - ## Amazon Region - region = "us-east-1" - - ## Amazon Credentials - ## Credentials are loaded in the following order: - ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified - ## 2) Assumed credentials via STS if role_arn is specified - ## 3) explicit credentials from 'access_key' and 'secret_key' - ## 4) shared profile from 'profile' - ## 5) environment variables - 
## 6) shared credentials file - ## 7) EC2 Instance Profile - #access_key = "" - #secret_key = "" - #token = "" - #role_arn = "" - #web_identity_token_file = "" - #role_session_name = "" - #profile = "" - #shared_credential_file = "" - - ## Endpoint to make request against, the correct endpoint is automatically - ## determined and this option should only be set if you wish to override the - ## default. - ## ex: endpoint_url = "http://localhost:8000" - # endpoint_url = "" - - ## Timestream database where the metrics will be inserted. - ## The database must exist prior to starting Telegraf. - database_name = "yourDatabaseNameHere" - - ## Specifies if the plugin should describe the Timestream database upon starting - ## to validate if it has access necessary permissions, connection, etc., as a safety check. - ## If the describe operation fails, the plugin will not start - ## and therefore the Telegraf agent will not start. - describe_database_on_start = false - - ## The mapping mode specifies how Telegraf records are represented in Timestream. - ## Valid values are: single-table, multi-table. - ## For example, consider the following data in line protocol format: - ## weather,location=us-midwest,season=summer temperature=82,humidity=71 1465839830100400200 - ## airquality,location=us-west no2=5,pm25=16 1465839830100400200 - ## where weather and airquality are the measurement names, location and season are tags, - ## and temperature, humidity, no2, pm25 are fields. 
- ## In multi-table mode: - ## - first line will be ingested to table named weather - ## - second line will be ingested to table named airquality - ## - the tags will be represented as dimensions - ## - first table (weather) will have two records: - ## one with measurement name equals to temperature, - ## another with measurement name equals to humidity - ## - second table (airquality) will have two records: - ## one with measurement name equals to no2, - ## another with measurement name equals to pm25 - ## - the Timestream tables from the example will look like this: - ## TABLE "weather": - ## time | location | season | measure_name | measure_value::bigint - ## 2016-06-13 17:43:50 | us-midwest | summer | temperature | 82 - ## 2016-06-13 17:43:50 | us-midwest | summer | humidity | 71 - ## TABLE "airquality": - ## time | location | measure_name | measure_value::bigint - ## 2016-06-13 17:43:50 | us-west | no2 | 5 - ## 2016-06-13 17:43:50 | us-west | pm25 | 16 - ## In single-table mode: - ## - the data will be ingested to a single table, which name will be valueOf(single_table_name) - ## - measurement name will stored in dimension named valueOf(single_table_dimension_name_for_telegraf_measurement_name) - ## - location and season will be represented as dimensions - ## - temperature, humidity, no2, pm25 will be represented as measurement name - ## - the Timestream table from the example will look like this: - ## Assuming: - ## - single_table_name = "my_readings" - ## - single_table_dimension_name_for_telegraf_measurement_name = "namespace" - ## TABLE "my_readings": - ## time | location | season | namespace | measure_name | measure_value::bigint - ## 2016-06-13 17:43:50 | us-midwest | summer | weather | temperature | 82 - ## 2016-06-13 17:43:50 | us-midwest | summer | weather | humidity | 71 - ## 2016-06-13 17:43:50 | us-west | NULL | airquality | no2 | 5 - ## 2016-06-13 17:43:50 | us-west | NULL | airquality | pm25 | 16 - ## In most cases, using multi-table mapping mode 
is recommended. - ## However, you can consider using single-table in situations when you have thousands of measurement names. - mapping_mode = "multi-table" - - ## Only valid and required for mapping_mode = "single-table" - ## Specifies the Timestream table where the metrics will be uploaded. - # single_table_name = "yourTableNameHere" - - ## Only valid and required for mapping_mode = "single-table" - ## Describes what will be the Timestream dimension name for the Telegraf - ## measurement name. - # single_table_dimension_name_for_telegraf_measurement_name = "namespace" - - ## Specifies if the plugin should create the table, if the table do not exist. - ## The plugin writes the data without prior checking if the table exists. - ## When the table does not exist, the error returned from Timestream will cause - ## the plugin to create the table, if this parameter is set to true. - create_table_if_not_exists = true - - ## Only valid and required if create_table_if_not_exists = true - ## Specifies the Timestream table magnetic store retention period in days. - ## Check Timestream documentation for more details. - create_table_magnetic_store_retention_period_in_days = 365 - - ## Only valid and required if create_table_if_not_exists = true - ## Specifies the Timestream table memory store retention period in hours. - ## Check Timestream documentation for more details. - create_table_memory_store_retention_period_in_hours = 24 - - ## Only valid and optional if create_table_if_not_exists = true - ## Specifies the Timestream table tags. - ## Check Timestream documentation for more details - # create_table_tags = { "foo" = "bar", "environment" = "dev"} - - ## Specify the maximum number of parallel go routines to ingest/write data - ## If not specified, defaulted to 1 go routines - max_write_go_routines = 25 -` - // WriteFactory function provides a way to mock the client instantiation for testing purposes. 
var WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (WriteClient, error) { cfg, err := credentialConfig.Credentials() @@ -266,14 +148,6 @@ func (t *Timestream) Close() error { return nil } -func (t *Timestream) SampleConfig() string { - return sampleConfig -} - -func (t *Timestream) Description() string { - return "Configuration for Amazon Timestream output." -} - func init() { outputs.Add("timestream", func() telegraf.Output { return &Timestream{} diff --git a/plugins/outputs/timestream/timestream_sample_config.go b/plugins/outputs/timestream/timestream_sample_config.go new file mode 100644 index 000000000..597fcfbd2 --- /dev/null +++ b/plugins/outputs/timestream/timestream_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package timestream + +func (t *Timestream) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/warp10/README.md b/plugins/outputs/warp10/README.md index 4ffc2ce73..78a1adb17 100644 --- a/plugins/outputs/warp10/README.md +++ b/plugins/outputs/warp10/README.md @@ -5,6 +5,7 @@ The `warp10` output plugin writes metrics to [Warp 10][]. ## Configuration ```toml +# Write metrics to Warp 10 [[outputs.warp10]] # Prefix to add to the measurement. prefix = "telegraf." diff --git a/plugins/outputs/warp10/warp10.go b/plugins/outputs/warp10/warp10.go index 740bb0198..bfe4ef356 100644 --- a/plugins/outputs/warp10/warp10.go +++ b/plugins/outputs/warp10/warp10.go @@ -35,33 +35,6 @@ type Warp10 struct { Log telegraf.Logger `toml:"-"` } -var sampleConfig = ` - # Prefix to add to the measurement. - prefix = "telegraf." 
- - # URL of the Warp 10 server - warp_url = "http://localhost:8080" - - # Write token to access your app on warp 10 - token = "Token" - - # Warp 10 query timeout - # timeout = "15s" - - ## Print Warp 10 error body - # print_error_body = false - - ## Max string error size - # max_string_error_size = 511 - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - // MetricLine Warp 10 metrics type MetricLine struct { Metric string @@ -218,16 +191,6 @@ func floatToString(inputNum float64) string { return strconv.FormatFloat(inputNum, 'f', 6, 64) } -// SampleConfig get config -func (w *Warp10) SampleConfig() string { - return sampleConfig -} - -// Description get description -func (w *Warp10) Description() string { - return "Write metrics to Warp 10" -} - // Close close func (w *Warp10) Close() error { return nil diff --git a/plugins/outputs/warp10/warp10_sample_config.go b/plugins/outputs/warp10/warp10_sample_config.go new file mode 100644 index 000000000..afd8bd966 --- /dev/null +++ b/plugins/outputs/warp10/warp10_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package warp10 + +func (w *Warp10) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/wavefront/README.md b/plugins/outputs/wavefront/README.md index 5e37b6f2f..91802e52d 100644 --- a/plugins/outputs/wavefront/README.md +++ b/plugins/outputs/wavefront/README.md @@ -5,6 +5,8 @@ This plugin writes to a [Wavefront](https://www.wavefront.com) proxy, in Wavefro ## Configuration ```toml +# Configuration for Wavefront server to send metrics to +[[outputs.wavefront]] ## Url for Wavefront Direct Ingestion. 
For Wavefront Proxy Ingestion, see ## the 'host' and 'port' options below. url = "https://metrics.wavefront.com" diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index 6e0dbd63a..46ae7b637 100644 --- a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -58,58 +58,6 @@ var tagValueReplacer = strings.NewReplacer("*", "-") var pathReplacer = strings.NewReplacer("_", "_") -var sampleConfig = ` - ## Url for Wavefront Direct Ingestion. For Wavefront Proxy Ingestion, see - ## the 'host' and 'port' optioins below. - url = "https://metrics.wavefront.com" - - ## Authentication Token for Wavefront. Only required if using Direct Ingestion - #token = "DUMMY_TOKEN" - - ## DNS name of the wavefront proxy server. Do not use if url is specified - #host = "wavefront.example.com" - - ## Port that the Wavefront proxy server listens on. Do not use if url is specified - #port = 2878 - - ## prefix for metrics keys - #prefix = "my.specific.prefix." - - ## whether to use "value" for name of simple fields. default is false - #simple_fields = false - - ## character to use between metric and field name. default is . (dot) - #metric_separator = "." - - ## Convert metric name paths to use metricSeparator character - ## When true will convert all _ (underscore) characters in final metric name. default is true - #convert_paths = true - - ## Use Strict rules to sanitize metric and tag names from invalid characters - ## When enabled forward slash (/) and comma (,) will be accepted - #use_strict = false - - ## Use Regex to sanitize metric and tag names from invalid characters - ## Regex is more thorough, but significantly slower. 
default is false - #use_regex = false - - ## point tags to use as the source name for Wavefront (if none found, host will be used) - #source_override = ["hostname", "address", "agent_host", "node_host"] - - ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true - #convert_bool = true - - ## Truncate metric tags to a total of 254 characters for the tag name value. Wavefront will reject any - ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility. - #truncate_tags = false - - ## Flush the internal buffers after each batch. This effectively bypasses the background sending of metrics - ## normally done by the Wavefront SDK. This can be used if you are experiencing buffer overruns. The sending - ## of metrics will block for a longer time, but this will be handled gracefully by the internal buffering in - ## Telegraf. - #immediate_flush = true -` - type MetricPoint struct { Metric string Value float64 @@ -323,14 +271,6 @@ func buildValue(v interface{}, name string, w *Wavefront) (float64, error) { return 0, fmt.Errorf("unexpected type: %T, with value: %v, for: %s", v, v, name) } -func (w *Wavefront) SampleConfig() string { - return sampleConfig -} - -func (w *Wavefront) Description() string { - return "Configuration for Wavefront server to send metrics to" -} - func (w *Wavefront) Close() error { w.sender.Close() return nil diff --git a/plugins/outputs/wavefront/wavefront_sample_config.go b/plugins/outputs/wavefront/wavefront_sample_config.go new file mode 100644 index 000000000..e79fee6c6 --- /dev/null +++ b/plugins/outputs/wavefront/wavefront_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package wavefront + +func (w *Wavefront) SampleConfig() string { + return `{{ 
.SampleConfig }}` +} diff --git a/plugins/outputs/websocket/websocket.go b/plugins/outputs/websocket/websocket.go index 402e6f5a9..3ff94739d 100644 --- a/plugins/outputs/websocket/websocket.go +++ b/plugins/outputs/websocket/websocket.go @@ -17,42 +17,6 @@ import ( ws "github.com/gorilla/websocket" ) -var sampleConfig = ` - ## URL is the address to send metrics to. Make sure ws or wss scheme is used. - url = "ws://127.0.0.1:8080/telegraf" - - ## Timeouts (make sure read_timeout is larger than server ping interval or set to zero). - # connect_timeout = "30s" - # write_timeout = "30s" - # read_timeout = "30s" - - ## Optionally turn on using text data frames (binary by default). - # use_text_frames = false - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## Optional SOCKS5 proxy to use - # socks5_enabled = true - # socks5_address = "127.0.0.1:1080" - # socks5_username = "alice" - # socks5_password = "pass123" - - ## Data format to output. - ## Each data format has it's own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - # data_format = "influx" - - ## Additional HTTP Upgrade headers - # [outputs.websocket.headers] - # Authorization = "Bearer " -` - const ( defaultConnectTimeout = 30 * time.Second defaultWriteTimeout = 30 * time.Second @@ -81,16 +45,6 @@ func (w *WebSocket) SetSerializer(serializer serializers.Serializer) { w.serializer = serializer } -// Description of plugin. -func (w *WebSocket) Description() string { - return "Generic WebSocket output writer." -} - -// SampleConfig returns plugin config sample. -func (w *WebSocket) SampleConfig() string { - return sampleConfig -} - var errInvalidURL = errors.New("invalid websocket URL") // Init the output plugin. 
diff --git a/plugins/outputs/websocket/websocket_sample_config.go b/plugins/outputs/websocket/websocket_sample_config.go new file mode 100644 index 000000000..af46cfa14 --- /dev/null +++ b/plugins/outputs/websocket/websocket_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package websocket + +func (w *WebSocket) SampleConfig() string { + return `{{ .SampleConfig }}` +} diff --git a/plugins/outputs/yandex_cloud_monitoring/README.md b/plugins/outputs/yandex_cloud_monitoring/README.md index 412a57e4e..ff409e9cd 100644 --- a/plugins/outputs/yandex_cloud_monitoring/README.md +++ b/plugins/outputs/yandex_cloud_monitoring/README.md @@ -5,6 +5,7 @@ This plugin will send custom metrics to [Yandex Cloud Monitoring](https://cloud. ## Configuration ```toml +# Send aggregated metrics to Yandex.Cloud Monitoring [[outputs.yandex_cloud_monitoring]] ## Timeout for HTTP writes. # timeout = "20s" diff --git a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go index dc097da45..08d353ad9 100644 --- a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go +++ b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go @@ -63,27 +63,6 @@ const ( defaultMetadataFolderURL = "http://169.254.169.254/computeMetadata/v1/yandex/folder-id" ) -var sampleConfig = ` - ## Timeout for HTTP writes. - # timeout = "20s" - - ## Yandex.Cloud monitoring API endpoint. Normally should not be changed - # endpoint_url = "https://monitoring.api.cloud.yandex.net/monitoring/v2/data/write" - - ## All user metrics should be sent with "custom" service specified. 
Normally should not be changed - # service = "custom" -` - -// Description provides a description of the plugin -func (a *YandexCloudMonitoring) Description() string { - return "Send aggregated metrics to Yandex.Cloud Monitoring" -} - -// SampleConfig provides a sample configuration for the plugin -func (a *YandexCloudMonitoring) SampleConfig() string { - return sampleConfig -} - // Connect initializes the plugin and validates connectivity func (a *YandexCloudMonitoring) Connect() error { if a.Timeout <= 0 { diff --git a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring_sample_config.go b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring_sample_config.go new file mode 100644 index 000000000..a53afdd59 --- /dev/null +++ b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring_sample_config.go @@ -0,0 +1,8 @@ +//go:generate go run ../../../tools/generate_plugindata/main.go +//go:generate go run ../../../tools/generate_plugindata/main.go --clean +// DON'T EDIT; This file is used as a template by tools/generate_plugindata +package yandex_cloud_monitoring + +func (a *YandexCloudMonitoring) SampleConfig() string { + return `{{ .SampleConfig }}` +}