chore(outputs): migrate sample configs into separate files (#11131)

This commit is contained in:
Sebastian Spaink 2022-05-18 11:30:06 -05:00 committed by GitHub
parent 4b3a5d5559
commit 256caede89
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
53 changed files with 2097 additions and 0 deletions

View File

@ -0,0 +1,10 @@
# Configuration for Amon Server to send metrics to.
[[outputs.amon]]
## Amon Server Key
server_key = "my-server-key" # required.
## Amon Instance URL
amon_instance = "https://youramoninstance" # required
## Connection timeout.
# timeout = "5s"

View File

@ -0,0 +1,95 @@
# Publishes metrics to an AMQP broker
[[outputs.amqp]]
## Broker to publish to.
## deprecated in 1.7; use the brokers option
# url = "amqp://localhost:5672/influxdb"
## Brokers to publish to. If multiple brokers are specified a random broker
## will be selected anytime a connection is established. This can be
## helpful for load balancing when not using a dedicated load balancer.
brokers = ["amqp://localhost:5672/influxdb"]
## Maximum messages to send over a connection. Once this is reached, the
## connection is closed and a new connection is made. This can be helpful for
## load balancing when not using a dedicated load balancer.
# max_messages = 0
## Exchange to declare and publish to.
exchange = "telegraf"
## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
# exchange_type = "topic"
## If true, exchange will be passively declared.
# exchange_passive = false
## Exchange durability can be either "transient" or "durable".
# exchange_durability = "durable"
## Additional exchange arguments.
# exchange_arguments = { }
# exchange_arguments = {"hash_property" = "timestamp"}
## Authentication credentials for the PLAIN auth_method.
# username = ""
# password = ""
## Auth method. PLAIN and EXTERNAL are supported
## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
## described here: https://www.rabbitmq.com/plugins.html
# auth_method = "PLAIN"
## Metric tag to use as a routing key.
## i.e., if this tag exists, its value will be used as the routing key
# routing_tag = "host"
## Static routing key. Used when no routing_tag is set or as a fallback
## when the tag specified in routing_tag is not found.
# routing_key = ""
# routing_key = "telegraf"
## Delivery Mode controls if a published message is persistent.
## One of "transient" or "persistent".
# delivery_mode = "transient"
## InfluxDB database added as a message header.
## deprecated in 1.7; use the headers option
# database = "telegraf"
## InfluxDB retention policy added as a message header
## deprecated in 1.7; use the headers option
# retention_policy = "default"
## Static headers added to each published message.
# headers = { }
# headers = {"database" = "telegraf", "retention_policy" = "default"}
## Connection timeout. If not provided, will default to 5s. 0s means no
## timeout (not recommended).
# timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## If true use batch serialization format instead of line based delimiting.
## Only applies to data formats which are not line based such as JSON.
## Recommended to set to true.
# use_batch_format = false
## Content encoding for message payloads, can be set to "gzip" to compress
## body or "identity" to apply no encoding.
##
## Please note that when use_batch_format = false, each AMQP message contains
## only a single metric; it is recommended to use compression together with the
## batch format for best results.
# content_encoding = "identity"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"

View File

@ -0,0 +1,21 @@
# Send metrics to Azure Application Insights
[[outputs.application_insights]]
## Instrumentation key of the Application Insights resource.
instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx"
## Regions that require endpoint modification: https://docs.microsoft.com/en-us/azure/azure-monitor/app/custom-endpoints
# endpoint_url = "https://dc.services.visualstudio.com/v2/track"
## Timeout for closing (default: 5s).
# timeout = "5s"
## Enable additional diagnostic logging.
# enable_diagnostic_logging = false
## Context Tag Sources add Application Insights context tags to a tag value.
##
## For list of allowed context tag keys see:
## https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go
# [outputs.application_insights.context_tag_sources]
# "ai.cloud.role" = "kubernetes_container_name"
# "ai.cloud.roleInstance" = "kubernetes_pod_name"

View File

@ -0,0 +1,25 @@
# Sends metrics to Azure Data Explorer
[[outputs.azure_data_explorer]]
## The URI property of the Azure Data Explorer resource on Azure
## ex: endpoint_url = https://myadxresource.australiasoutheast.kusto.windows.net
endpoint_url = ""
## The Azure Data Explorer database that the metrics will be ingested into.
## The plugin will NOT generate this database automatically; it is expected that this database already exists before ingestion.
## ex: "exampledatabase"
database = ""
## Timeout for Azure Data Explorer operations
# timeout = "20s"
## Type of metrics grouping used when pushing to Azure Data Explorer.
## Default is "TablePerMetric" for one table per different metric.
## For more information, please check the plugin README.
# metrics_grouping_type = "TablePerMetric"
## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable").
# table_name = ""
## Creates tables and relevant mapping if set to true (default).
## Skips table and mapping creation if set to false; this is useful for running Telegraf with the lowest possible permissions, i.e. the table ingestor role.
# create_tables = true
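## Illustrative note (hypothetical metric names): with the default
## "TablePerMetric", metrics named "cpu" and "mem" are ingested into tables
## "cpu" and "mem"; with "SingleTable", all metrics land in the single table
## named by table_name.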

View File

@ -0,0 +1,29 @@
# Send aggregate metrics to Azure Monitor
[[outputs.azure_monitor]]
## Timeout for HTTP writes.
# timeout = "20s"
## Set the namespace prefix, defaults to "Telegraf/<input-name>".
# namespace_prefix = "Telegraf/"
## Azure Monitor doesn't have a string value type, so convert string
## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows
## a maximum of 10 dimensions so Telegraf will only send the first 10
## alphanumeric dimensions.
# strings_as_dimensions = false
## Both region and resource_id must be set or be available via the
## Instance Metadata service on Azure Virtual Machines.
#
## Azure Region to publish metrics against.
## ex: region = "southcentralus"
# region = ""
#
## The Azure Resource ID against which metrics will be logged.
## ex: resource_id = "/subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.Compute/virtualMachines/<vm_name>"
# resource_id = ""
## Optionally, if in Azure US Government, China, or other sovereign
## cloud environment, set the appropriate REST endpoint for receiving
## metrics. (Note: region may be unused in this context)
# endpoint_url = "https://monitoring.core.usgovcloudapi.net"

View File

@ -0,0 +1,16 @@
# Configuration for Google Cloud BigQuery to send entries
[[outputs.bigquery]]
## Credentials File
credentials_file = "/path/to/service/account/key.json"
## Google Cloud Platform Project
project = "my-gcp-project"
## The namespace for the metric descriptor
dataset = "telegraf"
## Timeout for BigQuery operations.
# timeout = "5s"
## Character used to replace hyphens in metric names
# replace_hyphen_to = "_"

View File

@ -0,0 +1,49 @@
# Publish Telegraf metrics to a Google Cloud PubSub topic
[[outputs.cloud_pubsub]]
## Required. Name of Google Cloud Platform (GCP) Project that owns
## the given PubSub topic.
project = "my-project"
## Required. Name of PubSub topic to publish metrics to.
topic = "my-topic"
## Required. Data format to output.
## Each data format has its own unique set of configuration options.
## Read more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
## Optional. Filepath for GCP credentials JSON file to authorize calls to
## PubSub APIs. If not set explicitly, Telegraf will attempt to use
## Application Default Credentials, which is preferred.
# credentials_file = "path/to/my/creds.json"
## Optional. If true, will send all metrics per write in one PubSub message.
# send_batched = true
## The following publish_* parameters specifically configure batching
## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read
## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings
## Optional. Send a request to PubSub (i.e. actually publish a batch)
## when it has this many PubSub messages. If send_batched is true,
## this is ignored and treated as if it were 1.
# publish_count_threshold = 1000
## Optional. Send a request to PubSub (i.e. actually publish a batch)
## when it has accumulated this many bytes of PubSub message data. If
## send_batched is true, this is ignored and treated as if it were 1.
# publish_byte_threshold = 1000000
## Optional. Number of goroutines used when publishing to the PubSub API.
# publish_num_go_routines = 2
## Optional. Specifies a timeout for requests to the PubSub API.
# publish_timeout = "30s"
## Optional. If true, published PubSub message data will be base64-encoded.
# base64_data = false
## Optional. PubSub attributes to add to metrics.
# [outputs.cloud_pubsub.attributes]
# my_attr = "tag_value"

View File

@ -0,0 +1,42 @@
# Configuration for AWS CloudWatch output.
[[outputs.cloudwatch]]
## Amazon REGION
region = "us-east-1"
## Amazon Credentials
## Credentials are loaded in the following order
## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
## 2) Assumed credentials via STS if role_arn is specified
## 3) explicit credentials from 'access_key' and 'secret_key'
## 4) shared profile from 'profile'
## 5) environment variables
## 6) shared credentials file
## 7) EC2 Instance Profile
#access_key = ""
#secret_key = ""
#token = ""
#role_arn = ""
#web_identity_token_file = ""
#role_session_name = ""
#profile = ""
#shared_credential_file = ""
## Endpoint to make request against, the correct endpoint is automatically
## determined and this option should only be set if you wish to override the
## default.
## ex: endpoint_url = "http://localhost:8000"
# endpoint_url = ""
## Namespace for the CloudWatch MetricDatums
namespace = "InfluxData/Telegraf"
## If you have a large amount of metrics, you should consider sending statistic
## values instead of raw metrics, which can not only improve performance but
## also save AWS API cost. If this flag is enabled, the plugin parses the required
## CloudWatch statistic fields (count, min, max, and sum) and sends them to CloudWatch.
## You can use the basicstats aggregator to calculate those fields. If not all statistic
## fields are available, all fields are still sent as raw metrics.
# write_statistics = false
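## Illustrative sketch (hypothetical period): the statistic fields can be
## produced with the basicstats aggregator, e.g.
##   [[aggregators.basicstats]]
##     period = "30s"
##     stats = ["count", "min", "max", "sum"]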
## Enable high resolution metrics of 1 second (if not enabled, standard resolution of 60 seconds is used)
# high_resolution_metrics = false

View File

@ -0,0 +1,58 @@
# Configuration for AWS CloudWatchLogs output.
[[outputs.cloudwatch_logs]]
## The region is the Amazon region that you wish to connect to.
## Examples include but are not limited to:
## - us-west-1
## - us-west-2
## - us-east-1
## - ap-southeast-1
## - ap-southeast-2
## ...
region = "us-east-1"
## Amazon Credentials
## Credentials are loaded in the following order
## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
## 2) Assumed credentials via STS if role_arn is specified
## 3) explicit credentials from 'access_key' and 'secret_key'
## 4) shared profile from 'profile'
## 5) environment variables
## 6) shared credentials file
## 7) EC2 Instance Profile
#access_key = ""
#secret_key = ""
#token = ""
#role_arn = ""
#web_identity_token_file = ""
#role_session_name = ""
#profile = ""
#shared_credential_file = ""
## Endpoint to make request against, the correct endpoint is automatically
## determined and this option should only be set if you wish to override the
## default.
## ex: endpoint_url = "http://localhost:8000"
# endpoint_url = ""
## CloudWatch log group. Must be created in AWS CloudWatch Logs upfront!
## For example, you can specify the name of the k8s cluster here to group logs from all clusters in one place
log_group = "my-group-name"
## Log stream in log group
## Either a log stream name or a reference to a metric attribute from which it
## can be parsed: tag:<TAG_NAME> or field:<FIELD_NAME>. If the log stream does not exist, it will be created.
## Since AWS does not automatically delete log streams with expired log entries (i.e. empty log streams),
## you need to put appropriate house-keeping in place (https://forums.aws.amazon.com/thread.jspa?threadID=178855)
log_stream = "tag:location"
## Source of log data - metric name
## specify the name of the metric from which the log data should be retrieved.
## E.g., if you are using the docker_log plugin to stream logs from a container, then
## specify log_data_metric_name = "docker_log"
log_data_metric_name = "docker_log"
## Specify from which metric attribute the log data should be retrieved:
## tag:<TAG_NAME> or field:<FIELD_NAME>.
## E.g., if you are using the docker_log plugin to stream logs from a container, then
## specify log_data_source = "field:message"
log_data_source = "field:message"

View File

@ -0,0 +1,13 @@
# Configuration for CrateDB to send metrics to.
[[outputs.cratedb]]
# A github.com/jackc/pgx/v4 connection string.
# See https://pkg.go.dev/github.com/jackc/pgx/v4#ParseConfig
url = "postgres://user:password@localhost/schema?sslmode=disable"
# Timeout for all CrateDB queries.
timeout = "5s"
# Name of the table to store metrics in.
table = "metrics"
# If true, and the metrics table does not exist, create it automatically.
table_create = true
# The character(s) to replace any '.' in an object key with
key_separator = "_"

View File

@ -0,0 +1,17 @@
# Configuration for DataDog API to send metrics to.
[[outputs.datadog]]
## Datadog API key
apikey = "my-secret-key"
## Connection timeout.
# timeout = "5s"
## Write URL override; useful for debugging.
# url = "https://app.datadoghq.com/api/v1/series"
## Set http_proxy (telegraf uses the system wide proxy settings if it isn't set)
# http_proxy_url = "http://localhost:8888"
## Override the default (none) compression used to send data.
## Supports: "zlib", "none"
# compression = "none"

View File

@ -0,0 +1,3 @@
# Send metrics to nowhere at all
[[outputs.discard]]
# no configuration

View File

@ -0,0 +1,36 @@
# Send telegraf metrics to a Dynatrace environment
[[outputs.dynatrace]]
## For usage with the Dynatrace OneAgent you can omit any configuration;
## the only requirement is that the OneAgent is running on the same host.
## Only set up the environment URL and token if you want to monitor a host without the OneAgent present.
##
## Your Dynatrace environment URL.
## For Dynatrace OneAgent you can leave this empty or set it to "http://127.0.0.1:14499/metrics/ingest" (default)
## For Dynatrace SaaS environments the URL scheme is "https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest"
## For Dynatrace Managed environments the URL scheme is "https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest"
url = ""
## Your Dynatrace API token.
## Create an API token within your Dynatrace environment, by navigating to Settings > Integration > Dynatrace API
## The API token needs data ingest scope permission. When using OneAgent, no API token is required.
api_token = ""
## Optional prefix for metric names (e.g.: "telegraf")
prefix = "telegraf"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Optional flag for ignoring tls certificate check
# insecure_skip_verify = false
## Connection timeout, defaults to "5s" if not set.
timeout = "5s"
## If you want metrics to be treated and reported as delta counters, add the metric names here
additional_counters = [ ]
## Optional dimensions to be added to every metric
# [outputs.dynatrace.default_dimensions]
# default_key = "default value"

View File

@ -0,0 +1,79 @@
# Configuration for Elasticsearch to send metrics to.
[[outputs.elasticsearch]]
## The full HTTP endpoint URL for your Elasticsearch instance
## Multiple urls can be specified as part of the same cluster;
## this means that only ONE of the urls will be written to each interval
urls = [ "http://node1.es.example.com:9200" ] # required.
## Elasticsearch client timeout, defaults to "5s" if not set.
timeout = "5s"
## Set to true to ask Elasticsearch for a list of all cluster nodes,
## so it is not necessary to list all nodes in the urls config option
enable_sniffer = false
## Set to true to enable gzip compression
enable_gzip = false
## Set the interval to check if the Elasticsearch nodes are available
## Setting to "0s" will disable the health check (not recommended in production)
health_check_interval = "10s"
## Set the timeout for periodic health checks.
# health_check_timeout = "1s"
## HTTP basic authentication details
# username = "telegraf"
# password = "mypassword"
## HTTP bearer token authentication details
# auth_bearer_token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9"
## Index Config
## The target index for metrics (Elasticsearch will create it if it does not exist).
## You can use the date specifiers below to create indexes per time frame.
## The metric timestamp will be used to decide the destination index name
# %Y - year (2016)
# %y - last two digits of year (00..99)
# %m - month (01..12)
# %d - day of month (e.g., 01)
# %H - hour (00..23)
# %V - week of the year (ISO week) (01..53)
## Additionally, you can specify a tag name using the notation {{tag_name}}
## which will be used as part of the index name. If the tag does not exist,
## the default tag value will be used.
# index_name = "telegraf-{{host}}-%Y.%m.%d"
# default_tag_value = "none"
index_name = "telegraf-%Y.%m.%d" # required.
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Template Config
## Set to true if you want telegraf to manage its index template.
## If enabled it will create a recommended index template for telegraf indexes
manage_template = true
## The template name used for telegraf indexes
template_name = "telegraf"
## Set to true if you want telegraf to overwrite an existing template
overwrite_template = false
## If set to true, a unique ID hash will be sent as a sha256(concat(timestamp,measurement,series-hash)) string.
## This enables data resending and updating of metric points, avoiding duplicated metrics with different id's
force_document_id = false
## Specifies the handling of NaN and Inf values.
## This option can have the following values:
## none -- do not modify field-values (default); will produce an error if NaNs or infs are encountered
## drop -- drop fields containing NaNs or infs
## replace -- replace with the value in "float_replacement_value" (default: 0.0)
## NaNs and inf will be replaced with the given number, -inf with the negative of that number
# float_handling = "none"
# float_replacement_value = 0.0
## Pipeline Config
## To use an ingest pipeline, set this to the name of the pipeline you want to use.
# use_pipeline = "my_pipeline"
## Additionally, you can specify a tag name using the notation {{tag_name}}
## which will be used as part of the pipeline name. If the tag does not exist,
## the default pipeline will be used as the pipeline. If no default pipeline is set,
## no pipeline is used for the metric.
# use_pipeline = "{{es_pipeline}}"
# default_pipeline = "my_pipeline"

View File

@ -0,0 +1,12 @@
# Configuration for Event Hubs output plugin
[[outputs.event_hubs]]
## The full connection string to the Event Hub (required)
## The shared access key must have "Send" permissions on the target Event Hub.
connection_string = "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName"
## Client timeout (defaults to 30s)
# timeout = "30s"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "json"

View File

@ -0,0 +1,19 @@
# Send metrics to command as input over stdin
[[outputs.exec]]
## Command to ingest metrics via stdin.
command = ["tee", "-a", "/dev/null"]
## Environment variables
## Array of "key=value" pairs to pass as environment variables
## e.g. "KEY=value", "USERNAME=John Doe",
## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs"
# environment = []
## Timeout for command to complete.
# timeout = "5s"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"

View File

@ -0,0 +1,20 @@
# Run executable as long-running output plugin
[[outputs.execd]]
## One program to run as daemon.
## NOTE: process and each argument should each be their own string
command = ["my-telegraf-output", "--some-flag", "value"]
## Environment variables
## Array of "key=value" pairs to pass as environment variables
## e.g. "KEY=value", "USERNAME=John Doe",
## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs"
# environment = []
## Delay before the process is restarted after an unexpected termination
restart_delay = "10s"
## Data format to export.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"

View File

@ -0,0 +1,27 @@
# Send telegraf metrics to file(s)
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]
## Use batch serialization format instead of line based delimiting. The
## batch format allows for the production of non line based output formats and
## may more efficiently encode and write metrics.
# use_batch_format = false
## The file will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed.
# rotation_interval = "0h"
## The logfile will be rotated when it becomes larger than the specified
## size. When set to 0 no size based rotation is performed.
# rotation_max_size = "0MB"
## Maximum number of rotated archives to keep, any older logs are deleted.
## If set to -1, no archives are removed.
# rotation_max_archives = 5
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"

View File

@ -0,0 +1,42 @@
# Configuration for Graphite server to send metrics to
[[outputs.graphite]]
## TCP endpoint for your graphite instance.
## If multiple endpoints are configured, the output will be load balanced.
## Only one of the endpoints will be written to with each iteration.
servers = ["localhost:2003"]
## Prefix for metric names
prefix = ""
## Graphite output template
## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
template = "host.tags.measurement.field"
## Enable Graphite tags support
# graphite_tag_support = false
## Define how metric names and tags are sanitized; options are "strict", or "compatible"
## strict - Default method, and backwards compatible with previous versions of Telegraf
## compatible - More relaxed sanitizing when using tags, and compatible with the graphite spec
# graphite_tag_sanitize_mode = "strict"
## Character for separating metric name and field for Graphite tags
# graphite_separator = "."
## Graphite templates patterns
## 1. Template for cpu
## 2. Template for disk*
## 3. Default template
# templates = [
# "cpu tags.measurement.host.field",
# "disk* measurement.field",
# "host.measurement.tags.field"
#]
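## Illustrative note (hypothetical metric): with
## template = "host.tags.measurement.field", the metric
## cpu,host=web01,dc=us usage_idle=98 1650000000000000000 is rendered as
## "web01.us.cpu.usage_idle 98 1650000000" on the wire.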
## timeout in seconds for the write connection to graphite
timeout = 2
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

View File

@ -0,0 +1,24 @@
# Send telegraf metrics to graylog
[[outputs.graylog]]
## Endpoints for your graylog instances.
servers = ["udp://127.0.0.1:12201"]
## Connection timeout.
# timeout = "5s"
## The field to use as the GELF short_message, if unset the static string
## "telegraf" will be used.
## example: short_message_field = "message"
# short_message_field = ""
## According to the GELF payload specification, additional field names must be prefixed
## with an underscore. Previous versions did not prefix the custom field 'name' with an underscore.
## Set to true for backward compatibility.
# name_field_no_prefix = false
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

View File

@ -0,0 +1,23 @@
# Send telegraf metrics to GroundWork Monitor
[[outputs.groundwork]]
## URL of your groundwork instance.
url = "https://groundwork.example.com"
## Agent uuid for GroundWork API Server.
agent_id = ""
## Username and password to access GroundWork API.
username = ""
password = ""
## Default display name for the host with services (metrics).
# default_host = "telegraf"
## Default service state.
# default_service_state = "SERVICE_OK"
## The name of the tag that contains the hostname.
# resource_tag = "host"
## The name of the tag that contains the host group name.
# group_tag = "group"

View File

@ -0,0 +1,38 @@
# Configurable HTTP health check resource based on metrics
[[outputs.health]]
## Address and port to listen on.
## ex: service_address = "http://localhost:8080"
## service_address = "unix:///var/run/telegraf-health.sock"
# service_address = "http://:8080"
## The maximum duration for reading the entire request.
# read_timeout = "5s"
## The maximum duration for writing the entire response.
# write_timeout = "5s"
## Username and password to accept for HTTP basic authentication.
# basic_username = "user1"
# basic_password = "secret"
## Allowed CA certificates for client certificates.
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
## TLS server certificate and private key.
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## One or more check sub-tables should be defined; it is also recommended to
## use metric filtering to limit the metrics that flow into this output.
##
## When using the default buffer sizes, this example will fail when the
## metric buffer is half full.
##
## namepass = ["internal_write"]
## tagpass = { output = ["influxdb"] }
##
## [[outputs.health.compares]]
## field = "buffer_size"
## lt = 5000.0
##
## [[outputs.health.contains]]
## field = "buffer_size"

View File

@ -0,0 +1,95 @@
# A plugin that can transmit metrics over HTTP
[[outputs.http]]
## URL is the address to send metrics to
url = "http://127.0.0.1:8080/telegraf"
## Timeout for HTTP message
# timeout = "5s"
## HTTP method, one of: "POST" or "PUT"
# method = "POST"
## HTTP Basic Auth credentials
# username = "username"
# password = "pa$$word"
## OAuth2 Client Credentials Grant
# client_id = "clientid"
# client_secret = "secret"
# token_url = "https://indentityprovider/oauth2/v1/token"
# scopes = ["urn:opc:idm:__myscopes__"]
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Optional Cookie authentication
# cookie_auth_url = "https://localhost/authMe"
# cookie_auth_method = "POST"
# cookie_auth_username = "username"
# cookie_auth_password = "pa$$word"
# cookie_auth_headers = '{"Content-Type": "application/json", "X-MY-HEADER":"hello"}'
# cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}'
## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie
# cookie_auth_renewal = "5m"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
## Use batch serialization format (default) instead of line based format.
## Batch format is more efficient and should be used unless line based
## format is really needed.
# use_batch_format = true
## HTTP Content-Encoding for write request body, can be set to "gzip" to
## compress body or "identity" to apply no encoding.
# content_encoding = "identity"
## Additional HTTP headers
# [outputs.http.headers]
# # Should be set manually to "application/json" for json data_format
# Content-Type = "text/plain; charset=utf-8"
## MaxIdleConns controls the maximum number of idle (keep-alive)
## connections across all hosts. Zero means no limit.
# max_idle_conn = 0
## MaxIdleConnsPerHost, if non-zero, controls the maximum idle
## (keep-alive) connections to keep per-host. If zero,
## DefaultMaxIdleConnsPerHost is used (2).
# max_idle_conn_per_host = 2
## Idle (keep-alive) connection timeout.
## Maximum amount of time before idle connection is closed.
## Zero means no limit.
# idle_conn_timeout = 0
## Amazon Region
#region = "us-east-1"
## Amazon Credentials
## Credentials are loaded in the following order
## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
## 2) Assumed credentials via STS if role_arn is specified
## 3) explicit credentials from 'access_key' and 'secret_key'
## 4) shared profile from 'profile'
## 5) environment variables
## 6) shared credentials file
## 7) EC2 Instance Profile
#access_key = ""
#secret_key = ""
#token = ""
#role_arn = ""
#web_identity_token_file = ""
#role_session_name = ""
#profile = ""
#shared_credential_file = ""
## Optional list of status codes (<200 or >300) upon which requests should not be retried
# non_retryable_statuscodes = [409, 413]

View File

@ -0,0 +1,77 @@
# Configuration for sending metrics to InfluxDB
[[outputs.influxdb]]
## The full HTTP or UDP URL for your InfluxDB instance.
##
## Multiple URLs can be specified for a single cluster; only ONE of the
## urls will be written to each interval.
# urls = ["unix:///var/run/influxdb.sock"]
# urls = ["udp://127.0.0.1:8089"]
# urls = ["http://127.0.0.1:8086"]
## The target database for metrics; will be created as needed.
## For UDP endpoints, the database needs to be configured on the server side.
# database = "telegraf"
## The value of this tag will be used to determine the database. If this
## tag is not set the 'database' option is used as the default.
# database_tag = ""
## If true, the 'database_tag' will not be included in the written metric.
# exclude_database_tag = false
## If true, no CREATE DATABASE queries will be sent. Set to true when using
## Telegraf with a user without permissions to create databases or when the
## database already exists.
# skip_database_creation = false
## Name of existing retention policy to write to. Empty string writes to
## the default retention policy. Only takes effect when using HTTP.
# retention_policy = ""
## The value of this tag will be used to determine the retention policy. If this
## tag is not set the 'retention_policy' option is used as the default.
# retention_policy_tag = ""
## If true, the 'retention_policy_tag' will not be included in the written metric.
# exclude_retention_policy_tag = false
## Write consistency (clusters only), can be: "any", "one", "quorum", "all".
## Only takes effect when using HTTP.
# write_consistency = "any"
## Timeout for HTTP messages.
# timeout = "5s"
## HTTP Basic Auth
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
## HTTP User-Agent
# user_agent = "telegraf"
## UDP payload size is the maximum packet size to send.
# udp_payload = "512B"
## Optional TLS Config for use on HTTP connections.
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## HTTP Proxy override; if unset, the standard proxy environment
## variables are consulted to determine which proxy, if any, should be used.
# http_proxy = "http://corporate.proxy:3128"
## Additional HTTP headers
# http_headers = {"X-Special-Header" = "Special-Value"}
## HTTP Content-Encoding for write request body, can be set to "gzip" to
## compress body or "identity" to apply no encoding.
# content_encoding = "gzip"
## When true, Telegraf will output unsigned integers as unsigned values,
## i.e.: "42u". You will need a version of InfluxDB supporting unsigned
## integer values. Enabling this option will result in field type errors if
## existing data has been written.
# influx_uint_support = false

View File

@ -0,0 +1,51 @@
# Configuration for sending metrics to InfluxDB 2.0
[[outputs.influxdb_v2]]
## The URLs of the InfluxDB cluster nodes.
##
## Multiple URLs can be specified for a single cluster; only ONE of the
## urls will be written to each interval.
## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"]
urls = ["http://127.0.0.1:8086"]
## Token for authentication.
token = ""
## Organization is the name of the organization you wish to write to.
organization = ""
## Destination bucket to write into.
bucket = ""
## The value of this tag will be used to determine the bucket. If this
## tag is not set the 'bucket' option is used as the default.
# bucket_tag = ""
## If true, the bucket tag will not be added to the metric.
# exclude_bucket_tag = false
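## Illustrative note (hypothetical tag value): with bucket_tag = "bucket", a
## metric tagged bucket=apps is written to the "apps" bucket, while metrics
## without that tag fall back to the 'bucket' option above.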
## Timeout for HTTP messages.
# timeout = "5s"
## Additional HTTP headers
# http_headers = {"X-Special-Header" = "Special-Value"}
## HTTP Proxy override; if unset, the standard proxy environment
## variables are consulted to determine which proxy, if any, should be used.
# http_proxy = "http://corporate.proxy:3128"
## HTTP User-Agent
# user_agent = "telegraf"
## Content-Encoding for write request body, can be set to "gzip" to
## compress body or "identity" to apply no encoding.
# content_encoding = "gzip"
## Enable or disable uint support for writing uints to InfluxDB 2.0.
# influx_uint_support = false
## Optional TLS Config for use on HTTP connections.
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

View File

@ -0,0 +1,13 @@
# Configuration for sending metrics to an Instrumental project
[[outputs.instrumental]]
## Project API Token (required)
api_token = "API Token" # required
## Prefix the metrics with a given name
prefix = ""
## Stats output template (Graphite formatting)
## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
template = "host.tags.measurement.field"
## Timeout in seconds to connect
timeout = "2s"
## Debug true - Print communication to Instrumental
debug = false

View File

@ -0,0 +1,151 @@
# Configuration for the Kafka server to send metrics to
[[outputs.kafka]]
## URLs of kafka brokers
brokers = ["localhost:9092"]
## Kafka topic for producer messages
topic = "telegraf"
## The value of this tag will be used as the topic. If not set the 'topic'
## option is used.
# topic_tag = ""
## If true, the 'topic_tag' will be removed from the metric.
# exclude_topic_tag = false
## Optional Client id
# client_id = "Telegraf"
## Set the minimal supported Kafka version. Setting this enables the use of new
## Kafka features and APIs. Of particular interest, lz4 compression
## requires at least version 0.10.0.0.
## ex: version = "1.1.0"
# version = ""
## Optional topic suffix configuration.
## If the section is omitted, no suffix is used.
## Following topic suffix methods are supported:
## measurement - suffix equals separator + measurement's name
## tags - suffix equals separator + specified tags' values
## interleaved with separator
## Suffix equals "_" + measurement name
# [outputs.kafka.topic_suffix]
# method = "measurement"
# separator = "_"
## Suffix equals to "__" + measurement's "foo" tag value.
## If there's no such a tag, suffix equals to an empty string
# [outputs.kafka.topic_suffix]
# method = "tags"
# keys = ["foo"]
# separator = "__"
## Suffix equals to "_" + measurement's "foo" and "bar"
## tag values, separated by "_". If there is no such tags,
## their values treated as empty strings.
# [outputs.kafka.topic_suffix]
# method = "tags"
# keys = ["foo", "bar"]
# separator = "_"
## The routing tag specifies a tag key on the metric whose value is used as
## the message key. The message key is used to determine which partition to
## send the message to. This tag is preferred over the routing_key option.
routing_tag = "host"
## The routing key is set as the message key and used to determine which
## partition to send the message to. This value is only used when no
## routing_tag is set or as a fallback when the tag specified in routing tag
## is not found.
##
## If set to "random", a random value will be generated for each message.
##
## When unset, no message key is added and each message is routed to a random
## partition.
##
## ex: routing_key = "random"
## routing_key = "telegraf"
# routing_key = ""
## Compression codec represents the various compression codecs recognized by
## Kafka in messages.
## 0 : None
## 1 : Gzip
## 2 : Snappy
## 3 : LZ4
## 4 : ZSTD
# compression_codec = 0
## Idempotent Writes
## If enabled, exactly one copy of each message is written.
# idempotent_writes = false
## RequiredAcks is used in Produce Requests to tell the broker how many
## replica acknowledgements it must see before responding
## 0 : the producer never waits for an acknowledgement from the broker.
## This option provides the lowest latency but the weakest durability
## guarantees (some data will be lost when a server fails).
## 1 : the producer gets an acknowledgement after the leader replica has
## received the data. This option provides better durability as the
## client waits until the server acknowledges the request as successful
## (only messages that were written to the now-dead leader but not yet
## replicated will be lost).
## -1: the producer gets an acknowledgement after all in-sync replicas have
## received the data. This option provides the best durability, we
## guarantee that no messages will be lost as long as at least one in
## sync replica remains.
# required_acks = -1
## The maximum number of times to retry sending a metric before failing
## until the next flush.
# max_retry = 3
## The maximum permitted size of a message. Should be set equal to or
## smaller than the broker's 'message.max.bytes'.
# max_message_bytes = 1000000
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Optional SOCKS5 proxy to use when connecting to brokers
# socks5_enabled = true
# socks5_address = "127.0.0.1:1080"
# socks5_username = "alice"
# socks5_password = "pass123"
## Optional SASL Config
# sasl_username = "kafka"
# sasl_password = "secret"
## Optional SASL:
## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI
## (defaults to PLAIN)
# sasl_mechanism = ""
## used if sasl_mechanism is GSSAPI (experimental)
# sasl_gssapi_service_name = ""
## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH
# sasl_gssapi_auth_type = "KRB5_USER_AUTH"
# sasl_gssapi_kerberos_config_path = "/"
# sasl_gssapi_realm = "realm"
# sasl_gssapi_key_tab_path = ""
# sasl_gssapi_disable_pafxfast = false
## used if sasl_mechanism is OAUTHBEARER (experimental)
# sasl_access_token = ""
## SASL protocol version. When connecting to Azure EventHub set to 0.
# sasl_version = 1
## Disable Kafka metadata full fetch
# metadata_full = false
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"

View File

@ -0,0 +1,63 @@
# Configuration for the AWS Kinesis output.
[[outputs.kinesis]]
## Amazon REGION of kinesis endpoint.
region = "ap-southeast-2"
## Amazon Credentials
## Credentials are loaded in the following order
## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
## 2) Assumed credentials via STS if role_arn is specified
## 3) explicit credentials from 'access_key' and 'secret_key'
## 4) shared profile from 'profile'
## 5) environment variables
## 6) shared credentials file
## 7) EC2 Instance Profile
#access_key = ""
#secret_key = ""
#token = ""
#role_arn = ""
#web_identity_token_file = ""
#role_session_name = ""
#profile = ""
#shared_credential_file = ""
## Endpoint to make request against, the correct endpoint is automatically
## determined and this option should only be set if you wish to override the
## default.
## ex: endpoint_url = "http://localhost:8000"
# endpoint_url = ""
## Kinesis StreamName must exist prior to starting telegraf.
streamname = "StreamName"
## The partition key can be calculated using one of several methods:
##
## Use a static value for all writes:
# [outputs.kinesis.partition]
# method = "static"
# key = "howdy"
#
## Use a random partition key on each write:
# [outputs.kinesis.partition]
# method = "random"
#
## Use the measurement name as the partition key:
# [outputs.kinesis.partition]
# method = "measurement"
#
## Use the value of a tag for all writes; if the tag is not set, the
## 'default' option will be used. If no default is set, it defaults to "telegraf"
# [outputs.kinesis.partition]
# method = "tag"
# key = "host"
# default = "mykey"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
## debug will show upstream aws messages.
debug = false

View File

@ -0,0 +1,16 @@
# Configuration for Librato API to send metrics to.
[[outputs.librato]]
## Librato API Docs
## http://dev.librato.com/v1/metrics-authentication
## Librato API user
api_user = "telegraf@influxdb.com" # required.
## Librato API token
api_token = "my-secret-token" # required.
## Debug
# debug = false
## Connection timeout.
# timeout = "5s"
## Output source Template (same as graphite buckets)
## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
## This template is used in librato's source (not metric's name)
template = "host"

View File

@ -0,0 +1,23 @@
# A plugin that can send metrics over HTTPS to Logz.io
[[outputs.logzio]]
## Set to true if the Logz.io sender should check disk space before adding metrics to the disk queue.
# check_disk_space = true
## The percent of used file system space at which the sender will stop queueing.
## When that percentage is reached, the file system in which the queue is stored will drop
## all new logs until the percentage of used space drops below that threshold.
# disk_threshold = 98
## How often Logz.io sender should drain the queue.
## Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
# drain_duration = "3s"
## Where Logz.io sender should store the queue
## queue_dir = Sprintf("%s%s%s%s%d", os.TempDir(), string(os.PathSeparator),
## "logzio-buffer", string(os.PathSeparator), time.Now().UnixNano())
## Logz.io account token
token = "your Logz.io token" # required
## Use your listener URL for your Logz.io account region.
# url = "https://listener.logz.io:8071"

View File

@ -0,0 +1,25 @@
# A plugin that can transmit logs to Loki
[[outputs.loki]]
## The domain of Loki
domain = "https://loki.domain.tld"
## Endpoint of the write API
# endpoint = "/loki/api/v1/push"
## Connection timeout, defaults to "5s" if not set.
# timeout = "5s"
## Basic auth credential
# username = "loki"
# password = "pass"
## Additional HTTP headers
# http_headers = {"X-Scope-OrgID" = "1"}
## If the request must be gzip encoded
# gzip_request = false
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"

View File

@ -0,0 +1,34 @@
# A plugin that can transmit metrics to MongoDB
[[outputs.mongodb]]
# connection string examples for mongodb
dsn = "mongodb://localhost:27017"
# dsn = "mongodb://mongod1:27017,mongod2:27017,mongod3:27017/admin&replicaSet=myReplSet&w=1"
# overrides serverSelectionTimeoutMS in dsn if set
# timeout = "30s"
# default authentication, optional
# authentication = "NONE"
# for SCRAM-SHA-256 authentication
# authentication = "SCRAM"
# username = "root"
# password = "***"
# for x509 certificate authentication
# authentication = "X509"
# tls_ca = "ca.pem"
# tls_key = "client.pem"
# # tls_key_pwd = "changeme" # required for encrypted tls_key
# insecure_skip_verify = false
# database to store measurements and time series collections
# database = "telegraf"
# granularity can be seconds, minutes, or hours.
# choose this value based on your input collection frequency.
# see https://docs.mongodb.com/manual/core/timeseries-collections/#create-a-time-series-collection
# granularity = "seconds"
# optionally set a TTL to automatically expire documents from the measurement collections.
# ttl = "360h"

View File

@ -0,0 +1,62 @@
# Configuration for MQTT server to send metrics to
[[outputs.mqtt]]
## MQTT Brokers
## The list of brokers should only include the hostname or IP address and the
## port to the broker. This should follow the format '{host}:{port}'. For
## example, "localhost:1883" or "127.0.0.1:8883".
servers = ["localhost:1883"]
## MQTT Topic for Producer Messages
## MQTT outputs send metrics to this topic format:
## <topic_prefix>/<hostname>/<pluginname>/ (e.g. prefix/web01.example.com/mem)
topic_prefix = "telegraf"
## QoS policy for messages
## The mqtt QoS policy for sending messages.
## See https://www.ibm.com/support/knowledgecenter/en/SSFKSJ_9.0.0/com.ibm.mq.dev.doc/q029090_.htm
## 0 = at most once
## 1 = at least once
## 2 = exactly once
# qos = 2
## Keep Alive
## Defines the maximum length of time that the broker and client may not
## communicate. Defaults to 0 which turns the feature off.
##
## For mosquitto v2.0.12 and later there is a bug
## (see https://github.com/eclipse/mosquitto/issues/2117), which requires
## this to be non-zero. As a reference, eclipse/paho.mqtt.golang defaults to 30.
# keep_alive = 0
## Username and password used to connect to the MQTT server.
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
## client ID
## The unique client id used to connect to the MQTT server. If this parameter is not set
## then a random ID is generated.
# client_id = ""
## Timeout for write operations. default: 5s
# timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## When true, metrics will be sent in one MQTT message per flush. Otherwise,
## metrics are written one metric per MQTT message.
# batch = false
## When true, metrics will have the RETAIN flag set, making the broker cache
## entries until someone actually reads them
# retain = false
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"

View File

@ -0,0 +1,33 @@
# Send telegraf measurements to NATS
[[outputs.nats]]
## URLs of NATS servers
servers = ["nats://localhost:4222"]
## Optional client name
# name = ""
## Optional credentials
# username = ""
# password = ""
## Optional NATS 2.0 and NATS NGS compatible user credentials
# credentials = "/etc/telegraf/nats.creds"
## NATS subject for producer messages
subject = "telegraf"
## Use Transport Layer Security
# secure = false
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"

View File

@ -0,0 +1,22 @@
# Send metrics to New Relic metrics endpoint
[[outputs.newrelic]]
## The 'insights_key' parameter requires a NR license key.
## New Relic recommends you create one
## with a convenient name such as TELEGRAF_INSERT_KEY.
## reference: https://docs.newrelic.com/docs/apis/intro-apis/new-relic-api-keys/#ingest-license-key
# insights_key = "New Relic License Key Here"
## Prefix to add to the metric name for easy identification.
## This is very useful if your metric names are ambiguous.
# metric_prefix = ""
## Timeout for writes to the New Relic API.
# timeout = "15s"
## HTTP Proxy override. If unset use values from the standard
## proxy environment variables to determine proxy, if any.
# http_proxy = "http://corporate.proxy:3128"
## Metric URL override to enable geographic location endpoints.
## If not set, the default endpoint shown below is used.
# metric_url = "https://metric-api.newrelic.com/metric/v1"

View File

@ -0,0 +1,12 @@
# Send telegraf measurements to NSQD
[[outputs.nsq]]
## Location of nsqd instance listening on TCP
server = "localhost:4150"
## NSQ topic for producer messages
topic = "telegraf"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"

View File

@ -0,0 +1,33 @@
# Send OpenTelemetry metrics over gRPC
[[outputs.opentelemetry]]
## Override the default (localhost:4317) OpenTelemetry gRPC service
## address:port
# service_address = "localhost:4317"
## Override the default (5s) request timeout
# timeout = "5s"
## Optional TLS Config.
##
## Root certificates for verifying server certificates encoded in PEM format.
# tls_ca = "/etc/telegraf/ca.pem"
## The public and private keypairs for the client encoded in PEM format.
## May contain intermediate certificates.
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS, but skip TLS chain and host verification.
# insecure_skip_verify = false
## Send the specified TLS server name via SNI.
# tls_server_name = "foo.example.com"
## Override the default (gzip) compression used to send data.
## Supports: "gzip", "none"
# compression = "gzip"
## Additional OpenTelemetry resource attributes
# [outputs.opentelemetry.attributes]
# "service.name" = "demo"
## Additional gRPC request metadata
# [outputs.opentelemetry.headers]
# key1 = "value1"

View File

@ -0,0 +1,26 @@
# Configuration for OpenTSDB server to send metrics to
[[outputs.opentsdb]]
## prefix for metrics keys
prefix = "my.specific.prefix."
## DNS name of the OpenTSDB server
## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the
## telnet API. "http://opentsdb.example.com" will use the Http API.
host = "opentsdb.example.com"
## Port of the OpenTSDB server
port = 4242
## Number of data points to send to OpenTSDB in HTTP requests.
## Not used with telnet API.
http_batch_size = 50
## URI Path for HTTP requests to OpenTSDB.
## Used in cases where OpenTSDB is located behind a reverse proxy.
http_path = "/api/put"
## Debug true - Prints OpenTSDB communication
debug = false
## Separator separates measurement name from field
separator = "_"

View File

@ -0,0 +1,45 @@
# Configuration for the Prometheus client to spawn
[[outputs.prometheus_client]]
## Address to listen on.
listen = ":9273"
## Metric version controls the mapping from Telegraf metrics into
## Prometheus format. When using the prometheus input, use the same value in
## both plugins to ensure metrics are round-tripped without modification.
##
## example: metric_version = 1
## metric_version = 2 (recommended version)
# metric_version = 1
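## Illustrative note (hypothetical metric): under metric_version = 2, a metric
## cpu,host=web01 usage_idle=98 is exposed on the configured path as
## cpu_usage_idle{host="web01"} 98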
## Use HTTP Basic Authentication.
# basic_username = "Foo"
# basic_password = "Bar"
## If set, the IP Ranges which are allowed to access metrics.
## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"]
# ip_range = []
## Path to publish the metrics on.
# path = "/metrics"
## Expiration interval for each metric. 0 == no expiration
# expiration_interval = "60s"
## Collectors to enable, valid entries are "gocollector" and "process".
## If unset, both are enabled.
# collectors_exclude = ["gocollector", "process"]
## Send string metrics as Prometheus labels.
## Unless set to false all string metrics will be sent as labels.
# string_as_label = true
## If set, enable TLS with the given certificate.
# tls_cert = "/etc/ssl/telegraf.crt"
# tls_key = "/etc/ssl/telegraf.key"
## Set one or more allowed client CA certificate file names to
## enable mutually authenticated TLS connections
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
## Export metric collection time.
# export_timestamp = false

View File

@ -0,0 +1,32 @@
# Configuration for Riemann to send metrics to
[[outputs.riemann]]
## The full TCP or UDP URL of the Riemann server
url = "tcp://localhost:5555"
## Riemann event TTL, floating-point time in seconds.
## Defines how long an event is considered valid in Riemann
# ttl = 30.0
## Separator to use between measurement and field name in Riemann service name
## This does not have any effect if 'measurement_as_attribute' is set to 'true'
separator = "/"
## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name
# measurement_as_attribute = false
## Send string metrics as Riemann event states.
## Unless enabled all string metrics will be ignored
# string_as_state = false
## A list of tag keys whose values get sent as Riemann tags.
## If empty, all Telegraf tag values will be sent as tags
# tag_keys = ["telegraf","custom_tag"]
## Additional Riemann tags to send.
# tags = ["telegraf-output"]
## Description for Riemann event
# description_text = "metrics collected from telegraf"
## Riemann client write timeout, defaults to "5s" if not set.
# timeout = "5s"

View File

@ -0,0 +1,8 @@
# Configuration for the Riemann server to send metrics to
[[outputs.riemann_legacy]]
## URL of server
url = "localhost:5555"
## transport protocol to use, either tcp or udp
transport = "tcp"
## separator to use between input name and field name in Riemann service name
separator = " "

View File

@ -0,0 +1,89 @@
# Send aggregate metrics to Sensu Monitor
[[outputs.sensu]]
## BACKEND API URL is the Sensu Backend API root URL to send metrics to
## (protocol, host, and port only). The output plugin will automatically
## append the corresponding backend API path
## (/api/core/v2/namespaces/:entity_namespace/events/:entity_name/:check_name).
##
## Backend Events API reference:
## https://docs.sensu.io/sensu-go/latest/api/events/
##
## AGENT API URL is the Sensu Agent API root URL to send metrics to
## (protocol, host, and port only). The output plugin will automatically
## append the corresponding agent API path (/events).
##
## Agent API Events API reference:
## https://docs.sensu.io/sensu-go/latest/api/events/
##
## NOTE: if backend_api_url and agent_api_url and api_key are set, the output
## plugin will use backend_api_url. If backend_api_url and agent_api_url are
## not provided, the output plugin will default to use an agent_api_url of
## http://127.0.0.1:3031
##
# backend_api_url = "http://127.0.0.1:8080"
# agent_api_url = "http://127.0.0.1:3031"
## API KEY is the Sensu Backend API token
## Generate a new API token via:
##
## $ sensuctl cluster-role create telegraf --verb create --resource events,entities
## $ sensuctl cluster-role-binding create telegraf --cluster-role telegraf --group telegraf
## $ sensuctl user create telegraf --group telegraf --password REDACTED
## $ sensuctl api-key grant telegraf
##
## For more information on Sensu RBAC profiles & API tokens, please visit:
## - https://docs.sensu.io/sensu-go/latest/reference/rbac/
## - https://docs.sensu.io/sensu-go/latest/reference/apikeys/
##
# api_key = "${SENSU_API_KEY}"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Timeout for HTTP message
# timeout = "5s"
## HTTP Content-Encoding for write request body, can be set to "gzip" to
## compress body or "identity" to apply no encoding.
# content_encoding = "identity"
## Sensu Event details
##
## Below are the event details to be sent to Sensu. The main portions of the
## event are the check, entity, and metrics specifications. For more
## information on Sensu events and their components, please visit:
## - Events - https://docs.sensu.io/sensu-go/latest/reference/events
## - Checks - https://docs.sensu.io/sensu-go/latest/reference/checks
## - Entities - https://docs.sensu.io/sensu-go/latest/reference/entities
## - Metrics - https://docs.sensu.io/sensu-go/latest/reference/events#metrics
##
## Check specification
## The check name is the name to give the Sensu check associated with the event
## created. This maps to check.metadata.name in the event.
[outputs.sensu.check]
name = "telegraf"
## Entity specification
## Configure the entity name and namespace, if necessary. This will be part of
## the entity.metadata in the event.
##
## NOTE: if the output plugin is configured to send events to a
## backend_api_url and entity_name is not set, the value returned by
## os.Hostname() will be used; if the output plugin is configured to send
## events to an agent_api_url, entity_name and entity_namespace are not used.
# [outputs.sensu.entity]
# name = "server-01"
# namespace = "default"
## Metrics specification
## Configure the tags for the metrics that are sent as part of the Sensu event
# [outputs.sensu.tags]
# source = "telegraf"
## Configure the handler(s) for processing the provided metrics
# [outputs.sensu.metrics]
# handlers = ["influxdb","elasticsearch"]
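## Illustrative note: with the sample entity values above and the backend
## API, events for this configuration would be posted to
## /api/core/v2/namespaces/default/events/server-01/telegraf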

View File

@ -0,0 +1,17 @@
# Send metrics and events to SignalFx
[[outputs.signalfx]]
## SignalFx Org Access Token
access_token = "my-secret-token"
## The SignalFx realm that your organization resides in
signalfx_realm = "us9" # Required if ingest_url is not set
## You can optionally provide a custom ingest URL instead of the
## signalfx_realm option above if you are using a gateway or proxy
## instance. This option takes precedence over signalfx_realm.
ingest_url = "https://my-custom-ingest/"
## Event typed metrics are omitted by default.
## If you require an event typed metric, you must specify the
## metric name in the following list.
included_event_names = ["plugin.metric_name"]

View File

@ -0,0 +1,37 @@
# Generic socket writer capable of handling multiple socket types.
[[outputs.socket_writer]]
## URL to connect to
# address = "tcp://127.0.0.1:8094"
# address = "tcp://example.com:http"
# address = "tcp4://127.0.0.1:8094"
# address = "tcp6://127.0.0.1:8094"
# address = "tcp6://[2001:db8::1]:8094"
# address = "udp://127.0.0.1:8094"
# address = "udp4://127.0.0.1:8094"
# address = "udp6://127.0.0.1:8094"
# address = "unix:///tmp/telegraf.sock"
# address = "unixgram:///tmp/telegraf.sock"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Period between keep alive probes.
## Only applies to TCP sockets.
## 0 disables keep alive probes.
## Defaults to the OS configuration.
# keep_alive_period = "5m"
## Content encoding for message payloads, can be set to "gzip" to compress
## the payload, or to "identity" to apply no encoding.
##
# content_encoding = "identity"
## Data format to generate.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"

View File

@ -0,0 +1,53 @@
# Save metrics to an SQL Database
[[outputs.sql]]
## Database driver
## Valid options: mssql (Microsoft SQL Server), mysql (MySQL), pgx (Postgres),
## sqlite (SQLite3), snowflake (snowflake.com), clickhouse (ClickHouse)
# driver = ""
## Data source name
## The format of the data source name is different for each database driver.
## See the plugin readme for details.
# data_source_name = ""
## Timestamp column name
# timestamp_column = "timestamp"
## Table creation template
## Available template variables:
## {TABLE} - table name as a quoted identifier
## {TABLELITERAL} - table name as a quoted string literal
## {COLUMNS} - column definitions (list of quoted identifiers and types)
# table_template = "CREATE TABLE {TABLE}({COLUMNS})"
## Table existence check template
## Available template variables:
## {TABLE} - table name as a quoted identifier
# table_exists_template = "SELECT 1 FROM {TABLE} LIMIT 1"
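## Illustrative example: for a hypothetical metric "cpu" with tag "host" and
## field "usage", the default table_template above could render roughly as
## CREATE TABLE "cpu"("timestamp" TIMESTAMP,"host" TEXT,"usage" DOUBLE),
## with exact quoting and types depending on the driver and the conversion
## settings below.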
## Initialization SQL
# init_sql = ""
## Metric type to SQL type conversion
## The values on the left are the data types Telegraf has and the values on
## the right are the data types Telegraf will use when sending to a database.
##
## The database values used must be data types the destination database
## understands. It is up to the user to ensure that the selected data type is
## available in the database they are using. Refer to your database
## documentation for what data types are available and supported.
#[outputs.sql.convert]
# integer = "INT"
# real = "DOUBLE"
# text = "TEXT"
# timestamp = "TIMESTAMP"
# defaultvalue = "TEXT"
# unsigned = "UNSIGNED"
# bool = "BOOL"
## This setting controls the behavior of the unsigned conversion above. By
## default the setting takes the integer type and appends the unsigned value
## to it. The other option is "literal", which uses the value the user
## provides for the unsigned option verbatim. This is useful for a database
## like ClickHouse, where the unsigned type should be a value like "uint64".
# conversion_style = "unsigned_suffix"
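## Illustrative example: with the conversion table above, the default
## "unsigned_suffix" style yields the column type "INT UNSIGNED" for
## unsigned fields, while conversion_style = "literal" with
## unsigned = "uint64" yields the column type "uint64".
# conversion_style = "literal"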

View File

@ -0,0 +1,16 @@
# Configuration for Google Cloud Stackdriver to send metrics to
[[outputs.stackdriver]]
## GCP Project
project = "erudite-bloom-151019"
## The namespace for the metric descriptor
namespace = "telegraf"
## Custom resource type
# resource_type = "generic_node"
## Additional resource labels
# [outputs.stackdriver.resource_labels]
# node_id = "$HOSTNAME"
# namespace = "myapp"
# location = "eu-north0"

View File

@ -0,0 +1,51 @@
# A plugin that can send metrics to the Sumo Logic HTTP metric collector.
[[outputs.sumologic]]
## Unique URL generated for your HTTP Metrics Source.
## This is the address to send metrics to.
# url = "https://events.sumologic.net/receiver/v1/http/<UniqueHTTPCollectorCode>"
## Data format to be used for sending metrics.
## This will set the "Content-Type" header accordingly.
## Currently supported formats:
## * graphite - for Content-Type of application/vnd.sumologic.graphite
## * carbon2 - for Content-Type of application/vnd.sumologic.carbon2
## * prometheus - for Content-Type of application/vnd.sumologic.prometheus
##
## More information can be found at:
## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#content-type-headers-for-metrics
##
## NOTE:
## When unset, Telegraf will default to the influx serializer, which is
## currently not supported by the HTTP Source.
data_format = "carbon2"
## Timeout used for HTTP request
# timeout = "5s"
## Max HTTP request body size in bytes before compression (if applied).
## The default of 1MB is recommended.
## NOTE:
## Bear in mind that with some serializers a metric, even though serialized
## into multiple lines, cannot be split any further, so setting this very
## low might not work as expected.
# max_request_body_size = 1000000
## Additional, Sumo specific options.
## Full list can be found here:
## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#supported-http-headers
## Desired source name.
## Useful if you want to override the source name configured for the source.
# source_name = ""
## Desired host name.
## Useful if you want to override the source host configured for the source.
# source_host = ""
## Desired source category.
## Useful if you want to override the source category configured for the source.
# source_category = ""
## Comma-separated key=value list of dimensions to apply to every metric.
## Custom dimensions will allow you to query your metrics at a more granular level.
# dimensions = ""
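## Illustrative example with hypothetical values:
# dimensions = "env=prod,region=eu"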

View File

@ -0,0 +1,76 @@
# Configuration for Syslog server to send metrics to
[[outputs.syslog]]
## URL to connect to
## ex: address = "tcp://127.0.0.1:8094"
## ex: address = "tcp4://127.0.0.1:8094"
## ex: address = "tcp6://127.0.0.1:8094"
## ex: address = "tcp6://[2001:db8::1]:8094"
## ex: address = "udp://127.0.0.1:8094"
## ex: address = "udp4://127.0.0.1:8094"
## ex: address = "udp6://127.0.0.1:8094"
address = "tcp://127.0.0.1:8094"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Period between keep alive probes.
## Only applies to TCP sockets.
## 0 disables keep alive probes.
## Defaults to the OS configuration.
# keep_alive_period = "5m"
## The framing technique used to transport messages
## (default = "octet-counting"). Messages are framed either with the
## octet-counting technique (RFC5425#section-4.3.1, RFC6587#section-3.4.1)
## or with the non-transparent framing technique (RFC6587#section-3.4.2).
## Must be one of "octet-counting", "non-transparent".
# framing = "octet-counting"
## The trailer to use in case of non-transparent framing (default = "LF").
## Must be one of "LF", or "NUL".
# trailer = "LF"
## SD-PARAMs settings
## Syslog messages can contain key/value pairs within zero or more
## structured data sections. For each unrecognized metric tag/field an
## SD-PARAM is created.
##
## Example:
## [[outputs.syslog]]
## sdparam_separator = "_"
## default_sdid = "default@32473"
## sdids = ["foo@123", "bar@456"]
##
## input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1
## output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y]
## SD-PARAMs separator between the sdid and tag/field key (default = "_")
# sdparam_separator = "_"
## Default sdid used for tags/fields that don't contain a prefix defined in
## the explicit sdids setting below. If no default is specified, no SD-PARAMs
## will be used for unrecognized fields.
# default_sdid = "default@32473"
## List of explicit prefixes to extract from tag/field keys and use as the
## SDID, if they match (see above example for more details):
# sdids = ["foo@123", "bar@456"]
## Default severity value. Severity and Facility are used to calculate the
## message PRI value (RFC5424#section-6.2.1). Used when no metric field
## with key "severity_code" is defined. If unset, 5 (notice) is the default
# default_severity_code = 5
## Default facility value. Facility and Severity are used to calculate the
## message PRI value (RFC5424#section-6.2.1). Used when no metric field with
## key "facility_code" is defined. If unset, 1 (user-level) is the default
# default_facility_code = 1
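## For reference (RFC5424#section-6.2.1): PRI = facility * 8 + severity,
## so the defaults above yield PRI = 1 * 8 + 5 = 13.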
## Default APP-NAME value (RFC5424#section-6.2.5)
## Used when no metric tag with key "appname" is defined.
## If unset, "Telegraf" is the default
# default_appname = "Telegraf"

View File

@ -0,0 +1,117 @@
# Configuration for sending metrics to Amazon Timestream.
[[outputs.timestream]]
## Amazon Region
region = "us-east-1"
## Amazon Credentials
## Credentials are loaded in the following order
## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
## 2) Assumed credentials via STS if role_arn is specified
## 3) Explicit credentials from 'access_key' and 'secret_key'
## 4) Shared profile from 'profile'
## 5) Environment variables
## 6) Shared credentials file
## 7) EC2 Instance Profile
#access_key = ""
#secret_key = ""
#token = ""
#role_arn = ""
#web_identity_token_file = ""
#role_session_name = ""
#profile = ""
#shared_credential_file = ""
## Endpoint to make request against, the correct endpoint is automatically
## determined and this option should only be set if you wish to override the
## default.
## ex: endpoint_url = "http://localhost:8000"
# endpoint_url = ""
## Timestream database where the metrics will be inserted.
## The database must exist prior to starting Telegraf.
database_name = "yourDatabaseNameHere"
## Specifies if the plugin should describe the Timestream database upon
## startup to validate that it has the necessary permissions, connectivity,
## etc., as a safety check.
## If the describe operation fails, the plugin will not start,
## and therefore the Telegraf agent will not start.
describe_database_on_start = false
## The mapping mode specifies how Telegraf records are represented in Timestream.
## Valid values are: single-table, multi-table.
## For example, consider the following data in line protocol format:
## weather,location=us-midwest,season=summer temperature=82,humidity=71 1465839830100400200
## airquality,location=us-west no2=5,pm25=16 1465839830100400200
## where weather and airquality are the measurement names, location and season are tags,
## and temperature, humidity, no2, pm25 are fields.
## In multi-table mode:
## - first line will be ingested to table named weather
## - second line will be ingested to table named airquality
## - the tags will be represented as dimensions
## - first table (weather) will have two records:
## one with measurement name equals to temperature,
## another with measurement name equals to humidity
## - second table (airquality) will have two records:
## one with measurement name equals to no2,
## another with measurement name equals to pm25
## - the Timestream tables from the example will look like this:
## TABLE "weather":
## time | location | season | measure_name | measure_value::bigint
## 2016-06-13 17:43:50 | us-midwest | summer | temperature | 82
## 2016-06-13 17:43:50 | us-midwest | summer | humidity | 71
## TABLE "airquality":
## time | location | measure_name | measure_value::bigint
## 2016-06-13 17:43:50 | us-west | no2 | 5
## 2016-06-13 17:43:50 | us-west | pm25 | 16
## In single-table mode:
## - the data will be ingested into a single table, whose name will be valueOf(single_table_name)
## - the measurement name will be stored in a dimension named valueOf(single_table_dimension_name_for_telegraf_measurement_name)
## - location and season will be represented as dimensions
## - temperature, humidity, no2, pm25 will be represented as measure names
## - the Timestream table from the example will look like this:
## Assuming:
## - single_table_name = "my_readings"
## - single_table_dimension_name_for_telegraf_measurement_name = "namespace"
## TABLE "my_readings":
## time | location | season | namespace | measure_name | measure_value::bigint
## 2016-06-13 17:43:50 | us-midwest | summer | weather | temperature | 82
## 2016-06-13 17:43:50 | us-midwest | summer | weather | humidity | 71
## 2016-06-13 17:43:50 | us-west | NULL | airquality | no2 | 5
## 2016-06-13 17:43:50 | us-west | NULL | airquality | pm25 | 16
## In most cases, using multi-table mapping mode is recommended.
## However, you can consider using single-table in situations where you have thousands of measurement names.
mapping_mode = "multi-table"
## Only valid and required for mapping_mode = "single-table"
## Specifies the Timestream table where the metrics will be uploaded.
# single_table_name = "yourTableNameHere"
## Only valid and required for mapping_mode = "single-table"
## Describes what will be the Timestream dimension name for the Telegraf
## measurement name.
# single_table_dimension_name_for_telegraf_measurement_name = "namespace"
## Specifies if the plugin should create the table if it does not exist.
## The plugin writes the data without first checking whether the table exists.
## When the table does not exist and this parameter is set to true, the
## error returned from Timestream will cause the plugin to create the table.
create_table_if_not_exists = true
## Only valid and required if create_table_if_not_exists = true
## Specifies the Timestream table magnetic store retention period in days.
## Check Timestream documentation for more details.
create_table_magnetic_store_retention_period_in_days = 365
## Only valid and required if create_table_if_not_exists = true
## Specifies the Timestream table memory store retention period in hours.
## Check Timestream documentation for more details.
create_table_memory_store_retention_period_in_hours = 24
## Only valid and optional if create_table_if_not_exists = true
## Specifies the Timestream table tags.
## Check Timestream documentation for more details
# create_table_tags = { "foo" = "bar", "environment" = "dev"}
## Specify the maximum number of parallel goroutines to ingest/write data.
## If not specified, defaults to 1 goroutine.
max_write_go_routines = 25

View File

@ -0,0 +1,26 @@
# Write metrics to Warp 10
[[outputs.warp10]]
# Prefix to add to the measurement.
prefix = "telegraf."
# URL of the Warp 10 server
warp_url = "http://localhost:8080"
# Write token to access your app on Warp 10
token = "Token"
# Warp 10 query timeout
# timeout = "15s"
## Print Warp 10 error body
# print_error_body = false
## Max string error size
# max_string_error_size = 511
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

View File

@ -0,0 +1,51 @@
# Configuration for Wavefront server to send metrics to
[[outputs.wavefront]]
## URL for Wavefront Direct Ingestion. For Wavefront Proxy Ingestion, see
## the 'host' and 'port' options below.
url = "https://metrics.wavefront.com"
## Authentication Token for Wavefront. Only required if using Direct Ingestion
#token = "DUMMY_TOKEN"
## DNS name of the wavefront proxy server. Do not use if url is specified
#host = "wavefront.example.com"
## Port that the Wavefront proxy server listens on. Do not use if url is specified
#port = 2878
## prefix for metrics keys
#prefix = "my.specific.prefix."
## whether to use "value" for name of simple fields. default is false
#simple_fields = false
## character to use between metric and field name. default is . (dot)
#metric_separator = "."
## Convert metric name paths to use the metric_separator character
## When true, all _ (underscore) characters in the final metric name are
## converted to the metric_separator. default is true
#convert_paths = true
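## Illustrative example: with the defaults above (no prefix, "." separator,
## convert_paths enabled), field "usage_idle" of measurement "cpu" becomes
## the metric name "cpu.usage.idle".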
## Use Strict rules to sanitize metric and tag names from invalid characters
## When enabled forward slash (/) and comma (,) will be accepted
#use_strict = false
## Use Regex to sanitize metric and tag names from invalid characters
## Regex is more thorough, but significantly slower. default is false
#use_regex = false
## point tags to use as the source name for Wavefront (if none found, host will be used)
#source_override = ["hostname", "address", "agent_host", "node_host"]
## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true
#convert_bool = true
## Truncate metric tags to a total of 254 characters (tag name plus value). Wavefront will reject any
## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility.
#truncate_tags = false
## Flush the internal buffers after each batch. This effectively bypasses the background sending of metrics
## normally done by the Wavefront SDK. This can be used if you are experiencing buffer overruns. The sending
## of metrics will block for a longer time, but this will be handled gracefully by the internal buffering in
## Telegraf.
#immediate_flush = true

View File

@ -0,0 +1,35 @@
# A plugin that can transmit metrics over WebSocket.
[[outputs.websocket]]
## URL is the address to send metrics to. Make sure ws or wss scheme is used.
url = "ws://127.0.0.1:3000/telegraf"
## Timeouts (make sure read_timeout is larger than server ping interval or set to zero).
# connect_timeout = "30s"
# write_timeout = "30s"
# read_timeout = "30s"
## Optionally turn on using text data frames (binary by default).
# use_text_frames = false
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Optional SOCKS5 proxy to use
# socks5_enabled = true
# socks5_address = "127.0.0.1:1080"
# socks5_username = "alice"
# socks5_password = "pass123"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
## Additional HTTP Upgrade headers
# [outputs.websocket.headers]
# Authorization = "Bearer <TOKEN>"

View File

@ -0,0 +1,10 @@
# Send aggregated metrics to Yandex.Cloud Monitoring
[[outputs.yandex_cloud_monitoring]]
## Timeout for HTTP writes.
# timeout = "20s"
## Yandex.Cloud monitoring API endpoint. Normally should not be changed
# endpoint_url = "https://monitoring.api.cloud.yandex.net/monitoring/v2/data/write"
## All user metrics should be sent with "custom" service specified. Normally should not be changed
# service = "custom"