chore(inputs_a-l): migrate sample configs into separate files (#11132)

Sebastian Spaink 2022-05-18 11:31:52 -05:00 committed by GitHub
parent 0f5dc9946c
commit 6b697db11e
104 changed files with 3070 additions and 0 deletions

@ -0,0 +1,26 @@
# Gather ActiveMQ metrics
[[inputs.activemq]]
## ActiveMQ WebConsole URL
url = "http://127.0.0.1:8161"
## Legacy ActiveMQ endpoint
## Deprecated in 1.11; use the 'url' option instead
# server = "192.168.50.10"
# port = 8161
## Credentials for basic HTTP authentication
# username = "admin"
# password = "admin"
## Required ActiveMQ webadmin root path
# webadmin = "admin"
## Maximum time to receive response.
# response_timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

@ -0,0 +1,41 @@
# Read stats from aerospike server(s)
[[inputs.aerospike]]
## Aerospike servers to connect to (with port)
## This plugin will query all namespaces the aerospike
## server has configured and get stats for them.
servers = ["localhost:3000"]
# username = "telegraf"
# password = "pa$$word"
## Optional TLS Config
# enable_tls = false
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
# tls_name = "tlsname"
## If true, skip chain & host verification
# insecure_skip_verify = true
# Feature Options
# Add the namespaces option to limit which namespaces are queried
# Leave blank to query all
# disable_query_namespaces = true # default false
# namespaces = ["namespace1", "namespace2"]
# Enable set level telemetry
# query_sets = true # default: false
# Add namespace/set combinations to limit which sets are queried
# Leave blank to query all sets
# sets = ["namespace1/set1", "namespace1/set2", "namespace3"]
# Histograms
# enable_ttl_histogram = true # default: false
# enable_object_size_linear_histogram = true # default: false
# By default, Aerospike produces a 100-bucket histogram.
# This is not ideal for most graphing tools; this option allows
# squashing it into a smaller number of buckets.
# To have a balanced histogram, the number of buckets chosen
# should divide evenly into 100.
# num_histogram_buckets = 100 # default: 10
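
An illustrative sketch of the bucket guidance above (the values are hypothetical, not part of the sample): squashing the 100-bucket TTL histogram into 20 buckets keeps it balanced, since 20 divides evenly into 100 (5 source buckets per reported bucket).

[[inputs.aerospike]]
servers = ["localhost:3000"]
enable_ttl_histogram = true
num_histogram_buckets = 20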

@ -0,0 +1,100 @@
# Pull Metric Statistics from Aliyun CMS
[[inputs.aliyuncms]]
## Aliyun Credentials
## Credentials are loaded in the following order
## 1) Ram RoleArn credential
## 2) AccessKey STS token credential
## 3) AccessKey credential
## 4) Ecs Ram Role credential
## 5) RSA keypair credential
## 6) Environment variables credential
## 7) Instance metadata credential
# access_key_id = ""
# access_key_secret = ""
# access_key_sts_token = ""
# role_arn = ""
# role_session_name = ""
# private_key = ""
# public_key_id = ""
# role_name = ""
## Specify the Alibaba Cloud regions to be queried for metrics and object discovery.
## If not set, all supported regions (see below) are covered, which can put a significant load on the API,
## so it is recommended to limit the list as much as possible. Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm
## Default supported regions are:
## 21 items: cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,cn-shanghai,cn-shenzhen,
## cn-heyuan,cn-chengdu,cn-hongkong,ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5,
## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,eu-west-1,me-east-1
##
## From a discovery perspective this sets the scope for object discovery; the discovered info can be used to enrich
## the metrics with object attributes/tags. Discovery is not supported for all projects (if not supported, this
## is reported at startup - for example for the 'acs_cdn' project:
## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"')
## Currently, discovery is supported for the following projects:
## - acs_ecs_dashboard
## - acs_rds_dashboard
## - acs_slb_dashboard
## - acs_vpc_eip
regions = ["cn-hongkong"]
# The minimum period for AliyunCMS metrics is 1 minute (60s). However not all
# metrics are made available to the 1 minute period. Some are collected at
# 3 minute, 5 minute, or larger intervals.
# See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv
# Note that if a period is configured that is smaller than the minimum for a
# particular metric, that metric will not be returned by the Aliyun OpenAPI
# and will not be collected by Telegraf.
#
## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s)
period = "5m"
## Collection Delay (required - must account for metrics availability via AliyunCMS API)
delay = "1m"
## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
## gaps or overlap in pulled data
interval = "5m"
## Metric Statistic Project (required)
project = "acs_slb_dashboard"
## Maximum requests per second, default value is 200
ratelimit = 200
## How often the discovery API call executed (default 1m)
#discovery_interval = "1m"
## Metrics to Pull (Required)
[[inputs.aliyuncms.metrics]]
## Metrics names to be requested,
## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
names = ["InstanceActiveConnection", "InstanceNewConnection"]
## Dimension filters for the metric (optional).
## This allows requesting an additional metric dimension. If a dimension is not specified it may still be returned,
## or the data may be aggregated - it depends on the particular metric; you can find details here: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
##
## Note that by default the dimension filter includes the list of discovered objects in scope (if discovery is enabled).
## Values specified here are added to the list of discovered objects.
## You can specify either a single dimension:
#dimensions = '{"instanceId": "p-example"}'
## Or you can specify several dimensions at once:
#dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]'
## Enrichment tags, can be added from discovery (if supported)
## Notation is <measurement_tag_name>:<JMES query path (https://jmespath.org/tutorial.html)>
## To figure out which fields are available, consult the Describe<ObjectType> API per project.
## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO
#tag_query_path = [
# "address:Address",
# "name:LoadBalancerName",
# "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]"
# ]
## The following tags are added by default: regionId (if discovery is enabled), userId, instanceId.
## Allow metrics without discovery data, if discovery is enabled. If set to true, metrics without discovery
## data are emitted, otherwise they are dropped. This can help when debugging dimension filters, or when the
## discovery scope only partially covers the monitoring scope.
#allow_dps_without_discovery = false

@ -0,0 +1,7 @@
# Query statistics from AMD Graphics cards using rocm-smi binary
[[inputs.amd_rocm_smi]]
## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath
# bin_path = "/opt/rocm/bin/rocm-smi"
## Optional: timeout for GPU polling
# timeout = "5s"

@ -0,0 +1,74 @@
# AMQP consumer plugin
[[inputs.amqp_consumer]]
## Brokers to consume from. If multiple brokers are specified a random broker
## will be selected anytime a connection is established. This can be
## helpful for load balancing when not using a dedicated load balancer.
brokers = ["amqp://localhost:5672/influxdb"]
## Authentication credentials for the PLAIN auth_method.
# username = ""
# password = ""
## Name of the exchange to declare. If unset, no exchange will be declared.
exchange = "telegraf"
## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
# exchange_type = "topic"
## If true, exchange will be passively declared.
# exchange_passive = false
## Exchange durability can be either "transient" or "durable".
# exchange_durability = "durable"
## Additional exchange arguments.
# exchange_arguments = { }
# exchange_arguments = {"hash_property" = "timestamp"}
## AMQP queue name.
queue = "telegraf"
## AMQP queue durability can be "transient" or "durable".
queue_durability = "durable"
## If true, queue will be passively declared.
# queue_passive = false
## A binding between the exchange and queue using this binding key is
## created. If unset, no binding is created.
binding_key = "#"
## Maximum number of messages the server should give to the worker.
# prefetch_count = 50
## Maximum messages to read from the broker that have not been written by an
## output. For best throughput set based on the number of metrics within
## each message and the size of the output's metric_batch_size.
##
## For example, if each message from the queue contains 10 metrics and the
## output metric_batch_size is 1000, setting this to 100 will ensure that a
## full batch is collected and the write is triggered immediately without
## waiting until the next flush_interval.
# max_undelivered_messages = 1000
## Auth method. PLAIN and EXTERNAL are supported
## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
## described here: https://www.rabbitmq.com/plugins.html
# auth_method = "PLAIN"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Content encoding for message payloads; can be set to "gzip", or to
## "identity" to apply no encoding.
# content_encoding = "identity"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
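
A rough sizing sketch for max_undelivered_messages (the numbers are illustrative assumptions, not part of the sample): with about 10 metrics per AMQP message and an output whose metric_batch_size is 1000, a value of 100 keeps exactly one full output batch in flight (100 messages x 10 metrics = 1000 metrics).

[[inputs.amqp_consumer]]
brokers = ["amqp://localhost:5672/influxdb"]
exchange = "telegraf"
queue = "telegraf"
binding_key = "#"
max_undelivered_messages = 100
data_format = "influx"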

@ -0,0 +1,20 @@
# Read Apache status information (mod_status)
[[inputs.apache]]
## An array of URLs to gather from, must be directed at the machine
## readable version of the mod_status page including the auto query string.
## Default is "http://localhost/server-status?auto".
urls = ["http://localhost/server-status?auto"]
## Credentials for basic HTTP authentication.
# username = "myuser"
# password = "mypassword"
## Maximum time to receive response.
# response_timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

@ -0,0 +1,8 @@
# Monitor APC UPSes connected to apcupsd
[[inputs.apcupsd]]
# A list of running apcupsd servers to connect to.
# If not provided, defaults to tcp://127.0.0.1:3551
servers = ["tcp://127.0.0.1:3551"]
## Timeout for dialing server.
timeout = "5s"

@ -0,0 +1,24 @@
# Gather metrics from Apache Aurora schedulers
[[inputs.aurora]]
## Schedulers are the base addresses of your Aurora Schedulers
schedulers = ["http://127.0.0.1:8081"]
## Set of role types to collect metrics from.
##
## The scheduler roles are checked each interval by contacting the
## scheduler nodes; zookeeper is not contacted.
# roles = ["leader", "follower"]
## Timeout is the max time for total network operations.
# timeout = "5s"
## Username and password are sent using HTTP Basic Auth.
# username = "username"
# password = "pa$$word"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

@ -0,0 +1,10 @@
# Gather Azure Storage Queue metrics
[[inputs.azure_storage_queue]]
## Required Azure Storage Account name
account_name = "mystorageaccount"
## Required Azure Storage Account access key
account_key = "storageaccountaccesskey"
## Set to false to disable peeking at the age of the oldest message (executes faster)
# peek_oldest_message_age = true

@ -0,0 +1,10 @@
# Read metrics of bcache from stats_total and dirty_data
[[inputs.bcache]]
## Bcache sets path
## If not specified, the default is:
bcachePath = "/sys/fs/bcache"
## By default, Telegraf gathers stats for all bcache devices
## Setting devices will restrict the stats to the specified
## bcache devices.
bcacheDevs = ["bcache0"]

@ -0,0 +1,8 @@
# Collects Beanstalkd server and tubes stats
[[inputs.beanstalkd]]
## Server to collect data from
server = "localhost:11300"
## List of tubes to gather stats about.
## If no tubes are specified, stats are gathered for every tube the server reports via the list-tubes command
tubes = ["notifications"]

@ -0,0 +1,33 @@
# Read metrics exposed by Beat
[[inputs.beat]]
## A URL from which to read Beat-formatted JSON
## Default is "http://127.0.0.1:5066".
url = "http://127.0.0.1:5066"
## Enable collection of the listed stats
## An empty list means collect all. Available options are currently
## "beat", "libbeat", "system" and "filebeat".
# include = ["beat", "libbeat", "filebeat"]
## HTTP method
# method = "GET"
## Optional HTTP headers
# headers = {"X-Special-Header" = "Special-Value"}
## Override HTTP "Host" header
# host_header = "logstash.example.com"
## Timeout for HTTP requests
# timeout = "5s"
## Optional HTTP Basic Auth credentials
# username = "username"
# password = "pa$$word"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

@ -0,0 +1,10 @@
# Read BIND nameserver XML statistics
[[inputs.bind]]
## An array of BIND XML statistics URIs from which to gather stats.
## Default is "http://localhost:8053/xml/v3".
# urls = ["http://localhost:8053/xml/v3"]
# gather_memory_contexts = false
# gather_views = false
## Timeout for HTTP requests made to the BIND nameserver
# timeout = "4s"

@ -0,0 +1,18 @@
# Collect bond interface status, slaves statuses and failures count
[[inputs.bond]]
## Sets 'proc' directory path
## If not specified, then default is /proc
# host_proc = "/proc"
## Sets 'sys' directory path
## If not specified, then default is /sys
# host_sys = "/sys"
## By default, telegraf gathers stats for all bond interfaces
## Setting interfaces will restrict the stats to the specified
## bond interfaces.
# bond_interfaces = ["bond0"]
## Tries to collect additional bond details from /sys/class/net/{bond}
## currently only useful for LACP (mode 4) bonds
# collect_sys_details = false

@ -0,0 +1,41 @@
# Collect Kafka topics and consumers status from Burrow HTTP API.
[[inputs.burrow]]
## Burrow API endpoints in format "schema://host:port".
## Default is "http://localhost:8000".
servers = ["http://localhost:8000"]
## Override Burrow API prefix.
## Useful when Burrow is behind reverse-proxy.
# api_prefix = "/v3/kafka"
## Maximum time to receive response.
# response_timeout = "5s"
## Limit per-server concurrent connections.
## Useful in case of large number of topics or consumer groups.
# concurrent_connections = 20
## Filter clusters, default is no filtering.
## Values can be specified as glob patterns.
# clusters_include = []
# clusters_exclude = []
## Filter consumer groups, default is no filtering.
## Values can be specified as glob patterns.
# groups_include = []
# groups_exclude = []
## Filter topics, default is no filtering.
## Values can be specified as glob patterns.
# topics_include = []
# topics_exclude = []
## Credentials for basic HTTP authentication.
# username = ""
# password = ""
## Optional SSL config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
# insecure_skip_verify = false
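
A small sketch of the glob-style filters described above (the cluster and group names are hypothetical):

[[inputs.burrow]]
servers = ["http://localhost:8000"]
## keep only production clusters and drop ad-hoc console consumers
clusters_include = ["prod-*"]
groups_exclude = ["console-consumer-*"]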

@ -0,0 +1,20 @@
# Read Cassandra metrics through Jolokia
[[inputs.cassandra]]
## DEPRECATED: The cassandra plugin has been deprecated. Please use the
## jolokia2 plugin instead.
##
## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
context = "/jolokia/read"
## List of cassandra servers exposing jolokia read service
servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
## List of metrics collected on above servers
## Each metric consists of a jmx path.
## This will collect all heap memory usage metrics from the jvm and
## ReadLatency metrics for all keyspaces and tables.
## "type=Table" in the query works with Cassandra3.0. Older versions might
## need to use "type=ColumnFamily"
metrics = [
"/java.lang:type=Memory/HeapMemoryUsage",
"/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
]
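
For pre-3.0 Cassandra, the comment above suggests swapping "type=Table" for "type=ColumnFamily"; a sketch of that variant of the metrics list:

[[inputs.cassandra]]
context = "/jolokia/read"
servers = [":8778"]
metrics = [
  "/java.lang:type=Memory/HeapMemoryUsage",
  "/org.apache.cassandra.metrics:type=ColumnFamily,keyspace=*,scope=*,name=ReadLatency"
]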

@ -0,0 +1,42 @@
# Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster.
[[inputs.ceph]]
## This is the recommended interval to poll; polling too frequently can lose
## data points due to timeouts during rebalancing and recovery
interval = '1m'
## All configuration values are optional, defaults are shown below
## location of ceph binary
ceph_binary = "/usr/bin/ceph"
## directory in which to look for socket files
socket_dir = "/var/run/ceph"
## prefix of MON and OSD socket files, used to determine socket type
mon_prefix = "ceph-mon"
osd_prefix = "ceph-osd"
mds_prefix = "ceph-mds"
rgw_prefix = "ceph-client"
## suffix used to identify socket files
socket_suffix = "asok"
## Ceph user to authenticate as, ceph will search for the corresponding keyring
## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the
## client section of ceph.conf for example:
##
## [client.telegraf]
## keyring = /etc/ceph/client.telegraf.keyring
##
## Consult the ceph documentation for more detail on keyring generation.
ceph_user = "client.admin"
## Ceph configuration to use to locate the cluster
ceph_config = "/etc/ceph/ceph.conf"
## Whether to gather statistics via the admin socket
gather_admin_socket_stats = true
## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config
## to be specified
gather_cluster_stats = false

@ -0,0 +1,14 @@
# Read specific statistics per cgroup
[[inputs.cgroup]]
## Directories in which to look for files, globs are supported.
## Consider restricting paths to the set of cgroups you really
## want to monitor if you have a large number of cgroups, to avoid
## any cardinality issues.
# paths = [
# "/sys/fs/cgroup/memory",
# "/sys/fs/cgroup/memory/child1",
# "/sys/fs/cgroup/memory/child2/*",
# ]
## cgroup stat fields, as file names, globs are supported.
## these file names are appended to each path from above.
# files = ["memory.*usage*", "memory.limit_in_bytes"]

@ -0,0 +1,4 @@
# Get standard chrony metrics, requires chronyc executable.
[[inputs.chrony]]
## If true, chronyc tries to perform a DNS lookup for the time server.
# dns_lookup = false

@ -0,0 +1,40 @@
# Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms
[[inputs.cisco_telemetry_mdt]]
## Telemetry transport can be "tcp" or "grpc". TLS is only supported when
## using the grpc transport.
transport = "grpc"
## Address and port to host telemetry listener
service_address = ":57000"
## gRPC maximum message size; the default is 4MB. Increase if needed.
max_msg_size = 4000000
## Enable TLS; grpc transport only.
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Enable TLS client authentication and define allowed CA certificates; grpc
## transport only.
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
## Define (for certain nested telemetry measurements with embedded tags) which fields are tags
# embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"]
## Define aliases to map telemetry encoding paths to simple measurement names
[inputs.cisco_telemetry_mdt.aliases]
ifstats = "ietf-interfaces:interfaces-state/interface/statistics"
## Define property transformations; please refer to the README and https://pubhub.devnetcloud.com/media/dme-docs-9-3-3/docs/appendix/ for model details.
[inputs.cisco_telemetry_mdt.dmes]
# Global Property Xformation.
# prop1 = "uint64 to int"
# prop2 = "uint64 to string"
# prop3 = "string to uint64"
# prop4 = "string to int64"
# prop5 = "string to float64"
# auto-prop-xfrom = "auto-float-xfrom"  # Transform any string property holding a float number to type float64
# Per-path property transformation: Name is the telemetry configuration under sensor-group, path configuration "WORD Distinguished Name"
# Per-path configuration is preferred as it avoids property type collision issues.
# dnpath = '{"Name": "show ip route summary","prop": [{"Key": "routes","Value": "string"}, {"Key": "best-paths","Value": "string"}]}'
# dnpath2 = '{"Name": "show processes cpu","prop": [{"Key": "kernel_percent","Value": "float"}, {"Key": "idle_percent","Value": "float"}, {"Key": "process","Value": "string"}, {"Key": "user_percent","Value": "float"}, {"Key": "onesec","Value": "float"}]}'
# dnpath3 = '{"Name": "show processes memory physical","prop": [{"Key": "processname","Value": "string"}]}'

@ -0,0 +1,65 @@
# Read metrics from one or many ClickHouse servers
[[inputs.clickhouse]]
## Username for authorization on ClickHouse server
## example: username = "default"
username = "default"
## Password for authorization on ClickHouse server
## example: password = "super_secret"
## HTTP(s) timeout while getting metrics values
## The timeout includes connection time, any redirects, and reading the response body.
## example: timeout = "1s"
# timeout = "5s"
## List of servers for metrics scraping
## metrics scrape via HTTP(s) clickhouse interface
## https://clickhouse.tech/docs/en/interfaces/http/
## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"]
servers = ["http://127.0.0.1:8123"]
## If "auto_discovery"" is "true" plugin tries to connect to all servers available in the cluster
## with using same "user:password" described in "user" and "password" parameters
## and get this server hostname list from "system.clusters" table
## see
## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters
## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers
## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/
## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables
## example: auto_discovery = false
# auto_discovery = true
## Filter cluster names in "system.clusters" when "auto_discovery" is "true".
## When this filter is present, a "WHERE cluster IN (...)" filter is applied.
## Please use only full cluster names here; regexp and glob filters are not allowed.
## For example, for "/etc/clickhouse-server/config.d/remote.xml":
## <yandex>
## <remote_servers>
## <my-own-cluster>
## <shard>
## <replica><host>clickhouse-ru-1.local</host><port>9000</port></replica>
## <replica><host>clickhouse-ru-2.local</host><port>9000</port></replica>
## </shard>
## <shard>
## <replica><host>clickhouse-eu-1.local</host><port>9000</port></replica>
## <replica><host>clickhouse-eu-2.local</host><port>9000</port></replica>
## </shard>
## </my-own-cluster>
## </remote_servers>
##
## </yandex>
##
## example: cluster_include = ["my-own-cluster"]
# cluster_include = []
## Filter cluster names in "system.clusters" when "auto_discovery" is "true".
## When this filter is present, a "WHERE cluster NOT IN (...)" filter is applied.
## example: cluster_exclude = ["my-internal-not-discovered-cluster"]
# cluster_exclude = []
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

@ -0,0 +1,74 @@
# Read metrics from Google PubSub
[[inputs.cloud_pubsub]]
## Required. Name of Google Cloud Platform (GCP) Project that owns
## the given PubSub subscription.
project = "my-project"
## Required. Name of PubSub subscription to ingest metrics from.
subscription = "my-subscription"
## Required. Data format to consume.
## Each data format has its own unique set of configuration options.
## Read more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
## Optional. Filepath for GCP credentials JSON file to authorize calls to
## PubSub APIs. If not set explicitly, Telegraf will attempt to use
## Application Default Credentials, which is preferred.
# credentials_file = "path/to/my/creds.json"
## Optional. Number of seconds to wait before attempting to restart the
## PubSub subscription receiver after an unexpected error.
## If the streaming pull for a PubSub Subscription fails (receiver),
## the agent attempts to restart receiving messages after this many seconds.
# retry_delay_seconds = 5
## Optional. Maximum byte length of a message to consume.
## Larger messages are dropped with an error. If less than 0 or unspecified,
## treated as no limit.
# max_message_len = 1000000
## Optional. Maximum messages to read from PubSub that have not been written
## to an output. Defaults to 1000.
## For best throughput set based on the number of metrics within
## each message and the size of the output's metric_batch_size.
##
## For example, if each message contains 10 metrics and the output
## metric_batch_size is 1000, setting this to 100 will ensure that a
## full batch is collected and the write is triggered immediately without
## waiting until the next flush_interval.
# max_undelivered_messages = 1000
## The following are optional Subscription ReceiveSettings in PubSub.
## Read more about these values:
## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings
## Optional. Maximum number of seconds for which a PubSub subscription
## should auto-extend the PubSub ACK deadline for each message. If less than
## 0, auto-extension is disabled.
# max_extension = 0
## Optional. Maximum number of unprocessed messages in PubSub
## (unacknowledged but not yet expired in PubSub).
## A value of 0 is treated as the default PubSub value.
## Negative values will be treated as unlimited.
# max_outstanding_messages = 0
## Optional. Maximum size in bytes of unprocessed messages in PubSub
## (unacknowledged but not yet expired in PubSub).
## A value of 0 is treated as the default PubSub value.
## Negative values will be treated as unlimited.
# max_outstanding_bytes = 0
## Optional. Max number of goroutines a PubSub Subscription receiver can spawn
## to pull messages from PubSub concurrently. This limit applies to each
## subscription separately and is treated as the PubSub default if less than
## 1. Note this setting does not limit the number of messages that can be
## processed concurrently (use "max_outstanding_messages" instead).
# max_receiver_go_routines = 0
## Optional. If true, Telegraf will attempt to base64 decode the
## PubSub message data before parsing. Many GCP services that
## output JSON to Google PubSub base64-encode the JSON payload.
# base64_data = false

@ -0,0 +1,49 @@
# Google Cloud Pub/Sub Push HTTP listener
[[inputs.cloud_pubsub_push]]
## Address and port to host HTTP listener on
service_address = ":8080"
## Application secret to verify messages originate from Cloud Pub/Sub
# token = ""
## Path to listen to.
# path = "/"
## Maximum duration before timing out read of the request
# read_timeout = "10s"
## Maximum duration before timing out write of the response. This should be set to a value
## large enough that you can send at least 'metric_batch_size' number of messages within the
## duration.
# write_timeout = "10s"
## Maximum allowed http request body size in bytes.
## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
# max_body_size = "500MB"
## Whether to add the pubsub metadata, such as message attributes and subscription, as tags.
# add_meta = false
## Optional. Maximum messages to read from PubSub that have not been written
## to an output. Defaults to 1000.
## For best throughput set based on the number of metrics within
## each message and the size of the output's metric_batch_size.
##
## For example, if each message contains 10 metrics and the output
## metric_batch_size is 1000, setting this to 100 will ensure that a
## full batch is collected and the write is triggered immediately without
## waiting until the next flush_interval.
# max_undelivered_messages = 1000
## Set one or more allowed client CA certificate file names to
## enable mutually authenticated TLS connections
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
## Add service certificate and key
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"

@ -0,0 +1,95 @@
# Pull Metric Statistics from Amazon CloudWatch
[[inputs.cloudwatch]]
## Amazon Region
region = "us-east-1"
## Amazon Credentials
## Credentials are loaded in the following order
## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
## 2) Assumed credentials via STS if role_arn is specified
## 3) explicit credentials from 'access_key' and 'secret_key'
## 4) shared profile from 'profile'
## 5) environment variables
## 6) shared credentials file
## 7) EC2 Instance Profile
# access_key = ""
# secret_key = ""
# token = ""
# role_arn = ""
# web_identity_token_file = ""
# role_session_name = ""
# profile = ""
# shared_credential_file = ""
## Endpoint to make request against, the correct endpoint is automatically
## determined and this option should only be set if you wish to override the
## default.
## ex: endpoint_url = "http://localhost:8000"
# endpoint_url = ""
## Set http_proxy (telegraf uses the system-wide proxy settings if it is not set)
# http_proxy_url = "http://localhost:8888"
# The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
# metrics are made available to the 1 minute period. Some are collected at
# 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
# Note that if a period is configured that is smaller than the minimum for a
# particular metric, that metric will not be returned by the Cloudwatch API
# and will not be collected by Telegraf.
#
## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
period = "5m"
## Collection Delay (required - must account for metrics availability via CloudWatch API)
delay = "5m"
## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
## gaps or overlap in pulled data
interval = "5m"
## Recommended if "delay" and "period" are both within 3 hours of request time. Invalid values will be ignored.
## Recently Active feature will only poll for CloudWatch ListMetrics values that occurred within the last 3 Hours.
## If enabled, it will reduce total API usage of the CloudWatch ListMetrics API and require less memory to retain.
## Do not enable if "period" or "delay" is longer than 3 hours, as it will not return data more than 3 hours old.
## See https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html
#recently_active = "PT3H"
## Configure the TTL for the internal cache of metrics.
# cache_ttl = "1h"
## Metric Statistic Namespaces (required)
namespaces = ["AWS/ELB"]
# A single metric statistic namespace that will be appended to namespaces on startup
# namespace = "AWS/ELB"
## Maximum requests per second. Note that the global default AWS rate limit is
## 50 reqs/sec, so if you define multiple namespaces, these should add up to a
## maximum of 50.
## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
# ratelimit = 25
## Timeout for http requests made by the cloudwatch client.
# timeout = "5s"
## Namespace-wide statistic filters. These allow fewer queries to be made to
## cloudwatch.
# statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ]
# statistic_exclude = []
## Metrics to Pull
## Defaults to all Metrics in Namespace if nothing is provided
## Refreshes Namespace available metrics every 1h
#[[inputs.cloudwatch.metrics]]
# names = ["Latency", "RequestCount"]
#
# ## Statistic filters for Metric. These allow for retrieving specific
# ## statistics for an individual metric.
# # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ]
# # statistic_exclude = []
#
# ## Dimension filters for Metric. All dimensions defined for the metric names
# ## must be specified in order to retrieve the metric statistics.
# ## 'value' has wildcard / 'glob' matching support such as 'p-*'.
# [[inputs.cloudwatch.metrics.dimensions]]
# name = "LoadBalancerName"
# value = "p-example"
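
A sketch of the rate-limit guidance above (the second namespace, AWS/EC2, is an illustrative assumption): when running two instances of the plugin, keep their combined ratelimit at or below the 50 req/s account default.

[[inputs.cloudwatch]]
region = "us-east-1"
period = "5m"
delay = "5m"
interval = "5m"
namespaces = ["AWS/ELB"]
ratelimit = 25

[[inputs.cloudwatch]]
region = "us-east-1"
period = "5m"
delay = "5m"
interval = "5m"
namespaces = ["AWS/EC2"]
ratelimit = 25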

@ -0,0 +1,14 @@
# Collects conntrack stats from the configured directories and files.
[[inputs.conntrack]]
## The following defaults would work with multiple versions of conntrack.
## Note the nf_ and ip_ filename prefixes are mutually exclusive across
## kernel versions, as are the directory locations.
## Superset of filenames to look for within the conntrack dirs.
## Missing files will be ignored.
files = ["ip_conntrack_count","ip_conntrack_max",
"nf_conntrack_count","nf_conntrack_max"]
## Directories to search within for the conntrack files above.
## Missing directories will be ignored.
dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]

@ -0,0 +1,37 @@
# Gather health check statuses from services registered in Consul
[[inputs.consul]]
## Consul server address
# address = "localhost:8500"
## URI scheme for the Consul server, one of "http", "https"
# scheme = "http"
## Metric version controls the mapping from Consul metrics into
## Telegraf metrics. Version 2 moved all fields with string values
## to tags.
##
## example: metric_version = 1; deprecated in 1.16
## metric_version = 2; recommended version
# metric_version = 1
## ACL token used in every request
# token = ""
## HTTP Basic Authentication username and password.
# username = ""
# password = ""
## Data center to query the health checks from
# datacenter = ""
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = true
## Consul checks' tag splitting
# When tags are formatted like "key:value" with ":" as the delimiter, they
# will be split and reported as proper key/value pairs in Telegraf
# tag_delimiter = ":"
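
A brief sketch of the tag splitting described above (the check tag is hypothetical): with the delimiter set to ":", a Consul check tagged "az:us-east-1a" is reported as the Telegraf tag az=us-east-1a.

[[inputs.consul]]
address = "localhost:8500"
tag_delimiter = ":"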

@ -0,0 +1,19 @@
# Read metrics from the Consul Agent API
[[inputs.consul_agent]]
## URL for the Consul agent
# url = "http://127.0.0.1:8500"
## Use auth token for authorization.
## If both are set, an error is thrown.
## If both are empty, no token will be used.
# token_file = "/path/to/auth/token"
## OR
# token = "a1234567-40c7-9048-7bae-378687048181"
## Set timeout (default 5 seconds)
# timeout = "5s"
## Optional TLS Config
# tls_ca = "/path/to/cafile"
# tls_cert = "/path/to/certfile"
# tls_key = "/path/to/keyfile"

@ -0,0 +1,23 @@
# Read per-node and per-bucket metrics from Couchbase
[[inputs.couchbase]]
## specify servers via a url matching:
## [protocol://][username:password@]address[:port]
## e.g.
## http://couchbase-0.example.com/
## http://admin:secret@couchbase-0.example.com:8091/
##
## If no servers are specified, then localhost is used as the host.
## If no protocol is specified, HTTP is used.
## If no port is specified, 8091 is used.
servers = ["http://localhost:8091"]
## Filter bucket fields to include only here.
# bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"]
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification (defaults to false)
## If set to false, tls_cert and tls_key are required
# insecure_skip_verify = false

@ -0,0 +1,9 @@
# Read CouchDB Stats from one or more servers
[[inputs.couchdb]]
## Works with CouchDB stats endpoints out of the box
## Multiple Hosts from which to read CouchDB stats:
hosts = ["http://localhost:8086/_stats"]
## Use HTTP Basic Authentication.
# basic_username = "telegraf"
# basic_password = "p@ssw0rd"

@ -0,0 +1,10 @@
# Read metrics about cpu usage
[[inputs.cpu]]
## Whether to report per-cpu stats or not
percpu = true
## Whether to report total system cpu stats or not
totalcpu = true
## If true, collect raw CPU time metrics
collect_cpu_time = false
## If true, compute and report the sum of all non-idle CPU states
report_active = false

@ -0,0 +1,10 @@
# Fetch metrics from a CSGO SRCDS
[[inputs.csgo]]
## Specify servers using the following format:
## servers = [
## ["ip1:port1", "rcon_password1"],
## ["ip2:port2", "rcon_password2"],
## ]
#
## If no servers are specified, no data will be collected
servers = []

@ -0,0 +1,42 @@
# Input plugin for DC/OS metrics
[[inputs.dcos]]
## The DC/OS cluster URL.
cluster_url = "https://dcos-master-1"
## The ID of the service account.
service_account_id = "telegraf"
## The private key file for the service account.
service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem"
## Path containing login token. If set, will read on every gather.
# token_file = "/home/dcos/.dcos/token"
## In all filter options if both include and exclude are empty all items
## will be collected. Arrays may contain glob patterns.
##
## Node IDs to collect metrics from. If a node is excluded, no metrics will
## be collected for its containers or apps.
# node_include = []
# node_exclude = []
## Container IDs to collect container metrics from.
# container_include = []
# container_exclude = []
## Container IDs to collect app metrics from.
# app_include = []
# app_exclude = []
## Maximum concurrent connections to the cluster.
# max_connections = 10
## Maximum time to receive a response from cluster.
# response_timeout = "20s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## If true, skip chain & host verification
# insecure_skip_verify = true
## Recommended filtering to reduce series cardinality.
# [inputs.dcos.tagdrop]
# path = ["/var/lib/mesos/slave/slaves/*"]

@ -0,0 +1,44 @@
# Ingests files in a directory and then moves them to a target directory.
[[inputs.directory_monitor]]
## The directory to monitor and read files from.
directory = ""
#
## The directory to move finished files to.
finished_directory = ""
#
## The directory to move files to upon file error.
## If not provided, erroring files will stay in the monitored directory.
# error_directory = ""
#
## The amount of time a file is allowed to sit in the directory before it is picked up.
## This time can generally be low, but if a very large file is written to the directory and writing is potentially slow,
## set this higher so that the plugin waits until the file is fully copied into the directory.
# directory_duration_threshold = "50ms"
#
## A list of the only file names to monitor, if necessary. Supports regex. If left blank, all files are ingested.
# files_to_monitor = ['^.*\.csv']
#
## A list of files to ignore, if necessary. Supports regex.
# files_to_ignore = [".DS_Store"]
#
## Maximum lines of the file to process that have not yet been written by the
## output. For best throughput set to the size of the output's metric_buffer_limit.
## Warning: setting this number higher than the output's metric_buffer_limit can cause dropped metrics.
# max_buffered_metrics = 10000
#
## The maximum number of file paths to queue up for processing at once, before waiting until files are processed to find more files.
## Lowering this value will result in *slightly* less memory use, at a potential cost in speed, if absolutely necessary.
# file_queue_size = 100000
#
## Name a tag containing the name of the file the data was parsed from. Leave empty
## to disable. Be cautious when file name variation is high, as this can increase cardinality
## significantly. Read more about cardinality here:
## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality
# file_tag = ""
#
## The dataformat to be read from the files.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
## NOTE: We currently only support parsing newline-delimited JSON. See the format here: https://github.com/ndjson/ndjson-spec
data_format = "influx"

@ -0,0 +1,13 @@
# Read metrics about disk usage by mount point
[[inputs.disk]]
## By default stats will be gathered for all mount points.
## Setting mount_points will restrict the stats to only the specified mount points.
# mount_points = ["/"]
## Ignore mount points by filesystem type.
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
## Ignore mount points by mount options.
## The 'mount' command reports the options of all mounts in parentheses.
## Bind mounts can be ignored with the special 'bind' option.
# ignore_mount_opts = []
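
For example (a sketch using only the options shown above), bind mounts can be excluded like this:

[[inputs.disk]]
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
ignore_mount_opts = ["bind"]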

@ -0,0 +1,27 @@
# Read metrics about disk IO by device
[[inputs.diskio]]
## By default, telegraf will gather stats for all devices including
## disk partitions.
## Setting devices will restrict the stats to the specified devices.
# devices = ["sda", "sdb", "vd*"]
## Uncomment the following line if you need disk serial numbers.
# skip_serial_number = false
#
## On systems which support it, device metadata can be added in the form of
## tags.
## Currently only Linux is supported via udev properties. You can view
## available properties for a device by running:
## 'udevadm info -q property -n /dev/sda'
## Note: Most, but not all, udev properties can be accessed this way. Properties
## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH.
# device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
#
## Using the same metadata source as device_tags, you can also customize the
## name of the device via templates.
## The 'name_templates' parameter is a list of templates to try and apply to
## the device. The template may contain variables in the form of '$PROPERTY' or
## '${PROPERTY}'. The first template which does not contain any variables not
## present for the device is used as the device name tag.
## The typical use case is for LVM volumes, to get the VG/LV name instead of
## the near-meaningless DM-0 name.
# name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
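
A worked sketch of the template resolution described above (the udev property values are hypothetical): for an LVM volume with no ID_FS_LABEL but with DM_VG_NAME=vg0 and DM_LV_NAME=home, the first template is skipped and the second resolves, so the device is reported as vg0/home instead of dm-0.

[[inputs.diskio]]
device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]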

@ -0,0 +1,7 @@
# Read metrics from one or many disque servers
[[inputs.disque]]
## An array of URI to gather stats about. Specify an ip or hostname
## with optional port and password.
## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
## If no servers are specified, then localhost is used as the host.
servers = ["localhost"]

@ -0,0 +1,4 @@
# Provide a native collection for dmsetup based statistics for dm-cache
[[inputs.dmcache]]
## Whether to report per-device stats or not
per_device = true

@ -0,0 +1,20 @@
# Query the given DNS servers and gather statistics
[[inputs.dns_query]]
## servers to query
servers = ["8.8.8.8"]
## Network is the network protocol name.
# network = "udp"
## Domains or subdomains to query.
# domains = ["."]
## Query record type.
## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
# record_type = "A"
## Dns server port.
# port = 53
## Query timeout in seconds.
# timeout = 2

@ -0,0 +1,72 @@
# Read metrics about docker containers
[[inputs.docker]]
## Docker Endpoint
## To use TCP, set endpoint = "tcp://[ip]:[port]"
## To use environment variables (ie, docker-machine), set endpoint = "ENV"
endpoint = "unix:///var/run/docker.sock"
## Set to true to collect Swarm metrics (desired_replicas, running_replicas).
## Note: configure this in only one of the manager nodes in a Swarm cluster;
## configuring it in multiple Swarm managers results in duplicated metrics.
gather_services = false
## Only collect metrics for these containers. Values will be appended to
## container_name_include.
## Deprecated (1.4.0), use container_name_include
container_names = []
## Set the source tag for the metrics to the container ID hostname, e.g. the first 12 characters
source_tag = false
## Containers to include and exclude. Collect all if empty. Globs accepted.
container_name_include = []
container_name_exclude = []
## Container states to include and exclude. Globs accepted.
## When empty only containers in the "running" state will be captured.
## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
# container_state_include = []
# container_state_exclude = []
## Timeout for docker list, info, and stats commands
timeout = "5s"
## Whether to report for each container per-device blkio (8:0, 8:1...),
## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not.
## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'.
## The default value is 'true' for backwards compatibility; please set it to 'false' so that the
## 'perdevice_include' setting is honored.
perdevice = true
## Specifies for which classes a per-device metric should be issued
## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...)
## Please note that this setting has no effect if 'perdevice' is set to 'true'
# perdevice_include = ["cpu"]
## Whether to report for each container total blkio and network stats or not.
## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'.
## The default value is 'false' for backwards compatibility; please set it to 'true' so that the
## 'total_include' setting is honored.
total = false
## Specifies for which classes a total metric should be issued. Total is an aggregate of the 'perdevice' values.
## Possible values are 'cpu', 'blkio' and 'network'
## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin.
## Please note that this setting has no effect if 'total' is set to 'false'
# total_include = ["cpu", "blkio", "network"]
## docker labels to include and exclude as tags. Globs accepted.
## Note that an empty array for both will include all labels as tags
docker_label_include = []
docker_label_exclude = []
## Which environment variables should we use as a tag
tag_env = ["JAVA_HOME", "HEAP_SIZE"]
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
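
A sketch of the forward-looking settings the comments above recommend, so that 'perdevice_include' and 'total_include' are honored (the include lists mirror the values shown in the sample):

[[inputs.docker]]
endpoint = "unix:///var/run/docker.sock"
timeout = "5s"
perdevice = false
perdevice_include = ["cpu"]
total = true
total_include = ["cpu", "blkio", "network"]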

@ -0,0 +1,38 @@
# Read logging output from the Docker engine
[[inputs.docker_log]]
## Docker Endpoint
## To use TCP, set endpoint = "tcp://[ip]:[port]"
## To use environment variables (ie, docker-machine), set endpoint = "ENV"
# endpoint = "unix:///var/run/docker.sock"
## When true, container logs are read from the beginning; otherwise
## reading begins at the end of the log.
# from_beginning = false
## Timeout for Docker API calls.
# timeout = "5s"
## Containers to include and exclude. Globs accepted.
## Note that an empty array for both will include all containers
# container_name_include = []
# container_name_exclude = []
## Container states to include and exclude. Globs accepted.
## When empty only containers in the "running" state will be captured.
# container_state_include = []
# container_state_exclude = []
## docker labels to include and exclude as tags. Globs accepted.
## Note that an empty array for both will include all labels as tags
# docker_label_include = []
# docker_label_exclude = []
## Set the source tag for the metrics to the container ID hostname, e.g. the first 12 characters
source_tag = false
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

@ -0,0 +1,18 @@
# Read metrics about dovecot servers
[[inputs.dovecot]]
## specify dovecot servers via an address:port list
## e.g.
## localhost:24242
## or as a UDS socket
## e.g.
## /var/run/dovecot/old-stats
##
## If no servers are specified, then localhost is used as the host.
servers = ["localhost:24242"]
## Type is one of "user", "domain", "ip", or "global"
type = "global"
## Wildcard matches like "*.com". An empty string "" is the same as "*"
## If type = "ip" filters should be <IP/network>
filters = [""]

@ -0,0 +1,32 @@
# Reads metrics from DPDK applications using v2 telemetry interface.
[[inputs.dpdk]]
## Path to DPDK telemetry socket. This shall point to v2 version of DPDK telemetry interface.
# socket_path = "/var/run/dpdk/rte/dpdk_telemetry.v2"
## Duration that defines how long the connected socket client will wait for a response before terminating connection.
## This includes both writing to and reading from socket. Since it's local socket access
## to a fast packet processing application, the timeout should be sufficient for most users.
## Setting the value to 0 disables the timeout (not recommended)
# socket_access_timeout = "200ms"
## Enables telemetry data collection for selected device types.
## Adding "ethdev" enables collection of telemetry from DPDK NICs (stats, xstats, link_status).
## Adding "rawdev" enables collection of telemetry from DPDK Raw Devices (xstats).
# device_types = ["ethdev"]
## List of custom, application-specific telemetry commands to query
## The list of available commands depends on the application deployed. Applications can register their own commands
## via the telemetry library API: http://doc.dpdk.org/guides/prog_guide/telemetry_lib.html#registering-commands
## For example, for the L3 Forwarding with Power Management sample application this could be:
## additional_commands = ["/l3fwd-power/stats"]
# additional_commands = []
## Allows turning off collecting data for individual "ethdev" commands.
## Remove "/ethdev/link_status" from list to start getting link status metrics.
[inputs.dpdk.ethdev]
exclude_commands = ["/ethdev/link_status"]
## When running multiple instances of the plugin it's recommended to add a unique tag to each instance to identify
## metrics exposed by an instance of DPDK application. This is useful when multiple DPDK apps run on a single host.
## [inputs.dpdk.tags]
## dpdk_instance = "my-fwd-app"

@ -0,0 +1,26 @@
# Read metrics about ECS containers
[[inputs.ecs]]
## ECS metadata url.
## Metadata v2 API is used if set explicitly. Otherwise,
## v3 metadata endpoint API is used if available.
# endpoint_url = ""
## Containers to include and exclude. Globs accepted.
## Note that an empty array for both will include all containers
# container_name_include = []
# container_name_exclude = []
## Container states to include and exclude. Globs accepted.
## When empty only containers in the "RUNNING" state will be captured.
## Possible values are "NONE", "PULLED", "CREATED", "RUNNING",
## "RESOURCES_PROVISIONED", "STOPPED".
# container_status_include = []
# container_status_exclude = []
## ecs labels to include and exclude as tags. Globs accepted.
## Note that an empty array for both will include all labels as tags
ecs_label_include = [ "com.amazonaws.ecs.*" ]
ecs_label_exclude = []
## Timeout for queries.
# timeout = "5s"

@ -0,0 +1,59 @@
# Read stats from one or more Elasticsearch servers or clusters
[[inputs.elasticsearch]]
## specify a list of one or more Elasticsearch servers
## you can add username and password to your url to use basic authentication:
## servers = ["http://user:pass@localhost:9200"]
servers = ["http://localhost:9200"]
## Timeout for HTTP requests to the Elasticsearch server(s)
http_timeout = "5s"
## When local is true (the default), the node will read only its own stats.
## Set local to false when you want to read the node stats from all nodes
## of the cluster.
local = true
## Set cluster_health to true when you want to obtain cluster health stats
cluster_health = false
## Adjust cluster_health_level when you want to obtain detailed health stats
## The options are
## - indices (default)
## - cluster
# cluster_health_level = "indices"
## Set cluster_stats to true when you want to obtain cluster stats.
cluster_stats = false
## Only gather cluster_stats from the master node. This requires local = true to work.
cluster_stats_only_from_master = true
## Indices to collect; can be one or more indices names or _all
## Use of wildcards is allowed. Use a wildcard at the end to retrieve index names that end with a changing value, like a date.
indices_include = ["_all"]
## One of "shards", "cluster", "indices"
## Currently only "shards" is implemented
indices_level = "shards"
## node_stats is a list of sub-stats that you want to have gathered. Valid options
## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
## "breaker". Per default, all stats are gathered.
# node_stats = ["jvm", "http"]
## HTTP Basic Authentication username and password.
# username = ""
# password = ""
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Sets the number of most recent indices to return for indices that are configured with a date-stamped suffix.
## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all indices that match it, and
## sort them by the date or number after the wildcard. Metrics are then gathered for only the
## 'num_most_recent_indices' most recent indices.
# num_most_recent_indices = 0
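
An illustrative sketch of the date-stamped index handling described above (the index pattern is a hypothetical example): with a trailing wildcard and num_most_recent_indices = 2, only the two most recent matching indices (say telegraf-2022.05.17 and telegraf-2022.05.18) are gathered.

[[inputs.elasticsearch]]
servers = ["http://localhost:9200"]
http_timeout = "5s"
indices_include = ["telegraf-*"]
num_most_recent_indices = 2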

@ -0,0 +1,71 @@
# Derive metrics from aggregating Elasticsearch query results
[[inputs.elasticsearch_query]]
## The full HTTP endpoint URL for your Elasticsearch instance
## Multiple urls can be specified for the same cluster;
## only ONE of the urls will be queried each interval.
urls = [ "http://node1.es.example.com:9200" ] # required.
## Elasticsearch client timeout, defaults to "5s".
# timeout = "5s"
## Set to true to ask Elasticsearch for a list of all cluster nodes,
## so it is not necessary to list all nodes in the urls config option
# enable_sniffer = false
## Set the interval to check if the Elasticsearch nodes are available
## This option is only used if enable_sniffer is also set (0s to disable it)
# health_check_interval = "10s"
## HTTP basic authentication details (eg. when using x-pack)
# username = "telegraf"
# password = "mypassword"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
[[inputs.elasticsearch_query.aggregation]]
## measurement name for the results of the aggregation query
measurement_name = "measurement"
## Elasticsearch indexes to query (accept wildcards).
index = "index-*"
## The date/time field in the Elasticsearch index (mandatory).
date_field = "@timestamp"
## If the field used for the date/time field in Elasticsearch is also using
## a custom date/time format it may be required to provide the format to
## correctly parse the field.
##
## If using one of the built in elasticsearch formats this is not required.
# date_field_custom_format = ""
## Time window to query (eg. "1m" to query documents from last minute).
## Normally should be set to same as collection interval
query_period = "1m"
## Lucene query to filter results
# filter_query = "*"
## Fields to aggregate values (must be numeric fields)
# metric_fields = ["metric"]
## Aggregation function to use on the metric fields
## Must be set if 'metric_fields' is set
## Valid values are: avg, sum, min, max
# metric_function = "avg"
## Fields to be used as tags
## Must be text, non-analyzed fields. Metric aggregations are performed per tag
# tags = ["field.keyword", "field2.keyword"]
## Set to true to not ignore documents when the tag(s) above are missing
# include_missing_tag = false
## String value of the tag when the tag does not exist
## Used when include_missing_tag is true
# missing_tag_value = "null"
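  ##
  ## A minimal sketch (hypothetical field names): average a numeric "latency_ms"
  ## field per host, emitted under the measurement name configured above.
  # metric_fields = ["latency_ms"]
  # metric_function = "avg"
  # tags = ["host.keyword"]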

View File

@ -0,0 +1,16 @@
# Returns ethtool statistics for given interfaces
[[inputs.ethtool]]
## List of interfaces to pull metrics for
# interface_include = ["eth0"]
## List of interfaces to ignore when pulling metrics.
# interface_exclude = ["eth1"]
## Some drivers declare statistics with extra whitespace, different spacing,
  ## and mixed case. This list, when enabled, can be used to clean the keys.
## Here are the current possible normalizations:
## * snakecase: converts fooBarBaz to foo_bar_baz
## * trim: removes leading and trailing whitespace
## * lower: changes all capitalized letters to lowercase
## * underscore: replaces spaces with underscores
# normalize_keys = ["snakecase", "trim", "lower", "underscore"]
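  ##
  ## e.g. a minimal sketch: apply only snake-casing and whitespace trimming if
  ## full normalization is not wanted.
  # normalize_keys = ["snakecase", "trim"]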

View File

@ -0,0 +1,84 @@
# Azure Event Hubs service input plugin
[[inputs.eventhub_consumer]]
## The default behavior is to create a new Event Hub client from environment variables.
## This requires one of the following sets of environment variables to be set:
##
## 1) Expected Environment Variables:
## - "EVENTHUB_CONNECTION_STRING"
##
## 2) Expected Environment Variables:
## - "EVENTHUB_NAMESPACE"
## - "EVENTHUB_NAME"
## - "EVENTHUB_KEY_NAME"
## - "EVENTHUB_KEY_VALUE"
  ##
  ## 3) Expected Environment Variables:
## - "EVENTHUB_NAMESPACE"
## - "EVENTHUB_NAME"
## - "AZURE_TENANT_ID"
## - "AZURE_CLIENT_ID"
## - "AZURE_CLIENT_SECRET"
## Uncommenting the option below will create an Event Hub client based solely on the connection string.
## This can either be the associated environment variable or hard coded directly.
## If this option is uncommented, environment variables will be ignored.
## Connection string should contain EventHubName (EntityPath)
# connection_string = ""
## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister
# persistence_dir = ""
## Change the default consumer group
# consumer_group = ""
## By default the event hub receives all messages present on the broker, alternative modes can be set below.
## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339).
## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run).
# from_timestamp =
# latest = true
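  ##
  ## e.g. a minimal sketch (hypothetical timestamp): start reading from a fixed
  ## RFC 3339 point in time instead of all messages on the broker.
  # from_timestamp = "2021-01-01T00:00:00Z"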
## Set a custom prefetch count for the receiver(s)
# prefetch_count = 1000
## Add an epoch to the receiver(s)
# epoch = 0
## Change to set a custom user agent, "telegraf" is used by default
# user_agent = "telegraf"
## To consume from a specific partition, set the partition_ids option.
## An empty array will result in receiving from all partitions.
# partition_ids = ["0","1"]
## Max undelivered messages
# max_undelivered_messages = 1000
## Set either option below to true to use a system property as timestamp.
## You have the choice between EnqueuedTime and IoTHubEnqueuedTime.
## It is recommended to use this setting when the data itself has no timestamp.
# enqueued_time_as_ts = true
# iot_hub_enqueued_time_as_ts = true
## Tags or fields to create from keys present in the application property bag.
## These could for example be set by message enrichments in Azure IoT Hub.
# application_property_tags = []
# application_property_fields = []
## Tag or field name to use for metadata
## By default all metadata is disabled
# sequence_number_field = "SequenceNumber"
# enqueued_time_field = "EnqueuedTime"
# offset_field = "Offset"
# partition_id_tag = "PartitionID"
# partition_key_tag = "PartitionKey"
# iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID"
# iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID"
# iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod"
# iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID"
# iot_hub_enqueued_time_field = "IoTHubEnqueuedTime"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"

View File

@ -0,0 +1,3 @@
# This is an example plugin
[[inputs.example]]
example_option = "example_value"

View File

@ -0,0 +1,26 @@
# Read metrics from one or more commands that can output to stdout
[[inputs.exec]]
## Commands array
commands = [
"/tmp/test.sh",
"/usr/bin/mycollector --foo=bar",
"/tmp/collect_*.sh"
]
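  ## Each command above is expected to write metrics to stdout in the data
  ## format configured below; e.g. a hypothetical line of influx line protocol:
  ##   my_measurement,host=server01 value=42i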
## Environment variables
## Array of "key=value" pairs to pass as environment variables
## e.g. "KEY=value", "USERNAME=John Doe",
## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs"
# environment = []
## Timeout for each command to complete.
timeout = "5s"
## measurement name suffix (for separating different commands)
name_suffix = "_mycollector"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"

View File

@ -0,0 +1,30 @@
# Run executable as long-running input plugin
[[inputs.execd]]
## One program to run as daemon.
## NOTE: process and each argument should each be their own string
command = ["telegraf-smartctl", "-d", "/dev/sda"]
## Environment variables
## Array of "key=value" pairs to pass as environment variables
## e.g. "KEY=value", "USERNAME=John Doe",
## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs"
# environment = []
## Define how the process is signaled on each collection interval.
## Valid values are:
## "none" : Do not signal anything. (Recommended for service inputs)
## The process must output metrics by itself.
## "STDIN" : Send a newline on STDIN. (Recommended for gather inputs)
## "SIGHUP" : Send a HUP signal. Not available on Windows. (not recommended)
## "SIGUSR1" : Send a USR1 signal. Not available on Windows.
## "SIGUSR2" : Send a USR2 signal. Not available on Windows.
signal = "none"
## Delay before the process is restarted after an unexpected termination
restart_delay = "10s"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"

View File

@ -0,0 +1,4 @@
# Read metrics from fail2ban.
[[inputs.fail2ban]]
## Use sudo to run fail2ban-client
use_sudo = false

View File

@ -0,0 +1,12 @@
# Read devices value(s) from a Fibaro controller
[[inputs.fibaro]]
## Required Fibaro controller address/hostname.
## Note: at the time of writing this plugin, Fibaro only implemented http - no https available
url = "http://<controller>:80"
## Required credentials to access the API (http://<controller/api/<component>)
username = "<username>"
password = "<password>"
## Amount of time allowed to complete the HTTP request
# timeout = "5s"

View File

@ -0,0 +1,27 @@
# Parse a complete file each interval
[[inputs.file]]
## Files to parse each interval. Accept standard unix glob matching rules,
## as well as ** to match recursive files and directories.
files = ["/tmp/metrics.out"]
## Character encoding to use when interpreting the file contents. Invalid
## characters are replaced using the unicode replacement character. When set
## to the empty string the data is not decoded to text.
## ex: character_encoding = "utf-8"
## character_encoding = "utf-16le"
## character_encoding = "utf-16be"
## character_encoding = ""
# character_encoding = ""
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
## Name a tag containing the name of the file the data was parsed from. Leave empty
  ## to disable. Be cautious when file name variation is high, as this can increase the cardinality
## significantly. Read more about cardinality here:
## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality
# file_tag = ""

View File

@ -0,0 +1,32 @@
# Count files in a directory
[[inputs.filecount]]
## Directories to gather stats about.
  ## This accepts standard unix glob matching rules, but with the addition of
  ## ** as a "super asterisk". ie:
  ## /var/log/** -> recursively find all directories in /var/log and count files in each directory
  ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directory
## /var/log -> count all files in /var/log and all of its subdirectories
directories = ["/var/cache/apt", "/tmp"]
## Only count files that match the name pattern. Defaults to "*".
name = "*"
## Count files in subdirectories. Defaults to true.
recursive = true
## Only count regular files. Defaults to true.
regular_only = true
## Follow all symlinks while walking the directory tree. Defaults to false.
follow_symlinks = false
## Only count files that are at least this size. If size is
## a negative number, only count files that are smaller than the
## absolute value of size. Acceptable units are B, KiB, MiB, KB, ...
## Without quotes and units, interpreted as size in bytes.
size = "0B"
## Only count files that have not been touched for at least this
## duration. If mtime is negative, only count files that have been
## touched in this duration. Defaults to "0s".
mtime = "0s"
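  ##
  ## e.g. a minimal sketch: only count files larger than 100MiB that have not
  ## been modified within the last 7 days.
  # size = "100MiB"
  # mtime = "168h"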

View File

@ -0,0 +1,9 @@
# Read stats about given file(s)
[[inputs.filestat]]
## Files to gather stats about.
## These accept standard unix glob matching rules, but with the addition of
## ** as a "super asterisk". See https://github.com/gobwas/glob.
files = ["/etc/telegraf/telegraf.conf", "/var/log/**.log"]
## If true, read the entire file and calculate an md5 checksum.
md5 = false

View File

@ -0,0 +1,10 @@
# Read real time temps from fireboard.io servers
[[inputs.fireboard]]
## Specify auth token for your account
auth_token = "invalidAuthToken"
## You can override the fireboard server URL if necessary
  # url = "https://fireboard.io/api/v1/devices.json"
## You can set a different http_timeout if you need to
  ## Set the value as a string using a number and a time unit,
## for example "12s" for 12 seconds.
# http_timeout = "4s"

View File

@ -0,0 +1,14 @@
# Read metrics exposed by fluentd in_monitor plugin
[[inputs.fluentd]]
## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
##
## Endpoint:
## - only one URI is allowed
## - https is not supported
endpoint = "http://localhost:24220/api/plugins.json"
## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
exclude = [
"monitor_agent",
"dummy",
]

View File

@ -0,0 +1,24 @@
# Gather repository information from GitHub hosted repositories.
[[inputs.github]]
## List of repositories to monitor
repositories = [
"influxdata/telegraf",
"influxdata/influxdb"
]
## Github API access token. Unauthenticated requests are limited to 60 per hour.
# access_token = ""
## Github API enterprise url. Github Enterprise accounts must specify their base url.
# enterprise_base_url = ""
## Timeout for HTTP requests.
# http_timeout = "5s"
## List of additional fields to query.
## NOTE: Getting those fields might involve issuing additional API-calls, so please
## make sure you do not exceed the rate-limit of GitHub.
##
## Available fields are:
## - pull-requests -- number of open and closed pull requests (2 API-calls per repository)
# additional_fields = []

View File

@ -0,0 +1,68 @@
# gNMI telemetry input plugin
[[inputs.gnmi]]
## Address and port of the gNMI GRPC server
addresses = ["10.49.234.114:57777"]
## define credentials
username = "cisco"
password = "cisco"
## gNMI encoding requested (one of: "proto", "json", "json_ietf", "bytes")
# encoding = "proto"
## redial in case of failures after
redial = "10s"
## enable client-side TLS and define CA to authenticate the device
# enable_tls = true
# tls_ca = "/etc/telegraf/ca.pem"
# insecure_skip_verify = true
## define client-side TLS certificate & key to authenticate to the device
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## gNMI subscription prefix (optional, can usually be left empty)
## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
# origin = ""
# prefix = ""
# target = ""
## Define additional aliases to map telemetry encoding paths to simple measurement names
# [inputs.gnmi.aliases]
# ifcounters = "openconfig:/interfaces/interface/state/counters"
[[inputs.gnmi.subscription]]
## Name of the measurement that will be emitted
name = "ifcounters"
## Origin and path of the subscription
## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
##
## origin usually refers to a (YANG) data model implemented by the device
## and path to a specific substructure inside it that should be subscribed to (similar to an XPath)
## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr
origin = "openconfig-interfaces"
path = "/interfaces/interface/state/counters"
# Subscription mode (one of: "target_defined", "sample", "on_change") and interval
subscription_mode = "sample"
sample_interval = "10s"
## Suppress redundant transmissions when measured values are unchanged
# suppress_redundant = false
## If suppression is enabled, send updates at least every X seconds anyway
# heartbeat_interval = "60s"
#[[inputs.gnmi.subscription]]
# name = "descr"
# origin = "openconfig-interfaces"
# path = "/interfaces/interface/state/description"
# subscription_mode = "on_change"
## If tag_only is set, the subscription in question will be utilized to maintain a map of
## tags to apply to other measurements emitted by the plugin, by matching path keys
## All fields from the tag-only subscription will be applied as tags to other readings,
## in the format <name>_<fieldBase>.
# tag_only = true
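  ##
  ## e.g. combining the commented "descr" subscription above with tag_only = true
  ## would, per the <name>_<fieldBase> rule, attach a tag assumed to be named
  ## "descr_description" to the other measurements emitted by this plugin.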

View File

@ -0,0 +1,38 @@
# Read flattened metrics from one or more GrayLog HTTP endpoints
[[inputs.graylog]]
## API endpoint, currently supported API:
##
## - multiple (e.g. http://<host>:9000/api/system/metrics/multiple)
## - namespace (e.g. http://<host>:9000/api/system/metrics/namespace/{namespace})
##
## For namespace endpoint, the metrics array will be ignored for that call.
## Endpoint can contain namespace and multiple type calls.
##
## Please check http://[graylog-server-ip]:9000/api/api-browser for full list
## of endpoints
servers = [
"http://[graylog-server-ip]:9000/api/system/metrics/multiple",
]
## Set timeout (default 5 seconds)
# timeout = "5s"
## Metrics list
## List of metrics can be found on Graylog webservice documentation.
## Or by hitting the web service api at:
## http://[graylog-host]:9000/api/system/metrics
metrics = [
"jvm.cl.loaded",
"jvm.memory.pools.Metaspace.committed"
]
## Username and password
username = ""
password = ""
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

View File

@ -0,0 +1,30 @@
# Read metrics of HAProxy, via socket or HTTP stats page
[[inputs.haproxy]]
  ## An array of addresses to gather stats about. Specify an ip or hostname
## with optional port. ie localhost, 10.10.3.33:1936, etc.
## Make sure you specify the complete path to the stats endpoint
## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
## Credentials for basic HTTP authentication
# username = "admin"
# password = "admin"
## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
servers = ["http://myhaproxy.com:1936/haproxy?stats"]
## You can also use local socket with standard wildcard globbing.
## Server address not starting with 'http' will be treated as a possible
## socket, so both examples below are valid.
# servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
## By default, some of the fields are renamed from what haproxy calls them.
## Setting this option to true results in the plugin keeping the original
## field names.
# keep_field_names = false
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

View File

@ -0,0 +1,11 @@
# Monitor disks' temperatures using hddtemp
[[inputs.hddtemp]]
  ## By default, telegraf gathers temperature data from all disks detected by
  ## hddtemp.
##
## Only collect temps from the selected disks.
##
## A * as the device name will return the temperature values of all disks.
##
# address = "127.0.0.1:7634"
# devices = ["sda", "*"]

View File

@ -0,0 +1,66 @@
# Read formatted metrics from one or more HTTP endpoints
[[inputs.http]]
## One or more URLs from which to read formatted metrics
urls = [
"http://localhost/metrics"
]
## HTTP method
# method = "GET"
## Optional HTTP headers
# headers = {"X-Special-Header" = "Special-Value"}
## HTTP entity-body to send with POST/PUT requests.
# body = ""
## HTTP Content-Encoding for write request body, can be set to "gzip" to
## compress body or "identity" to apply no encoding.
# content_encoding = "identity"
## Optional file with Bearer token
## file content is added as an Authorization header
# bearer_token = "/path/to/file"
## Optional HTTP Basic Auth Credentials
# username = "username"
# password = "pa$$word"
## OAuth2 Client Credentials. The options 'client_id', 'client_secret', and 'token_url' are required to use OAuth2.
# client_id = "clientid"
# client_secret = "secret"
  # token_url = "https://identityprovider/oauth2/v1/token"
# scopes = ["urn:opc:idm:__myscopes__"]
## HTTP Proxy support
# http_proxy_url = ""
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Optional Cookie authentication
# cookie_auth_url = "https://localhost/authMe"
# cookie_auth_method = "POST"
# cookie_auth_username = "username"
# cookie_auth_password = "pa$$word"
# cookie_auth_headers = '{"Content-Type": "application/json", "X-MY-HEADER":"hello"}'
# cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}'
  ## If cookie_auth_renewal is not set or set to "0", authentication happens once and the cookie is never renewed
# cookie_auth_renewal = "5m"
## Amount of time allowed to complete the HTTP request
# timeout = "5s"
## List of success status codes
# success_status_codes = [200]
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"

View File

@ -0,0 +1,50 @@
# Generic HTTP write listener
[[inputs.http_listener_v2]]
## Address and port to host HTTP listener on
service_address = ":8080"
## Paths to listen to.
# paths = ["/telegraf"]
## Save path as http_listener_v2_path tag if set to true
# path_tag = false
## HTTP methods to accept.
# methods = ["POST", "PUT"]
## maximum duration before timing out read of the request
# read_timeout = "10s"
## maximum duration before timing out write of the response
# write_timeout = "10s"
## Maximum allowed http request body size in bytes.
## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
# max_body_size = "500MB"
## Part of the request to consume. Available options are "body" and
## "query".
# data_source = "body"
## Set one or more allowed client CA certificate file names to
## enable mutually authenticated TLS connections
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
## Add service certificate and key
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Optional username and password to accept for HTTP basic authentication.
## You probably want to make sure you have TLS configured above for this.
# basic_username = "foobar"
# basic_password = "barfoo"
## Optional setting to map http headers into tags
## If the http header is not present on the request, no corresponding tag will be added
## If multiple instances of the http header are present, only the first value will be used
# http_header_tags = {"HTTP_HEADER" = "TAG_NAME"}
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"

View File

@ -0,0 +1,70 @@
# HTTP/HTTPS request given an address a method and a timeout
[[inputs.http_response]]
## List of urls to query.
# urls = ["http://localhost"]
  ## Set http_proxy (telegraf uses the system wide proxy settings if it is not set)
# http_proxy = "http://localhost:8888"
## Set response_timeout (default 5 seconds)
# response_timeout = "5s"
## HTTP Request Method
# method = "GET"
## Whether to follow redirects from the server (defaults to false)
# follow_redirects = false
## Optional file with Bearer token
## file content is added as an Authorization header
# bearer_token = "/path/to/file"
## Optional HTTP Basic Auth Credentials
# username = "username"
# password = "pa$$word"
## Optional HTTP Request Body
# body = '''
# {'fake':'data'}
# '''
## Optional name of the field that will contain the body of the response.
  ## By default it is set to an empty string indicating that the body's content won't be added
# response_body_field = ''
## Maximum allowed HTTP response body size in bytes.
## 0 means to use the default of 32MiB.
## If the response body size exceeds this limit a "body_read_error" will be raised
# response_body_max_size = "32MiB"
## Optional substring or regex match in body of the response (case sensitive)
# response_string_match = "\"service_status\": \"up\""
# response_string_match = "ok"
# response_string_match = "\".*_status\".?:.?\"up\""
## Expected response status code.
## The status code of the response is compared to this value. If they match, the field
## "response_status_code_match" will be 1, otherwise it will be 0. If the
## expected status code is 0, the check is disabled and the field won't be added.
# response_status_code = 0
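  ##
  ## e.g. a minimal sketch: require a 200 response; the field
  ## "response_status_code_match" will then be 1 on a 200 and 0 otherwise.
  # response_status_code = 200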
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Use the given name as the SNI server name on each URL
# tls_server_name = ""
## HTTP Request Headers (all values must be strings)
# [inputs.http_response.headers]
# Host = "github.com"
## Optional setting to map response http headers into tags
## If the http header is not present on the request, no corresponding tag will be added
## If multiple instances of the http header are present, only the first value will be used
# http_header_tags = {"HTTP_HEADER" = "TAG_NAME"}
## Interface to use when dialing an address
# interface = "eth0"

View File

@ -0,0 +1,46 @@
# Read flattened metrics from one or more JSON HTTP endpoints
[[inputs.httpjson]]
  ## NOTE: This plugin only reads numerical measurements; strings and booleans
## will be ignored.
## Name for the service being polled. Will be appended to the name of the
## measurement e.g. "httpjson_webserver_stats".
##
## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.
name = "webserver_stats"
## URL of each server in the service's cluster
servers = [
"http://localhost:9999/stats/",
"http://localhost:9998/stats/",
]
## Set response_timeout (default 5 seconds)
response_timeout = "5s"
## HTTP method to use: GET or POST (case-sensitive)
method = "GET"
## Tags to extract from top-level of JSON server response.
# tag_keys = [
# "my_tag_1",
# "my_tag_2"
# ]
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## HTTP Request Parameters (all values must be strings). For "GET" requests, data
## will be included in the query. For "POST" requests, data will be included
## in the request body as "x-www-form-urlencoded".
# [inputs.httpjson.parameters]
# event_type = "cpu_spike"
# threshold = "0.75"
## HTTP Request Headers (all values must be strings).
# [inputs.httpjson.headers]
# X-Auth-Token = "my-xauth-token"
# apiVersion = "v1"

View File

@ -0,0 +1,7 @@
# Gathers huge pages measurements.
[[inputs.hugepages]]
## Supported huge page types:
## - "root" - based on root huge page control directory: /sys/kernel/mm/hugepages
## - "per_node" - based on per NUMA node directories: /sys/devices/system/node/node[0-9]*/hugepages
## - "meminfo" - based on /proc/meminfo file
# types = ["root", "per_node"]

View File

@ -0,0 +1,21 @@
# Gather Icinga2 status
[[inputs.icinga2]]
## Required Icinga2 server address
# server = "https://localhost:5665"
## Required Icinga2 object type ("services" or "hosts")
# object_type = "services"
## Credentials for basic HTTP authentication
# username = "admin"
# password = "admin"
## Maximum time to receive response.
# response_timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = true

View File

@ -0,0 +1,3 @@
# Gets counters from all InfiniBand cards and ports installed
[[inputs.infiniband]]
# no configuration

View File

@ -0,0 +1,25 @@
# Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
[[inputs.influxdb]]
## Works with InfluxDB debug endpoints out of the box,
## but other services can use this format too.
## See the influxdb plugin's README for more details.
## Multiple URLs from which to read InfluxDB-formatted JSON
## Default is "http://localhost:8086/debug/vars".
urls = [
"http://localhost:8086/debug/vars"
]
## Username and password to send using HTTP Basic Authentication.
# username = ""
# password = ""
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## http request & header timeout
timeout = "5s"

View File

@ -0,0 +1,47 @@
# Accept metrics over InfluxDB 1.x HTTP API
[[inputs.influxdb_listener]]
## Address and port to host HTTP listener on
service_address = ":8186"
## maximum duration before timing out read of the request
read_timeout = "10s"
## maximum duration before timing out write of the response
write_timeout = "10s"
## Maximum allowed HTTP request body size in bytes.
## 0 means to use the default of 32MiB.
max_body_size = 0
## Maximum line size allowed to be sent in bytes.
## deprecated in 1.14; parser now handles lines of unlimited length and option is ignored
# max_line_size = 0
## Set one or more allowed client CA certificate file names to
## enable mutually authenticated TLS connections
tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
## Add service certificate and key
tls_cert = "/etc/telegraf/cert.pem"
tls_key = "/etc/telegraf/key.pem"
## Optional tag name used to store the database name.
## If the write has a database in the query string then it will be kept in this tag name.
## This tag can be used in downstream outputs.
## The default value of nothing means it will be off and the database will not be recorded.
## If you have a tag that is the same as the one specified below, and supply a database,
## the tag will be overwritten with the database supplied.
# database_tag = ""
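  ##
  ## e.g. a minimal sketch (assumed tag name): a write to /write?db=mydb would
  ## then carry the tag influxdb_database=mydb.
  # database_tag = "influxdb_database"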
## If set the retention policy specified in the write query will be added as
## the value of this tag name.
# retention_policy_tag = ""
## Optional username and password to accept for HTTP basic authentication.
## You probably want to make sure you have TLS configured above for this.
# basic_username = "foobar"
# basic_password = "barfoo"
## Influx line protocol parser
## 'internal' is the default. 'upstream' is a newer parser that is faster
## and more memory efficient.
# parser_type = "internal"

View File

@ -0,0 +1,32 @@
# Accept metrics over InfluxDB 2.x HTTP API
[[inputs.influxdb_v2_listener]]
## Address and port to host InfluxDB listener on
## (Double check the port. Could be 9999 if using OSS Beta)
service_address = ":8086"
## Maximum allowed HTTP request body size in bytes.
## 0 means to use the default of 32MiB.
# max_body_size = "32MiB"
## Optional tag to determine the bucket.
## If the write has a bucket in the query string then it will be kept in this tag name.
## This tag can be used in downstream outputs.
  ## The default value of nothing means it will be off and the bucket will not be recorded.
# bucket_tag = ""
## Set one or more allowed client CA certificate file names to
## enable mutually authenticated TLS connections
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
## Add service certificate and key
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Optional token to accept for HTTP authentication.
## You probably want to make sure you have TLS configured above for this.
# token = "some-long-shared-secret-token"
## Influx line protocol parser
## 'internal' is the default. 'upstream' is a newer parser that is faster
## and more memory efficient.
# parser_type = "internal"

View File

@ -0,0 +1,47 @@
# Intel Performance Monitoring Unit plugin exposes Intel PMU metrics available through Linux Perf subsystem
[[inputs.intel_pmu]]
## List of filesystem locations of JSON files that contain PMU event definitions.
event_definitions = ["/var/cache/pmu/GenuineIntel-6-55-4-core.json", "/var/cache/pmu/GenuineIntel-6-55-4-uncore.json"]
## List of core events measurement entities. There can be more than one core_events sections.
[[inputs.intel_pmu.core_events]]
## List of events to be counted. Event names shall match names from event_definitions files.
## Single entry can contain name of the event (case insensitive) augmented with config options and perf modifiers.
## If absent, all core events from provided event_definitions are counted skipping unresolvable ones.
events = ["INST_RETIRED.ANY", "CPU_CLK_UNHALTED.THREAD_ANY:config1=0x4043200000000k"]
## Limits the counting of events to core numbers specified.
## If absent, events are counted on all cores.
## Single "0", multiple "0,1,2" and range "0-2" notation is supported for each array element.
## example: cores = ["0,2", "4", "12-16"]
cores = ["0"]
## Indicator that plugin shall attempt to run core_events.events as a single perf group.
## If absent or set to false, each event is counted individually. Defaults to false.
## This limits the number of events that can be measured to a maximum of available hardware counters per core.
## Could vary depending on type of event, use of fixed counters.
# perf_group = false
## Optionally set a custom tag value that will be added to every measurement within this events group.
## Can be applied to any group of events, unrelated to perf_group setting.
# events_tag = ""
## List of uncore event measurement entities. There can be more than one uncore_events sections.
[[inputs.intel_pmu.uncore_events]]
## List of events to be counted. Event names shall match names from event_definitions files.
## Single entry can contain name of the event (case insensitive) augmented with config options and perf modifiers.
## If absent, all uncore events from provided event_definitions are counted skipping unresolvable ones.
events = ["UNC_CHA_CLOCKTICKS", "UNC_CHA_TOR_OCCUPANCY.IA_MISS"]
## Limits the counting of events to specified sockets.
## If absent, events are counted on all sockets.
## Single "0", multiple "0,1" and range "0-1" notation is supported for each array element.
## example: sockets = ["0-2"]
sockets = ["0"]
## Indicator that plugin shall provide an aggregated value for multiple units of same type distributed in an uncore.
## If absent or set to false, events for each unit are exposed as separate metric. Defaults to false.
# aggregate_uncore_units = false
## Optionally set a custom tag value that will be added to every measurement within this events group.
# events_tag = ""

View File

@ -0,0 +1,9 @@
# Intel PowerStat plugin enables monitoring of platform metrics (power, TDP) and per-CPU metrics like temperature, power and utilization.
[[inputs.intel_powerstat]]
## All global metrics are always collected by Intel PowerStat plugin.
## User can choose which per-CPU metrics are monitored by the plugin in cpu_metrics array.
## Empty array means no per-CPU specific metrics will be collected by the plugin - in this case only platform level
## telemetry will be exposed by Intel PowerStat plugin.
## Supported options:
## "cpu_frequency", "cpu_busy_frequency", "cpu_temperature", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles"
# cpu_metrics = []
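  ##
  ## e.g. a minimal sketch: collect only per-CPU frequency and temperature in
  ## addition to the always-collected platform metrics.
  # cpu_metrics = ["cpu_frequency", "cpu_temperature"]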

View File

@ -0,0 +1,28 @@
# Read Intel RDT metrics
[[inputs.intel_rdt]]
## Optionally set sampling interval to Nx100ms.
## This value is propagated to pqos tool. Interval format is defined by pqos itself.
  ## If not provided or set to 0, it will be set to 10 = 10x100ms = 1s.
# sampling_interval = "10"
## Optionally specify the path to pqos executable.
## If not provided, auto discovery will be performed.
# pqos_path = "/usr/local/bin/pqos"
## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated.
## If not provided, default value is false.
# shortened_metrics = false
## Specify the list of groups of CPU core(s) to be provided as pqos input.
## Mandatory if processes aren't set and forbidden if processes are specified.
## e.g. ["0-3", "4,5,6"] or ["1-3,4"]
# cores = ["0-3"]
## Specify the list of processes for which Metrics will be collected.
## Mandatory if cores aren't set and forbidden if cores are specified.
## e.g. ["qemu", "pmd"]
# processes = ["process"]
## Specify if the pqos process should be called with sudo.
## Mandatory if the telegraf process does not run as root.
# use_sudo = false

View File

@ -0,0 +1,4 @@
# Collect statistics about itself
[[inputs.internal]]
## If true, collect telegraf memory stats.
# collect_memstats = true

View File

@ -0,0 +1,7 @@
# Monitors internet speed using speedtest.net service
[[inputs.internet_speed]]
  ## Set to true to run the file download test
# enable_file_download = false
## Caches the closest server location
# cache = false

View File

@ -0,0 +1,13 @@
# This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs.
[[inputs.interrupts]]
## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is
## stored as a field.
##
## The default is false for backwards compatibility, and will be changed to
## true in a future version. It is recommended to set to true on new
## deployments.
# cpu_as_tag = false
## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e.
# [inputs.interrupts.tagdrop]
# irq = [ "NET_RX", "TASKLET" ]

View File

@ -0,0 +1,43 @@
# Read metrics from the bare metal servers via IPMI
[[inputs.ipmi_sensor]]
## optionally specify the path to the ipmitool executable
# path = "/usr/bin/ipmitool"
##
## Setting 'use_sudo' to true will make use of sudo to run ipmitool.
## Sudo must be configured to allow the telegraf user to run ipmitool
## without a password.
# use_sudo = false
##
## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR
# privilege = "ADMINISTRATOR"
##
## optionally specify one or more servers via a url matching
## [username[:password]@][protocol[(address)]]
## e.g.
## root:passwd@lan(127.0.0.1)
##
## if no servers are specified, local machine sensor stats will be queried
##
# servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
## gaps or overlap in pulled data
interval = "30s"
## Timeout for the ipmitool command to complete. Default is 20 seconds.
timeout = "20s"
## Schema Version: (Optional, defaults to version 1)
metric_version = 2
  ## Optionally provide the hex key for the IPMI connection.
# hex_key = ""
  ## If ipmitool should use a cache.
  ## ipmitool can run about 2 to 10 times faster with the cache enabled (observed on HP G10 servers running Ubuntu 20.04).
  ## The cache file may not work well for you if some sensors come up late.
# use_cache = false
## Path to the ipmitools cache file (defaults to OS temp dir)
## The provided path must exist and must be writable
# cache_path = ""

View File

@ -0,0 +1,12 @@
# Gather packets and bytes counters from Linux ipsets
[[inputs.ipset]]
## By default, we only show sets which have already matched at least 1 packet.
  ## Set include_unmatched_sets = true to gather them all.
include_unmatched_sets = false
## Adjust your sudo settings appropriately if using this option ("sudo ipset save")
## You can avoid using sudo or root, by setting appropriate privileges for
## the telegraf.service systemd service.
use_sudo = false
## The default timeout of 1s for ipset execution can be overridden here:
# timeout = "1s"

View File

@ -0,0 +1,18 @@
# Gather packets and bytes throughput from iptables
[[inputs.iptables]]
## iptables require root access on most systems.
## Setting 'use_sudo' to true will make use of sudo to run iptables.
## Users must configure sudo to allow telegraf user to run iptables with no password.
## iptables can be restricted to only list command "iptables -nvL".
use_sudo = false
## Setting 'use_lock' to true runs iptables with the "-w" option.
  ## Adjust your sudo settings appropriately if using this option ("iptables -w 5 -nvL")
use_lock = false
## Define an alternate executable, such as "ip6tables". Default is "iptables".
# binary = "ip6tables"
## defines the table to monitor:
table = "filter"
## defines the chains to monitor.
## NOTE: iptables rules without a comment will not be monitored.
## Read the plugin documentation for more information.
chains = [ "INPUT" ]

View File

@ -0,0 +1,3 @@
# Collect virtual and real server stats from Linux IPVS
[[inputs.ipvs]]
# no configuration

View File

@ -0,0 +1,47 @@
# Read jobs and cluster metrics from Jenkins instances
[[inputs.jenkins]]
## The Jenkins URL in the format "schema://host:port"
url = "http://my-jenkins-instance:8080"
# username = "admin"
# password = "admin"
## Set response_timeout
response_timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## Optional Max Job Build Age filter
## Default 1 hour, ignore builds older than max_build_age
# max_build_age = "1h"
## Optional Sub Job Depth filter
## Jenkins can have unlimited layer of sub jobs
  ## This config limits how many layers are pulled; the default value of 0 means
  ## unlimited pulling until there are no more sub jobs
# max_subjob_depth = 0
## Optional Sub Job Per Layer
## In workflow-multibranch-plugin, each branch will be created as a sub job.
  ## This config limits collection to only the latest branches in each layer;
  ## leaving it empty uses the default value of 10
# max_subjob_per_layer = 10
## Jobs to include or exclude from gathering
## When using both lists, job_exclude has priority.
## Wildcards are supported: [ "jobA/*", "jobB/subjob1/*"]
# job_include = [ "*" ]
# job_exclude = [ ]
## Nodes to include or exclude from gathering
## When using both lists, node_exclude has priority.
# node_include = [ "*" ]
# node_exclude = [ ]
## Worker pool for jenkins plugin only
  ## Leaving this field empty will use the default value of 5
# max_connections = 5

View File

@ -0,0 +1,54 @@
# Read JMX metrics through Jolokia
[[inputs.jolokia]]
## This is the context root used to compose the jolokia url
## NOTE that Jolokia requires a trailing slash at the end of the context root
context = "/jolokia/"
## This specifies the mode used
# mode = "proxy"
#
## When in proxy mode this section is used to specify further
## proxy address configurations.
## Remember to change host address to fit your environment.
# [inputs.jolokia.proxy]
# host = "127.0.0.1"
# port = "8080"
## Optional http timeouts
##
## response_header_timeout, if non-zero, specifies the amount of time to wait
## for a server's response headers after fully writing the request.
# response_header_timeout = "3s"
##
## client_timeout specifies a time limit for requests made by this client.
## Includes connection time, any redirects, and reading the response body.
# client_timeout = "4s"
## List of servers exposing jolokia read service
[[inputs.jolokia.servers]]
name = "as-server-01"
host = "127.0.0.1"
port = "8080"
# username = "myuser"
# password = "mypassword"
## List of metrics collected on above servers
## Each metric consists in a name, a jmx path and either
## a pass or drop slice attribute.
## This collect all heap memory usage metrics.
[[inputs.jolokia.metrics]]
name = "heap_memory_usage"
mbean = "java.lang:type=Memory"
attribute = "HeapMemoryUsage"
## This collect thread counts metrics.
[[inputs.jolokia.metrics]]
name = "thread_count"
mbean = "java.lang:type=Threading"
attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
## This collect number of class loaded/unloaded counts metrics.
[[inputs.jolokia.metrics]]
name = "class_count"
mbean = "java.lang:type=ClassLoading"
attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"

View File

@ -0,0 +1,24 @@
# Read JMX metrics from a Jolokia REST agent endpoint
[[inputs.jolokia2_agent]]
# default_tag_prefix = ""
# default_field_prefix = ""
# default_field_separator = "."
# Add agents URLs to query
urls = ["http://localhost:8080/jolokia"]
# username = ""
# password = ""
# response_timeout = "5s"
## Optional TLS config
# tls_ca = "/var/private/ca.pem"
# tls_cert = "/var/private/client.pem"
# tls_key = "/var/private/client-key.pem"
# insecure_skip_verify = false
## Add metrics to read
[[inputs.jolokia2_agent.metric]]
name = "java_runtime"
mbean = "java.lang:type=Runtime"
paths = ["Uptime"]

View File

@ -0,0 +1,31 @@
# Read JMX metrics from a Jolokia REST proxy endpoint
[[inputs.jolokia2_proxy]]
# default_tag_prefix = ""
# default_field_prefix = ""
# default_field_separator = "."
## Proxy agent
url = "http://localhost:8080/jolokia"
# username = ""
# password = ""
# response_timeout = "5s"
## Optional TLS config
# tls_ca = "/var/private/ca.pem"
# tls_cert = "/var/private/client.pem"
# tls_key = "/var/private/client-key.pem"
# insecure_skip_verify = false
## Add proxy targets to query
# default_target_username = ""
# default_target_password = ""
[[inputs.jolokia2_proxy.target]]
url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
# username = ""
# password = ""
## Add metrics to read
[[inputs.jolokia2_proxy.metric]]
name = "java_runtime"
mbean = "java.lang:type=Runtime"
paths = ["Uptime"]

View File

@ -0,0 +1,49 @@
# Subscribe and receive OpenConfig Telemetry data using JTI
[[inputs.jti_openconfig_telemetry]]
## List of device addresses to collect telemetry from
servers = ["localhost:1883"]
## Authentication details. Username and password are must if device expects
## authentication. Client ID must be unique when connecting from multiple instances
## of telegraf to the same device
username = "user"
password = "pass"
client_id = "telegraf"
## Frequency to get data
sample_frequency = "1000ms"
## Sensors to subscribe for
  ## An identifier for each sensor can be provided in the path by separating it with a space.
  ## Otherwise the sensor path will be used as the identifier.
  ## When an identifier is used, we can provide a list of space separated sensors.
## A single subscription will be created with all these sensors and data will
## be saved to measurement with this identifier name
sensors = [
"/interfaces/",
"collection /components/ /lldp",
]
  ## A sensor group level reporting rate can also be specified by prefixing the
  ## sensor paths / collection name with the reporting rate as a duration.
  ## Entries without a reporting rate use the configured sample frequency.
sensors = [
"1000ms customReporting /interfaces /lldp",
"2000ms collection /components",
"/interfaces",
]
## Optional TLS Config
# enable_tls = true
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms.
## Failed streams/calls will not be retried if 0 is provided
retry_delay = "1000ms"
## To treat all string values as tags, set this to true
str_as_tags = false

View File

@ -0,0 +1,99 @@
# Read metrics from Kafka topics
[[inputs.kafka_consumer]]
## Kafka brokers.
brokers = ["localhost:9092"]
## Topics to consume.
topics = ["telegraf"]
## When set this tag will be added to all metrics with the topic as the value.
# topic_tag = ""
## Optional Client id
# client_id = "Telegraf"
## Set the minimal supported Kafka version. Setting this enables the use of new
## Kafka features and APIs. Must be 0.10.2.0 or greater.
## ex: version = "1.1.0"
# version = ""
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## SASL authentication credentials. These settings should typically be used
## with TLS encryption enabled
# sasl_username = "kafka"
# sasl_password = "secret"
## Optional SASL:
## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI
## (defaults to PLAIN)
# sasl_mechanism = ""
## used if sasl_mechanism is GSSAPI (experimental)
# sasl_gssapi_service_name = ""
# ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH
# sasl_gssapi_auth_type = "KRB5_USER_AUTH"
# sasl_gssapi_kerberos_config_path = "/"
# sasl_gssapi_realm = "realm"
# sasl_gssapi_key_tab_path = ""
# sasl_gssapi_disable_pafxfast = false
## used if sasl_mechanism is OAUTHBEARER (experimental)
# sasl_access_token = ""
## SASL protocol version. When connecting to Azure EventHub set to 0.
# sasl_version = 1
  ## Disable Kafka metadata full fetch
# metadata_full = false
## Name of the consumer group.
# consumer_group = "telegraf_metrics_consumers"
## Compression codec represents the various compression codecs recognized by
## Kafka in messages.
## 0 : None
## 1 : Gzip
## 2 : Snappy
## 3 : LZ4
## 4 : ZSTD
# compression_codec = 0
## Initial offset position; one of "oldest" or "newest".
# offset = "oldest"
## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky".
# balance_strategy = "range"
## Maximum length of a message to consume, in bytes (default 0/unlimited);
## larger messages are dropped
max_message_len = 1000000
## Maximum messages to read from the broker that have not been written by an
## output. For best throughput set based on the number of metrics within
## each message and the size of the output's metric_batch_size.
##
## For example, if each message from the queue contains 10 metrics and the
## output metric_batch_size is 1000, setting this to 100 will ensure that a
## full batch is collected and the write is triggered immediately without
## waiting until the next flush_interval.
# max_undelivered_messages = 1000
## Maximum amount of time the consumer should take to process messages. If
## the debug log prints messages from sarama about 'abandoning subscription
## to [topic] because consuming was taking too long', increase this value to
## longer than the time taken by the output plugin(s).
##
## Note that the effective timeout could be between 'max_processing_time' and
## '2 * max_processing_time'.
# max_processing_time = "100ms"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"

View File

@ -0,0 +1,27 @@
  ## DEPRECATED: The 'kafka_consumer_legacy' plugin is deprecated in version 1.4.0; use 'inputs.kafka_consumer' instead. NOTE: 'kafka_consumer' only supports Kafka v0.8+.
# Read metrics from Kafka topic(s)
[[inputs.kafka_consumer_legacy]]
## topic(s) to consume
topics = ["telegraf"]
## an array of Zookeeper connection strings
zookeeper_peers = ["localhost:2181"]
## Zookeeper Chroot
zookeeper_chroot = ""
## the name of the consumer group
consumer_group = "telegraf_metrics_consumers"
## Offset (must be either "oldest" or "newest")
offset = "oldest"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
## Maximum length of a message to consume, in bytes (default 0/unlimited);
## larger messages are dropped
max_message_len = 65536

View File

@ -0,0 +1,17 @@
# Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints
[[inputs.kapacitor]]
## Multiple URLs from which to read Kapacitor-formatted JSON
## Default is "http://localhost:9092/kapacitor/v1/debug/vars".
urls = [
"http://localhost:9092/kapacitor/v1/debug/vars"
]
## Time limit for http requests
timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

View File

@ -0,0 +1,3 @@
# Get kernel statistics from /proc/stat
[[inputs.kernel]]
# no configuration

View File

@ -0,0 +1,3 @@
# Get kernel statistics from /proc/vmstat
[[inputs.kernel_vmstat]]
# no configuration

View File

@ -0,0 +1,18 @@
# Read status information from one or more Kibana servers
[[inputs.kibana]]
## Specify a list of one or more Kibana servers
servers = ["http://localhost:5601"]
## Timeout for HTTP requests
timeout = "5s"
## HTTP Basic Auth credentials
# username = "username"
# password = "pa$$word"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

View File

@ -0,0 +1,66 @@
# Configuration for the AWS Kinesis input.
[[inputs.kinesis_consumer]]
## Amazon REGION of kinesis endpoint.
region = "ap-southeast-2"
## Amazon Credentials
## Credentials are loaded in the following order
## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
## 2) Assumed credentials via STS if role_arn is specified
## 3) explicit credentials from 'access_key' and 'secret_key'
## 4) shared profile from 'profile'
## 5) environment variables
## 6) shared credentials file
## 7) EC2 Instance Profile
# access_key = ""
# secret_key = ""
# token = ""
# role_arn = ""
# web_identity_token_file = ""
# role_session_name = ""
# profile = ""
# shared_credential_file = ""
## Endpoint to make request against, the correct endpoint is automatically
## determined and this option should only be set if you wish to override the
## default.
## ex: endpoint_url = "http://localhost:8000"
# endpoint_url = ""
## Kinesis StreamName must exist prior to starting telegraf.
streamname = "StreamName"
## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported)
# shard_iterator_type = "TRIM_HORIZON"
## Maximum messages to read from the broker that have not been written by an
## output. For best throughput set based on the number of metrics within
## each message and the size of the output's metric_batch_size.
##
## For example, if each message from the queue contains 10 metrics and the
## output metric_batch_size is 1000, setting this to 100 will ensure that a
## full batch is collected and the write is triggered immediately without
## waiting until the next flush_interval.
# max_undelivered_messages = 1000
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
##
## The content encoding of the data from kinesis
  ## If you are processing a CloudWatch Logs kinesis stream then set this to "gzip",
  ## as AWS compresses CloudWatch log data before it is sent to kinesis. (AWS also
  ## base64 encodes the gzipped data before pushing it to the stream; the base64
  ## decoding is done automatically by the golang sdk as data is read from kinesis.)
##
# content_encoding = "identity"
## Optional
## Configuration for a dynamodb checkpoint
[inputs.kinesis_consumer.checkpoint_dynamodb]
## unique name for this consumer
app_name = "default"
table_name = "default"

View File

@ -0,0 +1,22 @@
# Listener capable of handling KNX bus messages provided through a KNX-IP Interface.
[[inputs.knx_listener]]
## Type of KNX-IP interface.
## Can be either "tunnel" or "router".
# service_type = "tunnel"
## Address of the KNX-IP interface.
service_address = "localhost:3671"
## Measurement definition(s)
# [[inputs.knx_listener.measurement]]
# ## Name of the measurement
# name = "temperature"
# ## Datapoint-Type (DPT) of the KNX messages
# dpt = "9.001"
# ## List of Group-Addresses (GAs) assigned to the measurement
# addresses = ["5/5/1"]
# [[inputs.knx_listener.measurement]]
# name = "illumination"
# dpt = "9.004"
# addresses = ["5/5/3"]

View File

@ -0,0 +1,48 @@
# Read metrics from the Kubernetes api
[[inputs.kube_inventory]]
## URL for the Kubernetes API
url = "https://127.0.0.1"
## Namespace to use. Set to "" to use all namespaces.
# namespace = "default"
## Use bearer token for authorization. ('bearer_token' takes priority)
## If both of these are empty, we'll use the default serviceaccount:
## at: /run/secrets/kubernetes.io/serviceaccount/token
# bearer_token = "/path/to/bearer/token"
## OR
# bearer_token_string = "abc_123"
## Set response_timeout (default 5 seconds)
# response_timeout = "5s"
## Optional Resources to exclude from gathering
  ## Leave them blank to try to gather everything available.
  ## Values can be - "daemonsets", "deployments", "endpoints", "ingress", "nodes",
## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets"
# resource_exclude = [ "deployments", "nodes", "statefulsets" ]
## Optional Resources to include when gathering
## Overrides resource_exclude if both set.
# resource_include = [ "deployments", "nodes", "statefulsets" ]
## selectors to include and exclude as tags. Globs accepted.
## Note that an empty array for both will include all selectors as tags
## selector_exclude overrides selector_include if both set.
# selector_include = []
# selector_exclude = ["*"]
## Optional TLS Config
## Trusted root certificates for server
# tls_ca = "/path/to/cafile"
## Used for TLS client certificate authentication
# tls_cert = "/path/to/certfile"
## Used for TLS client certificate authentication
# tls_key = "/path/to/keyfile"
## Send the specified TLS server name via SNI
# tls_server_name = "kubernetes.example.com"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Uncomment to remove deprecated metrics.
# fielddrop = ["terminated_reason"]

View File

@ -0,0 +1,26 @@
# Read metrics from the kubernetes kubelet api
[[inputs.kubernetes]]
## URL for the kubelet
url = "http://127.0.0.1:10255"
## Use bearer token for authorization. ('bearer_token' takes priority)
## If both of these are empty, we'll use the default serviceaccount:
## at: /run/secrets/kubernetes.io/serviceaccount/token
# bearer_token = "/path/to/bearer/token"
## OR
# bearer_token_string = "abc_123"
## Pod labels to be added as tags. An empty array for both include and
## exclude will include all labels.
# label_include = []
# label_exclude = ["*"]
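  ##
  ## e.g. a minimal sketch (hypothetical label names): keep only the "app" and
  ## "release" pod labels as tags.
  # label_include = ["app", "release"]
  # label_exclude = []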
## Set response_timeout (default 5 seconds)
# response_timeout = "5s"
## Optional TLS Config
  # tls_ca = "/path/to/cafile"
  # tls_cert = "/path/to/certfile"
  # tls_key = "/path/to/keyfile"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

View File

@ -0,0 +1,7 @@
# Read metrics off Arista LANZ, via socket
[[inputs.lanz]]
## URL to Arista LANZ endpoint
servers = [
"tcp://switch1.int.example.com:50001",
"tcp://switch2.int.example.com:50001",
]

View File

@ -0,0 +1,5 @@
# Read metrics from a LeoFS Server via SNMP
[[inputs.leofs]]
## An array of URLs of the form:
## host [ ":" port]
servers = ["127.0.0.1:4010"]

View File

@ -0,0 +1,3 @@
# Provides Linux sysctl fs metrics
[[inputs.linux_sysctl_fs]]
# no configuration
