chore(inputs_m-z): migrate sample configs into separate files (#11133)

Sebastian Spaink 2022-05-18 11:31:34 -05:00 committed by GitHub
parent 256caede89
commit 0f5dc9946c
113 changed files with 3239 additions and 0 deletions


@ -0,0 +1,12 @@
# Gathers metrics from the /3.0/reports MailChimp API
[[inputs.mailchimp]]
## MailChimp API key
## get from https://admin.mailchimp.com/account/api/
api_key = "" # required
## Reports for campaigns sent more than days_old ago will not be collected.
## 0 means collect all and is the default value.
days_old = 0
## Campaign ID to get, if empty gets all campaigns, this option overrides days_old
# campaign_id = ""


@ -0,0 +1,18 @@
# Retrieves information on a specific host in a MarkLogic Cluster
[[inputs.marklogic]]
## Base URL of the MarkLogic HTTP Server.
url = "http://localhost:8002"
## List of specific hostnames to retrieve information from. At least one (1) is required.
# hosts = ["hostname1", "hostname2"]
## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges
# username = "myuser"
# password = "mypassword"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false


@ -0,0 +1,8 @@
# Read metrics from one or many mcrouter servers.
[[inputs.mcrouter]]
## An array of addresses to gather stats about. Specify an IP or hostname
## with port, e.g. tcp://localhost:11211, tcp://10.0.0.1:11211, etc.
servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"]
## Timeout for metric collections from all servers. Minimum timeout is "1s".
# timeout = "5s"


@ -0,0 +1,5 @@
# Get kernel statistics from /proc/mdstat
[[inputs.mdstat]]
## Sets file path
## If not specified, then default is /proc/mdstat
# file_name = "/proc/mdstat"


@ -0,0 +1,3 @@
# Read metrics about memory usage
[[inputs.mem]]
# no configuration


@ -0,0 +1,15 @@
# Read metrics from one or many memcached servers.
[[inputs.memcached]]
# An array of addresses to gather stats about. Specify an IP or hostname
# with optional port, e.g. localhost, 10.0.0.1:11211, etc.
servers = ["localhost:11211"]
# An array of unix memcached sockets to gather stats about.
# unix_sockets = ["/var/run/memcached.sock"]
## Optional TLS Config
# enable_tls = true
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## If true, skip chain & host verification
# insecure_skip_verify = true


@ -0,0 +1,42 @@
# Telegraf plugin for gathering metrics from N Mesos masters
[[inputs.mesos]]
## Timeout, in ms.
timeout = 100
## A list of Mesos masters.
masters = ["http://localhost:5050"]
## Master metrics groups to be collected, by default, all enabled.
master_collections = [
"resources",
"master",
"system",
"agents",
"frameworks",
"framework_offers",
"tasks",
"messages",
"evqueue",
"registrar",
"allocator",
]
## A list of Mesos slaves, default is []
# slaves = []
## Slave metrics groups to be collected, by default, all enabled.
# slave_collections = [
# "resources",
# "agent",
# "system",
# "executors",
# "tasks",
# "messages",
# ]
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false


@ -0,0 +1,13 @@
# Collects scores from a Minecraft server's scoreboard using the RCON protocol
[[inputs.minecraft]]
## Address of the Minecraft server.
# server = "localhost"
## Server RCON Port.
# port = "25575"
## Server RCON Password.
password = ""
## Uncomment to remove deprecated metric components.
# tagdrop = ["server"]


@ -0,0 +1,27 @@
# Generate metrics for test and demonstration purposes
[[inputs.mock]]
## Set the metric name to use for reporting
metric_name = "mock"
## Optional string key-value pairs of tags to add to all metrics
# [inputs.mock.tags]
# "key" = "value"
## One or more mock data fields *must* be defined.
##
## [[inputs.mock.random]]
## name = "rand"
## min = 1.0
## max = 6.0
## [[inputs.mock.sine_wave]]
## name = "wave"
## amplitude = 1.0
## period = 0.5
## [[inputs.mock.step]]
## name = "plus_one"
## start = 0.0
## step = 1.0
## [[inputs.mock.stock]]
## name = "abc"
## price = 50.00
## volatility = 0.2


@ -0,0 +1,196 @@
# Retrieve data from MODBUS slave devices
[[inputs.modbus]]
## Connection Configuration
##
## The plugin supports connections to PLCs via MODBUS/TCP, RTU over TCP, ASCII over TCP or
## via serial line communication in binary (RTU) or readable (ASCII) encoding
##
## Device name
name = "Device"
## Slave ID - addresses a MODBUS device on the bus
## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved]
slave_id = 1
## Timeout for each request
timeout = "1s"
## Maximum number of retries and the time to wait between retries
## when a slave-device is busy.
# busy_retries = 0
# busy_retries_wait = "100ms"
# TCP - connect via Modbus/TCP
controller = "tcp://localhost:502"
## Serial (RS485; RS232)
# controller = "file:///dev/ttyUSB0"
# baud_rate = 9600
# data_bits = 8
# parity = "N"
# stop_bits = 1
## For Modbus over TCP you can choose between "TCP", "RTUoverTCP" and "ASCIIoverTCP"
## default behaviour is "TCP" if the controller is TCP
## For Serial you can choose between "RTU" and "ASCII"
# transmission_mode = "RTU"
## Trace the connection to the modbus device as debug messages
## Note: You have to enable telegraf's debug mode to see those messages!
# debug_connection = false
## Define the configuration schema
## |---register -- define fields per register type in the original style (only supports one slave ID)
## |---request  -- define fields on a per-request basis
configuration_type = "register"
## --- "register" configuration style ---
## Measurements
##
## Digital Variables, Discrete Inputs and Coils
## measurement - the (optional) measurement name, defaults to "modbus"
## name - the variable name
## address - variable address
discrete_inputs = [
{ name = "start", address = [0]},
{ name = "stop", address = [1]},
{ name = "reset", address = [2]},
{ name = "emergency_stop", address = [3]},
]
coils = [
{ name = "motor1_run", address = [0]},
{ name = "motor1_jog", address = [1]},
{ name = "motor1_stop", address = [2]},
]
## Analog Variables, Input Registers and Holding Registers
## measurement - the (optional) measurement name, defaults to "modbus"
## name - the variable name
## byte_order - the ordering of bytes
## |---AB, ABCD - Big Endian
## |---BA, DCBA - Little Endian
## |---BADC - Mid-Big Endian
## |---CDAB - Mid-Little Endian
## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64,
## FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation)
## FLOAT32, FIXED, UFIXED (fixed-point representation on input)
## scale - the final numeric variable representation
## address - variable address
holding_registers = [
{ name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]},
{ name = "voltage", byte_order = "AB", data_type = "FIXED", scale=0.1, address = [0]},
{ name = "energy", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [5,6]},
{ name = "current", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [1,2]},
{ name = "frequency", byte_order = "AB", data_type = "UFIXED", scale=0.1, address = [7]},
{ name = "power", byte_order = "ABCD", data_type = "UFIXED", scale=0.1, address = [3,4]},
]
input_registers = [
{ name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]},
{ name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]},
{ name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]},
]
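## Worked example (illustrative values, not part of the stock sample): with
## data_type = "FIXED" and scale = 0.1, a raw register value of 417 is
## reported as 417 * 0.1 = 41.7; the scale factor is applied to the decoded
## register value before output.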
## --- "request" configuration style ---
## Per request definition
##
## Define a request sent to the device
## Multiple of those requests can be defined. Data will be collated into metrics at the end of data collection.
[[inputs.modbus.request]]
## ID of the modbus slave device to query.
## If you need to query multiple slave-devices, create several "request" definitions.
slave_id = 1
## Byte order of the data.
## |---ABCD -- Big Endian (Motorola)
## |---DCBA -- Little Endian (Intel)
## |---BADC -- Big Endian with byte swap
## |---CDAB -- Little Endian with byte swap
byte_order = "ABCD"
## Type of the register for the request
## Can be "coil", "discrete", "holding" or "input"
register = "coil"
## Name of the measurement.
## Can be overridden by the individual field definitions. Defaults to "modbus"
# measurement = "modbus"
## Field definitions
## Analog Variables, Input Registers and Holding Registers
## address - address of the register to query. For coil and discrete inputs this is the bit address.
## name *1 - field name
## type *1,2 - type of the modbus field, can be INT16, UINT16, INT32, UINT32, INT64, UINT64 and
## FLOAT32, FLOAT64 (IEEE 754 binary representation)
## scale *1,2 - (optional) factor to scale the variable with
## output *1,2 - (optional) type of resulting field, can be INT64, UINT64 or FLOAT64. Defaults to FLOAT64 if
## "scale" is provided and to the input "type" class otherwise (i.e. INT* -> INT64, etc).
## measurement *1 - (optional) measurement name, defaults to the setting of the request
## omit - (optional) omit this field. Useful to leave out single values when querying many registers
## with a single request. Defaults to "false".
##
## *1: Those fields are ignored if field is omitted ("omit"=true)
##
## *2: These fields are ignored for both "coil" and "discrete" input registers. For those register types
## the fields are output as zero or one in UINT64 format by default.
## Coil / discrete input example
fields = [
{ address=0, name="motor1_run"},
{ address=1, name="jog", measurement="motor"},
{ address=2, name="motor1_stop", omit=true},
{ address=3, name="motor1_overheating"},
]
[inputs.modbus.request.tags]
machine = "impresser"
location = "main building"
[[inputs.modbus.request]]
## Holding example
## All of those examples will result in FLOAT64 field outputs
slave_id = 1
byte_order = "DCBA"
register = "holding"
fields = [
{ address=0, name="voltage", type="INT16", scale=0.1 },
{ address=1, name="current", type="INT32", scale=0.001 },
{ address=3, name="power", type="UINT32", omit=true },
{ address=5, name="energy", type="FLOAT32", scale=0.001, measurement="W" },
{ address=7, name="frequency", type="UINT32", scale=0.1 },
{ address=8, name="power_factor", type="INT64", scale=0.01 },
]
[inputs.modbus.request.tags]
machine = "impresser"
location = "main building"
[[inputs.modbus.request]]
## Input example with type conversions
slave_id = 1
byte_order = "ABCD"
register = "input"
fields = [
{ address=0, name="rpm", type="INT16" }, # will result in INT64 field
{ address=1, name="temperature", type="INT16", scale=0.1 }, # will result in FLOAT64 field
{ address=2, name="force", type="INT32", output="FLOAT64" }, # will result in FLOAT64 field
{ address=4, name="hours", type="UINT32" }, # will result in UINT64 field
]
[inputs.modbus.request.tags]
machine = "impresser"
location = "main building"
## Enable workarounds required by some devices to work correctly
# [inputs.modbus.workarounds]
## Pause between read requests sent to the device. This might be necessary for (slow) serial devices.
# pause_between_requests = "0ms"
## Close the connection after every gather cycle. Usually the plugin closes the connection after a certain
## idle-timeout, however, if you query a device with limited simultaneous connectivity (e.g. serial devices)
## from multiple instances you might want to only stay connected during gather and disconnect afterwards.
# close_connection_after_gather = false


@ -0,0 +1,34 @@
# Read metrics from one or many MongoDB servers
[[inputs.mongodb]]
## An array of URLs of the form:
## "mongodb://" [user ":" pass "@"] host [ ":" port]
## For example:
## mongodb://user:auth_key@10.10.3.30:27017,
## mongodb://10.10.3.33:18832,
servers = ["mongodb://127.0.0.1:27017/?connect=direct"]
## When true, collect cluster status.
## Note that the query that counts jumbo chunks triggers a COLLSCAN, which
## may have an impact on performance.
# gather_cluster_status = true
## When true, collect per database stats
# gather_perdb_stats = false
## When true, collect per collection stats
# gather_col_stats = false
## When true, collect usage statistics for each collection
## (insert, update, queries, remove, getmore, commands etc...).
# gather_top_stat = false
## List of databases for which collection stats are collected
## If empty, collection stats are gathered for all databases
# col_stats_dbs = ["local"]
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false


@ -0,0 +1,18 @@
# Read metrics and status information about processes managed by Monit
[[inputs.monit]]
## Monit HTTPD address
address = "http://127.0.0.1:2812"
## Username and Password for Monit
# username = ""
# password = ""
## Amount of time allowed to complete the HTTP request
# timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false


@ -0,0 +1,79 @@
# Read metrics from MQTT topic(s)
[[inputs.mqtt_consumer]]
## Broker URLs for the MQTT server or cluster. To connect to multiple
## clusters or standalone servers, use a separate plugin instance.
## example: servers = ["tcp://localhost:1883"]
## servers = ["ssl://localhost:1883"]
## servers = ["ws://localhost:1883"]
servers = ["tcp://127.0.0.1:1883"]
## Topics that will be subscribed to.
topics = [
"telegraf/host01/cpu",
"telegraf/+/mem",
"sensors/#",
]
## The message topic will be stored in a tag specified by this value. If set
## to the empty string no topic tag will be created.
# topic_tag = "topic"
## QoS policy for messages
## 0 = at most once
## 1 = at least once
## 2 = exactly once
##
## When using a QoS of 1 or 2, you should enable persistent_session to allow
## resuming unacknowledged messages.
# qos = 0
## Connection timeout for initial connection in seconds
# connection_timeout = "30s"
## Maximum messages to read from the broker that have not been written by an
## output. For best throughput set based on the number of metrics within
## each message and the size of the output's metric_batch_size.
##
## For example, if each message from the queue contains 10 metrics and the
## output metric_batch_size is 1000, setting this to 100 will ensure that a
## full batch is collected and the write is triggered immediately without
## waiting until the next flush_interval.
# max_undelivered_messages = 1000
## Persistent session disables clearing of the client session on connection.
## In order for this option to work you must also set client_id to identify
## the client. To receive messages that arrived while the client is offline,
## also set the qos option to 1 or 2 and don't forget to also set the QoS when
## publishing.
# persistent_session = false
## If unset, a random client ID will be generated.
# client_id = ""
## Username and password to connect to the MQTT server.
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
## Enable extracting tag values from MQTT topics
## _ denotes an ignored entry in the topic path
# [[inputs.mqtt_consumer.topic_parsing]]
# topic = ""
# measurement = ""
# tags = ""
# fields = ""
## Values supported are int, float, uint
# [[inputs.mqtt_consumer.topic.types]]
# key = "type"
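## Illustrative sketch (hypothetical topic layout "telegraf/one/cpu/23",
## following the plugin README): extract the third path element as the
## measurement name, the first element as a tag named "tag", and the last
## element as an integer field named "test".
# [[inputs.mqtt_consumer.topic_parsing]]
#   topic = "telegraf/one/cpu/23"
#   measurement = "_/_/measurement/_"
#   tags = "tag/_/_/_"
#   fields = "_/_/_/test"
#   [inputs.mqtt_consumer.topic_parsing.types]
#     test = "int"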


@ -0,0 +1,23 @@
# Aggregates the contents of multiple files into a single point
[[inputs.multifile]]
## Base directory where telegraf will look for files.
## Omit this option to use absolute paths.
base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0"
## If true, discard all data when a single file can't be read.
## Otherwise, Telegraf omits the field generated from that file.
# fail_early = true
## Files to parse each interval.
[[inputs.multifile.file]]
file = "in_pressure_input"
dest = "pressure"
conversion = "float"
[[inputs.multifile.file]]
file = "in_temp_input"
dest = "temperature"
conversion = "float(3)"
[[inputs.multifile.file]]
file = "in_humidityrelative_input"
dest = "humidityrelative"
conversion = "float(3)"


@ -0,0 +1,100 @@
# Read metrics from one or many mysql servers
[[inputs.mysql]]
## specify servers via a url matching:
## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
## e.g.
## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"]
#
## If no servers are specified, then localhost is used as the host.
servers = ["tcp(127.0.0.1:3306)/"]
## Selects the metric output format.
##
## This option exists to maintain backwards compatibility, if you have
## existing metrics do not set or change this value until you are ready to
## migrate to the new format.
##
## If you do not have existing metrics from this plugin, set to the latest
## version.
##
## Telegraf >=1.6: metric_version = 2
## <1.6: metric_version = 1 (or unset)
metric_version = 2
## if the list is empty, then metrics are gathered from all database tables
# table_schema_databases = []
## gather metrics from INFORMATION_SCHEMA.TABLES for the databases listed above
# gather_table_schema = false
## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
# gather_process_list = false
## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS
# gather_user_statistics = false
## gather auto_increment columns and max values from information schema
# gather_info_schema_auto_inc = false
## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
# gather_innodb_metrics = false
## gather metrics from all channels from SHOW SLAVE STATUS command output
# gather_all_slave_channels = false
## gather metrics from SHOW SLAVE STATUS command output
# gather_slave_status = false
## use SHOW ALL SLAVES STATUS command output for MariaDB
# mariadb_dialect = false
## gather metrics from SHOW BINARY LOGS command output
# gather_binary_logs = false
## gather metrics from SHOW GLOBAL VARIABLES command output
# gather_global_variables = true
## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
# gather_table_io_waits = false
## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
# gather_table_lock_waits = false
## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
# gather_index_io_waits = false
## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
# gather_event_waits = false
## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
# gather_file_events_stats = false
## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
# gather_perf_events_statements = false
#
## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME
# gather_perf_sum_per_acc_per_event = false
#
## list of events to be gathered for gather_perf_sum_per_acc_per_event
## in case of empty list all events will be gathered
# perf_summary_events = []
#
# gather_perf_events_statements = false
## the limits for metrics from perf_events_statements
# perf_events_statements_digest_text_limit = 120
# perf_events_statements_limit = 250
# perf_events_statements_time_limit = 86400
## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
## example: interval_slow = "30m"
# interval_slow = ""
## Optional TLS Config (will be used if tls=custom parameter specified in server uri)
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false


@ -0,0 +1,7 @@
# Provides metrics about the state of a NATS server
[[inputs.nats]]
## The address of the monitoring endpoint of the NATS server
server = "http://localhost:8222"
## Maximum time to receive response
# response_timeout = "5s"


@ -0,0 +1,48 @@
# Read metrics from NATS subject(s)
[[inputs.nats_consumer]]
## urls of NATS servers
servers = ["nats://localhost:4222"]
## subject(s) to consume
subjects = ["telegraf"]
## name a queue group
queue_group = "telegraf_consumers"
## Optional credentials
# username = ""
# password = ""
## Optional NATS 2.0 and NATS NGS compatible user credentials
# credentials = "/etc/telegraf/nats.creds"
## Use Transport Layer Security
# secure = false
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Sets the limits for pending msgs and bytes for each subscription
## These shouldn't need to be adjusted except in very high throughput scenarios
# pending_message_limit = 65536
# pending_bytes_limit = 67108864
## Maximum messages to read from the broker that have not been written by an
## output. For best throughput set based on the number of metrics within
## each message and the size of the output's metric_batch_size.
##
## For example, if each message from the queue contains 10 metrics and the
## output metric_batch_size is 1000, setting this to 100 will ensure that a
## full batch is collected and the write is triggered immediately without
## waiting until the next flush_interval.
# max_undelivered_messages = 1000
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"


@ -0,0 +1,14 @@
# Neptune Apex data collector
[[inputs.neptune_apex]]
## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex.
## Measurements will be logged under "apex".
## The base URL of the local Apex(es). If you specify more than one server, they will
## be differentiated by the "source" tag.
servers = [
"http://apex.local",
]
## The response_timeout specifies how long to wait for a reply from the Apex.
# response_timeout = "5s"


@ -0,0 +1,14 @@
# Gather metrics about network interfaces
[[inputs.net]]
## By default, telegraf gathers stats from any up interface (excluding loopback)
## Setting interfaces will tell it to gather these explicit interfaces,
## regardless of status. When specifying an interface, glob-style
## patterns are also supported.
##
# interfaces = ["eth*", "enp0s[0-1]", "lo"]
##
## On linux systems telegraf also collects protocol stats.
## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
##
# ignore_protocol_stats = false
##


@ -0,0 +1,25 @@
# Collect response time of a TCP or UDP connection
[[inputs.net_response]]
## Protocol, must be "tcp" or "udp"
## NOTE: because the "udp" protocol does not respond to requests, it requires
## a send/expect string pair (see below).
protocol = "tcp"
## Server address (default localhost)
address = "localhost:80"
## Set timeout
# timeout = "1s"
## Set read timeout (only used if expecting a response)
# read_timeout = "1s"
## The following options are required for UDP checks. For TCP, they are
## optional. The plugin will send the given string to the server and then
## expect to receive the given 'expect' string back.
## string sent to the server
# send = "ssh"
## expected string in answer
# expect = "ssh"
## Uncomment to remove deprecated fields; recommended for new deploys
# fielddrop = ["result_type", "string_found"]


@ -0,0 +1,3 @@
# Read TCP metrics such as established, time wait and sockets counts.
[[inputs.netstat]]
# no configuration


@ -0,0 +1,27 @@
# Read per-mount NFS client metrics from /proc/self/mountstats
[[inputs.nfsclient]]
## Read more low-level metrics (optional, defaults to false)
# fullstat = false
## List of mounts to explicitly include or exclude (optional)
## The pattern (Go regexp) is matched against the mount point (not the
## device being mounted). If include_mounts is set, all mounts are ignored
## unless present in the list. If a mount is listed in both include_mounts
## and exclude_mounts, it is excluded. Go regexp patterns can be used.
# include_mounts = []
# exclude_mounts = []
## List of operations to include or exclude from collecting. This applies
## only when fullstat=true. Semantics are similar to {include,exclude}_mounts:
## the default is to collect everything; when include_operations is set, only
## those OPs are collected; when exclude_operations is set, all are collected
## except those listed. If include and exclude are set, the OP is excluded.
## See /proc/self/mountstats for a list of valid operations; note that
## NFSv3 and NFSv4 have different lists. While it is not possible to
## have different include/exclude lists for NFSv3/4, unused elements
## in the list should be okay. It is possible to have different lists
## for different mountpoints: use multiple [[inputs.nfsclient]] stanzas,
## with their own lists. See "include_mounts" above, and be careful of
## duplicate metrics.
# include_operations = []
# exclude_operations = []
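## Illustrative sketch (mount point and operation names are hypothetical):
## collect only READ and WRITE operation counters for mounts under /mnt/nfs.
# fullstat = true
# include_mounts = ["/mnt/nfs"]
# include_operations = ["READ", "WRITE"]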


@ -0,0 +1,14 @@
# Read Nginx's basic status information (ngx_http_stub_status_module)
[[inputs.nginx]]
## An array of Nginx stub_status URI to gather stats.
urls = ["http://localhost/server_status"]
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## HTTP response timeout (default: 5s)
response_timeout = "5s"


@ -0,0 +1,14 @@
# Read Nginx Plus' advanced status information
[[inputs.nginx_plus]]
## An array of Nginx status URIs to gather stats.
urls = ["http://localhost/status"]
# HTTP response timeout (default: 5s)
response_timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false


@ -0,0 +1,16 @@
# Read Nginx Plus API advanced status information
[[inputs.nginx_plus_api]]
## An array of Nginx API URIs to gather stats.
urls = ["http://localhost/api"]
# Nginx API version, default: 3
# api_version = 3
# HTTP response timeout (default: 5s)
response_timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false


@ -0,0 +1,14 @@
# Read Nginx virtual host traffic status module information (nginx-module-sts)
[[inputs.nginx_sts]]
## An array of ngx_http_status_module or status URI to gather stats.
urls = ["http://localhost/status"]
## HTTP response timeout (default: 5s)
response_timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false


@ -0,0 +1,28 @@
# Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module)
[[inputs.nginx_upstream_check]]
## A URL where the Nginx upstream check module is enabled
## It should be set to return a JSON formatted response
url = "http://127.0.0.1/status?format=json"
## HTTP method
# method = "GET"
## Optional HTTP headers
# headers = {"X-Special-Header" = "Special-Value"}
## Override HTTP "Host" header
# host_header = "check.example.com"
## Timeout for HTTP requests
timeout = "5s"
## Optional HTTP Basic Auth credentials
# username = "username"
# password = "pa$$word"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false


@ -0,0 +1,14 @@
# Read Nginx virtual host traffic status module information (nginx-module-vts)
[[inputs.nginx_vts]]
## An array of ngx_http_status_module or status URI to gather stats.
urls = ["http://localhost/status"]
## HTTP response timeout (default: 5s)
response_timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false


@ -0,0 +1,12 @@
# Read metrics from the Nomad API
[[inputs.nomad]]
## URL for the Nomad agent
# url = "http://127.0.0.1:4646"
## Set response_timeout (default 5 seconds)
# response_timeout = "5s"
## Optional TLS Config
# tls_ca = /path/to/cafile
# tls_cert = /path/to/certfile
# tls_key = /path/to/keyfile


@ -0,0 +1,17 @@
# A plugin to collect stats from the NSD DNS resolver
[[inputs.nsd]]
## Address of server to connect to, optionally ':port'. Defaults to the
## address in the nsd config file.
server = "127.0.0.1:8953"
## If running as a restricted user you can prepend sudo for additional access:
# use_sudo = false
## The default location of the nsd-control binary can be overridden with:
# binary = "/usr/sbin/nsd-control"
## The default location of the nsd config file can be overridden with:
# config_file = "/etc/nsd/nsd.conf"
## The default timeout of 1s can be overridden with:
# timeout = "1s"


@ -0,0 +1,11 @@
# Read NSQ topic and channel statistics.
[[inputs.nsq]]
## An array of NSQD HTTP API endpoints
endpoints = ["http://localhost:4151"]
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false


@ -0,0 +1,29 @@
# Read metrics from NSQD topic(s)
[[inputs.nsq_consumer]]
## The 'server' option still works but is deprecated; it is simply prepended to the nsqd array.
# server = "localhost:4150"
## An array representing the NSQD TCP endpoints
nsqd = ["localhost:4150"]
## An array representing the NSQLookupd HTTP Endpoints
nsqlookupd = ["localhost:4161"]
topic = "telegraf"
channel = "consumer"
max_in_flight = 100
## Maximum messages to read from the broker that have not been written by an
## output. For best throughput set based on the number of metrics within
## each message and the size of the output's metric_batch_size.
##
## For example, if each message from the queue contains 10 metrics and the
## output metric_batch_size is 1000, setting this to 100 will ensure that a
## full batch is collected and the write is triggered immediately without
## waiting until the next flush_interval.
# max_undelivered_messages = 1000
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"


@ -0,0 +1,10 @@
# Collect kernel snmp counters and network interface statistics
[[inputs.nstat]]
## File paths for proc files. If empty, default paths will be used:
## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6
## These can also be overridden with env variables, see README.
proc_net_netstat = "/proc/net/netstat"
proc_net_snmp = "/proc/net/snmp"
proc_net_snmp6 = "/proc/net/snmp6"
## dump metrics with 0 values too
dump_zeros = true


@ -0,0 +1,4 @@
# Get standard NTP query metrics, requires the ntpq executable.
[[inputs.ntpq]]
## If false, set the -n ntpq flag. Can reduce metric gather time.
dns_lookup = true


@ -0,0 +1,9 @@
# Pulls statistics from nvidia GPUs attached to the host
[[inputs.nvidia_smi]]
## Optional: path to the nvidia-smi binary, defaults to "/usr/bin/nvidia-smi"
## We first try to locate the nvidia-smi binary at the explicitly specified
## (or default) path; if it is not found there, we try to locate it on the
## PATH (exec.LookPath); if it is still not found, an error is returned
# bin_path = "/usr/bin/nvidia-smi"
## Optional: timeout for GPU polling
# timeout = "5s"


@ -0,0 +1,87 @@
# Retrieve data from OPCUA devices
[[inputs.opcua]]
## Metric name
# name = "opcua"
#
## OPC UA Endpoint URL
# endpoint = "opc.tcp://localhost:4840"
#
## Maximum time allowed to establish a connection to the endpoint.
# connect_timeout = "10s"
#
## Maximum time allowed for a request over the established connection.
# request_timeout = "5s"
#
## Security policy, one of "None", "Basic128Rsa15", "Basic256",
## "Basic256Sha256", or "auto"
# security_policy = "auto"
#
## Security mode, one of "None", "Sign", "SignAndEncrypt", or "auto"
# security_mode = "auto"
#
## Path to cert.pem. Required when security mode or policy isn't "None".
## If cert path is not supplied, self-signed cert and key will be generated.
# certificate = "/etc/telegraf/cert.pem"
#
## Path to private key.pem. Required when security mode or policy isn't "None".
## If key path is not supplied, self-signed cert and key will be generated.
# private_key = "/etc/telegraf/key.pem"
#
## Authentication Method, one of "Certificate", "UserName", or "Anonymous". To
## authenticate using a specific ID, select 'Certificate' or 'UserName'
# auth_method = "Anonymous"
#
## Username. Required for auth_method = "UserName"
# username = ""
#
## Password. Required for auth_method = "UserName"
# password = ""
#
## Option to select the metric timestamp to use. Valid options are:
## "gather" -- uses the time of receiving the data in telegraf
## "server" -- uses the timestamp provided by the server
## "source" -- uses the timestamp provided by the source
# timestamp = "gather"
#
## Node ID configuration
## name - field name to use in the output
## namespace - OPC UA namespace of the node (integer value 0 thru 3)
## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque)
## identifier - OPC UA ID (tag as shown in opcua browser)
## tags - extra tags to be added to the output metric (optional)
## Example:
## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", tags=[["tag1","value1"],["tag2","value2]]}
# nodes = [
# {name="", namespace="", identifier_type="", identifier=""},
# {name="", namespace="", identifier_type="", identifier=""},
#]
#
## Node Group
## Sets defaults for OPC UA namespace and ID type so they aren't required in
## every node. A group can also have a metric name that overrides the main
## plugin metric name.
##
## Multiple node groups are allowed
#[[inputs.opcua.group]]
## Group Metric name. Overrides the top level name. If unset, the
## top level name is used.
# name =
#
## Group default namespace. If a node in the group doesn't set its
## namespace, this is used.
# namespace =
#
## Group default identifier type. If a node in the group doesn't set its
## identifier type, this is used.
# identifier_type =
#
## Node ID Configuration. Array of nodes with the same settings as above.
# nodes = [
# {name="", namespace="", identifier_type="", identifier=""},
# {name="", namespace="", identifier_type="", identifier=""},
#]
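## Illustrative sketch (node IDs are hypothetical): a group reading two
## string-identified nodes from namespace 2.
# [[inputs.opcua.group]]
#   name = "boiler"
#   namespace = "2"
#   identifier_type = "s"
#   nodes = [
#     {name="temperature", identifier="Boiler.Temp"},
#     {name="pressure", identifier="Boiler.Pressure"},
#   ]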
## Enable workarounds required by some devices to work correctly
# [inputs.opcua.workarounds]
## Set additional valid status codes, StatusOK (0x0) is always considered valid
# additional_valid_status_codes = ["0xC0"]


@ -0,0 +1,23 @@
# OpenLDAP cn=Monitor plugin
[[inputs.openldap]]
host = "localhost"
port = 389
# ldaps, starttls, or no encryption. default is an empty string, disabling all encryption.
# note that port will likely need to be changed to 636 for ldaps
# valid options: "" | "starttls" | "ldaps"
tls = ""
# skip peer certificate verification. Default is false.
insecure_skip_verify = false
# Path to PEM-encoded Root certificate to use to verify server certificate
tls_ca = "/etc/ssl/certs.pem"
# dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
bind_dn = ""
bind_password = ""
# reverse metric names so they sort more naturally
# Defaults to false if unset, but is set to true when generating a new config
reverse_metric_names = true


@ -0,0 +1,10 @@
# Get standard NTP query metrics from OpenNTPD.
[[inputs.openntpd]]
## Run ntpctl binary with sudo.
# use_sudo = false
## Location of the ntpctl binary.
# binary = "/usr/sbin/ntpctl"
## Maximum time the ntpctl binary is allowed to run.
# timeout = "5ms"


@ -0,0 +1,10 @@
# A plugin to collect stats from OpenSMTPD, a free SMTP server implementation
[[inputs.opensmtpd]]
## If running as a restricted user you can prepend sudo for additional access:
# use_sudo = false
## The default location of the smtpctl binary can be overridden with:
binary = "/usr/sbin/smtpctl"
## The default timeout of 1s can be overridden with:
# timeout = "1s"


@ -0,0 +1,51 @@
# Collects performance metrics from OpenStack services
[[inputs.openstack]]
## The recommended interval to poll is '30m'
## The identity endpoint to authenticate against and get the service catalog from.
authentication_endpoint = "https://my.openstack.cloud:5000"
## The domain to authenticate against when using a V3 identity endpoint.
# domain = "default"
## The project to authenticate as.
# project = "admin"
## User authentication credentials. Must have admin rights.
username = "admin"
password = "password"
## Available services are:
## "agents", "aggregates", "flavors", "hypervisors", "networks", "nova_services",
## "ports", "projects", "servers", "services", "stacks", "storage_pools", "subnets", "volumes"
# enabled_services = ["services", "projects", "hypervisors", "flavors", "networks", "volumes"]
## Collect Server Diagnostics
# server_diagnotics = false
## Output secrets (such as adminPass for servers and UserID for volumes).
# output_secrets = false
## Amount of time allowed to complete the HTTP(s) request.
# timeout = "5s"
## HTTP Proxy support
# http_proxy_url = ""
## Optional TLS Config
# tls_ca = /path/to/cafile
# tls_cert = /path/to/certfile
# tls_key = /path/to/keyfile
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Options for tags received from Openstack
# tag_prefix = "openstack_tag_"
# tag_value = "true"
## Timestamp format for timestamp data received from OpenStack.
## If false, the format is unix nanoseconds.
# human_readable_timestamps = false
## Measure Openstack call duration
# measure_openstack_requests = false


@ -0,0 +1,24 @@
# Receive OpenTelemetry traces, metrics, and logs over gRPC
[[inputs.opentelemetry]]
## Override the default (0.0.0.0:4317) destination OpenTelemetry gRPC service
## address:port
# service_address = "0.0.0.0:4317"
## Override the default (5s) new connection timeout
# timeout = "5s"
## Override the default (prometheus-v1) metrics schema.
## Supports: "prometheus-v1", "prometheus-v2"
## For more information about the alternatives, read the Prometheus input
## plugin notes.
# metrics_schema = "prometheus-v1"
## Optional TLS Config.
## For advanced options: https://github.com/influxdata/telegraf/blob/v1.18.3/docs/TLS.md
##
## Set one or more allowed client CA certificate file names to
## enable mutually authenticated TLS connections.
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
## Add service certificate and key.
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"


@ -0,0 +1,30 @@
# Read current weather and forecasts data from openweathermap.org
[[inputs.openweathermap]]
## OpenWeatherMap API key.
app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
## City ID's to collect weather data from.
city_id = ["5391959"]
## Language of the description field. Can be one of "ar", "bg",
## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu",
## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru",
## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw"
# lang = "en"
## APIs to fetch; can contain "weather" or "forecast".
fetch = ["weather", "forecast"]
## OpenWeatherMap base URL
# base_url = "https://api.openweathermap.org/"
## Timeout for HTTP response.
# response_timeout = "5s"
## Preferred unit system for temperature and wind speed. Can be one of
## "metric", "imperial", or "standard".
# units = "metric"
## Query interval; OpenWeatherMap weather data is updated every 10
## minutes.
interval = "10m"


@ -0,0 +1,11 @@
# Read metrics of passenger using passenger-status
[[inputs.passenger]]
## Path of passenger-status.
##
## The plugin gathers metrics by parsing the XML output of passenger-status
## More information about the tool:
## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
##
## If no path is specified, the plugin simply executes passenger-status,
## hoping it can be found in your PATH
command = "passenger-status -v --show=xml"


@ -0,0 +1,7 @@
# Gather counters from PF
[[inputs.pf]]
## PF requires root access on most systems.
## Setting 'use_sudo' to true will make use of sudo to run pfctl.
## Users must configure sudo to allow the telegraf user to run pfctl with no password.
## pfctl can be restricted to only the list command "pfctl -s info".
use_sudo = false


@ -0,0 +1,11 @@
# Read metrics from one or many pgbouncer servers
[[inputs.pgbouncer]]
## specify address via a url matching:
## postgres://[pqgotest[:password]]@host:port[/dbname]\
## ?sslmode=[disable|verify-ca|verify-full]
## or a simple string:
## host=localhost port=5432 user=pqgotest password=... sslmode=... dbname=app_production
##
## All connection parameters are optional.
##
address = "host=localhost user=pgbouncer sslmode=disable"


@ -0,0 +1,34 @@
# Read metrics of phpfpm, via HTTP status page or socket
[[inputs.phpfpm]]
## An array of addresses to gather stats about. Specify an ip or hostname
## with optional port and path
##
## Plugin can be configured in three modes (either can be used):
## - http: the URL must start with http:// or https://, ie:
## "http://localhost/status"
## "http://192.168.130.1/status?full"
##
## - unixsocket: path to fpm socket, ie:
## "/var/run/php5-fpm.sock"
## or using a custom fpm status path:
## "/var/run/php5-fpm.sock:fpm-custom-status-path"
## glob patterns are also supported:
## "/var/run/php*.sock"
##
## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
## "fcgi://10.0.0.12:9000/status"
## "cgi://10.0.10.12:9001/status"
##
## Example of multiple gathering from local socket and remote host
## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
urls = ["http://localhost/status"]
## Duration allowed to complete HTTP requests.
# timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false


@ -0,0 +1,51 @@
# Ping given url(s) and return statistics
[[inputs.ping]]
## Hosts to send ping packets to.
urls = ["example.org"]
## Method used for sending pings, can be either "exec" or "native". When set
## to "exec" the systems ping command will be executed. When set to "native"
## the plugin will send pings directly.
##
## While the default is "exec" for backwards compatibility, new deployments
## are encouraged to use the "native" method for improved compatibility and
## performance.
# method = "exec"
## Number of ping packets to send per interval. Corresponds to the "-c"
## option of the ping command.
# count = 1
## Time to wait between sending ping packets in seconds. Operates like the
## "-i" option of the ping command.
# ping_interval = 1.0
## If set, the time to wait for a ping response in seconds. Operates like
## the "-W" option of the ping command.
# timeout = 1.0
## If set, the total ping deadline, in seconds. Operates like the -w option
## of the ping command.
# deadline = 10
## Interface or source address to send ping from. Operates like the -I or -S
## option of the ping command.
# interface = ""
## Percentiles to calculate. This only works with the native method.
# percentiles = [50, 95, 99]
## Specify the ping executable binary.
# binary = "ping"
## Arguments for ping command. When arguments is not empty, the command from
## the binary option will be used and other options (ping_interval, timeout,
## etc) will be ignored.
# arguments = ["-c", "3"]
## Use only IPv6 addresses when resolving a hostname.
# ipv6 = false
## Number of data bytes to be sent. Corresponds to the "-s"
## option of the ping command. This only works with the native method.
# size = 56


@ -0,0 +1,5 @@
# Measure postfix queue statistics
[[inputs.postfix]]
## Postfix queue directory. If not provided, telegraf will try to use
## 'postconf -h queue_directory' to determine it.
# queue_directory = "/var/spool/postfix"


@ -0,0 +1,37 @@
# Read metrics from one or many postgresql servers
[[inputs.postgresql]]
## specify address via a url matching:
## postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
## or a simple string:
## host=localhost user=pqgotest password=... sslmode=... dbname=app_production
##
## All connection parameters are optional.
##
## Without the dbname parameter, the driver will default to a database
## with the same name as the user. This dbname is just for instantiating a
## connection with the server and doesn't restrict the databases we are trying
## to grab metrics for.
##
address = "host=localhost user=postgres sslmode=disable"
## A custom name for the database that will be used as the "server" tag in the
## measurement output. If not specified, a default one generated from
## the connection address is used.
# outputaddress = "db01"
## connection configuration.
## maxlifetime - specify the maximum lifetime of a connection.
## default is forever (0s)
# max_lifetime = "0s"
## A list of databases to explicitly ignore. If not specified, metrics for all
## databases are gathered. Do NOT use with the 'databases' option.
# ignored_databases = ["postgres", "template0", "template1"]
## A list of databases to pull metrics about. If not specified, metrics for all
## databases are gathered. Do NOT use with the 'ignored_databases' option.
# databases = ["app_production", "testing"]
## Whether to use prepared statements when connecting to the database.
## This should be set to false when connecting through a PgBouncer instance
## with pool_mode set to transaction.
prepared_statements = true


@ -0,0 +1,54 @@
# Read metrics from one or many postgresql servers
[[inputs.postgresql_extensible]]
# specify address via a url matching:
# postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=...
# or a simple string:
# host=localhost port=5432 user=pqgotest password=... sslmode=... dbname=app_production
#
# All connection parameters are optional.
# Without the dbname parameter, the driver will default to a database
# with the same name as the user. This dbname is just for instantiating a
# connection with the server and doesn't restrict the databases we are trying
# to grab metrics for.
#
address = "host=localhost user=postgres sslmode=disable"
## A list of databases to pull metrics about.
## deprecated in 1.22.3; use the sqlquery option to specify database to use
# databases = ["app_production", "testing"]
## Whether to use prepared statements when connecting to the database.
## This should be set to false when connecting through a PgBouncer instance
## with pool_mode set to transaction.
prepared_statements = true
# Define the toml config where the sql queries are stored
# The script option can be used to specify the .sql file path.
# If both script and sqlquery options are specified, sqlquery will be used
#
# the tagvalue field is used to define custom tags (separated by commas).
# the query is expected to return columns which match the names of the
# defined tags. The values in these columns must be of a string-type,
# a number-type or a blob-type.
#
# The timestamp field is used to override the data points timestamp value. By
# default, all rows are inserted with the current time. By setting a timestamp
# column, the row will be inserted with that column's value.
#
# Structure :
# [[inputs.postgresql_extensible.query]]
# sqlquery string
# version string
# withdbname boolean
# tagvalue string (comma separated)
# timestamp string
[[inputs.postgresql_extensible.query]]
sqlquery="SELECT * FROM pg_stat_database where datname"
version=901
withdbname=false
tagvalue=""
[[inputs.postgresql_extensible.query]]
script="your_sql-filepath.sql"
version=901
withdbname=false
tagvalue=""


@ -0,0 +1,7 @@
# Read metrics from one or many PowerDNS servers
[[inputs.powerdns]]
# An array of sockets to gather stats about.
# Specify a path to unix socket.
#
# If no servers are specified, then '/var/run/pdns.controlsocket' is used as the path.
unix_sockets = ["/var/run/pdns.controlsocket"]


@ -0,0 +1,10 @@
# Read metrics from one or many PowerDNS Recursor servers
[[inputs.powerdns_recursor]]
## Path to the Recursor control socket.
unix_sockets = ["/var/run/pdns_recursor.controlsocket"]
## Directory to create receive socket. This default is likely not writable,
## please reference the full plugin documentation for a recommended setup.
# socket_dir = "/var/run/"
## Socket permissions for the receive socket.
# socket_mode = "0666"


@ -0,0 +1,3 @@
# Get the number of processes and group them by status
[[inputs.processes]]
# no configuration


@ -0,0 +1,45 @@
# Monitor process cpu and memory usage
[[inputs.procstat]]
## PID file to monitor process
pid_file = "/var/run/nginx.pid"
## executable name (ie, pgrep <exe>)
# exe = "nginx"
## pattern as argument for pgrep (ie, pgrep -f <pattern>)
# pattern = "nginx"
## user as argument for pgrep (ie, pgrep -u <user>)
# user = "nginx"
## Systemd unit name, supports globs when include_systemd_children is set to true
# systemd_unit = "nginx.service"
# include_systemd_children = false
## CGroup name or path, supports globs
# cgroup = "systemd/system.slice/nginx.service"
## Windows service name
# win_service = ""
## override for process_name
## This is optional; default is sourced from /proc/<pid>/status
# process_name = "bar"
## Field name prefix
# prefix = ""
## When true add the full cmdline as a tag.
# cmdline_tag = false
## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'.
# mode = "irix"
## Add the PID as a tag instead of as a field. When collecting multiple
## processes with otherwise matching tags this setting should be enabled to
## ensure each process has a unique identity.
##
## Enabling this option may result in a large number of series, especially
## when processes have a short lifetime.
# pid_tag = false
## Method to use when finding process IDs. Can be one of 'pgrep', or
## 'native'. The pgrep finder calls the pgrep executable in the PATH while
## the native finder performs the search directly in a manner dependent on the
## platform. Default is 'pgrep'
# pid_finder = "pgrep"


@ -0,0 +1,94 @@
# Read metrics from one or many prometheus clients
[[inputs.prometheus]]
## An array of urls to scrape metrics from.
urls = ["http://localhost:9100/metrics"]
## Metric version controls the mapping from Prometheus metrics into
## Telegraf metrics. When using the prometheus_client output, use the same
## value in both plugins to ensure metrics are round-tripped without
## modification.
##
## example: metric_version = 1;
## metric_version = 2; recommended version
# metric_version = 1
## Url tag name (tag containing scraped url. optional, default is "url")
# url_tag = "url"
## Whether the timestamp of the scraped metrics will be ignored.
## If set to true, the gather time will be used.
# ignore_timestamp = false
## An array of Kubernetes services to scrape metrics from.
# kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
## Kubernetes config file to create client from.
# kube_config = "/path/to/kubernetes.config"
## Scrape Kubernetes pods for the following prometheus annotations:
## - prometheus.io/scrape: Enable scraping for this pod
## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
## set this to 'https' & most likely set the tls config.
## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
## - prometheus.io/port: If port is not 9102 use this annotation
# monitor_kubernetes_pods = true
## Get the list of pods to scrape with either the scope of
## - cluster: the kubernetes watch api (default, no need to specify)
## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP.
# pod_scrape_scope = "cluster"
## Only for node scrape scope: node IP of the node that telegraf is running on.
## Either this config or the environment variable NODE_IP must be set.
# node_ip = "10.180.1.1"
## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping.
## Default is 60 seconds.
# pod_scrape_interval = 60
## Restricts Kubernetes monitoring to a single namespace
## ex: monitor_kubernetes_pods_namespace = "default"
# monitor_kubernetes_pods_namespace = ""
# label selector to target pods which have the label
# kubernetes_label_selector = "env=dev,app=nginx"
# field selector to target pods
# eg. To scrape pods on a specific node
# kubernetes_field_selector = "spec.nodeName=$HOSTNAME"
# cache refresh interval to set the interval for re-sync of pods list.
# Default is 60 minutes.
# cache_refresh_interval = 60
## Scrape Services available in Consul Catalog
# [inputs.prometheus.consul]
# enabled = true
# agent = "http://localhost:8500"
# query_interval = "5m"
# [[inputs.prometheus.consul.query]]
# name = "a service name"
# tag = "a service tag"
# url = 'http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}'
# [inputs.prometheus.consul.query.tags]
# host = "{{.Node}}"
## Use bearer token for authorization. ('bearer_token' takes priority)
# bearer_token = "/path/to/bearer/token"
## OR
# bearer_token_string = "abc_123"
## HTTP Basic Authentication username and password. ('bearer_token' and
## 'bearer_token_string' take priority)
# username = ""
# password = ""
## Specify timeout duration for slower prometheus clients (default is 3s)
# response_timeout = "3s"
## Optional TLS Config
# tls_ca = /path/to/cafile
# tls_cert = /path/to/certfile
# tls_key = /path/to/keyfile
## Use TLS but skip chain & host verification
# insecure_skip_verify = false


@ -0,0 +1,17 @@
# Provides metrics from Proxmox nodes (Proxmox Virtual Environment > 6.2).
[[inputs.proxmox]]
## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /.
base_url = "https://localhost:8006/api2/json"
api_token = "USER@REALM!TOKENID=UUID"
## Node name, defaults to OS hostname
# node_name = ""
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
insecure_skip_verify = false
# HTTP response timeout (default: 5s)
response_timeout = "5s"


@ -0,0 +1,4 @@
# Reads last_run_summary.yaml file and converts to measurements
[[inputs.puppetagent]]
## Location of puppet last run summary file
location = "/var/lib/puppet/state/last_run_summary.yaml"


@ -0,0 +1,56 @@
# Reads metrics from RabbitMQ servers via the Management Plugin
[[inputs.rabbitmq]]
## Management Plugin url. (default: http://localhost:15672)
# url = "http://localhost:15672"
## Tag added to rabbitmq_overview series; deprecated: use tags
# name = "rmq-server-1"
## Credentials
# username = "guest"
# password = "guest"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Optional request timeouts
##
## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait
## for a server's response headers after fully writing the request.
# header_timeout = "3s"
##
## client_timeout specifies a time limit for requests made by this client.
## Includes connection time, any redirects, and reading the response body.
# client_timeout = "4s"
## A list of nodes to gather as the rabbitmq_node measurement. If not
## specified, metrics for all nodes are gathered.
# nodes = ["rabbit@node1", "rabbit@node2"]
## A list of queues to gather as the rabbitmq_queue measurement. If not
## specified, metrics for all queues are gathered.
## Deprecated in 1.6: Use queue_name_include instead.
# queues = ["telegraf"]
## A list of exchanges to gather as the rabbitmq_exchange measurement. If not
## specified, metrics for all exchanges are gathered.
# exchanges = ["telegraf"]
## Metrics to include and exclude. Globs accepted.
## Note that an empty array for both will include all metrics
## Currently the following metrics are supported: "exchange", "federation", "node", "overview", "queue"
# metric_include = []
# metric_exclude = []
## Queues to include and exclude. Globs accepted.
## Note that an empty array for both will include all queues
# queue_name_include = []
# queue_name_exclude = []
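## e.g. (illustrative) queue_name_include = ["prod-*"] would gather only
## queues whose names start with "prod-".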
## Federation upstreams to include and exclude specified as an array of glob
## pattern strings. Federation links can also be limited by the queue and
## exchange filters.
# federation_upstream_include = []
# federation_upstream_exclude = []

View File

@ -0,0 +1,4 @@
# Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
[[inputs.raindrops]]
## An array of raindrops middleware URIs to gather stats from.
urls = ["http://localhost:8080/_raindrops"]

View File

@ -0,0 +1,5 @@
# RAS plugin exposes counter metrics for Machine Check Errors provided by RASDaemon (sqlite3 output is required).
[[inputs.ras]]
## Optional path to RASDaemon sqlite3 database.
## Default: /var/lib/rasdaemon/ras-mc_event.db
# db_path = ""

View File

@ -0,0 +1,36 @@
# Reads metrics from RavenDB servers via the Monitoring Endpoints
[[inputs.ravendb]]
## Node URL and port that RavenDB is listening on. By default,
## attempts to connect securely over HTTPS; if you are running a local
## unsecured development cluster you can use HTTP via a URL like
## "http://localhost:8080"
url = "https://localhost:4433"
## RavenDB X509 client certificate setup
# tls_cert = "/etc/telegraf/raven.crt"
# tls_key = "/etc/telegraf/raven.key"
## Optional request timeout
##
## Timeout, specifies the amount of time to wait
## for a server's response headers after fully writing the request and
## time limit for requests made by this client
# timeout = "5s"
## List of statistics which are collected
## At least one is required
## Allowed values: server, databases, indexes, collections
##
# stats_include = ["server", "databases", "indexes", "collections"]
## List of databases for which database stats are collected
## If empty, stats are collected for all databases
# db_stats_dbs = []
## List of databases for which index stats are collected
## If empty, indexes from all databases are included
# index_stats_dbs = []
## List of databases for which collection stats are collected
## If empty, collections from all databases are included
# collection_stats_dbs = []

View File

@ -0,0 +1,21 @@
# Read CPU, fan, power supply and voltage metrics from hardware servers through Redfish APIs
[[inputs.redfish]]
## Redfish API Base URL.
address = "https://127.0.0.1:5000"
## Credentials for the Redfish API.
username = "root"
password = "password123456"
## System Id to collect data for in Redfish APIs.
computer_system_id = "System.Embedded.1"
## Amount of time allowed to complete the HTTP request
# timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

View File

@ -0,0 +1,32 @@
# Read metrics from one or many redis servers
[[inputs.redis]]
## specify servers via a url matching:
## [protocol://][:password]@address[:port]
## e.g.
## tcp://localhost:6379
## tcp://:password@192.168.99.100
## unix:///var/run/redis.sock
##
## If no servers are specified, then localhost is used as the host.
## If no port is specified, 6379 is used
servers = ["tcp://localhost:6379"]
## Optional. Specify redis commands to retrieve values
# [[inputs.redis.commands]]
# # The command to run where each argument is a separate element
# command = ["get", "sample-key"]
# # The field to store the result in
# field = "sample-key-value"
# # The type of the result
# # Can be "string", "integer", or "float"
# type = "string"
## specify server password
# password = "s#cr@t%"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = true

View File

@ -0,0 +1,19 @@
# Read metrics from one or many redis-sentinel servers
[[inputs.redis_sentinel]]
## specify servers via a url matching:
## [protocol://][:password]@address[:port]
## e.g.
## tcp://localhost:26379
## tcp://:password@192.168.99.100
## unix:///var/run/redis-sentinel.sock
##
## If no servers are specified, then localhost is used as the host.
## If no port is specified, 26379 is used
# servers = ["tcp://localhost:26379"]
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = true

View File

@ -0,0 +1,16 @@
# Read metrics from one or many RethinkDB servers
[[inputs.rethinkdb]]
## An array of URIs to gather stats about. Specify an ip or hostname
## with optional port and password. ie,
## rethinkdb://user:auth_key@10.10.3.30:28105,
## rethinkdb://10.10.3.33:18832,
## 10.0.0.1:10000, etc.
servers = ["127.0.0.1:28015"]
## If you use an actual RethinkDB of > 2.3.0 with username/password
## authorization, the protocol has to be named "rethinkdb2" - it will use the
## 1_0 handshake protocol.
# servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
## If you use older versions of RethinkDB (<2.2) with an auth_key, the
## protocol has to be named "rethinkdb".
# servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"]

View File

@ -0,0 +1,4 @@
# Read metrics from one or many Riak servers
[[inputs.riak]]
# Specify a list of one or more riak http servers
servers = ["http://localhost:8098"]

View File

@ -0,0 +1,27 @@
# Riemann protobuf listener
[[inputs.riemann_listener]]
## URL to listen on
## Default is "tcp://:5555"
# service_address = "tcp://:8094"
# service_address = "tcp://127.0.0.1:http"
# service_address = "tcp4://:8094"
# service_address = "tcp6://:8094"
# service_address = "tcp6://[2001:db8::1]:8094"
## Maximum number of concurrent connections.
## 0 (default) is unlimited.
# max_connections = 1024
## Read timeout.
## 0 (default) is unlimited.
# read_timeout = "30s"
## Optional TLS configuration.
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Enables client authentication if set.
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
## Maximum socket buffer size (in bytes when no unit specified).
# read_buffer_size = "64KiB"
## Period between keep alive probes.
## 0 disables keep alive probes.
## Defaults to the OS configuration.
# keep_alive_period = "5m"

View File

@ -0,0 +1,18 @@
# Read API usage and limits for a Salesforce organisation
[[inputs.salesforce]]
## specify your credentials
##
username = "your_username"
password = "your_password"
##
## (optional) security token
# security_token = "your_security_token"
##
## (optional) environment type (sandbox or production)
## default is: production
##
# environment = "production"
##
## (optional) API version (default: "39.0")
##
# version = "39.0"

View File

@ -0,0 +1,8 @@
# Monitor sensors, requires lm-sensors package
[[inputs.sensors]]
## Remove numbers from field names.
## If true, a field name like 'temp1_input' will be changed to 'temp_input'.
# remove_numbers = true
## Timeout is the maximum amount of time that the sensors command can run.
# timeout = "5s"

View File

@ -0,0 +1,11 @@
# SFlow V5 Protocol Listener
[[inputs.sflow]]
## Address to listen for sFlow packets.
## example: service_address = "udp://:6343"
## service_address = "udp4://:6343"
## service_address = "udp6://:6343"
service_address = "udp://:6343"
## Set the size of the operating system's receive buffer.
## example: read_buffer_size = "64KiB"
# read_buffer_size = ""

View File

@ -0,0 +1,49 @@
# Read metrics from storage devices supporting S.M.A.R.T.
[[inputs.smart]]
## Optionally specify the path to the smartctl executable
# path_smartctl = "/usr/bin/smartctl"
## Optionally specify the path to the nvme-cli executable
# path_nvme = "/usr/bin/nvme"
## Optionally specify if vendor specific attributes should be propagated for the NVMe disk case
## ["auto-on"] - automatically find and enable additional vendor specific disk info
## ["vendor1", "vendor2", ...] - e.g. "Intel" enable additional Intel specific disk info
# enable_extensions = ["auto-on"]
## On most platforms, the cli utilities used require root access.
## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli.
## Sudo must be configured to allow the telegraf user to run smartctl or nvme-cli
## without a password.
# use_sudo = false
## Skip checking disks in this power mode. Defaults to
## "standby" to not wake up disks that have stopped rotating.
## See --nocheck in the man pages for smartctl.
## smartctl version 5.41 and 5.42 have faulty detection of
## power mode and might require changing this value to
## "never" depending on your disks.
# nocheck = "standby"
## Gather all returned S.M.A.R.T. attribute metrics and the detailed
## information from each drive into the 'smart_attribute' measurement.
# attributes = false
## Optionally specify devices to exclude from reporting if disks auto-discovery is performed.
# excludes = [ "/dev/pass6" ]
## Optionally specify devices and device type, if unset
## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done
## and all found devices will be included, except those listed in excludes.
# devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"]
## Timeout for the cli command to complete.
# timeout = "30s"
## Optionally call smartctl and nvme-cli with a specific concurrency policy.
## By default, smartctl and nvme-cli are called in separate threads (goroutines) to gather disk attributes.
## Some devices (e.g. disks in RAID arrays) may have access limitations that require sequential reading of
## SMART data - one individual array drive at a time. In such cases, please set this configuration option
## to "sequential" to get readings for all drives.
## valid options: concurrent, sequential
# read_method = "concurrent"

View File

@ -0,0 +1,75 @@
# Retrieves SNMP values from remote agents
[[inputs.snmp]]
## Agent addresses to retrieve values from.
## format: agents = ["<scheme://><hostname>:<port>"]
## scheme: optional, either udp, udp4, udp6, tcp, tcp4, tcp6.
## default is udp
## port: optional
## example: agents = ["udp://127.0.0.1:161"]
## agents = ["tcp://127.0.0.1:161"]
## agents = ["udp4://v4only-snmp-agent"]
agents = ["udp://127.0.0.1:161"]
## Timeout for each request.
# timeout = "5s"
## SNMP version; can be 1, 2, or 3.
# version = 2
## Path to mib files
## Used by the gosmi translator.
## To add paths when translating with netsnmp, use the MIBDIRS environment variable
# path = ["/usr/share/snmp/mibs"]
## SNMP community string.
# community = "public"
## Agent host tag
# agent_host_tag = "agent_host"
## Number of retries to attempt.
# retries = 3
## The GETBULK max-repetitions parameter.
# max_repetitions = 10
## SNMPv3 authentication and encryption options.
##
## Security Name.
# sec_name = "myuser"
## Authentication protocol; one of "MD5", "SHA", "SHA224", "SHA256", "SHA384", "SHA512" or "".
# auth_protocol = "MD5"
## Authentication password.
# auth_password = "pass"
## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
# sec_level = "authNoPriv"
## Context Name.
# context_name = ""
## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C", or "".
### Protocols "AES192", "AES192C", "AES256", and "AES256C" require the underlying net-snmp tools
### to be compiled with --enable-blumenthal-aes (http://www.net-snmp.org/docs/INSTALL.html)
# priv_protocol = ""
## Privacy password used for encrypted messages.
# priv_password = ""
## Add fields and tables defining the variables you wish to collect. This
## example collects the system uptime and interface variables. Reference the
## full plugin documentation for configuration details.
[[inputs.snmp.field]]
oid = "RFC1213-MIB::sysUpTime.0"
name = "uptime"
[[inputs.snmp.field]]
oid = "RFC1213-MIB::sysName.0"
name = "source"
is_tag = true
[[inputs.snmp.table]]
oid = "IF-MIB::ifTable"
name = "interface"
inherit_tags = ["source"]
[[inputs.snmp.table.field]]
oid = "IF-MIB::ifDescr"
name = "ifDescr"
is_tag = true
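## The table above (illustrative) yields an "interface" measurement with one
## series per IF-MIB::ifTable row, tagged with ifDescr and the inherited
## "source" tag.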

View File

@ -0,0 +1,91 @@
# DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
[[inputs.snmp_legacy]]
## Use 'oids.txt' file to translate oids to names
## To generate 'oids.txt' you need to run:
## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
## Or if you have another MIB folder with custom MIBs
## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
snmptranslate_file = "/tmp/oids.txt"
[[inputs.snmp_legacy.host]]
address = "192.168.2.2:161"
# SNMP community
community = "public" # default public
# SNMP version (1, 2 or 3)
# Version 3 not supported yet
version = 2 # default 2
# SNMP response timeout
timeout = 2.0 # default 2.0
# SNMP request retries
retries = 2 # default 2
# Which get/bulk do you want to collect for this host
collect = ["mybulk", "sysservices", "sysdescr"]
# Simple list of OIDs to get, in addition to "collect"
get_oids = []
[[inputs.snmp_legacy.host]]
address = "192.168.2.3:161"
community = "public"
version = 2
timeout = 2.0
retries = 2
collect = ["mybulk"]
get_oids = [
"ifNumber",
".1.3.6.1.2.1.1.3.0",
]
[[inputs.snmp_legacy.get]]
name = "ifnumber"
oid = "ifNumber"
[[inputs.snmp_legacy.get]]
name = "interface_speed"
oid = "ifSpeed"
instance = "0"
[[inputs.snmp_legacy.get]]
name = "sysuptime"
oid = ".1.3.6.1.2.1.1.3.0"
unit = "second"
[[inputs.snmp_legacy.bulk]]
name = "mybulk"
max_repetition = 127
oid = ".1.3.6.1.2.1.1"
[[inputs.snmp_legacy.bulk]]
name = "ifoutoctets"
max_repetition = 127
oid = "ifOutOctets"
[[inputs.snmp_legacy.host]]
address = "192.168.2.13:161"
#address = "127.0.0.1:161"
community = "public"
version = 2
timeout = 2.0
retries = 2
#collect = ["mybulk", "sysservices", "sysdescr", "systype"]
collect = ["sysuptime" ]
[[inputs.snmp_legacy.host.table]]
name = "iftable3"
include_instances = ["enp5s0", "eth1"]
# SNMP TABLEs
# table without mapping or subtables
[[inputs.snmp_legacy.table]]
name = "iftable1"
oid = ".1.3.6.1.2.1.31.1.1.1"
# table without mapping but with subtables
[[inputs.snmp_legacy.table]]
name = "iftable2"
oid = ".1.3.6.1.2.1.31.1.1.1"
sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
# table with mapping but without subtables
[[inputs.snmp_legacy.table]]
name = "iftable3"
oid = ".1.3.6.1.2.1.31.1.1.1"
# if empty, get all instances
mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
# if empty, get all subtables
# table with both mapping and subtables
[[inputs.snmp_legacy.table]]
name = "iftable4"
oid = ".1.3.6.1.2.1.31.1.1.1"
# if empty, get all instances
mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
# if empty, get all subtables
# sub_tables need not be "real" subtables
sub_tables = [".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]

View File

@ -0,0 +1,35 @@
# Receive SNMP traps
[[inputs.snmp_trap]]
## Transport, local address, and port to listen on. Transport must
## be "udp://". Omit local address to listen on all interfaces.
## example: "udp://127.0.0.1:1234"
##
## Special permissions may be required to listen on a port less than
## 1024. See README.md for details
##
# service_address = "udp://:162"
##
## Path to mib files
## Used by the gosmi translator.
## To add paths when translating with netsnmp, use the MIBDIRS environment variable
# path = ["/usr/share/snmp/mibs"]
##
## Deprecated in 1.20.0; no longer running snmptranslate
## Timeout running snmptranslate command
# timeout = "5s"
## Snmp version
# version = "2c"
## SNMPv3 authentication and encryption options.
##
## Security Name.
# sec_name = "myuser"
## Authentication protocol; one of "MD5", "SHA" or "".
# auth_protocol = "MD5"
## Authentication password.
# auth_password = "pass"
## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
# sec_level = "authNoPriv"
## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C" or "".
# priv_protocol = ""
## Privacy password used for encrypted messages.
# priv_password = ""

View File

@ -0,0 +1,59 @@
# Generic socket listener capable of handling multiple socket types.
[[inputs.socket_listener]]
## URL to listen on
# service_address = "tcp://:8094"
# service_address = "tcp://127.0.0.1:http"
# service_address = "tcp4://:8094"
# service_address = "tcp6://:8094"
# service_address = "tcp6://[2001:db8::1]:8094"
# service_address = "udp://:8094"
# service_address = "udp4://:8094"
# service_address = "udp6://:8094"
# service_address = "unix:///tmp/telegraf.sock"
# service_address = "unixgram:///tmp/telegraf.sock"
## Change the file mode bits on unix sockets. These permissions may not be
## respected by some platforms; to safely restrict write permissions it is best
## to place the socket into a directory that has previously been created
## with the desired permissions.
## ex: socket_mode = "777"
# socket_mode = ""
## Maximum number of concurrent connections.
## Only applies to stream sockets (e.g. TCP).
## 0 (default) is unlimited.
# max_connections = 1024
## Read timeout.
## Only applies to stream sockets (e.g. TCP).
## 0 (default) is unlimited.
# read_timeout = "30s"
## Optional TLS configuration.
## Only applies to stream sockets (e.g. TCP).
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Enables client authentication if set.
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
## Maximum socket buffer size (in bytes when no unit specified).
## For stream sockets, once the buffer fills up, the sender will start backing up.
## For datagram sockets, once the buffer fills up, metrics will start dropping.
## Defaults to the OS default.
# read_buffer_size = "64KiB"
## Period between keep alive probes.
## Only applies to TCP sockets.
## 0 disables keep alive probes.
## Defaults to the OS configuration.
# keep_alive_period = "5m"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
## Content encoding for message payloads, can be set to "gzip", or
## "identity" to apply no encoding.
# content_encoding = "identity"

View File

@ -0,0 +1,7 @@
# Gather indicators from established connections, using iproute2's ss command.
[[inputs.socketstat]]
## ss can display information about tcp, udp, raw, unix, packet, dccp and sctp sockets
## Specify here the types you want to gather
socket_types = [ "tcp", "udp" ]
## The default timeout of 1s for ss execution can be overridden here:
# timeout = "1s"

View File

@ -0,0 +1,11 @@
# Read stats from one or more Solr servers or cores
[[inputs.solr]]
## specify a list of one or more Solr servers
servers = ["http://localhost:8983"]
##
## specify a list of one or more Solr cores (default - all)
# cores = ["main"]
##
## Optional HTTP Basic Auth Credentials
# username = "username"
# password = "pa$$word"

View File

@ -0,0 +1,77 @@
# Read metrics from SQL queries
[[inputs.sql]]
## Database Driver
## See https://github.com/influxdata/telegraf/blob/master/docs/SQL_DRIVERS_INPUT.md for
## a list of supported drivers.
driver = "mysql"
## Data source name for connecting
## The syntax and supported options depends on selected driver.
dsn = "username:password@mysqlserver:3307/dbname?param=value"
## Timeout for any operation
## Note that the timeout for queries is per query not per gather.
# timeout = "5s"
## Connection time limits
## By default the maximum idle time and maximum lifetime of a connection are unlimited, i.e. the connections
## will not be closed automatically. If you specify a positive time, the connections will be closed after
## idling or existing for at least that amount of time, respectively.
# connection_max_idle_time = "0s"
# connection_max_life_time = "0s"
## Connection count limits
## By default the number of open connections is not limited and the number of maximum idle connections
## will be inferred from the number of queries specified. If you specify a positive number for any of the
## two options, connections will be closed when reaching the specified limit. The number of idle connections
## will be clipped to the maximum number of connections limit if any.
# connection_max_open = 0
# connection_max_idle = auto
[[inputs.sql.query]]
## Query to perform on the server
query="SELECT user,state,latency,score FROM Scoreboard WHERE application > 0"
## Alternatively to specifying the query directly you can select a file here containing the SQL query.
## Only one of 'query' and 'query_script' can be specified!
# query_script = "/path/to/sql/script.sql"
## Name of the measurement
## In case both measurement and 'measurement_column' are given, the latter takes precedence.
# measurement = "sql"
## Column name containing the name of the measurement
## If given, this will take precedence over the 'measurement' setting. In case a query result
## does not contain the specified column, we fall back to the 'measurement' setting.
# measurement_column = ""
## Column name containing the time of the measurement
## If omitted, the time of the query will be used.
# time_column = ""
## Format of the time contained in 'time_column'
## The time must be 'unix', 'unix_ms', 'unix_us', 'unix_ns', or a golang time format.
## See https://golang.org/pkg/time/#Time.Format for details.
# time_format = "unix"
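## For example (illustrative), timestamps stored as "2021-03-05 14:30:00"
## would use the golang reference layout:
# time_format = "2006-01-02 15:04:05"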
## Column names containing tags
## An empty include list will reject all columns and an empty exclude list will not exclude any column.
## I.e. by default no columns will be returned as tags and the tags are empty.
# tag_columns_include = []
# tag_columns_exclude = []
## Column names containing fields (explicit types)
## Convert the given columns to the corresponding type. Explicit type conversions take precedence over
## the automatic (driver-based) conversion below.
## NOTE: Columns should not be specified for multiple types or the resulting type is undefined.
# field_columns_float = []
# field_columns_int = []
# field_columns_uint = []
# field_columns_bool = []
# field_columns_string = []
## Column names containing fields (automatic types)
## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty
## exclude list will not exclude any column. I.e. by default all columns will be returned as fields.
## NOTE: We rely on the database driver to perform automatic datatype conversion.
# field_columns_include = []
# field_columns_exclude = []

View File

@ -0,0 +1,139 @@
# Read metrics from Microsoft SQL Server
[[inputs.sqlserver]]
## Specify instances to monitor with a list of connection strings.
## All connection parameters are optional.
## By default, the host is localhost, listening on default port, TCP 1433.
## for Windows, the user is the currently running AD user (SSO).
## See https://github.com/denisenkom/go-mssqldb for detailed connection
## parameters, in particular, tls connections can be created like so:
## "encrypt=true;certificate=<cert>;hostNameInCertificate=<SqlServer host fqdn>"
servers = [
"Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
]
## Authentication method
## valid methods: "connection_string", "AAD"
# auth_method = "connection_string"
## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2
## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type.
## Possible values for database_type are - "SQLServer" or "AzureSQLDB" or "AzureSQLManagedInstance" or "AzureSQLPool"
database_type = "SQLServer"
## A list of queries to include. If not specified, all the below listed queries are used.
include_query = []
## A list of queries to explicitly ignore.
exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"]
## Queries enabled by default for database_type = "SQLServer" are -
## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks,
## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu, SQLServerAvailabilityReplicaStates, SQLServerDatabaseReplicaStates,
## SQLServerRecentBackups
## Queries enabled by default for database_type = "AzureSQLDB" are -
## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties,
## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers
## Queries enabled by default for database_type = "AzureSQLManagedInstance" are -
## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats,
## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers
## Queries enabled by default for database_type = "AzureSQLPool" are -
## AzureSQLPoolResourceStats, AzureSQLPoolResourceGovernance, AzureSQLPoolDatabaseIO, AzureSQLPoolWaitStats,
## AzureSQLPoolMemoryClerks, AzureSQLPoolPerformanceCounters, AzureSQLPoolSchedulers
## Following are old config settings
## You may use them only if you are using the earlier flavor of queries; however it is recommended to use
## the new mechanism of identifying the database_type and thereby use its corresponding queries
## Optional parameter, setting this to 2 will use a new version
## of the collection queries that break compatibility with the original
## dashboards.
## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB
# query_version = 2
## If you are using AzureDB, setting this to true will gather resource utilization metrics
# azuredb = false
## Toggling this to true will emit an additional metric called "sqlserver_telegraf_health".
## This metric tracks the count of attempted queries and successful queries for each SQL instance specified in "servers".
## The purpose of this metric is to assist with identifying and diagnosing any connectivity or query issues.
## This setting/metric is optional and is disabled by default.
# health_metric = false
## Possible queries across different versions of the collectors
## Queries enabled by default for specific Database Type
## database_type = AzureSQLDB by default collects the following queries
## - AzureSQLDBWaitStats
## - AzureSQLDBResourceStats
## - AzureSQLDBResourceGovernance
## - AzureSQLDBDatabaseIO
## - AzureSQLDBServerProperties
## - AzureSQLDBOsWaitstats
## - AzureSQLDBMemoryClerks
## - AzureSQLDBPerformanceCounters
## - AzureSQLDBRequests
## - AzureSQLDBSchedulers
## database_type = AzureSQLManagedInstance by default collects the following queries
## - AzureSQLMIResourceStats
## - AzureSQLMIResourceGovernance
## - AzureSQLMIDatabaseIO
## - AzureSQLMIServerProperties
## - AzureSQLMIOsWaitstats
## - AzureSQLMIMemoryClerks
## - AzureSQLMIPerformanceCounters
## - AzureSQLMIRequests
## - AzureSQLMISchedulers
## database_type = AzureSQLPool by default collects the following queries
## - AzureSQLPoolResourceStats
## - AzureSQLPoolResourceGovernance
## - AzureSQLPoolDatabaseIO
## - AzureSQLPoolOsWaitStats
## - AzureSQLPoolMemoryClerks
## - AzureSQLPoolPerformanceCounters
## - AzureSQLPoolSchedulers
## database_type = SQLServer by default collects the following queries
## - SQLServerPerformanceCounters
## - SQLServerWaitStatsCategorized
## - SQLServerDatabaseIO
## - SQLServerProperties
## - SQLServerMemoryClerks
## - SQLServerSchedulers
## - SQLServerRequests
## - SQLServerVolumeSpace
## - SQLServerCpu
## - SQLServerRecentBackups
## and following as optional (if mentioned in the include_query list)
## - SQLServerAvailabilityReplicaStates
## - SQLServerDatabaseReplicaStates
## Version 2 by default collects the following queries
## Version 2 is being deprecated, please consider using database_type.
## - PerformanceCounters
## - WaitStatsCategorized
## - DatabaseIO
## - ServerProperties
## - MemoryClerk
## - Schedulers
## - SqlRequests
## - VolumeSpace
## - Cpu
## Version 1 by default collects the following queries
## Version 1 is deprecated, please consider using database_type.
## - PerformanceCounters
## - WaitStatsCategorized
## - CPUHistory
## - DatabaseIO
## - DatabaseSize
## - DatabaseStats
## - DatabaseProperties
## - MemoryClerk
## - VolumeSpace
## - PerformanceMetrics

View File

@ -0,0 +1,78 @@
# Gather timeseries from Google Cloud Platform v3 monitoring API
[[inputs.stackdriver]]
## GCP Project
project = "erudite-bloom-151019"
## Include timeseries that start with the given metric type.
metric_type_prefix_include = [
"compute.googleapis.com/",
]
## Exclude timeseries that start with the given metric type.
# metric_type_prefix_exclude = []
## Most metrics are updated no more than once per minute; it is recommended
## to override the agent level interval with a value of 1m or greater.
interval = "1m"
## Maximum number of API calls to make per second. The quota for accounts
## varies, it can be viewed on the API dashboard:
## https://cloud.google.com/monitoring/quotas#quotas_and_limits
# rate_limit = 14
## The delay and window options control the number of points selected on
## each gather. When set, metrics are gathered between:
## start: now() - delay - window
## end: now() - delay
#
## Collection delay; if set too low metrics may not yet be available.
# delay = "5m"
#
## If unset, the window will start at 1m and be updated dynamically to span
## the time between calls (approximately the length of the plugin interval).
# window = "1m"
## TTL for cached list of metric types. This is the maximum amount of time
## it may take to discover new metrics.
# cache_ttl = "1h"
## If true, raw bucket counts are collected for distribution value types.
## For a more lightweight collection, you may wish to disable and use
## distribution_aggregation_aligners instead.
# gather_raw_distribution_buckets = true
## Aggregate functions to be used for metrics whose value type is
## distribution. These aggregate values are recorded in addition to raw
## bucket counts, if they are enabled.
##
## For a list of aligner strings see:
## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner
# distribution_aggregation_aligners = [
# "ALIGN_PERCENTILE_99",
# "ALIGN_PERCENTILE_95",
# "ALIGN_PERCENTILE_50",
# ]
## Filters can be added to reduce the number of time series matched. All
## functions are supported: starts_with, ends_with, has_substring, and
## one_of. Only the '=' operator is supported.
##
## The logical operators when combining filters are defined statically using
## the following values:
## filter ::= <resource_labels> {AND <metric_labels>}
## resource_labels ::= <resource_labels> {OR <resource_label>}
## metric_labels ::= <metric_labels> {OR <metric_label>}
##
## For more details, see https://cloud.google.com/monitoring/api/v3/filters
#
## Resource labels refine the time series selection with the following expression:
## resource.labels.<key> = <value>
# [[inputs.stackdriver.filter.resource_labels]]
# key = "instance_name"
# value = 'starts_with("localhost")'
#
## Metric labels refine the time series selection with the following expression:
## metric.labels.<key> = <value>
# [[inputs.stackdriver.filter.metric_labels]]
# key = "device_name"
# value = 'one_of("sda", "sdb")'
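#
## For example (illustrative), the two filters above combine into a selector
## roughly equivalent to:
##   resource.labels.instance_name = starts_with("localhost")
##   AND metric.labels.device_name = one_of("sda", "sdb")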

View File

@ -0,0 +1,80 @@
# Statsd Server
[[inputs.statsd]]
## Protocol, must be "tcp", "udp4", "udp6" or "udp" (default=udp)
protocol = "udp"
## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
max_tcp_connections = 250
## Enable TCP keep alive probes (default=false)
tcp_keep_alive = false
## Specifies the keep-alive period for an active network connection.
## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false.
## Defaults to the OS configuration.
# tcp_keep_alive_period = "2h"
## Address and port to host UDP listener on
service_address = ":8125"
## The following configuration options control when telegraf clears its cache
## of previous values. If set to false, then telegraf will only clear its
## cache when the daemon is restarted.
## Reset gauges every interval (default=true)
delete_gauges = true
## Reset counters every interval (default=true)
delete_counters = true
## Reset sets every interval (default=true)
delete_sets = true
## Reset timings & histograms every interval (default=true)
delete_timings = true
## Percentiles to calculate for timing & histogram stats.
percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0]
## separator to use between elements of a statsd metric
metric_separator = "_"
## Parses tags in the datadog statsd format
## http://docs.datadoghq.com/guides/dogstatsd/
## deprecated in 1.10; use datadog_extensions option instead
parse_data_dog_tags = false
## Parses extensions to statsd in the datadog statsd format
## currently supports metrics and datadog tags.
## http://docs.datadoghq.com/guides/dogstatsd/
datadog_extensions = false
## Parses distributions metric as specified in the datadog statsd format
## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition
datadog_distributions = false
## Statsd data translation templates, more info can be read here:
## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md
# templates = [
# "cpu.* measurement*"
# ]
## Number of UDP messages allowed to queue up, once filled,
## the statsd server will start dropping packets
allowed_pending_messages = 10000
## Number of timing/histogram values to track per-measurement in the
## calculation of percentiles. Raising this limit increases the accuracy
## of percentiles but also increases the memory usage and cpu time.
percentile_limit = 1000
## Maximum socket buffer size in bytes, once the buffer fills up, metrics
## will start dropping. Defaults to the OS default.
# read_buffer_size = 65535
## Max duration (TTL) for each metric to stay cached/reported without being updated.
# max_ttl = "10h"
## Sanitize name method
## By default, telegraf will pass names directly as they are received.
## However, upstream statsd now does sanitization of names which can be
## enabled by using the "upstream" method option. This option will a) replace
## white space with '_', b) replace '/' with '-', and c) remove characters not
## matching 'a-zA-Z_\-0-9\.;='.
# sanitize_name_method = ""
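## For example (illustrative), with the "upstream" method a name such as
## "cpu load/5" becomes "cpu_load-5".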

View File

@ -0,0 +1,13 @@
# Suricata stats and alerts plugin
[[inputs.suricata]]
## Data sink for Suricata stats log.
## This is expected to be a filename of a
## unix socket to be created for listening.
source = "/var/run/suricata-stats.sock"
## Delimiter for flattening field keys, e.g. subitem "alert" of "detect"
## becomes "detect_alert" when delimiter is "_".
delimiter = "_"
## Detect alert logs
alerts = false

View File

@ -0,0 +1,3 @@
# Read metrics about swap memory usage
[[inputs.swap]]
# no configuration

View File

@ -0,0 +1,3 @@
# Get synproxy counter statistics from procfs
[[inputs.synproxy]]
# no configuration

View File

@ -0,0 +1,54 @@
[[inputs.syslog]]
## Protocol, address and port to host the syslog receiver.
## If no host is specified, then localhost is used.
## If no port is specified, 6514 is used (RFC5425#section-4.1).
## ex: server = "tcp://localhost:6514"
## server = "udp://:6514"
## server = "unix:///var/run/telegraf-syslog.sock"
server = "tcp://:6514"
## TLS Config
# tls_allowed_cacerts = ["/etc/telegraf/ca.pem"]
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Period between keep alive probes.
## 0 disables keep alive probes.
## Defaults to the OS configuration.
## Only applies to stream sockets (e.g. TCP).
# keep_alive_period = "5m"
## Maximum number of concurrent connections (default = 0).
## 0 means unlimited.
## Only applies to stream sockets (e.g. TCP).
# max_connections = 1024
## Read timeout is the maximum time allowed for reading a single message (default = 5s).
## 0 means unlimited.
# read_timeout = "5s"
## The framing technique with which it is expected that messages are transported (default = "octet-counting").
## Whether the messages come using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
## or the non-transparent framing technique (RFC6587#section-3.4.2).
## Must be one of "octet-counting", "non-transparent".
# framing = "octet-counting"
## The trailer to be expected in case of non-transparent framing (default = "LF").
## Must be one of "LF", or "NUL".
# trailer = "LF"
## Whether to parse in best effort mode or not (default = false).
## By default best effort parsing is off.
# best_effort = false
## The RFC standard to use for message parsing
## By default RFC5424 is used. RFC3164 only supports UDP transport (no streaming support)
## Must be one of "RFC5424", or "RFC3164".
# syslog_standard = "RFC5424"
## Character to prepend to SD-PARAMs (default = "_").
## A syslog message can contain multiple parameters and multiple identifiers within the structured data section.
## E.g., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
## For each combination a field is created.
## Its name is created concatenating identifier, sdparam_separator, and parameter name.
# sdparam_separator = "_"
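## With the default separator, the example above yields the fields
## "id1_name1", "id1_name2", "id2_name1" and "id2_nameA".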

View File

@ -0,0 +1,52 @@
# Sysstat metrics collector
[[inputs.sysstat]]
## Path to the sadc command.
#
## Common Defaults:
## Debian/Ubuntu: /usr/lib/sysstat/sadc
## Arch: /usr/lib/sa/sadc
## RHEL/CentOS: /usr/lib64/sa/sadc
sadc_path = "/usr/lib/sa/sadc" # required
## Path to the sadf command, if it is not in PATH
# sadf_path = "/usr/bin/sadf"
## Activities is a list of activities, that are passed as argument to the
## sadc collector utility (e.g. DISK, SNMP, etc.)
## The more activities that are added, the more data is collected.
# activities = ["DISK"]
## Group metrics to measurements.
##
## If group is false each metric will be prefixed with a description
## and represents itself a measurement.
##
## If group is true, corresponding metrics are grouped to a single measurement.
# group = true
## Options for the sadf command. The values on the left represent the sadf options and
## the values on the right their description (which are used for grouping and prefixing metrics).
##
## Run 'sar -h' or 'man sar' to find out the supported options for your sysstat version.
[inputs.sysstat.options]
-C = "cpu"
-B = "paging"
-b = "io"
-d = "disk" # requires DISK activity
"-n ALL" = "network"
"-P ALL" = "per_cpu"
-q = "queue"
-R = "mem"
-r = "mem_util"
-S = "swap_util"
-u = "cpu_util"
-v = "inode"
-W = "swap"
-w = "task"
# -H = "hugepages" # only available for newer linux distributions
# "-I ALL" = "interrupts" # requires INT activity
## Device tags can be used to add additional tags for devices. For example the configuration below
## adds a tag vg with value rootvg for all metrics with sda devices.
# [[inputs.sysstat.device_tags.sda]]
# vg = "rootvg"

View File

@ -0,0 +1,3 @@
# Read metrics about system load & uptime
[[inputs.system]]
# no configuration

View File

@ -0,0 +1,16 @@
# Gather systemd units state
[[inputs.systemd_units]]
## Set timeout for systemctl execution
# timeout = "1s"
#
## Filter for a specific unit type, default is "service", other possible
## values are "socket", "target", "device", "mount", "automount", "swap",
## "timer", "path", "slice" and "scope ":
# unittype = "service"
#
## Filter for a specific pattern, default is "" (i.e. all), other possible
## values are valid pattern for systemctl, e.g. "a*" for all units with
## names starting with "a"
# pattern = ""
## pattern = "telegraf* influxdb*"
## pattern = "a*"

View File

@ -0,0 +1,66 @@
# Parse the new lines appended to a file
[[inputs.tail]]
## File names or a pattern to tail.
## These accept standard unix glob matching rules, but with the addition of
## ** as a "super asterisk". ie:
## "/var/log/**.log" -> recursively find all .log files in /var/log
## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
## "/var/log/apache.log" -> just tail the apache log file
## "/var/log/log[!1-2]* -> tail files without 1-2
## "/var/log/log[^1-2]* -> identical behavior as above
## See https://github.com/gobwas/glob for more examples
##
files = ["/var/mymetrics.out"]
## Read file from beginning.
# from_beginning = false
## Whether file is a named pipe
# pipe = false
## Method used to watch for file updates. Can be either "inotify" or "poll".
# watch_method = "inotify"
## Maximum lines of the file to process that have not yet been written by the
## output. For best throughput set based on the number of metrics on each
## line and the size of the output's metric_batch_size.
# max_undelivered_lines = 1000
## Character encoding to use when interpreting the file contents. Invalid
## characters are replaced using the unicode replacement character. When set
## to the empty string the data is not decoded to text.
## ex: character_encoding = "utf-8"
## character_encoding = "utf-16le"
## character_encoding = "utf-16be"
## character_encoding = ""
# character_encoding = ""
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string.
# path_tag = "path"
## Filters to apply to files before generating metrics
## "ansi_color" removes ANSI colors
# filters = []
## multiline parser/codec
## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html
# [inputs.tail.multiline]
## The pattern should be a regexp which matches what you believe to be an indicator that the field is part of an event consisting of multiple lines of log data.
# pattern = '^\s'
## The value must be "previous" or "next" and indicates the relation to the
## multi-line event.
# match_which_line = "previous"
## The invert_match can be true or false (defaults to false).
## If true, a message not matching the pattern will constitute a match of the
## multiline filter and the action given by match_which_line will be applied.
## (vice-versa is also true)
# invert_match = false
## After the specified timeout, this plugin sends the multiline event even if
## no new pattern is found to start a new event. The default is 5s.
# timeout = "5s"
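## For example (illustrative): with pattern = '^\s' and
## match_which_line = "previous", indented continuation lines (such as a
## stack trace) are folded into the preceding log line before the
## data_format parser runs.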

View File

@ -0,0 +1,4 @@
# Generic TCP listener
[[inputs.tcp_listener]]
# DEPRECATED: the TCP listener plugin has been deprecated in favor of the
# socket_listener plugin
# see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener

View File

@ -0,0 +1,12 @@
# Reads metrics from a Teamspeak 3 Server via ServerQuery
[[inputs.teamspeak]]
## Server address for Teamspeak 3 ServerQuery
# server = "127.0.0.1:10011"
## Username for ServerQuery
username = "serverqueryuser"
## Password for ServerQuery
password = "secret"
## Nickname of the ServerQuery client
nickname = "telegraf"
## Array of virtual servers
# virtual_servers = [1]

View File

@ -0,0 +1,3 @@
# Read metrics about temperature
[[inputs.temp]]
# no configuration

View File

@ -0,0 +1,14 @@
# Read Tengine's basic status information (ngx_http_reqstat_module)
[[inputs.tengine]]
## An array of Tengine reqstat module URIs to gather stats from.
urls = ["http://127.0.0.1/us"]
## HTTP response timeout (default: 5s)
# response_timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

View File

@ -0,0 +1,18 @@
# Gather metrics from the Tomcat server status page.
[[inputs.tomcat]]
## URL of the Tomcat server status
# url = "http://127.0.0.1:8080/manager/status/all?XML=true"
## HTTP Basic Auth Credentials
# username = "tomcat"
# password = "s3cret"
## Request timeout
# timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

View File

@ -0,0 +1,4 @@
# Inserts sine and cosine waves for demonstration purposes
[[inputs.trig]]
## Set the amplitude
amplitude = 10.0

View File

@ -0,0 +1,6 @@
# Read Twemproxy stats data
[[inputs.twemproxy]]
## Twemproxy stats address and port (no scheme)
addr = "localhost:22222"
## Monitor pool names
pools = ["redis_pool", "mc_pool"]

View File

@ -0,0 +1,3 @@
# Generic UDP listener
[[inputs.udp_listener]]
# see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener

View File

@ -0,0 +1,24 @@
# A plugin to collect stats from the Unbound DNS resolver
[[inputs.unbound]]
## Address of the server to connect to; read from the unbound conf by default. Optionally include ':port'.
## The IP will be looked up if a hostname is given.
server = "127.0.0.1:8953"
## If running as a restricted user you can prepend sudo for additional access:
# use_sudo = false
## The default location of the unbound-control binary can be overridden with:
# binary = "/usr/sbin/unbound-control"
## The default location of the unbound config file can be overridden with:
# config_file = "/etc/unbound/unbound.conf"
## The default timeout of 1s can be overridden with:
# timeout = "1s"
## When set to true, thread metrics are tagged with the thread id.
##
## The default is false for backwards compatibility, and will be changed to
## true in a future version. It is recommended to set it to true on new
## deployments.
thread_as_tag = false

View File

@ -0,0 +1,11 @@
# Read uWSGI metrics.
[[inputs.uwsgi]]
## List of URLs of uWSGI Stats servers. Each url must match the pattern:
## scheme://address[:port]
##
## For example:
## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"]
servers = ["tcp://127.0.0.1:1717"]
## General connection timeout
# timeout = "5s"

View File

@ -0,0 +1,38 @@
# A plugin to collect stats from Varnish HTTP Cache
[[inputs.varnish]]
## If running as a restricted user you can prepend sudo for additional access:
# use_sudo = false
## The default location of the varnishstat binary can be overridden with:
binary = "/usr/bin/varnishstat"
## Additional custom arguments for the varnishstat command
# binary_args = ["-f", "MAIN.*"]
## The default location of the varnishadm binary can be overridden with:
adm_binary = "/usr/bin/varnishadm"
## Custom arguments for the varnishadm command
# adm_binary_args = [""]
## Metric version defaults to metric_version=1, use metric_version=2 for removal of nonactive VCLs
## Varnish 6.0.2 and newer is required for metric_version=2.
metric_version = 1
## Additional regexps to override builtin conversion of varnish metrics into telegraf metrics.
## Regexp group "_vcl" is used for extracting the VCL name. Metrics that contain nonactive VCL's are skipped.
## Regexp group "_field" overrides the field name. Other named regexp groups are used as tags.
# regexps = ['^XCNT\.(?P<_vcl>[\w\-]*)(\.)*(?P<group>[\w\-.+]*)\.(?P<_field>[\w\-.+]*)\.val']
## By default, telegraf gathers stats for 3 metric points.
## Setting stats will override the defaults shown below.
## Glob matching can be used, ie, stats = ["MAIN.*"]
## stats may also be set to ["*"], which will collect all stats
stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
## Optional name for the varnish instance (or working directory) to query
## Usually appended after -n in the varnish cli
# instance_name = "instanceName"
## Timeout for varnishstat command
# timeout = "1s"

Some files were not shown because too many files have changed in this diff