feat: migrate input plugins to new sample config format (A-L) (#10924)

This commit is contained in:
Sebastian Spaink 2022-04-07 17:01:21 -05:00 committed by GitHub
parent a7df6c6aa6
commit 8e2b4988fe
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
289 changed files with 1189 additions and 4104 deletions

View File

@ -5,7 +5,7 @@ This plugin gathers queues, topics & subscribers metrics using ActiveMQ Console A
## Configuration
```toml
# Description
# Gather ActiveMQ metrics
[[inputs.activemq]]
## ActiveMQ WebConsole URL
url = "http://127.0.0.1:8161"

View File

@ -82,36 +82,6 @@ type Stats struct {
DequeueCounter int `xml:"dequeueCounter,attr"`
}
var sampleConfig = `
## ActiveMQ WebConsole URL
url = "http://127.0.0.1:8161"
## Credentials for basic HTTP authentication
# username = "admin"
# password = "admin"
## Required ActiveMQ webadmin root path
# webadmin = "admin"
## Maximum time to receive response.
# response_timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
`
func (a *ActiveMQ) Description() string {
return "Gather ActiveMQ metrics"
}
func (a *ActiveMQ) SampleConfig() string {
return sampleConfig
}
func (a *ActiveMQ) createHTTPClient() (*http.Client, error) {
tlsCfg, err := a.ClientConfig.TLSConfig()
if err != nil {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package activemq

// SampleConfig returns a placeholder string that the generate_plugindata
// tool substitutes with the plugin's sample configuration at generate time.
func (a *ActiveMQ) SampleConfig() string {
	return `{{ .SampleConfig }}`
}

View File

@ -42,48 +42,6 @@ type Aerospike struct {
NumberHistogramBuckets int `toml:"num_histogram_buckets"`
}
var sampleConfig = `
## Aerospike servers to connect to (with port)
## This plugin will query all namespaces the aerospike
## server has configured and get stats for them.
servers = ["localhost:3000"]
# username = "telegraf"
# password = "pa$$word"
## Optional TLS Config
# enable_tls = false
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
# tls_name = "tlsname"
## If false, skip chain & host verification
# insecure_skip_verify = true
# Feature Options
# Add namespace variable to limit the namespaces executed on
# Leave blank to do all
# disable_query_namespaces = true # default false
# namespaces = ["namespace1", "namespace2"]
# Enable set level telemetry
# query_sets = true # default: false
# Add namespace set combinations to limit sets executed on
# Leave blank to do all sets
# sets = ["namespace1/set1", "namespace1/set2", "namespace3"]
# Histograms
# enable_ttl_histogram = true # default: false
# enable_object_size_linear_histogram = true # default: false
# by default, aerospike produces a 100 bucket histogram
# this is not great for most graphing tools, this will allow
# the ability to squash this to a smaller number of buckets
# To have a balanced histogram, the number of buckets chosen
# should divide evenly into 100.
# num_histogram_buckets = 100 # default: 10
`
// On the random chance a hex value is all digits
// these are fields that can contain hex and should always be strings
var protectedHexFields = map[string]bool{
@ -92,14 +50,6 @@ var protectedHexFields = map[string]bool{
"paxos_principal": true,
}
func (a *Aerospike) SampleConfig() string {
return sampleConfig
}
func (a *Aerospike) Description() string {
return "Read stats from aerospike server(s)"
}
func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
if !a.initialized {
tlsConfig, err := a.ClientConfig.TLSConfig()

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package aerospike

// SampleConfig returns a placeholder string that the generate_plugindata
// tool substitutes with the plugin's sample configuration at generate time.
func (a *Aerospike) SampleConfig() string {
	return `{{ .SampleConfig }}`
}

View File

@ -20,6 +20,8 @@ In the following order the plugin will attempt to authenticate.
## Configuration
```toml
# Pull Metric Statistics from Aliyun CMS
[[inputs.aliyuncms]]
## Aliyun Credentials
## Credentials are loaded in the following order
## 1) Ram RoleArn credential

View File

@ -20,110 +20,6 @@ import (
"github.com/pkg/errors"
)
const (
description = "Pull Metric Statistics from Aliyun CMS"
sampleConfig = `
## Aliyun Credentials
## Credentials are loaded in the following order
## 1) Ram RoleArn credential
## 2) AccessKey STS token credential
## 3) AccessKey credential
## 4) Ecs Ram Role credential
## 5) RSA keypair credential
## 6) Environment variables credential
## 7) Instance metadata credential
# access_key_id = ""
# access_key_secret = ""
# access_key_sts_token = ""
# role_arn = ""
# role_session_name = ""
# private_key = ""
# public_key_id = ""
# role_name = ""
## Specify the ali cloud region list to be queried for metrics and objects discovery
## If not set, all supported regions (see below) would be covered, it can provide a significant load on API, so the recommendation here
## is to limit the list as much as possible. Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm
## Default supported regions are:
## 21 items: cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,cn-shanghai,cn-shenzhen,
## cn-heyuan,cn-chengdu,cn-hongkong,ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5,
## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,eu-west-1,me-east-1
##
## From discovery perspective it set the scope for object discovery, the discovered info can be used to enrich
## the metrics with objects attributes/tags. Discovery is supported not for all projects (if not supported, then
## it will be reported on the start - for example for 'acs_cdn' project:
## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' )
## Currently, discovery supported for the following projects:
## - acs_ecs_dashboard
## - acs_rds_dashboard
## - acs_slb_dashboard
## - acs_vpc_eip
regions = ["cn-hongkong"]
# The minimum period for AliyunCMS metrics is 1 minute (60s). However not all
# metrics are made available to the 1 minute period. Some are collected at
# 3 minute, 5 minute, or larger intervals.
# See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv
# Note that if a period is configured that is smaller than the minimum for a
# particular metric, that metric will not be returned by the Aliyun OpenAPI
# and will not be collected by Telegraf.
#
## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s)
period = "5m"
## Collection Delay (required - must account for metrics availability via AliyunCMS API)
delay = "1m"
## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
## gaps or overlap in pulled data
interval = "5m"
## Metric Statistic Project (required)
project = "acs_slb_dashboard"
## Maximum requests per second, default value is 200
ratelimit = 200
## How often the discovery API call executed (default 1m)
#discovery_interval = "1m"
## Metrics to Pull (Required)
[[inputs.aliyuncms.metrics]]
## Metrics names to be requested,
## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
names = ["InstanceActiveConnection", "InstanceNewConnection"]
## Dimension filters for Metric (these are optional).
## This allows to get additional metric dimension. If dimension is not specified it can be returned or
## the data can be aggregated - it depends on particular metric, you can find details here: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
##
## Note, that by default dimension filter includes the list of discovered objects in scope (if discovery is enabled)
## Values specified here would be added into the list of discovered objects.
## You can specify either single dimension:
#dimensions = '{"instanceId": "p-example"}'
## Or you can specify several dimensions at once:
#dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]'
## Enrichment tags, can be added from discovery (if supported)
## Notation is <measurement_tag_name>:<JMES query path (https://jmespath.org/tutorial.html)>
## To figure out which fields are available, consult the Describe<ObjectType> API per project.
## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO
#tag_query_path = [
# "address:Address",
# "name:LoadBalancerName",
# "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]"
# ]
## The following tags added by default: regionId (if discovery enabled), userId, instanceId.
## Allow metrics without discovery data, if discovery is enabled. If set to true, then metric without discovery
## data would be emitted, otherwise dropped. This cane be of help, in case debugging dimension filters, or partial coverage
## of discovery scope vs monitoring scope
#allow_dps_without_discovery = false
`
)
type (
// AliyunCMS is aliyun cms config info.
AliyunCMS struct {
@ -207,16 +103,6 @@ var aliyunRegionList = []string{
"me-east-1",
}
// SampleConfig implements telegraf.Inputs interface
func (s *AliyunCMS) SampleConfig() string {
return sampleConfig
}
// Description implements telegraf.Inputs interface
func (s *AliyunCMS) Description() string {
return description
}
// Init perform checks of plugin inputs and initialize internals
func (s *AliyunCMS) Init() error {
if s.Project == "" {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package aliyuncms

// SampleConfig returns a placeholder string that the generate_plugindata
// tool substitutes with the plugin's sample configuration at generate time.
func (s *AliyunCMS) SampleConfig() string {
	return `{{ .SampleConfig }}`
}

View File

@ -5,7 +5,7 @@ This plugin uses a query on the [`rocm-smi`](https://github.com/RadeonOpenComput
## Configuration
```toml
# Pulls statistics from AMD GPUs attached to the host
# Query statistics from AMD Graphics cards using rocm-smi binary
[[inputs.amd_rocm_smi]]
## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath
# bin_path = "/opt/rocm/bin/rocm-smi"

View File

@ -22,24 +22,6 @@ type ROCmSMI struct {
Timeout config.Duration
}
// Description returns the description of the ROCmSMI plugin
func (rsmi *ROCmSMI) Description() string {
return "Query statistics from AMD Graphics cards using rocm-smi binary"
}
var ROCmSMIConfig = `
## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath
# bin_path = "/opt/rocm/bin/rocm-smi"
## Optional: timeout for GPU polling
# timeout = "5s"
`
// SampleConfig returns the sample configuration for the ROCmSMI plugin
func (rsmi *ROCmSMI) SampleConfig() string {
return ROCmSMIConfig
}
// Gather implements the telegraf interface
func (rsmi *ROCmSMI) Gather(acc telegraf.Accumulator) error {
if _, err := os.Stat(rsmi.BinPath); os.IsNotExist(err) {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package amd_rocm_smi

// SampleConfig returns a placeholder string that the generate_plugindata
// tool substitutes with the plugin's sample configuration at generate time.
func (rsmi *ROCmSMI) SampleConfig() string {
	return `{{ .SampleConfig }}`
}

View File

@ -11,14 +11,13 @@ For an introduction to AMQP see:
- [amqp - concepts](https://www.rabbitmq.com/tutorials/amqp-concepts.html)
- [rabbitmq: getting started](https://www.rabbitmq.com/getstarted.html)
## Configuration
The following defaults are known to work with RabbitMQ:
```toml
# AMQP consumer plugin
[[inputs.amqp_consumer]]
## Broker to consume from.
## deprecated in 1.7; use the brokers option
# url = "amqp://localhost:5672/influxdb"
## Brokers to consume from. If multiple brokers are specified a random broker
## will be selected anytime a connection is established. This can be
## helpful for load balancing when not using a dedicated load balancer.

View File

@ -88,87 +88,6 @@ const (
DefaultPrefetchCount = 50
)
func (a *AMQPConsumer) SampleConfig() string {
return `
## Brokers to consume from. If multiple brokers are specified a random broker
## will be selected anytime a connection is established. This can be
## helpful for load balancing when not using a dedicated load balancer.
brokers = ["amqp://localhost:5672/influxdb"]
## Authentication credentials for the PLAIN auth_method.
# username = ""
# password = ""
## Name of the exchange to declare. If unset, no exchange will be declared.
exchange = "telegraf"
## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
# exchange_type = "topic"
## If true, exchange will be passively declared.
# exchange_passive = false
## Exchange durability can be either "transient" or "durable".
# exchange_durability = "durable"
## Additional exchange arguments.
# exchange_arguments = { }
# exchange_arguments = {"hash_property" = "timestamp"}
## AMQP queue name.
queue = "telegraf"
## AMQP queue durability can be "transient" or "durable".
queue_durability = "durable"
## If true, queue will be passively declared.
# queue_passive = false
## A binding between the exchange and queue using this binding key is
## created. If unset, no binding is created.
binding_key = "#"
## Maximum number of messages server should give to the worker.
# prefetch_count = 50
## Maximum messages to read from the broker that have not been written by an
## output. For best throughput set based on the number of metrics within
## each message and the size of the output's metric_batch_size.
##
## For example, if each message from the queue contains 10 metrics and the
## output metric_batch_size is 1000, setting this to 100 will ensure that a
## full batch is collected and the write is triggered immediately without
## waiting until the next flush_interval.
# max_undelivered_messages = 1000
## Auth method. PLAIN and EXTERNAL are supported
## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
## described here: https://www.rabbitmq.com/plugins.html
# auth_method = "PLAIN"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Content encoding for message payloads, can be set to "gzip" to or
## "identity" to apply no encoding.
# content_encoding = "identity"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
`
}
func (a *AMQPConsumer) Description() string {
return "AMQP consumer plugin"
}
func (a *AMQPConsumer) SetParser(parser parsers.Parser) {
a.parser = parser
}

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package amqp_consumer

// SampleConfig returns a placeholder string that the generate_plugindata
// tool substitutes with the plugin's sample configuration at generate time.
func (a *AMQPConsumer) SampleConfig() string {
	return `{{ .SampleConfig }}`
}

View File

@ -27,35 +27,6 @@ type Apache struct {
client *http.Client
}
var sampleConfig = `
## An array of URLs to gather from, must be directed at the machine
## readable version of the mod_status page including the auto query string.
## Default is "http://localhost/server-status?auto".
urls = ["http://localhost/server-status?auto"]
## Credentials for basic HTTP authentication.
# username = "myuser"
# password = "mypassword"
## Maximum time to receive response.
# response_timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
`
func (n *Apache) SampleConfig() string {
return sampleConfig
}
func (n *Apache) Description() string {
return "Read Apache status information (mod_status)"
}
func (n *Apache) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package apache

// SampleConfig returns a placeholder string that the generate_plugindata
// tool substitutes with the plugin's sample configuration at generate time.
func (n *Apache) SampleConfig() string {
	return `{{ .SampleConfig }}`
}

View File

@ -9,6 +9,7 @@ apcupsd should be installed and its daemon should be running.
## Configuration
```toml
# Monitor APC UPSes connected to apcupsd
[[inputs.apcupsd]]
# A list of running apcupsd server to connect to.
# If not provided will default to tcp://127.0.0.1:3551

View File

@ -23,23 +23,6 @@ type ApcUpsd struct {
Timeout config.Duration
}
func (*ApcUpsd) Description() string {
return "Monitor APC UPSes connected to apcupsd"
}
var sampleConfig = `
# A list of running apcupsd server to connect to.
# If not provided will default to tcp://127.0.0.1:3551
servers = ["tcp://127.0.0.1:3551"]
## Timeout for dialing server.
timeout = "5s"
`
func (*ApcUpsd) SampleConfig() string {
return sampleConfig
}
func (h *ApcUpsd) Gather(acc telegraf.Accumulator) error {
ctx := context.Background()

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package apcupsd

// SampleConfig returns a placeholder string that the generate_plugindata
// tool substitutes with the plugin's sample configuration at generate time.
func (*ApcUpsd) SampleConfig() string {
	return `{{ .SampleConfig }}`
}

View File

@ -15,7 +15,6 @@ import (
// TestApcupsdDocs ensures the plugin's sample-config accessor can be called
// without panicking. The Description method is removed elsewhere in this
// commit (the migration to the new sample-config format), so the stale
// apc.Description() call is dropped to keep the test compiling.
func TestApcupsdDocs(_ *testing.T) {
	apc := &ApcUpsd{}
	apc.SampleConfig()
}

View File

@ -7,6 +7,7 @@ For monitoring recommendations reference [Monitoring your Aurora cluster](https:
## Configuration
```toml
# Gather metrics from Apache Aurora schedulers
[[inputs.aurora]]
## Schedulers are the base addresses of your Aurora Schedulers
schedulers = ["http://127.0.0.1:8081"]

View File

@ -54,39 +54,6 @@ type Aurora struct {
urls []*url.URL
}
var sampleConfig = `
## Schedulers are the base addresses of your Aurora Schedulers
schedulers = ["http://127.0.0.1:8081"]
## Set of role types to collect metrics from.
##
## The scheduler roles are checked each interval by contacting the
## scheduler nodes; zookeeper is not contacted.
# roles = ["leader", "follower"]
## Timeout is the max time for total network operations.
# timeout = "5s"
## Username and password are sent using HTTP Basic Auth.
# username = "username"
# password = "pa$$word"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
`
func (a *Aurora) SampleConfig() string {
return sampleConfig
}
func (a *Aurora) Description() string {
return "Gather metrics from Apache Aurora schedulers"
}
func (a *Aurora) Gather(acc telegraf.Accumulator) error {
if a.client == nil {
err := a.initialize()

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package aurora

// SampleConfig returns a placeholder string that the generate_plugindata
// tool substitutes with the plugin's sample configuration at generate time.
func (a *Aurora) SampleConfig() string {
	return `{{ .SampleConfig }}`
}

View File

@ -5,7 +5,7 @@ This plugin gathers sizes of Azure Storage Queues.
## Configuration
```toml
# Description
# Gather Azure Storage Queue metrics
[[inputs.azure_storage_queue]]
## Required Azure Storage Account name
account_name = "mystorageaccount"

View File

@ -21,25 +21,6 @@ type AzureStorageQueue struct {
serviceURL *azqueue.ServiceURL
}
var sampleConfig = `
## Required Azure Storage Account name
account_name = "mystorageaccount"
## Required Azure Storage Account access key
account_key = "storageaccountaccesskey"
## Set to false to disable peeking age of oldest message (executes faster)
# peek_oldest_message_age = true
`
func (a *AzureStorageQueue) Description() string {
return "Gather Azure Storage Queue metrics"
}
func (a *AzureStorageQueue) SampleConfig() string {
return sampleConfig
}
func (a *AzureStorageQueue) Init() error {
if a.StorageAccountName == "" {
return errors.New("account_name must be configured")

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package azure_storage_queue

// SampleConfig returns a placeholder string that the generate_plugindata
// tool substitutes with the plugin's sample configuration at generate time.
func (a *AzureStorageQueue) SampleConfig() string {
	return `{{ .SampleConfig }}`
}

View File

@ -51,11 +51,12 @@ cache_readaheads
Count of times readahead occurred.
```
## Example
## Configuration
Using this configuration:
```toml
# Read metrics of bcache from stats_total and dirty_data
[[inputs.bcache]]
## Bcache sets path
## If not specified, then default is:

View File

@ -22,25 +22,6 @@ type Bcache struct {
BcacheDevs []string
}
var sampleConfig = `
## Bcache sets path
## If not specified, then default is:
bcachePath = "/sys/fs/bcache"
## By default, Telegraf gather stats for all bcache devices
## Setting devices will restrict the stats to the specified
## bcache devices.
bcacheDevs = ["bcache0"]
`
func (b *Bcache) SampleConfig() string {
return sampleConfig
}
func (b *Bcache) Description() string {
return "Read metrics of bcache from stats_total and dirty_data"
}
func getTags(bdev string) map[string]string {
backingDevFile, _ := os.Readlink(bdev)
backingDevPath := strings.Split(backingDevFile, "/")

View File

@ -0,0 +1,11 @@
//go:build !windows
// +build !windows

//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package bcache

// SampleConfig returns a placeholder string that the generate_plugindata
// tool substitutes with the plugin's sample configuration at generate time.
func (b *Bcache) SampleConfig() string {
	return `{{ .SampleConfig }}`
}

View File

@ -5,6 +5,7 @@ The `beanstalkd` plugin collects server stats as well as tube stats (reported by
## Configuration
```toml
# Collects Beanstalkd server and tubes stats
[[inputs.beanstalkd]]
## Server to collect data from
server = "localhost:11300"

View File

@ -11,28 +11,11 @@ import (
"gopkg.in/yaml.v2"
)
const sampleConfig = `
## Server to collect data from
server = "localhost:11300"
## List of tubes to gather stats about.
## If no tubes specified then data gathered for each tube on server reported by list-tubes command
tubes = ["notifications"]
`
type Beanstalkd struct {
Server string `toml:"server"`
Tubes []string `toml:"tubes"`
}
func (b *Beanstalkd) Description() string {
return "Collects Beanstalkd server and tubes stats"
}
func (b *Beanstalkd) SampleConfig() string {
return sampleConfig
}
func (b *Beanstalkd) Gather(acc telegraf.Accumulator) error {
connection, err := textproto.Dial("tcp", b.Server)
if err != nil {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package beanstalkd

// SampleConfig returns a placeholder string that the generate_plugindata
// tool substitutes with the plugin's sample configuration at generate time.
func (b *Beanstalkd) SampleConfig() string {
	return `{{ .SampleConfig }}`
}

View File

@ -6,6 +6,8 @@ known to work with Filebeat and Kafkabeat.
## Configuration
```toml
# Read metrics exposed by Beat
[[inputs.beat]]
## An URL from which to read Beat-formatted JSON
## Default is "http://127.0.0.1:5066".
url = "http://127.0.0.1:5066"

View File

@ -15,42 +15,6 @@ import (
jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
)
const sampleConfig = `
## An URL from which to read Beat-formatted JSON
## Default is "http://127.0.0.1:5066".
url = "http://127.0.0.1:5066"
## Enable collection of the listed stats
## An empty list means collect all. Available options are currently
## "beat", "libbeat", "system" and "filebeat".
# include = ["beat", "libbeat", "filebeat"]
## HTTP method
# method = "GET"
## Optional HTTP headers
# headers = {"X-Special-Header" = "Special-Value"}
## Override HTTP "Host" header
# host_header = "logstash.example.com"
## Timeout for HTTP requests
# timeout = "5s"
## Optional HTTP Basic Auth credentials
# username = "username"
# password = "pa$$word"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
`
const description = "Read metrics exposed by Beat"
const suffixInfo = "/"
const suffixStats = "/stats"
@ -113,14 +77,6 @@ func (beat *Beat) Init() error {
return nil
}
func (beat *Beat) Description() string {
return description
}
func (beat *Beat) SampleConfig() string {
return sampleConfig
}
// createHTTPClient create a clients to access API
func (beat *Beat) createHTTPClient() (*http.Client, error) {
tlsConfig, err := beat.ClientConfig.TLSConfig()

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package beat

// SampleConfig returns a placeholder string that the generate_plugindata
// tool substitutes with the plugin's sample configuration at generate time.
func (beat *Beat) SampleConfig() string {
	return `{{ .SampleConfig }}`
}

View File

@ -16,6 +16,19 @@ not enable support for JSON statistics in their BIND packages.
## Configuration
```toml
# Read BIND nameserver XML statistics
[[inputs.bind]]
## An array of BIND XML statistics URI to gather stats.
## Default is "http://localhost:8053/xml/v3".
# urls = ["http://localhost:8053/xml/v3"]
# gather_memory_contexts = false
# gather_views = false
## Timeout for http requests made by bind nameserver
# timeout = "4s"
```
- **urls** []string: List of BIND statistics channel URLs to collect from. Do not include a
trailing slash in the URL. Default is "http://localhost:8053/xml/v3".
- **gather_memory_contexts** bool: Report per-context memory statistics.

View File

@ -21,25 +21,6 @@ type Bind struct {
client http.Client
}
var sampleConfig = `
## An array of BIND XML statistics URI to gather stats.
## Default is "http://localhost:8053/xml/v3".
# urls = ["http://localhost:8053/xml/v3"]
# gather_memory_contexts = false
# gather_views = false
## Timeout for http requests made by bind nameserver
# timeout = "4s"
`
func (b *Bind) Description() string {
return "Read BIND nameserver XML statistics"
}
func (b *Bind) SampleConfig() string {
return sampleConfig
}
func (b *Bind) Init() error {
b.client = http.Client{
Timeout: time.Duration(b.Timeout),

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package bind

// SampleConfig returns a placeholder string that the generate_plugindata
// tool substitutes with the plugin's sample configuration at generate time.
func (b *Bind) SampleConfig() string {
	return `{{ .SampleConfig }}`
}

View File

@ -7,6 +7,7 @@ The plugin collects these metrics from `/proc/net/bonding/*` files.
## Configuration
```toml
# Collect bond interface status, slaves statuses and failures count
[[inputs.bond]]
## Sets 'proc' directory path
## If not specified, then default is /proc

View File

@ -34,34 +34,6 @@ type sysFiles struct {
ADPortsFile string
}
const sampleConfig = `
## Sets 'proc' directory path
## If not specified, then default is /proc
# host_proc = "/proc"
## Sets 'sys' directory path
## If not specified, then default is /sys
# host_sys = "/sys"
## By default, telegraf gather stats for all bond interfaces
## Setting interfaces will restrict the stats to the specified
## bond interfaces.
# bond_interfaces = ["bond0"]
## Tries to collect additional bond details from /sys/class/net/{bond}
## currently only useful for LACP (mode 4) bonds
# collect_sys_details = false
`
func (bond *Bond) Description() string {
return "Collect bond interface status, slaves statuses and failures count"
}
func (bond *Bond) SampleConfig() string {
return sampleConfig
}
func (bond *Bond) Gather(acc telegraf.Accumulator) error {
// load proc path, get default value if config value and env variable are empty
bond.loadPaths()

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package bond

// SampleConfig returns a placeholder string that the generate_plugindata
// tool substitutes with the plugin's sample configuration at generate time.
func (bond *Bond) SampleConfig() string {
	return `{{ .SampleConfig }}`
}

View File

@ -8,6 +8,7 @@ Supported Burrow version: `1.x`
## Configuration
```toml
# Collect Kafka topics and consumers status from Burrow HTTP API.
[[inputs.burrow]]
## Burrow API endpoints in format "schema://host:port".
## Default is "http://localhost:8000".

View File

@ -24,48 +24,6 @@ const (
defaultServer = "http://localhost:8000"
)
const configSample = `
## Burrow API endpoints in format "schema://host:port".
## Default is "http://localhost:8000".
servers = ["http://localhost:8000"]
## Override Burrow API prefix.
## Useful when Burrow is behind reverse-proxy.
# api_prefix = "/v3/kafka"
## Maximum time to receive response.
# response_timeout = "5s"
## Limit per-server concurrent connections.
## Useful in case of large number of topics or consumer groups.
# concurrent_connections = 20
## Filter clusters, default is no filtering.
## Values can be specified as glob patterns.
# clusters_include = []
# clusters_exclude = []
## Filter consumer groups, default is no filtering.
## Values can be specified as glob patterns.
# groups_include = []
# groups_exclude = []
## Filter topics, default is no filtering.
## Values can be specified as glob patterns.
# topics_include = []
# topics_exclude = []
## Credentials for basic HTTP authentication.
# username = ""
# password = ""
## Optional SSL config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
# insecure_skip_verify = false
`
type (
burrow struct {
tls.ClientConfig
@ -133,14 +91,6 @@ func init() {
})
}
func (b *burrow) SampleConfig() string {
return configSample
}
func (b *burrow) Description() string {
return "Collect Kafka topics and consumers status from Burrow HTTP API."
}
func (b *burrow) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package burrow

// SampleConfig returns a placeholder string that the generate_plugindata
// tool substitutes with the plugin's sample configuration at generate time.
func (b *burrow) SampleConfig() string {
	return `{{ .SampleConfig }}`
}

View File

@ -18,7 +18,7 @@ See: [https://jolokia.org/](https://jolokia.org/) and [Cassandra Documentation](
Cassandra plugin produces one or more measurements for each metric configured, adding Server's name as `host` tag. More than one measurement is generated when querying table metrics with a wildcard for the keyspace or table name.
Given a configuration like:
## Configuration
```toml
# Read Cassandra metrics through Jolokia

View File

@ -165,33 +165,6 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
}
}
func (c *Cassandra) SampleConfig() string {
return `
## DEPRECATED: The cassandra plugin has been deprecated. Please use the
## jolokia2 plugin instead.
##
## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
context = "/jolokia/read"
## List of cassandra servers exposing jolokia read service
servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
## List of metrics collected on above servers
## Each metric consists of a jmx path.
## This will collect all heap memory usage metrics from the jvm and
## ReadLatency metrics for all keyspaces and tables.
## "type=Table" in the query works with Cassandra3.0. Older versions might
## need to use "type=ColumnFamily"
metrics = [
"/java.lang:type=Memory/HeapMemoryUsage",
"/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
]
`
}
func (c *Cassandra) Description() string {
return "Read Cassandra metrics through Jolokia"
}
func (c *Cassandra) getAttr(requestURL *url.URL) (map[string]interface{}, error) {
// Create + send request
req, err := http.NewRequest("GET", requestURL.String(), nil)

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package cassandra
func (c *Cassandra) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -42,57 +42,6 @@ type Ceph struct {
Log telegraf.Logger `toml:"-"`
}
func (c *Ceph) Description() string {
return "Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster."
}
var sampleConfig = `
## This is the recommended interval to poll. Too frequent and you will lose
## data points due to timeouts during rebalancing and recovery
interval = '1m'
## All configuration values are optional, defaults are shown below
## location of ceph binary
ceph_binary = "/usr/bin/ceph"
## directory in which to look for socket files
socket_dir = "/var/run/ceph"
## prefix of MON and OSD socket files, used to determine socket type
mon_prefix = "ceph-mon"
osd_prefix = "ceph-osd"
mds_prefix = "ceph-mds"
rgw_prefix = "ceph-client"
## suffix used to identify socket files
socket_suffix = "asok"
## Ceph user to authenticate as, ceph will search for the corresponding keyring
## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the
## client section of ceph.conf for example:
##
## [client.telegraf]
## keyring = /etc/ceph/client.telegraf.keyring
##
## Consult the ceph documentation for more detail on keyring generation.
ceph_user = "client.admin"
## Ceph configuration to use to locate the cluster
ceph_config = "/etc/ceph/ceph.conf"
## Whether to gather statistics via the admin socket
gather_admin_socket_stats = true
## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config
## to be specified
gather_cluster_stats = false
`
func (c *Ceph) SampleConfig() string {
return sampleConfig
}
func (c *Ceph) Gather(acc telegraf.Accumulator) error {
if c.GatherAdminSocketStats {
if err := c.gatherAdminSocketStats(acc); err != nil {
@ -312,7 +261,11 @@ func (c *Ceph) flatten(data interface{}) []*metric {
switch val := data.(type) {
case float64:
metrics = []*metric{{make([]string, 0, 1), val}}
metrics = []*metric{
{
make([]string, 0, 1), val,
},
}
case map[string]interface{}:
metrics = make([]*metric, 0, len(val))
for k, v := range val {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package ceph
func (c *Ceph) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -42,7 +42,7 @@ All measurements have the `path` tag.
```toml
# Read specific statistics per cgroup
# [[inputs.cgroup]]
[[inputs.cgroup]]
## Directories in which to look for files, globs are supported.
## Consider restricting paths to the set of cgroups you really
## want to monitor if you have a large number of cgroups, to avoid

View File

@ -10,29 +10,6 @@ type CGroup struct {
Files []string `toml:"files"`
}
var sampleConfig = `
## Directories in which to look for files, globs are supported.
## Consider restricting paths to the set of cgroups you really
## want to monitor if you have a large number of cgroups, to avoid
## any cardinality issues.
# paths = [
# "/sys/fs/cgroup/memory",
# "/sys/fs/cgroup/memory/child1",
# "/sys/fs/cgroup/memory/child2/*",
# ]
## cgroup stat fields, as file names, globs are supported.
## these file names are appended to each path from above.
# files = ["memory.*usage*", "memory.limit_in_bytes"]
`
func (g *CGroup) SampleConfig() string {
return sampleConfig
}
func (g *CGroup) Description() string {
return "Read specific statistics per cgroup"
}
func init() {
inputs.Add("cgroup", func() telegraf.Input { return &CGroup{} })
}

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package cgroup
func (g *CGroup) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -22,17 +22,6 @@ type Chrony struct {
path string
}
func (*Chrony) Description() string {
return "Get standard chrony metrics, requires chronyc executable."
}
func (*Chrony) SampleConfig() string {
return `
## If true, chronyc tries to perform a DNS lookup for the time server.
# dns_lookup = false
`
}
func (c *Chrony) Init() error {
var err error
c.path, err = exec.LookPath("chronyc")

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package chrony
func (*Chrony) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -12,6 +12,7 @@ The TCP dialout transport is supported on IOS XR (32-bit and 64-bit) 6.1.x and l
## Configuration
```toml
# Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms
[[inputs.cisco_telemetry_mdt]]
## Telemetry transport can be "tcp" or "grpc". TLS is only supported when
## using the grpc transport.
@ -37,6 +38,7 @@ The TCP dialout transport is supported on IOS XR (32-bit and 64-bit) 6.1.x and l
## Define aliases to map telemetry encoding paths to simple measurement names
[inputs.cisco_telemetry_mdt.aliases]
ifstats = "ietf-interfaces:interfaces-state/interface/statistics"
## Define Property Xformation, please refer README and https://pubhub.devnetcloud.com/media/dme-docs-9-3-3/docs/appendix/ for Model details.
[inputs.cisco_telemetry_mdt.dmes]
# Global Property Xformation.
# prop1 = "uint64 to int"

View File

@ -682,44 +682,6 @@ func (c *CiscoTelemetryMDT) Stop() {
c.wg.Wait()
}
const sampleConfig = `
## Telemetry transport can be "tcp" or "grpc". TLS is only supported when
## using the grpc transport.
transport = "grpc"
## Address and port to host telemetry listener
service_address = ":57000"
## Enable TLS; grpc transport only.
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Enable TLS client authentication and define allowed CA certificates; grpc
## transport only.
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
## Define (for certain nested telemetry measurements with embedded tags) which fields are tags
# embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"]
## Define aliases to map telemetry encoding paths to simple measurement names
[inputs.cisco_telemetry_mdt.aliases]
ifstats = "ietf-interfaces:interfaces-state/interface/statistics"
##Define Property Xformation, please refer README and https://pubhub.devnetcloud.com/media/dme-docs-9-3-3/docs/appendix/ for Model details.
[inputs.cisco_telemetry_mdt.dmes]
ModTs = "ignore"
CreateTs = "ignore"
`
// SampleConfig of plugin
func (c *CiscoTelemetryMDT) SampleConfig() string {
return sampleConfig
}
// Description of plugin
func (c *CiscoTelemetryMDT) Description() string {
return "Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms"
}
// Gather plugin measurements (unused)
func (c *CiscoTelemetryMDT) Gather(_ telegraf.Accumulator) error {
return nil

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package cisco_telemetry_mdt
func (c *CiscoTelemetryMDT) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -21,72 +21,6 @@ import (
var defaultTimeout = 5 * time.Second
var sampleConfig = `
## Username for authorization on ClickHouse server
## example: username = "default"
username = "default"
## Password for authorization on ClickHouse server
## example: password = "super_secret"
## HTTP(s) timeout while getting metrics values
## The timeout includes connection time, any redirects, and reading the response body.
## example: timeout = 1s
# timeout = 5s
## List of servers for metrics scraping
## metrics scrape via HTTP(s) clickhouse interface
## https://clickhouse.tech/docs/en/interfaces/http/
## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"]
servers = ["http://127.0.0.1:8123"]
## If "auto_discovery"" is "true" plugin tries to connect to all servers available in the cluster
## with using same "user:password" described in "user" and "password" parameters
## and get this server hostname list from "system.clusters" table
## see
## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters
## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers
## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/
## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables
## example: auto_discovery = false
# auto_discovery = true
## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
## when this filter present then "WHERE cluster IN (...)" filter will apply
## please use only full cluster names here, regexp and glob filters is not allowed
## for "/etc/clickhouse-server/config.d/remote.xml"
## <yandex>
## <remote_servers>
## <my-own-cluster>
## <shard>
## <replica><host>clickhouse-ru-1.local</host><port>9000</port></replica>
## <replica><host>clickhouse-ru-2.local</host><port>9000</port></replica>
## </shard>
## <shard>
## <replica><host>clickhouse-eu-1.local</host><port>9000</port></replica>
## <replica><host>clickhouse-eu-2.local</host><port>9000</port></replica>
## </shard>
## </my-onw-cluster>
## </remote_servers>
##
## </yandex>
##
## example: cluster_include = ["my-own-cluster"]
# cluster_include = []
## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
## when this filter present then "WHERE cluster NOT IN (...)" filter will apply
## example: cluster_exclude = ["my-internal-not-discovered-cluster"]
# cluster_exclude = []
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
`
type connect struct {
Cluster string `json:"cluster"`
ShardNum int `json:"shard_num"`
@ -119,16 +53,6 @@ type ClickHouse struct {
tls.ClientConfig
}
// SampleConfig returns the sample config
func (*ClickHouse) SampleConfig() string {
return sampleConfig
}
// Description return plugin description
func (*ClickHouse) Description() string {
return "Read metrics from one or many ClickHouse servers"
}
// Start ClickHouse input service
func (ch *ClickHouse) Start(telegraf.Accumulator) error {
timeout := defaultTimeout

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package clickhouse
func (*ClickHouse) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -6,6 +6,7 @@ and creates metrics using one of the supported [input data formats][].
## Configuration
```toml
# Read metrics from Google PubSub
[[inputs.cloud_pubsub]]
## Required. Name of Google Cloud Platform (GCP) Project that owns
## the given PubSub subscription.

View File

@ -59,14 +59,6 @@ type PubSub struct {
sem semaphore
}
func (ps *PubSub) Description() string {
return "Read metrics from Google PubSub"
}
func (ps *PubSub) SampleConfig() string {
return fmt.Sprintf(sampleConfig, defaultMaxUndeliveredMessages)
}
// Gather does nothing for this service input.
func (ps *PubSub) Gather(_ telegraf.Accumulator) error {
return nil
@ -293,77 +285,3 @@ func init() {
return ps
})
}
const sampleConfig = `
## Required. Name of Google Cloud Platform (GCP) Project that owns
## the given PubSub subscription.
project = "my-project"
## Required. Name of PubSub subscription to ingest metrics from.
subscription = "my-subscription"
## Required. Data format to consume.
## Each data format has its own unique set of configuration options.
## Read more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
## Optional. Filepath for GCP credentials JSON file to authorize calls to
## PubSub APIs. If not set explicitly, Telegraf will attempt to use
## Application Default Credentials, which is preferred.
# credentials_file = "path/to/my/creds.json"
## Optional. Number of seconds to wait before attempting to restart the
## PubSub subscription receiver after an unexpected error.
## If the streaming pull for a PubSub Subscription fails (receiver),
## the agent attempts to restart receiving messages after this many seconds.
# retry_delay_seconds = 5
## Optional. Maximum byte length of a message to consume.
## Larger messages are dropped with an error. If less than 0 or unspecified,
## treated as no limit.
# max_message_len = 1000000
## Optional. Maximum messages to read from PubSub that have not been written
## to an output. Defaults to %d.
## For best throughput set based on the number of metrics within
## each message and the size of the output's metric_batch_size.
##
## For example, if each message contains 10 metrics and the output
## metric_batch_size is 1000, setting this to 100 will ensure that a
## full batch is collected and the write is triggered immediately without
## waiting until the next flush_interval.
# max_undelivered_messages = 1000
## The following are optional Subscription ReceiveSettings in PubSub.
## Read more about these values:
## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings
## Optional. Maximum number of seconds for which a PubSub subscription
## should auto-extend the PubSub ACK deadline for each message. If less than
## 0, auto-extension is disabled.
# max_extension = 0
## Optional. Maximum number of unprocessed messages in PubSub
## (unacknowledged but not yet expired in PubSub).
## A value of 0 is treated as the default PubSub value.
## Negative values will be treated as unlimited.
# max_outstanding_messages = 0
## Optional. Maximum size in bytes of unprocessed messages in PubSub
## (unacknowledged but not yet expired in PubSub).
## A value of 0 is treated as the default PubSub value.
## Negative values will be treated as unlimited.
# max_outstanding_bytes = 0
## Optional. Max number of goroutines a PubSub Subscription receiver can spawn
## to pull messages from PubSub concurrently. This limit applies to each
## subscription separately and is treated as the PubSub default if less than
## 1. Note this setting does not limit the number of messages that can be
## processed concurrently (use "max_outstanding_messages" instead).
# max_receiver_go_routines = 0
## Optional. If true, Telegraf will attempt to base64 decode the
## PubSub message data before parsing
# base64_data = false
`

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package cloud_pubsub
func (ps *PubSub) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -14,6 +14,7 @@ Enable mutually authenticated TLS and authorize client connections by signing ce
This is a sample configuration for the plugin.
```toml
# Google Cloud Pub/Sub Push HTTP listener
[[inputs.cloud_pubsub_push]]
## Address and port to host HTTP listener on
service_address = ":8080"

View File

@ -61,64 +61,6 @@ type Payload struct {
Subscription string `json:"subscription"`
}
const sampleConfig = `
## Address and port to host HTTP listener on
service_address = ":8080"
## Application secret to verify messages originate from Cloud Pub/Sub
# token = ""
## Path to listen to.
# path = "/"
## Maximum duration before timing out read of the request
# read_timeout = "10s"
## Maximum duration before timing out write of the response. This should be set to a value
## large enough that you can send at least 'metric_batch_size' number of messages within the
## duration.
# write_timeout = "10s"
## Maximum allowed http request body size in bytes.
## 0 means to use the default of 524,288,00 bytes (500 mebibytes)
# max_body_size = "500MB"
## Whether to add the pubsub metadata, such as message attributes and subscription as a tag.
# add_meta = false
## Optional. Maximum messages to read from PubSub that have not been written
## to an output. Defaults to 1000.
## For best throughput set based on the number of metrics within
## each message and the size of the output's metric_batch_size.
##
## For example, if each message contains 10 metrics and the output
## metric_batch_size is 1000, setting this to 100 will ensure that a
## full batch is collected and the write is triggered immediately without
## waiting until the next flush_interval.
# max_undelivered_messages = 1000
## Set one or more allowed client CA certificate file names to
## enable mutually authenticated TLS connections
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
## Add service certificate and key
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
`
func (p *PubSubPush) SampleConfig() string {
return sampleConfig
}
func (p *PubSubPush) Description() string {
return "Google Cloud Pub/Sub Push HTTP listener"
}
func (p *PubSubPush) Gather(_ telegraf.Accumulator) error {
return nil
}

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package cloud_pubsub_push
func (p *PubSubPush) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -31,14 +31,14 @@ API endpoint. In the following order the plugin will attempt to authenticate.
## 5) environment variables
## 6) shared credentials file
## 7) EC2 Instance Profile
#access_key = ""
#secret_key = ""
#token = ""
#role_arn = ""
#web_identity_token_file = ""
#role_session_name = ""
#profile = ""
#shared_credential_file = ""
# access_key = ""
# secret_key = ""
# token = ""
# role_arn = ""
# web_identity_token_file = ""
# role_session_name = ""
# profile = ""
# shared_credential_file = ""
## Endpoint to make request against, the correct endpoint is automatically
## determined and this option should only be set if you wish to override the

View File

@ -90,110 +90,6 @@ type cloudwatchClient interface {
GetMetricData(context.Context, *cwClient.GetMetricDataInput, ...func(*cwClient.Options)) (*cwClient.GetMetricDataOutput, error)
}
// SampleConfig returns the default configuration of the Cloudwatch input plugin.
func (c *CloudWatch) SampleConfig() string {
return `
## Amazon Region
region = "us-east-1"
## Amazon Credentials
## Credentials are loaded in the following order
## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
## 2) Assumed credentials via STS if role_arn is specified
## 3) explicit credentials from 'access_key' and 'secret_key'
## 4) shared profile from 'profile'
## 5) environment variables
## 6) shared credentials file
## 7) EC2 Instance Profile
# access_key = ""
# secret_key = ""
# token = ""
# role_arn = ""
# web_identity_token_file = ""
# role_session_name = ""
# profile = ""
# shared_credential_file = ""
## Endpoint to make request against, the correct endpoint is automatically
## determined and this option should only be set if you wish to override the
## default.
## ex: endpoint_url = "http://localhost:8000"
# endpoint_url = ""
## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set)
# http_proxy_url = "http://localhost:8888"
# The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
# metrics are made available to the 1 minute period. Some are collected at
# 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
# Note that if a period is configured that is smaller than the minimum for a
# particular metric, that metric will not be returned by the Cloudwatch API
# and will not be collected by Telegraf.
#
## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
period = "5m"
## Collection Delay (required - must account for metrics availability via CloudWatch API)
delay = "5m"
## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
## gaps or overlap in pulled data
interval = "5m"
## Recommended if "delay" and "period" are both within 3 hours of request time. Invalid values will be ignored.
## Recently Active feature will only poll for CloudWatch ListMetrics values that occurred within the last 3 Hours.
## If enabled, it will reduce total API usage of the CloudWatch ListMetrics API and require less memory to retain.
## Do not enable if "period" or "delay" is longer than 3 hours, as it will not return data more than 3 hours old.
## See https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html
#recently_active = "PT3H"
## Configure the TTL for the internal cache of metrics.
# cache_ttl = "1h"
## Metric Statistic Namespaces (required)
namespaces = ["AWS/ELB"]
# A single metric statistic namespace that will be appended to namespaces on startup
# namespace = "AWS/ELB"
## Maximum requests per second. Note that the global default AWS rate limit is
## 50 reqs/sec, so if you define multiple namespaces, these should add up to a
## maximum of 50.
## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
# ratelimit = 25
## Timeout for http requests made by the cloudwatch client.
# timeout = "5s"
## Namespace-wide statistic filters. These allow fewer queries to be made to
## cloudwatch.
# statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ]
# statistic_exclude = []
## Metrics to Pull
## Defaults to all Metrics in Namespace if nothing is provided
## Refreshes Namespace available metrics every 1h
#[[inputs.cloudwatch.metrics]]
# names = ["Latency", "RequestCount"]
#
# ## Statistic filters for Metric. These allow for retrieving specific
# ## statistics for an individual metric.
# # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ]
# # statistic_exclude = []
#
# ## Dimension filters for Metric. All dimensions defined for the metric names
# ## must be specified in order to retrieve the metric statistics.
# ## 'value' has wildcard / 'glob' matching support such as 'p-*'.
# [[inputs.cloudwatch.metrics.dimensions]]
# name = "LoadBalancerName"
# value = "p-example"
`
}
// Description returns a one-sentence description on the Cloudwatch input plugin.
func (c *CloudWatch) Description() string {
return "Pull Metric Statistics from Amazon CloudWatch"
}
func (c *CloudWatch) Init() error {
if len(c.Namespace) != 0 {
c.Namespaces = append(c.Namespaces, c.Namespace)
@ -395,10 +291,12 @@ func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) {
return nil, err
}
fMetrics = []filteredMetric{{
metrics: metrics,
statFilter: c.statFilter,
}}
fMetrics = []filteredMetric{
{
metrics: metrics,
statFilter: c.statFilter,
},
}
}
c.metricCache = &metricCache{

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package cloudwatch
func (c *CloudWatch) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -47,29 +47,6 @@ func (c *Conntrack) setDefaults() {
}
}
func (c *Conntrack) Description() string {
return "Collects conntrack stats from the configured directories and files."
}
var sampleConfig = `
## The following defaults would work with multiple versions of conntrack.
## Note the nf_ and ip_ filename prefixes are mutually exclusive across
## kernel versions, as are the directory locations.
## Superset of filenames to look for within the conntrack dirs.
## Missing files will be ignored.
files = ["ip_conntrack_count","ip_conntrack_max",
"nf_conntrack_count","nf_conntrack_max"]
## Directories to search within for the conntrack files above.
## Missing directories will be ignored.
dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
`
func (c *Conntrack) SampleConfig() string {
return sampleConfig
}
func (c *Conntrack) Gather(acc telegraf.Accumulator) error {
c.setDefaults()

View File

@ -0,0 +1,11 @@
//go:build linux
// +build linux
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package conntrack
func (c *Conntrack) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -27,43 +27,6 @@ type Consul struct {
client *api.Client
}
var sampleConfig = `
## Consul server address
# address = "localhost:8500"
## URI scheme for the Consul server, one of "http", "https"
# scheme = "http"
## Metric version controls the mapping from Consul metrics into
## Telegraf metrics.
##
## example: metric_version = 1; deprecated in 1.15
## metric_version = 2; recommended version
# metric_version = 1
## ACL token used in every request
# token = ""
## HTTP Basic Authentication username and password.
# username = ""
# password = ""
## Data center to query the health checks from
# datacenter = ""
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = true
## Consul checks' tag splitting
# When tags are formatted like "key:value" with ":" as a delimiter then
# they will be splitted and reported as proper key:value in Telegraf
# tag_delimiter = ":"
`
func (c *Consul) Init() error {
if c.MetricVersion != 2 {
c.Log.Warnf("Use of deprecated configuration: 'metric_version = 1'; please update to 'metric_version = 2'")
@ -72,14 +35,6 @@ func (c *Consul) Init() error {
return nil
}
func (c *Consul) Description() string {
return "Gather health check statuses from services registered in Consul"
}
func (c *Consul) SampleConfig() string {
return sampleConfig
}
func (c *Consul) createAPIClient() (*api.Client, error) {
config := api.DefaultConfig()

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package consul
func (c *Consul) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -7,6 +7,7 @@ This plugin grabs metrics from a Consul agent. Telegraf may be present in every
## Configuration
```toml
# Read metrics from the Consul Agent API
[[inputs.consul_agent]]
## URL for the Consul agent
# url = "http://127.0.0.1:8500"
@ -18,7 +19,7 @@ This plugin grabs metrics from a Consul agent. Telegraf may be present in every
## OR
# token = "a1234567-40c7-9048-7bae-378687048181"
## Set response_timeout (default 5 seconds)
## Set timeout (default 5 seconds)
# timeout = "5s"
## Optional TLS Config

View File

@ -31,25 +31,6 @@ type ConsulAgent struct {
const timeLayout = "2006-01-02 15:04:05 -0700 MST"
const sampleConfig = `
## URL for the Consul agent
# url = "http://127.0.0.1:8500"
## Use auth token for authorization.
## Only one of the options can be set. Leave empty to not use any token.
# token_file = "/path/to/auth/token"
## OR
# token = "a1234567-40c7-9048-7bae-378687048181"
## Set timeout (default 5 seconds)
# timeout = "5s"
## Optional TLS Config
# tls_ca = /path/to/cafile
# tls_cert = /path/to/certfile
# tls_key = /path/to/keyfile
`
func init() {
inputs.Add("consul_agent", func() telegraf.Input {
return &ConsulAgent{
@ -58,16 +39,6 @@ func init() {
})
}
// SampleConfig returns a sample config
func (n *ConsulAgent) SampleConfig() string {
return sampleConfig
}
// Description returns a description of the plugin
func (n *ConsulAgent) Description() string {
return "Read metrics from the Consul Agent API"
}
func (n *ConsulAgent) Init() error {
if n.URL == "" {
n.URL = "http://127.0.0.1:8500"

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package consul_agent
func (n *ConsulAgent) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -26,40 +26,8 @@ type Couchbase struct {
tls.ClientConfig
}
var sampleConfig = `
## specify servers via a url matching:
## [protocol://][:password]@address[:port]
## e.g.
## http://couchbase-0.example.com/
## http://admin:secret@couchbase-0.example.com:8091/
##
## If no servers are specified, then localhost is used as the host.
## If no protocol is specified, HTTP is used.
## If no port is specified, 8091 is used.
servers = ["http://localhost:8091"]
## Filter bucket fields to include only here.
# bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"]
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification (defaults to false)
## If set to false, tls_cert and tls_key are required
# insecure_skip_verify = false
`
var regexpURI = regexp.MustCompile(`(\S+://)?(\S+\:\S+@)`)
func (cb *Couchbase) SampleConfig() string {
return sampleConfig
}
func (cb *Couchbase) Description() string {
return "Read per-node and per-bucket metrics from Couchbase"
}
// Reads stats from all configured clusters. Accumulates stats.
// Returns one of the errors encountered while gathering stats (if any).
func (cb *Couchbase) Gather(acc telegraf.Accumulator) error {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package couchbase
func (cb *Couchbase) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -5,6 +5,7 @@ The CouchDB plugin gathers metrics of CouchDB using [_stats] endpoint.
## Configuration
```toml
# Read CouchDB Stats from one or more servers
[[inputs.couchdb]]
## Works with CouchDB stats endpoints out of the box
## Multiple Hosts from which to read CouchDB stats:

View File

@ -88,22 +88,6 @@ type (
}
)
func (*CouchDB) Description() string {
return "Read CouchDB Stats from one or more servers"
}
func (*CouchDB) SampleConfig() string {
return `
## Works with CouchDB stats endpoints out of the box
## Multiple Hosts from which to read CouchDB stats:
hosts = ["http://localhost:8086/_stats"]
## Use HTTP Basic Authentication.
# basic_username = "telegraf"
# basic_password = "p@ssw0rd"
`
}
func (c *CouchDB) Gather(accumulator telegraf.Accumulator) error {
var wg sync.WaitGroup
for _, u := range c.Hosts {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package couchdb
func (*CouchDB) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -29,25 +29,6 @@ func NewCPUStats(ps system.PS) *CPUStats {
}
}
func (c *CPUStats) Description() string {
return "Read metrics about cpu usage"
}
var sampleConfig = `
## Whether to report per-cpu stats or not
percpu = true
## Whether to report total system cpu stats or not
totalcpu = true
## If true, collect raw CPU time metrics
collect_cpu_time = false
## If true, compute and report the sum of all non-idle CPU states
report_active = false
`
func (c *CPUStats) SampleConfig() string {
return sampleConfig
}
func (c *CPUStats) Gather(acc telegraf.Accumulator) error {
times, err := c.ps.CPUTimes(c.PerCPU, c.TotalCPU)
if err != nil {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package cpu
func (c *CPUStats) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -31,25 +31,6 @@ type CSGO struct {
Servers [][]string `toml:"servers"`
}
func (*CSGO) Description() string {
return "Fetch metrics from a CSGO SRCDS"
}
var sampleConfig = `
## Specify servers using the following format:
## servers = [
## ["ip1:port1", "rcon_password1"],
## ["ip2:port2", "rcon_password2"],
## ]
#
## If no servers are specified, no data will be collected
servers = []
`
func (*CSGO) SampleConfig() string {
return sampleConfig
}
func (s *CSGO) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package csgo
func (*CSGO) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -21,6 +21,7 @@ your database.
## Configuration
```toml
# Input plugin for DC/OS metrics
[[inputs.dcos]]
## The DC/OS cluster URL.
cluster_url = "https://dcos-master-1"

View File

@ -69,57 +69,6 @@ type DCOS struct {
appFilter filter.Filter
}
func (d *DCOS) Description() string {
return "Input plugin for DC/OS metrics"
}
var sampleConfig = `
## The DC/OS cluster URL.
cluster_url = "https://dcos-ee-master-1"
## The ID of the service account.
service_account_id = "telegraf"
## The private key file for the service account.
service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem"
## Path containing login token. If set, will read on every gather.
# token_file = "/home/dcos/.dcos/token"
## In all filter options if both include and exclude are empty all items
## will be collected. Arrays may contain glob patterns.
##
## Node IDs to collect metrics from. If a node is excluded, no metrics will
## be collected for its containers or apps.
# node_include = []
# node_exclude = []
## Container IDs to collect container metrics from.
# container_include = []
# container_exclude = []
## Container IDs to collect app metrics from.
# app_include = []
# app_exclude = []
## Maximum concurrent connections to the cluster.
# max_connections = 10
## Maximum time to receive a response from cluster.
# response_timeout = "20s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## If false, skip chain & host verification
# insecure_skip_verify = true
## Recommended filtering to reduce series cardinality.
# [inputs.dcos.tagdrop]
# path = ["/var/lib/mesos/slave/slaves/*"]
`
func (d *DCOS) SampleConfig() string {
return sampleConfig
}
func (d *DCOS) Gather(acc telegraf.Accumulator) error {
err := d.init()
if err != nil {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package dcos
func (d *DCOS) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -8,6 +8,7 @@ This plugin is intended to read files that are moved or copied to the monitored
## Configuration
```toml
# Ingests files in a directory and then moves them to a target directory.
[[inputs.directory_monitor]]
## The directory to monitor and read files from.
directory = ""

View File

@ -24,51 +24,6 @@ import (
"github.com/influxdata/telegraf/selfstat"
)
const sampleConfig = `
## The directory to monitor and read files from.
directory = ""
#
## The directory to move finished files to.
finished_directory = ""
#
## The directory to move files to upon file error.
## If not provided, erroring files will stay in the monitored directory.
# error_directory = ""
#
## The amount of time a file is allowed to sit in the directory before it is picked up.
## This time can generally be low but if you choose to have a very large file written to the directory and it's potentially slow,
## set this higher so that the plugin will wait until the file is fully copied to the directory.
# directory_duration_threshold = "50ms"
#
## A list of the only file names to monitor, if necessary. Supports regex. If left blank, all files are ingested.
# files_to_monitor = ["^.*\.csv"]
#
## A list of files to ignore, if necessary. Supports regex.
# files_to_ignore = [".DS_Store"]
#
## Maximum lines of the file to process that have not yet be written by the
## output. For best throughput set to the size of the output's metric_buffer_limit.
## Warning: setting this number higher than the output's metric_buffer_limit can cause dropped metrics.
# max_buffered_metrics = 10000
#
## The maximum amount of file paths to queue up for processing at once, before waiting until files are processed to find more files.
## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary.
# file_queue_size = 100000
#
## Name a tag containing the name of the file the data was parsed from. Leave empty
## to disable. Cautious when file name variation is high, this can increase the cardinality
## significantly. Read more about cardinality here:
## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality
# file_tag = ""
#
## The dataformat to be read from the files.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
## NOTE: We currently only support parsing newline-delimited JSON. See the format here: https://github.com/ndjson/ndjson-spec
data_format = "influx"
`
var (
defaultFilesToMonitor = []string{}
defaultFilesToIgnore = []string{}
@ -104,14 +59,6 @@ type DirectoryMonitor struct {
filesToProcess chan string
}
func (monitor *DirectoryMonitor) SampleConfig() string {
return sampleConfig
}
func (monitor *DirectoryMonitor) Description() string {
return "Ingests files in a directory and then moves them to a target directory."
}
func (monitor *DirectoryMonitor) Gather(_ telegraf.Accumulator) error {
// Get all files sitting in the directory.
files, err := os.ReadDir(monitor.Directory)

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package directory_monitor
func (monitor *DirectoryMonitor) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -9,6 +9,7 @@ Note that `used_percent` is calculated by doing `used / (used + free)`, _not_
## Configuration
```toml
# Read metrics about disk usage by mount point
[[inputs.disk]]
## By default stats will be gathered for all mount points.
## Set mount_points will restrict the stats to only the specified mount points.

View File

@ -21,23 +21,6 @@ type DiskStats struct {
Log telegraf.Logger `toml:"-"`
}
func (ds *DiskStats) Description() string {
return "Read metrics about disk usage by mount point"
}
var diskSampleConfig = `
## By default stats will be gathered for all mount points.
## Set mount_points will restrict the stats to only the specified mount points.
# mount_points = ["/"]
## Ignore mount points by filesystem type.
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
`
func (ds *DiskStats) SampleConfig() string {
return diskSampleConfig
}
func (ds *DiskStats) Init() error {
// Legacy support:
if len(ds.LegacyMountPoints) != 0 {

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package disk
func (ds *DiskStats) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -10,7 +10,7 @@ The diskio input plugin gathers metrics about disk traffic and timing.
## By default, telegraf will gather stats for all devices including
## disk partitions.
## Setting devices will restrict the stats to the specified devices.
# devices = ["sda", "sdb"]
# devices = ["sda", "sdb", "vd*"]
## Uncomment the following line if you need disk serial numbers.
# skip_serial_number = false
#

View File

@ -30,42 +30,6 @@ type DiskIO struct {
initialized bool
}
func (d *DiskIO) Description() string {
return "Read metrics about disk IO by device"
}
var diskIOsampleConfig = `
## By default, telegraf will gather stats for all devices including
## disk partitions.
## Setting devices will restrict the stats to the specified devices.
# devices = ["sda", "sdb", "vd*"]
## Uncomment the following line if you need disk serial numbers.
# skip_serial_number = false
#
## On systems which support it, device metadata can be added in the form of
## tags.
## Currently only Linux is supported via udev properties. You can view
## available properties for a device by running:
## 'udevadm info -q property -n /dev/sda'
## Note: Most, but not all, udev properties can be accessed this way. Properties
## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH.
# device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
#
## Using the same metadata source as device_tags, you can also customize the
## name of the device via templates.
## The 'name_templates' parameter is a list of templates to try and apply to
## the device. The template may contain variables in the form of '$PROPERTY' or
## '${PROPERTY}'. The first template which does not contain any variables not
## present for the device is used as the device name tag.
## The typical use case is for LVM volumes, to get the VG/LV name instead of
## the near-meaningless DM-0 name.
# name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
`
func (d *DiskIO) SampleConfig() string {
return diskIOsampleConfig
}
// hasMeta reports whether s contains any special glob characters.
func hasMeta(s string) bool {
return strings.ContainsAny(s, "*?[")

View File

@ -0,0 +1,8 @@
//go:generate go run ../../../tools/generate_plugindata/main.go
//go:generate go run ../../../tools/generate_plugindata/main.go --clean
// DON'T EDIT; This file is used as a template by tools/generate_plugindata
package diskio
func (d *DiskIO) SampleConfig() string {
return `{{ .SampleConfig }}`
}

View File

@ -5,6 +5,7 @@
## Configuration
```toml
# Read metrics from one or many disque servers
[[inputs.disque]]
## An array of URI to gather stats about. Specify an ip or hostname
## with optional port and password.

View File

@ -21,24 +21,8 @@ type Disque struct {
c net.Conn
}
var sampleConfig = `
## An array of URI to gather stats about. Specify an ip or hostname
## with optional port and password.
## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
## If no servers are specified, then localhost is used as the host.
servers = ["localhost"]
`
var defaultTimeout = 5 * time.Second
func (d *Disque) SampleConfig() string {
return sampleConfig
}
func (d *Disque) Description() string {
return "Read metrics from one or many disque servers"
}
var Tracking = map[string]string{
"uptime_in_seconds": "uptime",
"connected_clients": "clients",

Some files were not shown because too many files have changed in this diff Show More