style: Add deprecation notices to plugin options (#10616)

This commit is contained in:
Thomas Casteleyn 2022-03-01 23:05:53 +01:00 committed by GitHub
parent 39e065fd78
commit a4f6b27bd5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
33 changed files with 61 additions and 185 deletions

View File

@@ -181,13 +181,12 @@ type AgentConfig struct {
// FlushBufferWhenFull tells Telegraf to flush the metric buffer whenever // FlushBufferWhenFull tells Telegraf to flush the metric buffer whenever
// it fills up, regardless of FlushInterval. Setting this option to true // it fills up, regardless of FlushInterval. Setting this option to true
// does _not_ deactivate FlushInterval. // does _not_ deactivate FlushInterval.
FlushBufferWhenFull bool // deprecated in 0.13; has no effect FlushBufferWhenFull bool `toml:"flush_buffer_when_full" deprecated:"0.13.0;2.0.0;option is ignored"`
// TODO(cam): Remove UTC and parameter, they are no longer // TODO(cam): Remove UTC and parameter, they are no longer
// valid for the agent config. Leaving them here for now for backwards- // valid for the agent config. Leaving them here for now for backwards-
// compatibility // compatibility
// Deprecated: 1.0.0 after, has no effect UTC bool `toml:"utc" deprecated:"1.0.0;option is ignored"`
UTC bool `toml:"utc"`
// Debug is the option for running in debug mode // Debug is the option for running in debug mode
Debug bool `toml:"debug"` Debug bool `toml:"debug"`

View File

@@ -1,8 +1,6 @@
package kafka package kafka
import ( import (
"log"
"github.com/Shopify/sarama" "github.com/Shopify/sarama"
"github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/common/tls"
) )
@@ -53,8 +51,7 @@ type Config struct {
ClientID string `toml:"client_id"` ClientID string `toml:"client_id"`
CompressionCodec int `toml:"compression_codec"` CompressionCodec int `toml:"compression_codec"`
// EnableTLS deprecated EnableTLS *bool `toml:"enable_tls" deprecated:"1.17.0;option is ignored"`
EnableTLS *bool `toml:"enable_tls"`
// Disable full metadata fetching // Disable full metadata fetching
MetadataFull *bool `toml:"metadata_full"` MetadataFull *bool `toml:"metadata_full"`
@@ -62,9 +59,6 @@ type Config struct {
// SetConfig on the sarama.Config object from the Config struct. // SetConfig on the sarama.Config object from the Config struct.
func (k *Config) SetConfig(config *sarama.Config) error { func (k *Config) SetConfig(config *sarama.Config) error {
if k.EnableTLS != nil {
log.Printf("W! [kafka] enable_tls is deprecated, and the setting does nothing, you can safely remove it from the config")
}
if k.Version != "" { if k.Version != "" {
version, err := sarama.ParseKafkaVersion(k.Version) version, err := sarama.ParseKafkaVersion(k.Version)
if err != nil { if err != nil {

View File

@@ -19,10 +19,9 @@ type ClientConfig struct {
InsecureSkipVerify bool `toml:"insecure_skip_verify"` InsecureSkipVerify bool `toml:"insecure_skip_verify"`
ServerName string `toml:"tls_server_name"` ServerName string `toml:"tls_server_name"`
// Deprecated in 1.7; use TLS variables above SSLCA string `toml:"ssl_ca" deprecated:"1.7.0;use 'tls_ca' instead"`
SSLCA string `toml:"ssl_ca"` SSLCert string `toml:"ssl_cert" deprecated:"1.7.0;use 'tls_cert' instead"`
SSLCert string `toml:"ssl_cert"` SSLKey string `toml:"ssl_key" deprecated:"1.7.0;use 'tls_key' instead"`
SSLKey string `toml:"ssl_key"`
} }
// ServerConfig represents the standard server TLS config. // ServerConfig represents the standard server TLS config.

View File

@@ -18,8 +18,8 @@ import (
) )
type ActiveMQ struct { type ActiveMQ struct {
Server string `toml:"server"` Server string `toml:"server" deprecated:"1.11.0;use 'url' instead"`
Port int `toml:"port"` Port int `toml:"port" deprecated:"1.11.0;use 'url' instead"`
URL string `toml:"url"` URL string `toml:"url"`
Username string `toml:"username"` Username string `toml:"username"`
Password string `toml:"password"` Password string `toml:"password"`
@@ -86,11 +86,6 @@ var sampleConfig = `
## ActiveMQ WebConsole URL ## ActiveMQ WebConsole URL
url = "http://127.0.0.1:8161" url = "http://127.0.0.1:8161"
## Required ActiveMQ Endpoint
## deprecated in 1.11; use the url option
# server = "127.0.0.1"
# port = 8161
## Credentials for basic HTTP authentication ## Credentials for basic HTTP authentication
# username = "admin" # username = "admin"
# password = "admin" # password = "admin"

View File

@@ -23,7 +23,7 @@ type Aerospike struct {
Password string `toml:"password"` Password string `toml:"password"`
EnableTLS bool `toml:"enable_tls"` EnableTLS bool `toml:"enable_tls"`
EnableSSL bool `toml:"enable_ssl"` // deprecated in 1.7; use enable_tls EnableSSL bool `toml:"enable_ssl" deprecated:"1.7.0;use 'enable_tls' instead"`
TLSName string `toml:"tls_name"` TLSName string `toml:"tls_name"`
tlsint.ClientConfig tlsint.ClientConfig

View File

@@ -27,7 +27,7 @@ type semaphore chan empty
// AMQPConsumer is the top level struct for this plugin // AMQPConsumer is the top level struct for this plugin
type AMQPConsumer struct { type AMQPConsumer struct {
URL string `toml:"url" deprecated:"1.7.0;use brokers"` URL string `toml:"url" deprecated:"1.7.0;use 'brokers' instead"`
Brokers []string `toml:"brokers"` Brokers []string `toml:"brokers"`
Username string `toml:"username"` Username string `toml:"username"`
Password string `toml:"password"` Password string `toml:"password"`
@@ -90,10 +90,6 @@ const (
func (a *AMQPConsumer) SampleConfig() string { func (a *AMQPConsumer) SampleConfig() string {
return ` return `
## Broker to consume from.
## deprecated in 1.7; use the brokers option
# url = "amqp://localhost:5672/influxdb"
## Brokers to consume from. If multiple brokers are specified a random broker ## Brokers to consume from. If multiple brokers are specified a random broker
## will be selected anytime a connection is established. This can be ## will be selected anytime a connection is established. This can be
## helpful for load balancing when not using a dedicated load balancer. ## helpful for load balancing when not using a dedicated load balancer.

View File

@@ -16,7 +16,7 @@ type Consul struct {
Token string Token string
Username string Username string
Password string Password string
Datacentre string // deprecated in 1.10; use Datacenter Datacentre string `toml:"datacentre" deprecated:"1.10.0;use 'datacenter' instead"`
Datacenter string Datacenter string
tls.ClientConfig tls.ClientConfig
TagDelimiter string TagDelimiter string

View File

@@ -8,15 +8,19 @@ var Deprecations = map[string]telegraf.DeprecationInfo{
Since: "1.7.0", Since: "1.7.0",
Notice: "use 'inputs.jolokia2' with the 'cassandra.conf' example configuration instead", Notice: "use 'inputs.jolokia2' with the 'cassandra.conf' example configuration instead",
}, },
"io": { "http_listener": {
Since: "0.10.0", Since: "1.9.0",
RemovalIn: "2.0.0", Notice: "has been renamed to 'influxdb_listener', use 'inputs.influxdb_listener' or 'inputs.http_listener_v2' instead",
Notice: "use 'inputs.diskio' instead",
}, },
"httpjson": { "httpjson": {
Since: "1.6.0", Since: "1.6.0",
Notice: "use 'inputs.http' instead", Notice: "use 'inputs.http' instead",
}, },
"io": {
Since: "0.10.0",
RemovalIn: "2.0.0",
Notice: "use 'inputs.diskio' instead",
},
"jolokia": { "jolokia": {
Since: "1.5.0", Since: "1.5.0",
Notice: "use 'inputs.jolokia2' instead", Notice: "use 'inputs.jolokia2' instead",

View File

@@ -28,14 +28,14 @@ import (
// Docker object // Docker object
type Docker struct { type Docker struct {
Endpoint string Endpoint string
ContainerNames []string // deprecated in 1.4; use container_name_include ContainerNames []string `toml:"container_names" deprecated:"1.4.0;use 'container_name_include' instead"`
GatherServices bool `toml:"gather_services"` GatherServices bool `toml:"gather_services"`
Timeout config.Duration Timeout config.Duration
PerDevice bool `toml:"perdevice"` PerDevice bool `toml:"perdevice" deprecated:"1.18.0;use 'perdevice_include' instead"`
PerDeviceInclude []string `toml:"perdevice_include"` PerDeviceInclude []string `toml:"perdevice_include"`
Total bool `toml:"total"` Total bool `toml:"total" deprecated:"1.18.0;use 'total_include' instead"`
TotalInclude []string `toml:"total_include"` TotalInclude []string `toml:"total_include"`
TagEnvironment []string `toml:"tag_env"` TagEnvironment []string `toml:"tag_env"`
LabelInclude []string `toml:"docker_label_include"` LabelInclude []string `toml:"docker_label_include"`
@@ -74,14 +74,6 @@ const (
PB = 1000 * TB PB = 1000 * TB
defaultEndpoint = "unix:///var/run/docker.sock" defaultEndpoint = "unix:///var/run/docker.sock"
perDeviceIncludeDeprecationWarning = "'perdevice' setting is set to 'true' so 'blkio' and 'network' metrics will " +
"be collected. Please set it to 'false' and use 'perdevice_include' instead to control this behaviour as " +
"'perdevice' will be deprecated"
totalIncludeDeprecationWarning = "'total' setting is set to 'false' so 'blkio' and 'network' metrics will not be " +
"collected. Please set it to 'true' and use 'total_include' instead to control this behaviour as 'total' " +
"will be deprecated"
) )
var ( var (
@@ -100,9 +92,6 @@ var sampleConfig = `
## Set to true to collect Swarm metrics(desired_replicas, running_replicas) ## Set to true to collect Swarm metrics(desired_replicas, running_replicas)
gather_services = false gather_services = false
## Only collect metrics for these containers, collect all if empty
container_names = []
## Set the source tag for the metrics to the container ID hostname, eg first 12 chars ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
source_tag = false source_tag = false
@@ -121,24 +110,11 @@ var sampleConfig = `
## Timeout for docker list, info, and stats commands ## Timeout for docker list, info, and stats commands
timeout = "5s" timeout = "5s"
## Whether to report for each container per-device blkio (8:0, 8:1...),
## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not.
## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'.
## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting
## is honored.
perdevice = true
## Specifies for which classes a per-device metric should be issued ## Specifies for which classes a per-device metric should be issued
## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...) ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...)
## Please note that this setting has no effect if 'perdevice' is set to 'true' ## Please note that this setting has no effect if 'perdevice' is set to 'true'
# perdevice_include = ["cpu"] # perdevice_include = ["cpu"]
## Whether to report for each container total blkio and network stats or not.
## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'.
## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting
## is honored.
total = false
## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values. ## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values.
## Possible values are 'cpu', 'blkio' and 'network' ## Possible values are 'cpu', 'blkio' and 'network'
## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin. ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin.
@@ -182,7 +158,6 @@ func (d *Docker) Init() error {
// Temporary logic needed for backwards compatibility until 'perdevice' setting is removed. // Temporary logic needed for backwards compatibility until 'perdevice' setting is removed.
if d.PerDevice { if d.PerDevice {
d.Log.Warn(perDeviceIncludeDeprecationWarning)
if !choice.Contains("network", d.PerDeviceInclude) { if !choice.Contains("network", d.PerDeviceInclude) {
d.PerDeviceInclude = append(d.PerDeviceInclude, "network") d.PerDeviceInclude = append(d.PerDeviceInclude, "network")
} }
@@ -193,7 +168,6 @@ func (d *Docker) Init() error {
// Temporary logic needed for backwards compatibility until 'total' setting is removed. // Temporary logic needed for backwards compatibility until 'total' setting is removed.
if !d.Total { if !d.Total {
d.Log.Warn(totalIncludeDeprecationWarning)
if choice.Contains("cpu", d.TotalInclude) { if choice.Contains("cpu", d.TotalInclude) {
d.TotalInclude = []string{"cpu"} d.TotalInclude = []string{"cpu"}
} else { } else {

View File

@@ -14,10 +14,6 @@ import (
) )
const sampleConfig = ` const sampleConfig = `
## Directory to gather stats about.
## deprecated in 1.9; use the directories option
# directory = "/var/cache/apt/archives"
## Directories to gather stats about. ## Directories to gather stats about.
## This accept standard unit glob matching rules, but with the addition of ## This accept standard unit glob matching rules, but with the addition of
## ** as a "super asterisk". ie: ## ** as a "super asterisk". ie:
@@ -51,7 +47,7 @@ const sampleConfig = `
` `
type FileCount struct { type FileCount struct {
Directory string // deprecated in 1.9 Directory string `toml:"directory" deprecated:"1.9.0;use 'directories' instead"`
Directories []string Directories []string
Name string Name string
Recursive bool Recursive bool

View File

@@ -39,7 +39,7 @@ type TimeFunc func() time.Time
// HTTPListenerV2 is an input plugin that collects external metrics sent via HTTP // HTTPListenerV2 is an input plugin that collects external metrics sent via HTTP
type HTTPListenerV2 struct { type HTTPListenerV2 struct {
ServiceAddress string `toml:"service_address"` ServiceAddress string `toml:"service_address"`
Path string `toml:"path"` Path string `toml:"path" deprecated:"1.20.0;use 'paths' instead"`
Paths []string `toml:"paths"` Paths []string `toml:"paths"`
PathTag bool `toml:"path_tag"` PathTag bool `toml:"path_tag"`
Methods []string `toml:"methods"` Methods []string `toml:"methods"`
@@ -71,10 +71,6 @@ const sampleConfig = `
## Address and port to host HTTP listener on ## Address and port to host HTTP listener on
service_address = ":8080" service_address = ":8080"
## Path to listen to.
## This option is deprecated and only available for backward-compatibility. Please use paths instead.
# path = ""
## Paths to listen to. ## Paths to listen to.
# paths = ["/telegraf"] # paths = ["/telegraf"]

View File

@@ -65,10 +65,6 @@ func (h *HTTPResponse) Description() string {
} }
var sampleConfig = ` var sampleConfig = `
## Deprecated in 1.12, use 'urls'
## Server address (default http://localhost)
# address = "http://localhost"
## List of urls to query. ## List of urls to query.
# urls = ["http://localhost"] # urls = ["http://localhost"]
@@ -425,7 +421,6 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
if h.Address == "" { if h.Address == "" {
h.URLs = []string{"http://localhost"} h.URLs = []string{"http://localhost"}
} else { } else {
h.Log.Warn("'address' deprecated in telegraf 1.12, please use 'urls'")
h.URLs = []string{h.Address} h.URLs = []string{h.Address}
} }
} }

View File

@@ -23,7 +23,7 @@ var (
// HTTPJSON struct // HTTPJSON struct
type HTTPJSON struct { type HTTPJSON struct {
Name string Name string `toml:"name" deprecated:"1.3.0;use 'name_override', 'name_suffix', 'name_prefix' instead"`
Servers []string Servers []string
Method string Method string
TagKeys []string TagKeys []string
@@ -70,12 +70,6 @@ var sampleConfig = `
## NOTE This plugin only reads numerical measurements, strings and booleans ## NOTE This plugin only reads numerical measurements, strings and booleans
## will be ignored. ## will be ignored.
## Name for the service being polled. Will be appended to the name of the
## measurement e.g. httpjson_webserver_stats
##
## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.
name = "webserver_stats"
## URL of each server in the service's cluster ## URL of each server in the service's cluster
servers = [ servers = [
"http://localhost:9999/stats/", "http://localhost:9999/stats/",

View File

@@ -33,7 +33,7 @@ type InfluxDBListener struct {
ReadTimeout config.Duration `toml:"read_timeout"` ReadTimeout config.Duration `toml:"read_timeout"`
WriteTimeout config.Duration `toml:"write_timeout"` WriteTimeout config.Duration `toml:"write_timeout"`
MaxBodySize config.Size `toml:"max_body_size"` MaxBodySize config.Size `toml:"max_body_size"`
MaxLineSize config.Size `toml:"max_line_size"` // deprecated in 1.14; ignored MaxLineSize config.Size `toml:"max_line_size" deprecated:"1.14.0;parser now handles lines of unlimited length and option is ignored"`
BasicUsername string `toml:"basic_username"` BasicUsername string `toml:"basic_username"`
BasicPassword string `toml:"basic_password"` BasicPassword string `toml:"basic_password"`
DatabaseTag string `toml:"database_tag"` DatabaseTag string `toml:"database_tag"`
@@ -142,10 +142,6 @@ func (h *InfluxDBListener) Init() error {
h.MaxBodySize = config.Size(defaultMaxBodySize) h.MaxBodySize = config.Size(defaultMaxBodySize)
} }
if h.MaxLineSize != 0 {
h.Log.Warnf("Use of deprecated configuration: 'max_line_size'; parser now handles lines of unlimited length and option is ignored")
}
if h.ReadTimeout < config.Duration(time.Second) { if h.ReadTimeout < config.Duration(time.Second) {
h.ReadTimeout = config.Duration(time.Second * 10) h.ReadTimeout = config.Duration(time.Second * 10)
} }

View File

@@ -34,8 +34,8 @@ type MongoDB struct {
} }
type Ssl struct { type Ssl struct {
Enabled bool Enabled bool `toml:"ssl_enabled" deprecated:"1.3.0;use 'tls_*' options instead"`
CaCerts []string `toml:"cacerts"` CaCerts []string `toml:"cacerts" deprecated:"1.3.0;use 'tls_ca' instead"`
} }
var sampleConfig = ` var sampleConfig = `

View File

@@ -64,11 +64,13 @@ type MQTTConsumer struct {
ConnectionTimeout config.Duration `toml:"connection_timeout"` ConnectionTimeout config.Duration `toml:"connection_timeout"`
MaxUndeliveredMessages int `toml:"max_undelivered_messages"` MaxUndeliveredMessages int `toml:"max_undelivered_messages"`
parser parsers.Parser parser parsers.Parser
// Legacy metric buffer support; deprecated in v0.10.3
MetricBuffer int MetricBuffer int `toml:"metric_buffer" deprecated:"0.10.3;2.0.0;option is ignored"`
PersistentSession bool PersistentSession bool
ClientID string `toml:"client_id"` ClientID string `toml:"client_id"`
tls.ClientConfig tls.ClientConfig
Log telegraf.Logger Log telegraf.Logger
clientFactory ClientFactory clientFactory ClientFactory
client Client client Client

View File

@@ -49,9 +49,7 @@ type natsConsumer struct {
PendingBytesLimit int `toml:"pending_bytes_limit"` PendingBytesLimit int `toml:"pending_bytes_limit"`
MaxUndeliveredMessages int `toml:"max_undelivered_messages"` MaxUndeliveredMessages int `toml:"max_undelivered_messages"`
MetricBuffer int `toml:"metric_buffer" deprecated:"0.10.3;2.0.0;option is ignored"`
// Legacy metric buffer support; deprecated in v0.10.3
MetricBuffer int
conn *nats.Conn conn *nats.Conn
subs []*nats.Subscription subs []*nats.Subscription

View File

@@ -29,7 +29,7 @@ func (l *logger) Output(_ int, s string) error {
//NSQConsumer represents the configuration of the plugin //NSQConsumer represents the configuration of the plugin
type NSQConsumer struct { type NSQConsumer struct {
Server string `toml:"server"` Server string `toml:"server" deprecated:"1.5.0;use 'nsqd' instead"`
Nsqd []string `toml:"nsqd"` Nsqd []string `toml:"nsqd"`
Nsqlookupd []string `toml:"nsqlookupd"` Nsqlookupd []string `toml:"nsqlookupd"`
Topic string `toml:"topic"` Topic string `toml:"topic"`
@@ -50,9 +50,6 @@ type NSQConsumer struct {
} }
var sampleConfig = ` var sampleConfig = `
## Server option still works but is deprecated, we just prepend it to the nsqd array.
# server = "localhost:4150"
## An array representing the NSQD TCP HTTP Endpoints ## An array representing the NSQD TCP HTTP Endpoints
nsqd = ["localhost:4150"] nsqd = ["localhost:4150"]

View File

@@ -15,10 +15,10 @@ import (
type Openldap struct { type Openldap struct {
Host string Host string
Port int Port int
SSL string `toml:"ssl"` // Deprecated in 1.7; use TLS SSL string `toml:"ssl" deprecated:"1.7.0;use 'tls' instead"`
TLS string `toml:"tls"` TLS string `toml:"tls"`
InsecureSkipVerify bool InsecureSkipVerify bool
SSLCA string `toml:"ssl_ca"` // Deprecated in 1.7; use TLSCA SSLCA string `toml:"ssl_ca" deprecated:"1.7.0;use 'tls_ca' instead"`
TLSCA string `toml:"tls_ca"` TLSCA string `toml:"tls_ca"`
BindDn string BindDn string
BindPassword string BindPassword string

View File

@@ -42,6 +42,7 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management
## A list of queues to gather as the rabbitmq_queue measurement. If not ## A list of queues to gather as the rabbitmq_queue measurement. If not
## specified, metrics for all queues are gathered. ## specified, metrics for all queues are gathered.
## Deprecated in 1.6: Use queue_name_include instead.
# queues = ["telegraf"] # queues = ["telegraf"]
## A list of exchanges to gather as the rabbitmq_exchange measurement. If not ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not

View File

@@ -36,7 +36,7 @@ const DefaultClientTimeout = 4
// see the sample config for further details // see the sample config for further details
type RabbitMQ struct { type RabbitMQ struct {
URL string `toml:"url"` URL string `toml:"url"`
Name string `toml:"name"` Name string `toml:"name" deprecated:"1.3.0;use 'tags' instead"`
Username string `toml:"username"` Username string `toml:"username"`
Password string `toml:"password"` Password string `toml:"password"`
tls.ClientConfig tls.ClientConfig
@@ -45,7 +45,7 @@ type RabbitMQ struct {
ClientTimeout config.Duration `toml:"client_timeout"` ClientTimeout config.Duration `toml:"client_timeout"`
Nodes []string `toml:"nodes"` Nodes []string `toml:"nodes"`
Queues []string `toml:"queues"` Queues []string `toml:"queues" deprecated:"1.6.0;use 'queue_name_include' instead"`
Exchanges []string `toml:"exchanges"` Exchanges []string `toml:"exchanges"`
MetricInclude []string `toml:"metric_include"` MetricInclude []string `toml:"metric_include"`
@@ -272,8 +272,6 @@ var gatherFunctions = map[string]gatherFunc{
var sampleConfig = ` var sampleConfig = `
## Management Plugin url. (default: http://localhost:15672) ## Management Plugin url. (default: http://localhost:15672)
# url = "http://localhost:15672" # url = "http://localhost:15672"
## Tag added to rabbitmq_overview series; deprecated: use tags
# name = "rmq-server-1"
## Credentials ## Credentials
# username = "guest" # username = "guest"
# password = "guest" # password = "guest"
@@ -299,10 +297,6 @@ var sampleConfig = `
## specified, metrics for all nodes are gathered. ## specified, metrics for all nodes are gathered.
# nodes = ["rabbit@node1", "rabbit@node2"] # nodes = ["rabbit@node1", "rabbit@node2"]
## A list of queues to gather as the rabbitmq_queue measurement. If not
## specified, metrics for all queues are gathered.
# queues = ["telegraf"]
## A list of exchanges to gather as the rabbitmq_exchange measurement. If not ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not
## specified, metrics for all exchanges are gathered. ## specified, metrics for all exchanges are gathered.
# exchanges = ["telegraf"] # exchanges = ["telegraf"]

View File

@@ -326,7 +326,7 @@ var (
// Smart plugin reads metrics from storage devices supporting S.M.A.R.T. // Smart plugin reads metrics from storage devices supporting S.M.A.R.T.
type Smart struct { type Smart struct {
Path string `toml:"path"` //deprecated - to keep backward compatibility Path string `toml:"path" deprecated:"1.16.0;use 'path_smartctl' instead"`
PathSmartctl string `toml:"path_smartctl"` PathSmartctl string `toml:"path_smartctl"`
PathNVMe string `toml:"path_nvme"` PathNVMe string `toml:"path_nvme"`
Nocheck string `toml:"nocheck"` Nocheck string `toml:"nocheck"`

View File

@@ -91,7 +91,7 @@ type Snmp struct {
// Name & Fields are the elements of a Table. // Name & Fields are the elements of a Table.
// Telegraf chokes if we try to embed a Table. So instead we have to embed the // Telegraf chokes if we try to embed a Table. So instead we have to embed the
// fields of a Table, and construct a Table during runtime. // fields of a Table, and construct a Table during runtime.
Name string // deprecated in 1.14; use name_override Name string `toml:"name"`
Fields []Field `toml:"field"` Fields []Field `toml:"field"`
connectionCache []snmpConnection connectionCache []snmpConnection

View File

@@ -20,8 +20,8 @@ import (
type SQLServer struct { type SQLServer struct {
Servers []string `toml:"servers"` Servers []string `toml:"servers"`
AuthMethod string `toml:"auth_method"` AuthMethod string `toml:"auth_method"`
QueryVersion int `toml:"query_version"` QueryVersion int `toml:"query_version" deprecated:"1.16.0;use 'database_type' instead"`
AzureDB bool `toml:"azuredb"` AzureDB bool `toml:"azuredb" deprecated:"1.16.0;use 'database_type' instead"`
DatabaseType string `toml:"database_type"` DatabaseType string `toml:"database_type"`
IncludeQuery []string `toml:"include_query"` IncludeQuery []string `toml:"include_query"`
ExcludeQuery []string `toml:"exclude_query"` ExcludeQuery []string `toml:"exclude_query"`
@@ -88,7 +88,7 @@ servers = [
## valid methods: "connection_string", "AAD" ## valid methods: "connection_string", "AAD"
# auth_method = "connection_string" # auth_method = "connection_string"
## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 ## "database_type" enables a specific set of queries depending on the database type.
## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type.
## Possible values for database_type are - "SQLServer" or "AzureSQLDB" or "AzureSQLManagedInstance" or "AzureSQLPool" ## Possible values for database_type are - "SQLServer" or "AzureSQLDB" or "AzureSQLManagedInstance" or "AzureSQLPool"
@@ -115,19 +115,6 @@ exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplica
## Queries enabled by default for database_type = "AzureSQLPool" are - ## Queries enabled by default for database_type = "AzureSQLPool" are -
## AzureSQLPoolResourceStats, AzureSQLPoolResourceGovernance, AzureSQLPoolDatabaseIO, AzureSQLPoolWaitStats, ## AzureSQLPoolResourceStats, AzureSQLPoolResourceGovernance, AzureSQLPoolDatabaseIO, AzureSQLPoolWaitStats,
## AzureSQLPoolMemoryClerks, AzureSQLPoolPerformanceCounters, AzureSQLPoolSchedulers ## AzureSQLPoolMemoryClerks, AzureSQLPoolPerformanceCounters, AzureSQLPoolSchedulers
## Following are old config settings
## You may use them only if you are using the earlier flavor of queries, however it is recommended to use
## the new mechanism of identifying the database_type there by use it's corresponding queries
## Optional parameter, setting this to 2 will use a new version
## of the collection queries that break compatibility with the original
## dashboards.
## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB
# query_version = 2
## If you are using AzureDB, setting this to true will gather resource utilization metrics
# azuredb = false
` `
// SampleConfig return the sample configuration // SampleConfig return the sample configuration
@@ -149,7 +136,6 @@ func (s *SQLServer) initQueries() error {
queries := s.queries queries := s.queries
s.Log.Infof("Config: database_type: %s , query_version:%d , azuredb: %t", s.DatabaseType, s.QueryVersion, s.AzureDB) s.Log.Infof("Config: database_type: %s , query_version:%d , azuredb: %t", s.DatabaseType, s.QueryVersion, s.AzureDB)
// New config option database_type
// To prevent query definition conflicts // To prevent query definition conflicts
// Constant definitions for type "AzureSQLDB" start with sqlAzureDB // Constant definitions for type "AzureSQLDB" start with sqlAzureDB
// Constant definitions for type "AzureSQLManagedInstance" start with sqlAzureMI // Constant definitions for type "AzureSQLManagedInstance" start with sqlAzureMI
@@ -204,7 +190,6 @@ func (s *SQLServer) initQueries() error {
} }
// Decide if we want to run version 1 or version 2 queries // Decide if we want to run version 1 or version 2 queries
if s.QueryVersion == 2 { if s.QueryVersion == 2 {
s.Log.Warn("DEPRECATION NOTICE: query_version=2 is being deprecated in favor of database_type.")
queries["PerformanceCounters"] = Query{ScriptName: "PerformanceCounters", Script: sqlPerformanceCountersV2, ResultByRow: true} queries["PerformanceCounters"] = Query{ScriptName: "PerformanceCounters", Script: sqlPerformanceCountersV2, ResultByRow: true}
queries["WaitStatsCategorized"] = Query{ScriptName: "WaitStatsCategorized", Script: sqlWaitStatsCategorizedV2, ResultByRow: false} queries["WaitStatsCategorized"] = Query{ScriptName: "WaitStatsCategorized", Script: sqlWaitStatsCategorizedV2, ResultByRow: false}
queries["DatabaseIO"] = Query{ScriptName: "DatabaseIO", Script: sqlDatabaseIOV2, ResultByRow: false} queries["DatabaseIO"] = Query{ScriptName: "DatabaseIO", Script: sqlDatabaseIOV2, ResultByRow: false}
@@ -215,7 +200,6 @@ func (s *SQLServer) initQueries() error {
queries["VolumeSpace"] = Query{ScriptName: "VolumeSpace", Script: sqlServerVolumeSpaceV2, ResultByRow: false} queries["VolumeSpace"] = Query{ScriptName: "VolumeSpace", Script: sqlServerVolumeSpaceV2, ResultByRow: false}
queries["Cpu"] = Query{ScriptName: "Cpu", Script: sqlServerCPUV2, ResultByRow: false} queries["Cpu"] = Query{ScriptName: "Cpu", Script: sqlServerCPUV2, ResultByRow: false}
} else { } else {
s.Log.Warn("DEPRECATED: query_version=1 has been deprecated in favor of database_type.")
queries["PerformanceCounters"] = Query{ScriptName: "PerformanceCounters", Script: sqlPerformanceCounters, ResultByRow: true} queries["PerformanceCounters"] = Query{ScriptName: "PerformanceCounters", Script: sqlPerformanceCounters, ResultByRow: true}
queries["WaitStatsCategorized"] = Query{ScriptName: "WaitStatsCategorized", Script: sqlWaitStatsCategorized, ResultByRow: false} queries["WaitStatsCategorized"] = Query{ScriptName: "WaitStatsCategorized", Script: sqlWaitStatsCategorized, ResultByRow: false}
queries["CPUHistory"] = Query{ScriptName: "CPUHistory", Script: sqlCPUHistory, ResultByRow: false} queries["CPUHistory"] = Query{ScriptName: "CPUHistory", Script: sqlCPUHistory, ResultByRow: false}

View File

@@ -73,13 +73,13 @@ type Statsd struct {
DeleteCounters bool DeleteCounters bool
DeleteSets bool DeleteSets bool
DeleteTimings bool DeleteTimings bool
ConvertNames bool ConvertNames bool `toml:"convert_names" deprecated:"0.12.0;2.0.0;use 'metric_separator' instead"`
// MetricSeparator is the separator between parts of the metric name. // MetricSeparator is the separator between parts of the metric name.
MetricSeparator string MetricSeparator string
// This flag enables parsing of tags in the dogstatsd extension to the // This flag enables parsing of tags in the dogstatsd extension to the
// statsd protocol (http://docs.datadoghq.com/guides/dogstatsd/) // statsd protocol (http://docs.datadoghq.com/guides/dogstatsd/)
ParseDataDogTags bool // depreciated in 1.10; use datadog_extensions ParseDataDogTags bool `toml:"parse_data_dog_tags" deprecated:"1.10.0;use 'datadog_extensions' instead"`
// Parses extensions to statsd in the datadog statsd format // Parses extensions to statsd in the datadog statsd format
// currently supports metrics and datadog tags. // currently supports metrics and datadog tags.
@@ -95,7 +95,7 @@ type Statsd struct {
// we now always create 1 max size buffer and then copy only what we need // we now always create 1 max size buffer and then copy only what we need
// into the in channel // into the in channel
// see https://github.com/influxdata/telegraf/pull/992 // see https://github.com/influxdata/telegraf/pull/992
UDPPacketSize int `toml:"udp_packet_size"` UDPPacketSize int `toml:"udp_packet_size" deprecated:"0.12.1;2.0.0;option is ignored"`
ReadBufferSize int `toml:"read_buffer_size"` ReadBufferSize int `toml:"read_buffer_size"`
@ -375,7 +375,6 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error {
func (s *Statsd) Start(ac telegraf.Accumulator) error { func (s *Statsd) Start(ac telegraf.Accumulator) error {
if s.ParseDataDogTags { if s.ParseDataDogTags {
s.DataDogExtensions = true s.DataDogExtensions = true
s.Log.Warn("'parse_data_dog_tags' config option is deprecated, please use 'datadog_extensions' instead")
} }
s.acc = ac s.acc = ac
@ -417,10 +416,6 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error {
s.accept <- true s.accept <- true
} }
if s.ConvertNames {
s.Log.Warn("'convert_names' config option is deprecated, please use 'metric_separator' instead")
}
if s.MetricSeparator == "" { if s.MetricSeparator == "" {
s.MetricSeparator = defaultSeparator s.MetricSeparator = defaultSeparator
} }

View File

@ -54,7 +54,7 @@ type VSphere struct {
MaxQueryMetrics int MaxQueryMetrics int
CollectConcurrency int CollectConcurrency int
DiscoverConcurrency int DiscoverConcurrency int
ForceDiscoverOnInit bool ForceDiscoverOnInit bool `toml:"force_discover_on_init" deprecated:"1.14.0;option is ignored"`
ObjectDiscoveryInterval config.Duration ObjectDiscoveryInterval config.Duration
Timeout config.Duration Timeout config.Duration
HistoricalInterval config.Duration HistoricalInterval config.Duration
@ -275,11 +275,6 @@ func (v *VSphere) Start(_ telegraf.Accumulator) error {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
v.cancel = cancel v.cancel = cancel
// Check for deprecated settings
if !v.ForceDiscoverOnInit {
v.Log.Warn("The 'force_discover_on_init' configuration parameter has been deprecated. Setting it to 'false' has no effect")
}
// Create endpoints, one for each vCenter we're monitoring // Create endpoints, one for each vCenter we're monitoring
v.endpoints = make([]*Endpoint, len(v.Vcenters)) v.endpoints = make([]*Endpoint, len(v.Vcenters))
for i, rawURL := range v.Vcenters { for i, rawURL := range v.Vcenters {

View File

@ -25,7 +25,7 @@ type Zookeeper struct {
Timeout config.Duration Timeout config.Duration
EnableTLS bool `toml:"enable_tls"` EnableTLS bool `toml:"enable_tls"`
EnableSSL bool `toml:"enable_ssl"` // deprecated in 1.7; use enable_tls EnableSSL bool `toml:"enable_ssl" deprecated:"1.7.0;use 'enable_tls' instead"`
tlsint.ClientConfig tlsint.ClientConfig
initialized bool initialized bool

View File

@ -34,7 +34,7 @@ func (a *externalAuth) Response() string {
} }
type AMQP struct { type AMQP struct {
URL string `toml:"url"` // deprecated in 1.7; use brokers URL string `toml:"url" deprecated:"1.7.0;use 'brokers' instead"`
Brokers []string `toml:"brokers"` Brokers []string `toml:"brokers"`
Exchange string `toml:"exchange"` Exchange string `toml:"exchange"`
ExchangeType string `toml:"exchange_type"` ExchangeType string `toml:"exchange_type"`
@ -48,9 +48,9 @@ type AMQP struct {
RoutingTag string `toml:"routing_tag"` RoutingTag string `toml:"routing_tag"`
RoutingKey string `toml:"routing_key"` RoutingKey string `toml:"routing_key"`
DeliveryMode string `toml:"delivery_mode"` DeliveryMode string `toml:"delivery_mode"`
Database string `toml:"database"` // deprecated in 1.7; use headers Database string `toml:"database" deprecated:"1.7.0;use 'headers' instead"`
RetentionPolicy string `toml:"retention_policy"` // deprecated in 1.7; use headers RetentionPolicy string `toml:"retention_policy" deprecated:"1.7.0;use 'headers' instead"`
Precision string `toml:"precision"` // deprecated; has no effect Precision string `toml:"precision" deprecated:"1.2.0;option is ignored"`
Headers map[string]string `toml:"headers"` Headers map[string]string `toml:"headers"`
Timeout config.Duration `toml:"timeout"` Timeout config.Duration `toml:"timeout"`
UseBatchFormat bool `toml:"use_batch_format"` UseBatchFormat bool `toml:"use_batch_format"`
@ -72,10 +72,6 @@ type Client interface {
} }
var sampleConfig = ` var sampleConfig = `
## Broker to publish to.
## deprecated in 1.7; use the brokers option
# url = "amqp://localhost:5672/influxdb"
## Brokers to publish to. If multiple brokers are specified a random broker ## Brokers to publish to. If multiple brokers are specified a random broker
## will be selected anytime a connection is established. This can be ## will be selected anytime a connection is established. This can be
## helpful for load balancing when not using a dedicated load balancer. ## helpful for load balancing when not using a dedicated load balancer.
@ -124,14 +120,6 @@ var sampleConfig = `
## One of "transient" or "persistent". ## One of "transient" or "persistent".
# delivery_mode = "transient" # delivery_mode = "transient"
## InfluxDB database added as a message header.
## deprecated in 1.7; use the headers option
# database = "telegraf"
## InfluxDB retention policy added as a message header
## deprecated in 1.7; use the headers option
# retention_policy = "default"
## Static headers added to each published message. ## Static headers added to each published message.
# headers = { } # headers = { }
# headers = {"database" = "telegraf", "retention_policy" = "default"} # headers = {"database" = "telegraf", "retention_policy" = "default"}

View File

@ -32,7 +32,7 @@ type Client interface {
// InfluxDB struct is the primary data structure for the plugin // InfluxDB struct is the primary data structure for the plugin
type InfluxDB struct { type InfluxDB struct {
URL string // url deprecated in 0.1.9; use urls URL string `toml:"url" deprecated:"0.1.9;2.0.0;use 'urls' instead"`
URLs []string `toml:"urls"` URLs []string `toml:"urls"`
Username string `toml:"username"` Username string `toml:"username"`
Password string `toml:"password"` Password string `toml:"password"`
@ -53,7 +53,7 @@ type InfluxDB struct {
InfluxUintSupport bool `toml:"influx_uint_support"` InfluxUintSupport bool `toml:"influx_uint_support"`
tls.ClientConfig tls.ClientConfig
Precision string // precision deprecated in 1.0; value is ignored Precision string `toml:"precision" deprecated:"1.0.0;option is ignored"`
clients []Client clients []Client

View File

@ -20,8 +20,8 @@ const maxRecordsPerRequest uint32 = 500
type ( type (
KinesisOutput struct { KinesisOutput struct {
StreamName string `toml:"streamname"` StreamName string `toml:"streamname"`
PartitionKey string `toml:"partitionkey"` PartitionKey string `toml:"partitionkey" deprecated:"1.5.0;use 'partition.key' instead"`
RandomPartitionKey bool `toml:"use_random_partitionkey"` RandomPartitionKey bool `toml:"use_random_partitionkey" deprecated:"1.5.0;use 'partition.method' instead"`
Partition *Partition `toml:"partition"` Partition *Partition `toml:"partition"`
Debug bool `toml:"debug"` Debug bool `toml:"debug"`
@ -73,12 +73,7 @@ var sampleConfig = `
## Kinesis StreamName must exist prior to starting telegraf. ## Kinesis StreamName must exist prior to starting telegraf.
streamname = "StreamName" streamname = "StreamName"
## DEPRECATED: PartitionKey as used for sharding data.
partitionkey = "PartitionKey"
## DEPRECATED: If set the partitionKey will be a random UUID on every put.
## This allows for scaling across multiple shards in a stream.
## This will cause issues with ordering.
use_random_partitionkey = false
## The partition key can be calculated using one of several methods: ## The partition key can be calculated using one of several methods:
## ##
## Use a static value for all writes: ## Use a static value for all writes:

View File

@ -20,7 +20,7 @@ type Librato struct {
APIUser string `toml:"api_user"` APIUser string `toml:"api_user"`
APIToken string `toml:"api_token"` APIToken string `toml:"api_token"`
Debug bool `toml:"debug"` Debug bool `toml:"debug"`
SourceTag string `toml:"source_tag"` // Deprecated, keeping for backward-compatibility SourceTag string `toml:"source_tag" deprecated:"1.0.0;use 'template' instead"`
Timeout config.Duration `toml:"timeout"` Timeout config.Duration `toml:"timeout"`
Template string `toml:"template"` Template string `toml:"template"`
Log telegraf.Logger `toml:"-"` Log telegraf.Logger `toml:"-"`

View File

@ -32,7 +32,7 @@ type OpenTSDB struct {
Host string `toml:"host"` Host string `toml:"host"`
Port int `toml:"port"` Port int `toml:"port"`
HTTPBatchSize int `toml:"http_batch_size"` // deprecated httpBatchSize form in 1.8 HTTPBatchSize int `toml:"http_batch_size"`
HTTPPath string `toml:"http_path"` HTTPPath string `toml:"http_path"`
Debug bool `toml:"debug"` Debug bool `toml:"debug"`

View File

@ -28,7 +28,7 @@ type Wavefront struct {
TruncateTags bool `toml:"truncate_tags"` TruncateTags bool `toml:"truncate_tags"`
ImmediateFlush bool `toml:"immediate_flush"` ImmediateFlush bool `toml:"immediate_flush"`
SourceOverride []string `toml:"source_override"` SourceOverride []string `toml:"source_override"`
StringToNumber map[string][]map[string]float64 `toml:"string_to_number"` StringToNumber map[string][]map[string]float64 `toml:"string_to_number" deprecated:"1.9.0;use the enum processor instead"`
sender wavefront.Sender sender wavefront.Sender
Log telegraf.Logger `toml:"-"` Log telegraf.Logger `toml:"-"`
@ -108,13 +108,6 @@ var sampleConfig = `
## of metrics will block for a longer time, but this will be handled gracefully by the internal buffering in ## of metrics will block for a longer time, but this will be handled gracefully by the internal buffering in
## Telegraf. ## Telegraf.
#immediate_flush = true #immediate_flush = true
## Define a mapping, namespaced by metric prefix, from string values to numeric values
## deprecated in 1.9; use the enum processor plugin
#[[outputs.wavefront.string_to_number.elasticsearch]]
# green = 1.0
# yellow = 0.5
# red = 0.0
` `
type MetricPoint struct { type MetricPoint struct {
@ -126,10 +119,6 @@ type MetricPoint struct {
} }
func (w *Wavefront) Connect() error { func (w *Wavefront) Connect() error {
if len(w.StringToNumber) > 0 {
w.Log.Warn("The string_to_number option is deprecated; please use the enum processor instead")
}
flushSeconds := 5 flushSeconds := 5
if w.ImmediateFlush { if w.ImmediateFlush {
flushSeconds = 86400 // Set a very long flush interval if we're flushing directly flushSeconds = 86400 // Set a very long flush interval if we're flushing directly