2015-06-20 20:38:01 +08:00
|
|
|
package prometheus
|
|
|
|
|
|
|
|
|
|
import (
|
2018-11-06 05:30:16 +08:00
|
|
|
"context"
|
2015-06-20 20:38:01 +08:00
|
|
|
"errors"
|
|
|
|
|
"fmt"
|
2021-09-29 05:16:32 +08:00
|
|
|
"io"
|
2017-09-19 06:06:11 +08:00
|
|
|
"net"
|
2015-06-20 20:38:01 +08:00
|
|
|
"net/http"
|
2017-09-19 06:06:11 +08:00
|
|
|
"net/url"
|
2021-03-09 00:00:56 +08:00
|
|
|
"os"
|
|
|
|
|
"strings"
|
2015-06-20 20:38:01 +08:00
|
|
|
"sync"
|
2016-03-01 00:52:58 +08:00
|
|
|
"time"
|
2017-03-30 06:04:29 +08:00
|
|
|
|
2021-11-15 23:14:09 +08:00
|
|
|
"k8s.io/apimachinery/pkg/fields"
|
|
|
|
|
"k8s.io/apimachinery/pkg/labels"
|
|
|
|
|
|
2017-03-30 06:04:29 +08:00
|
|
|
"github.com/influxdata/telegraf"
|
2021-04-10 01:15:04 +08:00
|
|
|
"github.com/influxdata/telegraf/config"
|
2021-05-20 22:36:36 +08:00
|
|
|
"github.com/influxdata/telegraf/internal"
|
2020-06-26 02:44:22 +08:00
|
|
|
"github.com/influxdata/telegraf/plugins/common/tls"
|
2017-03-30 06:04:29 +08:00
|
|
|
"github.com/influxdata/telegraf/plugins/inputs"
|
2021-11-15 23:14:09 +08:00
|
|
|
parserV2 "github.com/influxdata/telegraf/plugins/parsers/prometheus"
|
2015-06-20 20:38:01 +08:00
|
|
|
)
|
|
|
|
|
|
2022-05-24 21:11:17 +08:00
|
|
|
// acceptHeader asks the scrape target for protobuf-encoded metric families
// first (q=0.7) and falls back to the text exposition format 0.0.4 (q=0.3).
const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3`
|
2016-07-07 18:15:47 +08:00
|
|
|
|
2015-06-20 20:38:01 +08:00
|
|
|
// Prometheus is an input plugin that scrapes Prometheus-format metrics from
// statically configured URLs, Kubernetes services/pods, and Consul-discovered
// services.
type Prometheus struct {
	// An array of urls to scrape metrics from.
	URLs []string `toml:"urls"`

	// An array of Kubernetes services to scrape metrics from.
	KubernetesServices []string

	// Location of kubernetes config file
	KubeConfig string

	// Label Selector/s for Kubernetes
	KubernetesLabelSelector string `toml:"kubernetes_label_selector"`

	// Field Selector/s for Kubernetes
	KubernetesFieldSelector string `toml:"kubernetes_field_selector"`

	// Consul SD configuration
	ConsulConfig ConsulConfig `toml:"consul"`

	// Bearer Token authorization file path
	BearerToken string `toml:"bearer_token"`
	// Bearer token given literally in the config (used when no token file is set)
	BearerTokenString string `toml:"bearer_token_string"`

	// Basic authentication credentials
	Username string `toml:"username"`
	Password string `toml:"password"`

	// Per-request timeout applied to every scrape HTTP client
	ResponseTimeout config.Duration `toml:"response_timeout"`

	// Selects the metric parser: 2 uses the v2 parser, any other value the v1 parser
	MetricVersion int `toml:"metric_version"`

	// Name of the tag carrying the scraped URL; empty disables the tag
	URLTag string `toml:"url_tag"`

	// When true, timestamps in the scraped payload are ignored by the parser
	IgnoreTimestamp bool `toml:"ignore_timestamp"`

	tls.ClientConfig

	Log telegraf.Logger

	// Shared HTTP client and default headers, lazily created on first Gather
	client  *http.Client
	headers map[string]string

	// Should we scrape Kubernetes services for prometheus annotations
	MonitorPods       bool   `toml:"monitor_kubernetes_pods"`
	PodScrapeScope    string `toml:"pod_scrape_scope"`
	NodeIP            string `toml:"node_ip"`
	PodScrapeInterval int    `toml:"pod_scrape_interval"`
	PodNamespace      string `toml:"monitor_kubernetes_pods_namespace"`
	// lock guards kubernetesPods and consulServices, which are written by
	// background discovery goroutines and read in GetAllURLs
	lock           sync.Mutex
	kubernetesPods map[string]URLAndAddress
	cancel         context.CancelFunc
	wg             sync.WaitGroup

	// Only for monitor_kubernetes_pods=true and pod_scrape_scope="node"
	podLabelSelector  labels.Selector
	podFieldSelector  fields.Selector
	isNodeScrapeScope bool

	// Only for monitor_kubernetes_pods=true
	CacheRefreshInterval int `toml:"cache_refresh_interval"`

	// List of consul services to scrape
	consulServices map[string]URLAndAddress
}
|
|
|
|
|
|
2019-11-27 07:46:31 +08:00
|
|
|
func (p *Prometheus) Init() error {
|
2021-11-15 23:14:09 +08:00
|
|
|
// Config processing for node scrape scope for monitor_kubernetes_pods
|
2021-03-09 00:00:56 +08:00
|
|
|
p.isNodeScrapeScope = strings.EqualFold(p.PodScrapeScope, "node")
|
|
|
|
|
if p.isNodeScrapeScope {
|
|
|
|
|
// Need node IP to make cAdvisor call for pod list. Check if set in config and valid IP address
|
|
|
|
|
if p.NodeIP == "" || net.ParseIP(p.NodeIP) == nil {
|
|
|
|
|
p.Log.Infof("The config node_ip is empty or invalid. Using NODE_IP env var as default.")
|
|
|
|
|
|
|
|
|
|
// Check if set as env var and is valid IP address
|
|
|
|
|
envVarNodeIP := os.Getenv("NODE_IP")
|
|
|
|
|
if envVarNodeIP == "" || net.ParseIP(envVarNodeIP) == nil {
|
2021-03-26 01:57:01 +08:00
|
|
|
return errors.New("the node_ip config and the environment variable NODE_IP are not set or invalid; " +
|
|
|
|
|
"cannot get pod list for monitor_kubernetes_pods using node scrape scope")
|
2021-03-09 00:00:56 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
p.NodeIP = envVarNodeIP
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Parse label and field selectors - will be used to filter pods after cAdvisor call
|
|
|
|
|
var err error
|
|
|
|
|
p.podLabelSelector, err = labels.Parse(p.KubernetesLabelSelector)
|
|
|
|
|
if err != nil {
|
2021-03-26 01:57:01 +08:00
|
|
|
return fmt.Errorf("error parsing the specified label selector(s): %s", err.Error())
|
2021-03-09 00:00:56 +08:00
|
|
|
}
|
|
|
|
|
p.podFieldSelector, err = fields.ParseSelector(p.KubernetesFieldSelector)
|
|
|
|
|
if err != nil {
|
2021-03-26 01:57:01 +08:00
|
|
|
return fmt.Errorf("error parsing the specified field selector(s): %s", err.Error())
|
2021-03-09 00:00:56 +08:00
|
|
|
}
|
|
|
|
|
isValid, invalidSelector := fieldSelectorIsSupported(p.podFieldSelector)
|
|
|
|
|
if !isValid {
|
2021-03-26 01:57:01 +08:00
|
|
|
return fmt.Errorf("the field selector %s is not supported for pods", invalidSelector)
|
2021-03-09 00:00:56 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
p.Log.Infof("Using pod scrape scope at node level to get pod list using cAdvisor.")
|
|
|
|
|
p.Log.Infof("Using the label selector: %v and field selector: %v", p.podLabelSelector, p.podFieldSelector)
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-27 07:46:31 +08:00
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
2018-02-06 03:16:00 +08:00
|
|
|
func (p *Prometheus) AddressToURL(u *url.URL, address string) *url.URL {
|
2017-09-19 06:06:11 +08:00
|
|
|
host := address
|
|
|
|
|
if u.Port() != "" {
|
|
|
|
|
host = address + ":" + u.Port()
|
|
|
|
|
}
|
2018-02-06 03:16:00 +08:00
|
|
|
reconstructedURL := &url.URL{
|
2017-09-19 06:06:11 +08:00
|
|
|
Scheme: u.Scheme,
|
|
|
|
|
Opaque: u.Opaque,
|
|
|
|
|
User: u.User,
|
|
|
|
|
Path: u.Path,
|
|
|
|
|
RawPath: u.RawPath,
|
|
|
|
|
ForceQuery: u.ForceQuery,
|
|
|
|
|
RawQuery: u.RawQuery,
|
|
|
|
|
Fragment: u.Fragment,
|
|
|
|
|
Host: host,
|
|
|
|
|
}
|
2018-02-06 03:16:00 +08:00
|
|
|
return reconstructedURL
|
2017-09-19 06:06:11 +08:00
|
|
|
}
|
|
|
|
|
|
2018-02-06 03:16:00 +08:00
|
|
|
// URLAndAddress pairs a concrete scrape URL with the address it was resolved
// from and any extra tags to attach to metrics gathered from it.
type URLAndAddress struct {
	// OriginalURL is the URL as configured or discovered, before resolution.
	OriginalURL *url.URL
	// URL is the actual URL to scrape; its host may be a resolved address.
	URL *url.URL
	// Address is the resolved host for a Kubernetes service; empty otherwise.
	Address string
	// Tags are additional tags applied to every metric from this target.
	Tags map[string]string
}
|
|
|
|
|
|
2019-01-17 07:49:24 +08:00
|
|
|
func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) {
|
2021-03-26 01:57:01 +08:00
|
|
|
allURLs := make(map[string]URLAndAddress)
|
2018-02-06 03:16:00 +08:00
|
|
|
for _, u := range p.URLs {
|
2021-11-15 23:14:09 +08:00
|
|
|
address, err := url.Parse(u)
|
2018-02-06 03:16:00 +08:00
|
|
|
if err != nil {
|
2019-09-24 06:39:50 +08:00
|
|
|
p.Log.Errorf("Could not parse %q, skipping it. Error: %s", u, err.Error())
|
2018-02-06 03:16:00 +08:00
|
|
|
continue
|
|
|
|
|
}
|
2021-11-15 23:14:09 +08:00
|
|
|
allURLs[address.String()] = URLAndAddress{URL: address, OriginalURL: address}
|
2017-09-19 06:06:11 +08:00
|
|
|
}
|
2019-01-17 07:49:24 +08:00
|
|
|
|
2018-11-06 05:30:16 +08:00
|
|
|
p.lock.Lock()
|
|
|
|
|
defer p.lock.Unlock()
|
2021-07-28 05:23:01 +08:00
|
|
|
// add all services collected from consul
|
|
|
|
|
for k, v := range p.consulServices {
|
|
|
|
|
allURLs[k] = v
|
|
|
|
|
}
|
2018-11-06 05:30:16 +08:00
|
|
|
// loop through all pods scraped via the prometheus annotation on the pods
|
2019-01-17 07:49:24 +08:00
|
|
|
for k, v := range p.kubernetesPods {
|
|
|
|
|
allURLs[k] = v
|
|
|
|
|
}
|
2018-11-06 05:30:16 +08:00
|
|
|
|
2017-09-19 06:06:11 +08:00
|
|
|
for _, service := range p.KubernetesServices {
|
2021-11-15 23:14:09 +08:00
|
|
|
address, err := url.Parse(service)
|
2017-09-19 06:06:11 +08:00
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
2018-11-06 05:30:16 +08:00
|
|
|
|
2021-11-15 23:14:09 +08:00
|
|
|
resolvedAddresses, err := net.LookupHost(address.Hostname())
|
2017-09-19 06:06:11 +08:00
|
|
|
if err != nil {
|
2021-11-15 23:14:09 +08:00
|
|
|
p.Log.Errorf("Could not resolve %q, skipping it. Error: %s", address.Host, err.Error())
|
2017-09-19 06:06:11 +08:00
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
for _, resolved := range resolvedAddresses {
|
2021-11-15 23:14:09 +08:00
|
|
|
serviceURL := p.AddressToURL(address, resolved)
|
2019-01-17 07:49:24 +08:00
|
|
|
allURLs[serviceURL.String()] = URLAndAddress{
|
|
|
|
|
URL: serviceURL,
|
|
|
|
|
Address: resolved,
|
2021-11-15 23:14:09 +08:00
|
|
|
OriginalURL: address,
|
2019-01-17 07:49:24 +08:00
|
|
|
}
|
2017-09-19 06:06:11 +08:00
|
|
|
}
|
|
|
|
|
}
|
2018-02-06 03:16:00 +08:00
|
|
|
return allURLs, nil
|
2017-09-19 06:06:11 +08:00
|
|
|
}
|
|
|
|
|
|
2015-06-20 20:38:01 +08:00
|
|
|
// Reads stats from all configured servers accumulates stats.
|
|
|
|
|
// Returns one of the errors encountered while gather stats (if any).
|
2016-03-18 03:17:48 +08:00
|
|
|
func (p *Prometheus) Gather(acc telegraf.Accumulator) error {
|
2017-05-10 07:20:43 +08:00
|
|
|
if p.client == nil {
|
2018-11-03 08:51:40 +08:00
|
|
|
client, err := p.createHTTPClient()
|
2017-05-10 07:20:43 +08:00
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
p.client = client
|
2021-05-20 22:36:36 +08:00
|
|
|
p.headers = map[string]string{
|
|
|
|
|
"User-Agent": internal.ProductToken(),
|
|
|
|
|
"Accept": acceptHeader,
|
|
|
|
|
}
|
2017-05-10 07:20:43 +08:00
|
|
|
}
|
|
|
|
|
|
2015-06-20 20:38:01 +08:00
|
|
|
var wg sync.WaitGroup
|
|
|
|
|
|
2018-02-06 03:16:00 +08:00
|
|
|
allURLs, err := p.GetAllURLs()
|
2017-09-19 06:06:11 +08:00
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
2018-02-06 03:16:00 +08:00
|
|
|
for _, URL := range allURLs {
|
2015-06-20 20:38:01 +08:00
|
|
|
wg.Add(1)
|
2018-02-06 03:16:00 +08:00
|
|
|
go func(serviceURL URLAndAddress) {
|
2015-06-20 20:38:01 +08:00
|
|
|
defer wg.Done()
|
2018-02-06 03:16:00 +08:00
|
|
|
acc.AddError(p.gatherURL(serviceURL, acc))
|
|
|
|
|
}(URL)
|
2015-06-20 20:38:01 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
wg.Wait()
|
|
|
|
|
|
2017-04-25 02:13:26 +08:00
|
|
|
return nil
|
2015-06-20 20:38:01 +08:00
|
|
|
}
|
|
|
|
|
|
2018-11-03 08:51:40 +08:00
|
|
|
func (p *Prometheus) createHTTPClient() (*http.Client, error) {
|
2018-05-05 07:33:23 +08:00
|
|
|
tlsCfg, err := p.ClientConfig.TLSConfig()
|
2016-06-23 15:59:44 +08:00
|
|
|
if err != nil {
|
2017-05-10 07:20:43 +08:00
|
|
|
return nil, err
|
2016-06-23 15:59:44 +08:00
|
|
|
}
|
|
|
|
|
|
2017-05-10 07:20:43 +08:00
|
|
|
client := &http.Client{
|
|
|
|
|
Transport: &http.Transport{
|
|
|
|
|
TLSClientConfig: tlsCfg,
|
|
|
|
|
DisableKeepAlives: true,
|
|
|
|
|
},
|
2021-04-10 01:15:04 +08:00
|
|
|
Timeout: time.Duration(p.ResponseTimeout),
|
2016-03-18 03:17:48 +08:00
|
|
|
}
|
|
|
|
|
|
2017-05-10 07:20:43 +08:00
|
|
|
return client, nil
|
|
|
|
|
}
|
|
|
|
|
|
2018-02-06 03:16:00 +08:00
|
|
|
func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error {
|
2018-11-03 08:51:40 +08:00
|
|
|
var req *http.Request
|
|
|
|
|
var err error
|
|
|
|
|
var uClient *http.Client
|
2019-11-21 12:53:57 +08:00
|
|
|
var metrics []telegraf.Metric
|
2018-11-03 08:51:40 +08:00
|
|
|
if u.URL.Scheme == "unix" {
|
|
|
|
|
path := u.URL.Query().Get("path")
|
|
|
|
|
if path == "" {
|
|
|
|
|
path = "/metrics"
|
|
|
|
|
}
|
2020-10-08 23:20:35 +08:00
|
|
|
addr := "http://localhost" + path
|
|
|
|
|
req, err = http.NewRequest("GET", addr, nil)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return fmt.Errorf("unable to create new request '%s': %s", addr, err)
|
|
|
|
|
}
|
2018-11-03 08:51:40 +08:00
|
|
|
|
|
|
|
|
// ignore error because it's been handled before getting here
|
|
|
|
|
tlsCfg, _ := p.ClientConfig.TLSConfig()
|
|
|
|
|
uClient = &http.Client{
|
|
|
|
|
Transport: &http.Transport{
|
|
|
|
|
TLSClientConfig: tlsCfg,
|
|
|
|
|
DisableKeepAlives: true,
|
|
|
|
|
Dial: func(network, addr string) (net.Conn, error) {
|
|
|
|
|
c, err := net.Dial("unix", u.URL.Path)
|
|
|
|
|
return c, err
|
|
|
|
|
},
|
|
|
|
|
},
|
2021-04-10 01:15:04 +08:00
|
|
|
Timeout: time.Duration(p.ResponseTimeout),
|
2018-11-03 08:51:40 +08:00
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
if u.URL.Path == "" {
|
|
|
|
|
u.URL.Path = "/metrics"
|
|
|
|
|
}
|
|
|
|
|
req, err = http.NewRequest("GET", u.URL.String(), nil)
|
2020-10-08 23:20:35 +08:00
|
|
|
if err != nil {
|
|
|
|
|
return fmt.Errorf("unable to create new request '%s': %s", u.URL.String(), err)
|
|
|
|
|
}
|
2018-11-03 08:51:40 +08:00
|
|
|
}
|
|
|
|
|
|
2021-05-20 22:36:36 +08:00
|
|
|
p.addHeaders(req)
|
2017-05-10 07:20:43 +08:00
|
|
|
|
2016-03-18 03:17:48 +08:00
|
|
|
if p.BearerToken != "" {
|
2021-09-29 05:16:32 +08:00
|
|
|
token, err := os.ReadFile(p.BearerToken)
|
2016-03-18 03:17:48 +08:00
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
req.Header.Set("Authorization", "Bearer "+string(token))
|
2019-01-16 07:25:26 +08:00
|
|
|
} else if p.BearerTokenString != "" {
|
|
|
|
|
req.Header.Set("Authorization", "Bearer "+p.BearerTokenString)
|
2019-07-03 02:14:48 +08:00
|
|
|
} else if p.Username != "" || p.Password != "" {
|
|
|
|
|
req.SetBasicAuth(p.Username, p.Password)
|
2016-03-18 03:17:48 +08:00
|
|
|
}
|
|
|
|
|
|
2018-11-03 08:51:40 +08:00
|
|
|
var resp *http.Response
|
|
|
|
|
if u.URL.Scheme != "unix" {
|
2021-11-15 23:14:09 +08:00
|
|
|
//nolint:bodyclose // False positive (because of if-else) - body will be closed in `defer`
|
2018-11-03 08:51:40 +08:00
|
|
|
resp, err = p.client.Do(req)
|
|
|
|
|
} else {
|
2021-11-15 23:14:09 +08:00
|
|
|
//nolint:bodyclose // False positive (because of if-else) - body will be closed in `defer`
|
2018-11-03 08:51:40 +08:00
|
|
|
resp, err = uClient.Do(req)
|
|
|
|
|
}
|
2015-06-20 20:38:01 +08:00
|
|
|
if err != nil {
|
2018-02-06 03:16:00 +08:00
|
|
|
return fmt.Errorf("error making HTTP request to %s: %s", u.URL, err)
|
2015-06-20 20:38:01 +08:00
|
|
|
}
|
|
|
|
|
defer resp.Body.Close()
|
2018-11-03 08:51:40 +08:00
|
|
|
|
2015-06-20 20:38:01 +08:00
|
|
|
if resp.StatusCode != http.StatusOK {
|
2018-02-06 03:16:00 +08:00
|
|
|
return fmt.Errorf("%s returned HTTP status %s", u.URL, resp.Status)
|
2015-06-20 20:38:01 +08:00
|
|
|
}
|
|
|
|
|
|
2021-09-29 05:16:32 +08:00
|
|
|
body, err := io.ReadAll(resp.Body)
|
2016-03-02 00:12:23 +08:00
|
|
|
if err != nil {
|
|
|
|
|
return fmt.Errorf("error reading body: %s", err)
|
|
|
|
|
}
|
2015-06-20 20:38:01 +08:00
|
|
|
|
2019-11-21 12:53:57 +08:00
|
|
|
if p.MetricVersion == 2 {
|
2021-11-15 23:14:09 +08:00
|
|
|
parser := parserV2.Parser{
|
2021-10-06 05:11:46 +08:00
|
|
|
Header: resp.Header,
|
|
|
|
|
IgnoreTimestamp: p.IgnoreTimestamp,
|
|
|
|
|
}
|
2020-12-03 03:48:44 +08:00
|
|
|
metrics, err = parser.Parse(body)
|
2019-11-21 12:53:57 +08:00
|
|
|
} else {
|
2021-10-06 05:11:46 +08:00
|
|
|
metrics, err = Parse(body, resp.Header, p.IgnoreTimestamp)
|
2019-11-21 12:53:57 +08:00
|
|
|
}
|
|
|
|
|
|
2016-03-02 00:12:23 +08:00
|
|
|
if err != nil {
|
2016-07-07 18:15:47 +08:00
|
|
|
return fmt.Errorf("error reading metrics for %s: %s",
|
2018-02-06 03:16:00 +08:00
|
|
|
u.URL, err)
|
2016-03-02 00:12:23 +08:00
|
|
|
}
|
2018-11-03 08:51:40 +08:00
|
|
|
|
2016-03-02 00:12:23 +08:00
|
|
|
for _, metric := range metrics {
|
|
|
|
|
tags := metric.Tags()
|
2018-02-06 03:16:00 +08:00
|
|
|
// strip user and password from URL
|
|
|
|
|
u.OriginalURL.User = nil
|
2019-11-27 07:46:31 +08:00
|
|
|
if p.URLTag != "" {
|
|
|
|
|
tags[p.URLTag] = u.OriginalURL.String()
|
|
|
|
|
}
|
2018-02-06 03:16:00 +08:00
|
|
|
if u.Address != "" {
|
|
|
|
|
tags["address"] = u.Address
|
2017-09-19 06:06:11 +08:00
|
|
|
}
|
2018-11-06 05:30:16 +08:00
|
|
|
for k, v := range u.Tags {
|
|
|
|
|
tags[k] = v
|
|
|
|
|
}
|
2017-10-19 05:51:08 +08:00
|
|
|
|
|
|
|
|
switch metric.Type() {
|
|
|
|
|
case telegraf.Counter:
|
|
|
|
|
acc.AddCounter(metric.Name(), metric.Fields(), tags, metric.Time())
|
|
|
|
|
case telegraf.Gauge:
|
|
|
|
|
acc.AddGauge(metric.Name(), metric.Fields(), tags, metric.Time())
|
2017-10-25 07:28:52 +08:00
|
|
|
case telegraf.Summary:
|
|
|
|
|
acc.AddSummary(metric.Name(), metric.Fields(), tags, metric.Time())
|
|
|
|
|
case telegraf.Histogram:
|
|
|
|
|
acc.AddHistogram(metric.Name(), metric.Fields(), tags, metric.Time())
|
2017-10-19 05:51:08 +08:00
|
|
|
default:
|
|
|
|
|
acc.AddFields(metric.Name(), metric.Fields(), tags, metric.Time())
|
|
|
|
|
}
|
2015-06-20 20:38:01 +08:00
|
|
|
}
|
2015-10-23 00:17:57 +08:00
|
|
|
|
2015-06-20 20:38:01 +08:00
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
2021-05-20 22:36:36 +08:00
|
|
|
func (p *Prometheus) addHeaders(req *http.Request) {
|
|
|
|
|
for header, value := range p.headers {
|
|
|
|
|
req.Header.Add(header, value)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-09 00:00:56 +08:00
|
|
|
/* Check if the field selector specified is valid.
|
|
|
|
|
* See ToSelectableFields() for list of fields that are selectable:
|
|
|
|
|
* https://github.com/kubernetes/kubernetes/release-1.20/pkg/registry/core/pod/strategy.go
|
|
|
|
|
*/
|
|
|
|
|
func fieldSelectorIsSupported(fieldSelector fields.Selector) (bool, string) {
|
|
|
|
|
supportedFieldsToSelect := map[string]bool{
|
|
|
|
|
"spec.nodeName": true,
|
|
|
|
|
"spec.restartPolicy": true,
|
|
|
|
|
"spec.schedulerName": true,
|
|
|
|
|
"spec.serviceAccountName": true,
|
|
|
|
|
"status.phase": true,
|
|
|
|
|
"status.podIP": true,
|
|
|
|
|
"status.nominatedNodeName": true,
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for _, requirement := range fieldSelector.Requirements() {
|
|
|
|
|
if !supportedFieldsToSelect[requirement.Field] {
|
|
|
|
|
return false, requirement.Field
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return true, ""
|
|
|
|
|
}
|
|
|
|
|
|
2021-07-28 05:23:01 +08:00
|
|
|
// Start will start the Kubernetes and/or Consul scraping if enabled in the configuration
|
2021-03-23 01:21:36 +08:00
|
|
|
func (p *Prometheus) Start(_ telegraf.Accumulator) error {
|
2021-07-28 05:23:01 +08:00
|
|
|
var ctx context.Context
|
|
|
|
|
p.wg = sync.WaitGroup{}
|
|
|
|
|
ctx, p.cancel = context.WithCancel(context.Background())
|
|
|
|
|
|
|
|
|
|
if p.ConsulConfig.Enabled && len(p.ConsulConfig.Queries) > 0 {
|
|
|
|
|
if err := p.startConsul(ctx); err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
}
|
2018-11-06 05:30:16 +08:00
|
|
|
if p.MonitorPods {
|
2021-07-28 05:23:01 +08:00
|
|
|
if err := p.startK8s(ctx); err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
2018-11-06 05:30:16 +08:00
|
|
|
}
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Stop cancels the discovery context created in Start and waits for all
// background discovery goroutines to finish.
func (p *Prometheus) Stop() {
	p.cancel()
	p.wg.Wait()
}
|
|
|
|
|
|
2015-06-20 20:38:01 +08:00
|
|
|
func init() {
|
2016-01-28 05:21:36 +08:00
|
|
|
inputs.Add("prometheus", func() telegraf.Input {
|
2019-01-17 07:49:24 +08:00
|
|
|
return &Prometheus{
|
2021-04-10 01:15:04 +08:00
|
|
|
ResponseTimeout: config.Duration(time.Second * 3),
|
2019-01-17 07:49:24 +08:00
|
|
|
kubernetesPods: map[string]URLAndAddress{},
|
2021-07-28 05:23:01 +08:00
|
|
|
consulServices: map[string]URLAndAddress{},
|
2019-11-21 12:53:57 +08:00
|
|
|
URLTag: "url",
|
2019-01-17 07:49:24 +08:00
|
|
|
}
|
2015-06-20 20:38:01 +08:00
|
|
|
})
|
|
|
|
|
}
|