diff --git a/plugins/inputs/kube_inventory/README.md b/plugins/inputs/kube_inventory/README.md index 013a8f3da..e17006503 100644 --- a/plugins/inputs/kube_inventory/README.md +++ b/plugins/inputs/kube_inventory/README.md @@ -4,10 +4,13 @@ This plugin generates metrics derived from the state of the following Kubernetes - daemonsets - deployments +- endpoints +- ingress - nodes - persistentvolumes - persistentvolumeclaims - pods (containers) +- services - statefulsets Kubernetes is a fast moving project, with a new minor release every 3 months. As @@ -60,6 +63,12 @@ avoid cardinality issues: ## Overrides resource_exclude if both set. # resource_include = [ "deployments", "nodes", "statefulsets" ] + ## selectors to include and exclude as tags. Globs accepted. + ## Note that an empty array for both will include all selectors as tags + ## selector_exclude overrides selector_include if both set. + selector_include = [] + selector_exclude = ["*"] + ## Optional TLS Config # tls_ca = "/path/to/cafile" # tls_cert = "/path/to/certfile" @@ -126,6 +135,7 @@ subjects: - tags: - daemonset_name - namespace + - selector (\*varies) - fields: - generation - current_number_scheduled @@ -140,6 +150,7 @@ subjects: - tags: - deployment_name - namespace + - selector (\*varies) - fields: - replicas_available - replicas_unavailable @@ -200,6 +211,7 @@ subjects: - namespace - phase - storageclass + - selector (\*varies) - fields: - phase_type (int, [see below](#pvc-phase_type)) @@ -209,6 +221,7 @@ subjects: - namespace - node_name - pod_name + - node_selector (\*varies) - state - readiness - fields: @@ -229,6 +242,7 @@ subjects: - port_protocol - external_name - cluster_ip + - selector (\*varies) - fields - created - generation @@ -239,6 +253,7 @@ subjects: - tags: - statefulset_name - namespace + - selector (\*varies) - fields: - created - generation @@ -277,14 +292,15 @@ The persistentvolumeclaim "phase" is saved in the `phase` tag with a correlated ``` kubernetes_configmap,configmap_name=envoy-config,namespace=default,resource_version=56593031 created=1544103867000000000i 1547597616000000000 -kubernetes_daemonset,daemonset_name=telegraf,namespace=logging number_unavailable=0i,desired_number_scheduled=11i,number_available=11i,number_misscheduled=8i,number_ready=11i,updated_number_scheduled=11i,created=1527758699000000000i,generation=16i,current_number_scheduled=11i 1547597616000000000 -kubernetes_deployment,deployment_name=deployd,namespace=default replicas_unavailable=0i,created=1544103082000000000i,replicas_available=1i 1547597616000000000 +kubernetes_daemonset,daemonset_name=telegraf,selector_select1=s1,namespace=logging number_unavailable=0i,desired_number_scheduled=11i,number_available=11i,number_misscheduled=8i,number_ready=11i,updated_number_scheduled=11i,created=1527758699000000000i,generation=16i,current_number_scheduled=11i 1547597616000000000 +kubernetes_deployment,deployment_name=deployd,selector_select1=s1,namespace=default replicas_unavailable=0i,created=1544103082000000000i,replicas_available=1i 1547597616000000000 kubernetes_node,node_name=ip-172-17-0-2.internal allocatable_pods=110i,capacity_memory_bytes=128837533696,capacity_pods=110i,capacity_cpu_cores=16i,allocatable_cpu_cores=16i,allocatable_memory_bytes=128732676096 1547597616000000000 kubernetes_persistentvolume,phase=Released,pv_name=pvc-aaaaaaaa-bbbb-cccc-1111-222222222222,storageclass=ebs-1-retain phase_type=3i 1547597616000000000 
-kubernetes_persistentvolumeclaim,namespace=default,phase=Bound,pvc_name=data-etcd-0,storageclass=ebs-1-retain phase_type=0i 1547597615000000000 +kubernetes_persistentvolumeclaim,namespace=default,phase=Bound,pvc_name=data-etcd-0,selector_select1=s1,storageclass=ebs-1-retain phase_type=0i 1547597615000000000 kubernetes_pod,namespace=default,node_name=ip-172-17-0-2.internal,pod_name=tick1 last_transition_time=1547578322000000000i,ready="false" 1547597616000000000 -kubernetes_pod_container,container_name=telegraf,namespace=default,node_name=ip-172-17-0-2.internal,pod_name=tick1,state=running,readiness=ready resource_requests_cpu_units=0.1,resource_limits_memory_bytes=524288000,resource_limits_cpu_units=0.5,restarts_total=0i,state_code=0i,state_reason="",resource_requests_memory_bytes=524288000 1547597616000000000 -kubernetes_statefulset,namespace=default,statefulset_name=etcd replicas_updated=3i,spec_replicas=3i,observed_generation=1i,created=1544101669000000000i,generation=1i,replicas=3i,replicas_current=3i,replicas_ready=3i 1547597616000000000 +kubernetes_service,cluster_ip=172.29.61.80,namespace=redis-cache-0001,port_name=redis,port_protocol=TCP,selector_app=myapp,selector_io.kompose.service=redis,selector_role=slave,service_name=redis-slave created=1588690034000000000i,generation=0i,port=6379i,target_port=0i 1547597616000000000 +kubernetes_pod_container,container_name=telegraf,namespace=default,node_name=ip-172-17-0-2.internal,node_selector_node-role.kubernetes.io/compute=true,pod_name=tick1,state=running,readiness=ready resource_requests_cpu_units=0.1,resource_limits_memory_bytes=524288000,resource_limits_cpu_units=0.5,restarts_total=0i,state_code=0i,state_reason="",resource_requests_memory_bytes=524288000 1547597616000000000 +kubernetes_statefulset,namespace=default,selector_select1=s1,statefulset_name=etcd replicas_updated=3i,spec_replicas=3i,observed_generation=1i,created=1544101669000000000i,generation=1i,replicas=3i,replicas_current=3i,replicas_ready=3i 1547597616000000000 ``` [metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering diff --git a/plugins/inputs/kube_inventory/daemonset.go b/plugins/inputs/kube_inventory/daemonset.go index 15df586d6..db612a5e3 100644 --- a/plugins/inputs/kube_inventory/daemonset.go +++ b/plugins/inputs/kube_inventory/daemonset.go @@ -38,6 +38,11 @@ func (ki *KubernetesInventory) gatherDaemonSet(d v1.DaemonSet, acc telegraf.Accu "daemonset_name": d.Metadata.GetName(), "namespace": d.Metadata.GetNamespace(), } + for key, val := range d.GetSpec().GetSelector().GetMatchLabels() { + if ki.selectorFilter.Match(key) { + tags["selector_"+key] = val + } + } if d.Metadata.CreationTimestamp.GetSeconds() != 0 { fields["created"] = time.Unix(d.Metadata.CreationTimestamp.GetSeconds(), int64(d.Metadata.CreationTimestamp.GetNanos())).UnixNano() diff --git a/plugins/inputs/kube_inventory/daemonset_test.go b/plugins/inputs/kube_inventory/daemonset_test.go index bf4e934d3..0a13f1e42 100644 --- a/plugins/inputs/kube_inventory/daemonset_test.go +++ b/plugins/inputs/kube_inventory/daemonset_test.go @@ -1,6 +1,8 @@ package kube_inventory import ( + "reflect" + "strings" "testing" "time" @@ -12,6 +14,8 @@ import ( func TestDaemonSet(t *testing.T) { cli := &client{} + selectInclude := []string{} + selectExclude := []string{} now := time.Now() now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) tests := []struct { @@ -55,6 +59,14 @@ func TestDaemonSet(t *testing.T) { }, 
CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, }, + Spec: &v1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "select1": "s1", + "select2": "s2", + }, + }, + }, }, }, }, @@ -75,8 +87,10 @@ func TestDaemonSet(t *testing.T) { "created": now.UnixNano(), }, Tags: map[string]string{ - "daemonset_name": "daemon1", - "namespace": "ns1", + "daemonset_name": "daemon1", + "namespace": "ns1", + "selector_select1": "s1", + "selector_select2": "s2", }, }, }, @@ -87,8 +101,11 @@ func TestDaemonSet(t *testing.T) { for _, v := range tests { ks := &KubernetesInventory{ - client: cli, + client: cli, + SelectorInclude: selectInclude, + SelectorExclude: selectExclude, } + ks.createSelectorFilters() acc := new(testutil.Accumulator) for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items { err := ks.gatherDaemonSet(*dset, acc) @@ -121,3 +138,170 @@ func TestDaemonSet(t *testing.T) { } } } + +func TestDaemonSetSelectorFilter(t *testing.T) { + cli := &client{} + now := time.Now() + now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) + + responseMap := map[string]interface{}{ + "/daemonsets/": &v1.DaemonSetList{ + Items: []*v1.DaemonSet{ + { + Status: &v1.DaemonSetStatus{ + CurrentNumberScheduled: toInt32Ptr(3), + DesiredNumberScheduled: toInt32Ptr(5), + NumberAvailable: toInt32Ptr(2), + NumberMisscheduled: toInt32Ptr(2), + NumberReady: toInt32Ptr(1), + NumberUnavailable: toInt32Ptr(1), + UpdatedNumberScheduled: toInt32Ptr(2), + }, + Metadata: &metav1.ObjectMeta{ + Generation: toInt64Ptr(11221), + Namespace: toStrPtr("ns1"), + Name: toStrPtr("daemon1"), + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + }, + Spec: &v1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "select1": "s1", + "select2": "s2", + }, + }, + }, + }, + }, + }, + } + + tests := []struct { + name string + handler *mockHandler + hasError bool + include []string + exclude []string + expected map[string]string + }{ + { + name: "nil filters equals all selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: nil, + exclude: nil, + expected: map[string]string{ + "selector_select1": "s1", + "selector_select2": "s2", + }, + }, + { + name: "empty filters equals all selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{}, + exclude: []string{}, + expected: map[string]string{ + "selector_select1": "s1", + "selector_select2": "s2", + }, + }, + { + name: "include filter equals only include-matched selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{"select1"}, + exclude: []string{}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + { + name: "exclude filter equals only non-excluded selectors (overrides include filter)", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{}, + exclude: []string{"select2"}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + { + name: "include glob filter equals only include-matched selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{"*1"}, + exclude: []string{}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + { + name: "exclude glob filter equals only non-excluded 
selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{}, + exclude: []string{"*2"}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + { + name: "exclude glob filter equals only non-excluded selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{}, + exclude: []string{"*2"}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + } + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + ks.SelectorInclude = v.include + ks.SelectorExclude = v.exclude + ks.createSelectorFilters() + acc := new(testutil.Accumulator) + for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items { + err := ks.gatherDaemonSet(*dset, acc) + if err != nil { + t.Errorf("Failed to gather daemonset - %s", err.Error()) + } + } + + // Grab selector tags + actual := map[string]string{} + for _, metric := range acc.Metrics { + for key, val := range metric.Tags { + if strings.Contains(key, "selector_") { + actual[key] = val + } + } + } + + if !reflect.DeepEqual(v.expected, actual) { + t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) + } + } +} diff --git a/plugins/inputs/kube_inventory/deployment.go b/plugins/inputs/kube_inventory/deployment.go index 5a0eb0b19..b91216765 100644 --- a/plugins/inputs/kube_inventory/deployment.go +++ b/plugins/inputs/kube_inventory/deployment.go @@ -32,6 +32,11 @@ func (ki *KubernetesInventory) gatherDeployment(d v1.Deployment, acc telegraf.Ac "deployment_name": d.Metadata.GetName(), "namespace": d.Metadata.GetNamespace(), } + for key, val := range d.GetSpec().GetSelector().GetMatchLabels() { + if ki.selectorFilter.Match(key) { + tags["selector_"+key] = val + } + } acc.AddFields(deploymentMeasurement, fields, tags) diff --git a/plugins/inputs/kube_inventory/deployment_test.go b/plugins/inputs/kube_inventory/deployment_test.go index 21b7bfd02..9407c84d9 100644 --- a/plugins/inputs/kube_inventory/deployment_test.go +++ b/plugins/inputs/kube_inventory/deployment_test.go @@ -1,6 +1,8 @@ package kube_inventory import ( + "reflect" + "strings" "testing" "time" @@ -12,7 +14,8 @@ import ( func TestDeployment(t *testing.T) { cli := &client{} - + selectInclude := []string{} + selectExclude := []string{} now := time.Now() now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) outputMetric := &testutil.Metric{ @@ -22,8 +25,10 @@ func TestDeployment(t *testing.T) { "created": now.UnixNano(), }, Tags: map[string]string{ - "namespace": "ns1", - "deployment_name": "deploy1", + "namespace": "ns1", + "deployment_name": "deploy1", + "selector_select1": "s1", + "selector_select2": "s2", }, } @@ -68,6 +73,12 @@ func TestDeployment(t *testing.T) { }, }, Replicas: toInt32Ptr(4), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "select1": "s1", + "select2": "s2", + }, + }, }, Metadata: &metav1.ObjectMeta{ Generation: toInt64Ptr(11221), @@ -95,8 +106,11 @@ func TestDeployment(t *testing.T) { for _, v := range tests { ks := &KubernetesInventory{ - client: cli, + client: cli, + SelectorInclude: selectInclude, + SelectorExclude: selectExclude, } + ks.createSelectorFilters() acc := new(testutil.Accumulator) for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items { err := ks.gatherDeployment(*deployment, acc) @@ -129,3 +143,179 @@ func TestDeployment(t *testing.T) { } } } + +func 
TestDeploymentSelectorFilter(t *testing.T) { + cli := &client{} + now := time.Now() + now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) + + responseMap := map[string]interface{}{ + "/deployments/": &v1.DeploymentList{ + Items: []*v1.Deployment{ + { + Status: &v1.DeploymentStatus{ + Replicas: toInt32Ptr(3), + AvailableReplicas: toInt32Ptr(1), + UnavailableReplicas: toInt32Ptr(4), + UpdatedReplicas: toInt32Ptr(2), + ObservedGeneration: toInt64Ptr(9121), + }, + Spec: &v1.DeploymentSpec{ + Strategy: &v1.DeploymentStrategy{ + RollingUpdate: &v1.RollingUpdateDeployment{ + MaxUnavailable: &intstr.IntOrString{ + IntVal: toInt32Ptr(30), + }, + MaxSurge: &intstr.IntOrString{ + IntVal: toInt32Ptr(20), + }, + }, + }, + Replicas: toInt32Ptr(4), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "select1": "s1", + "select2": "s2", + }, + }, + }, + Metadata: &metav1.ObjectMeta{ + Generation: toInt64Ptr(11221), + Namespace: toStrPtr("ns1"), + Name: toStrPtr("deploy1"), + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + }, + }, + }, + }, + } + + tests := []struct { + name string + handler *mockHandler + hasError bool + include []string + exclude []string + expected map[string]string + }{ + { + name: "nil filters equals all selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: nil, + exclude: nil, + expected: map[string]string{ + "selector_select1": "s1", + "selector_select2": "s2", + }, + }, + { + name: "empty filters equals all selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{}, + exclude: []string{}, + expected: map[string]string{ + "selector_select1": "s1", + "selector_select2": "s2", + }, + }, + { + name: "include filter equals only include-matched selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{"select1"}, + exclude: []string{}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + { + name: "exclude filter equals only non-excluded selectors (overrides include filter)", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{}, + exclude: []string{"select2"}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + { + name: "include glob filter equals only include-matched selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{"*1"}, + exclude: []string{}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + { + name: "exclude glob filter equals only non-excluded selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{}, + exclude: []string{"*2"}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + { + name: "exclude glob filter equals only non-excluded selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{}, + exclude: []string{"*2"}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + } + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + ks.SelectorInclude = v.include + ks.SelectorExclude = v.exclude + ks.createSelectorFilters() + acc := new(testutil.Accumulator) + for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items { + err := 
ks.gatherDeployment(*deployment, acc) + if err != nil { + t.Errorf("Failed to gather deployment - %s", err.Error()) + } + } + + // Grab selector tags + actual := map[string]string{} + for _, metric := range acc.Metrics { + for key, val := range metric.Tags { + if strings.Contains(key, "selector_") { + actual[key] = val + } + } + } + + if !reflect.DeepEqual(v.expected, actual) { + t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) + } + } +} diff --git a/plugins/inputs/kube_inventory/kube_state.go b/plugins/inputs/kube_inventory/kube_state.go index 5aa51b6c5..251fe9d28 100644 --- a/plugins/inputs/kube_inventory/kube_state.go +++ b/plugins/inputs/kube_inventory/kube_state.go @@ -34,8 +34,13 @@ type KubernetesInventory struct { ResourceInclude []string `toml:"resource_include"` MaxConfigMapAge internal.Duration `toml:"max_config_map_age"` + SelectorInclude []string `toml:"selector_include"` + SelectorExclude []string `toml:"selector_exclude"` + tls.ClientConfig client *client + + selectorFilter filter.Filter } var sampleConfig = ` @@ -65,6 +70,12 @@ var sampleConfig = ` ## Overrides resource_exclude if both set. # resource_include = [ "deployments", "nodes", "statefulsets" ] + ## selectors to include and exclude as tags. Globs accepted. + ## Note that an empty array for both will include all selectors as tags + ## selector_exclude overrides selector_include if both set. + # selector_include = [] + # selector_exclude = ["*"] + ## Optional TLS Config # tls_ca = "/path/to/cafile" # tls_cert = "/path/to/certfile" @@ -114,6 +125,11 @@ func (ki *KubernetesInventory) Gather(acc telegraf.Accumulator) (err error) { return err } + ki.selectorFilter, err = filter.NewIncludeExcludeFilter(ki.SelectorInclude, ki.SelectorExclude) + if err != nil { + return err + } + wg := sync.WaitGroup{} ctx := context.Background() @@ -170,6 +186,15 @@ func convertQuantity(s string, m float64) int64 { return int64(f * m) } +func (ki *KubernetesInventory) createSelectorFilters() error { + filter, err := filter.NewIncludeExcludeFilter(ki.SelectorInclude, ki.SelectorExclude) + if err != nil { + return err + } + ki.selectorFilter = filter + return nil +} + var ( daemonSetMeasurement = "kubernetes_daemonset" deploymentMeasurement = "kubernetes_deployment" @@ -188,6 +213,8 @@ func init() { return &KubernetesInventory{ ResponseTimeout: internal.Duration{Duration: time.Second * 5}, Namespace: "default", + SelectorInclude: []string{}, + SelectorExclude: []string{"*"}, } }) } diff --git a/plugins/inputs/kube_inventory/persistentvolumeclaim.go b/plugins/inputs/kube_inventory/persistentvolumeclaim.go index 0663462ae..ac8c9f85a 100644 --- a/plugins/inputs/kube_inventory/persistentvolumeclaim.go +++ b/plugins/inputs/kube_inventory/persistentvolumeclaim.go @@ -42,6 +42,11 @@ func (ki *KubernetesInventory) gatherPersistentVolumeClaim(pvc v1.PersistentVolu "phase": pvc.Status.GetPhase(), "storageclass": pvc.Spec.GetStorageClassName(), } + for key, val := range pvc.GetSpec().GetSelector().GetMatchLabels() { + if ki.selectorFilter.Match(key) { + tags["selector_"+key] = val + } + } acc.AddFields(persistentVolumeClaimMeasurement, fields, tags) diff --git a/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go b/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go index 8a50c0f2e..5155a5d3b 100644 --- a/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go +++ b/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go @@ -1,6 +1,8 @@ package kube_inventory import ( + 
"reflect" + "strings" "testing" "time" @@ -12,6 +14,8 @@ import ( func TestPersistentVolumeClaim(t *testing.T) { cli := &client{} + selectInclude := []string{} + selectExclude := []string{} now := time.Now() now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) @@ -43,6 +47,12 @@ func TestPersistentVolumeClaim(t *testing.T) { Spec: &v1.PersistentVolumeClaimSpec{ VolumeName: toStrPtr("pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8"), StorageClassName: toStrPtr("ebs-1"), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "select1": "s1", + "select2": "s2", + }, + }, }, Metadata: &metav1.ObjectMeta{ Namespace: toStrPtr("ns1"), @@ -65,10 +75,12 @@ func TestPersistentVolumeClaim(t *testing.T) { "phase_type": 0, }, Tags: map[string]string{ - "pvc_name": "pc1", - "namespace": "ns1", - "storageclass": "ebs-1", - "phase": "bound", + "pvc_name": "pc1", + "namespace": "ns1", + "storageclass": "ebs-1", + "phase": "bound", + "selector_select1": "s1", + "selector_select2": "s2", }, }, }, @@ -79,8 +91,11 @@ func TestPersistentVolumeClaim(t *testing.T) { for _, v := range tests { ks := &KubernetesInventory{ - client: cli, + client: cli, + SelectorInclude: selectInclude, + SelectorExclude: selectExclude, } + ks.createSelectorFilters() acc := new(testutil.Accumulator) for _, pvc := range ((v.handler.responseMap["/persistentvolumeclaims/"]).(*v1.PersistentVolumeClaimList)).Items { err := ks.gatherPersistentVolumeClaim(*pvc, acc) @@ -113,3 +128,165 @@ func TestPersistentVolumeClaim(t *testing.T) { } } } + +func TestPersistentVolumeClaimSelectorFilter(t *testing.T) { + cli := &client{} + now := time.Now() + now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) + + responseMap := map[string]interface{}{ + "/persistentvolumeclaims/": &v1.PersistentVolumeClaimList{ + Items: []*v1.PersistentVolumeClaim{ + { + Status: &v1.PersistentVolumeClaimStatus{ + Phase: toStrPtr("bound"), + }, + Spec: &v1.PersistentVolumeClaimSpec{ + VolumeName: toStrPtr("pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8"), + StorageClassName: toStrPtr("ebs-1"), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "select1": "s1", + "select2": "s2", + }, + }, + }, + Metadata: &metav1.ObjectMeta{ + Namespace: toStrPtr("ns1"), + Name: toStrPtr("pc1"), + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + }, + }, + }, + }, + } + + tests := []struct { + name string + handler *mockHandler + hasError bool + include []string + exclude []string + expected map[string]string + }{ + { + name: "nil filters equals all selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: nil, + exclude: nil, + expected: map[string]string{ + "selector_select1": "s1", + "selector_select2": "s2", + }, + }, + { + name: "empty filters equals all selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{}, + exclude: []string{}, + expected: map[string]string{ + "selector_select1": "s1", + "selector_select2": "s2", + }, + }, + { + name: "include filter equals only include-matched selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{"select1"}, + exclude: []string{}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + { + name: "exclude filter equals only non-excluded selectors (overrides include filter)", + handler: &mockHandler{ + 
responseMap: responseMap, + }, + hasError: false, + include: []string{}, + exclude: []string{"select2"}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + { + name: "include glob filter equals only include-matched selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{"*1"}, + exclude: []string{}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + { + name: "exclude glob filter equals only non-excluded selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{}, + exclude: []string{"*2"}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + { + name: "exclude glob filter equals only non-excluded selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{}, + exclude: []string{"*2"}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + } + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + ks.SelectorInclude = v.include + ks.SelectorExclude = v.exclude + ks.createSelectorFilters() + acc := new(testutil.Accumulator) + for _, pvc := range ((v.handler.responseMap["/persistentvolumeclaims/"]).(*v1.PersistentVolumeClaimList)).Items { + err := ks.gatherPersistentVolumeClaim(*pvc, acc) + if err != nil { + t.Errorf("Failed to gather pvc - %s", err.Error()) + } + } + + // Grab selector tags + actual := map[string]string{} + for _, metric := range acc.Metrics { + for key, val := range metric.Tags { + if strings.Contains(key, "selector_") { + actual[key] = val + } + } + } + + if !reflect.DeepEqual(v.expected, actual) { + t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) + } + } +} diff --git a/plugins/inputs/kube_inventory/pod.go b/plugins/inputs/kube_inventory/pod.go index f6947c601..2f17f690d 100644 --- a/plugins/inputs/kube_inventory/pod.go +++ b/plugins/inputs/kube_inventory/pod.go @@ -29,13 +29,13 @@ func (ki *KubernetesInventory) gatherPod(p v1.Pod, acc telegraf.Accumulator) err for i, cs := range p.Status.ContainerStatuses { c := p.Spec.Containers[i] - gatherPodContainer(*p.Spec.NodeName, p, *cs, *c, acc) + gatherPodContainer(*p.Spec.NodeName, ki, p, *cs, *c, acc) } return nil } -func gatherPodContainer(nodeName string, p v1.Pod, cs v1.ContainerStatus, c v1.Container, acc telegraf.Accumulator) { +func gatherPodContainer(nodeName string, ki *KubernetesInventory, p v1.Pod, cs v1.ContainerStatus, c v1.Container, acc telegraf.Accumulator) { stateCode := 3 stateReason := "" state := "unknown" @@ -77,6 +77,11 @@ func gatherPodContainer(nodeName string, p v1.Pod, cs v1.ContainerStatus, c v1.C "state": state, "readiness": readiness, } + for key, val := range p.GetSpec().GetNodeSelector() { + if ki.selectorFilter.Match(key) { + tags["node_selector_"+key] = val + } + } req := c.Resources.Requests lim := c.Resources.Limits diff --git a/plugins/inputs/kube_inventory/pod_test.go b/plugins/inputs/kube_inventory/pod_test.go index aac29b9f1..d9b322165 100644 --- a/plugins/inputs/kube_inventory/pod_test.go +++ b/plugins/inputs/kube_inventory/pod_test.go @@ -1,6 +1,8 @@ package kube_inventory import ( + "reflect" + "strings" "testing" "time" @@ -12,6 +14,8 @@ import ( func TestPod(t *testing.T) { cli := &client{} + selectInclude := []string{} + selectExclude := []string{} now := time.Now() started := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-1, 1, 36, 0, now.Location()) created := 
time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-2, 1, 36, 0, now.Location()) @@ -112,6 +116,10 @@ func TestPod(t *testing.T) { Name: toStrPtr("vol2"), }, }, + NodeSelector: map[string]string{ + "select1": "s1", + "select2": "s2", + }, }, Status: &v1.PodStatus{ Phase: toStrPtr("Running"), @@ -213,12 +221,14 @@ func TestPod(t *testing.T) { "resource_limits_millicpu_units": int64(100), }, Tags: map[string]string{ - "namespace": "ns1", - "container_name": "running", - "node_name": "node1", - "pod_name": "pod1", - "state": "running", - "readiness": "ready", + "namespace": "ns1", + "container_name": "running", + "node_name": "node1", + "pod_name": "pod1", + "state": "running", + "readiness": "ready", + "node_selector_select1": "s1", + "node_selector_select2": "s2", }, }, { @@ -264,8 +274,11 @@ func TestPod(t *testing.T) { } for _, v := range tests { ks := &KubernetesInventory{ - client: cli, + client: cli, + SelectorInclude: selectInclude, + SelectorExclude: selectExclude, } + ks.createSelectorFilters() acc := new(testutil.Accumulator) for _, pod := range ((v.handler.responseMap["/pods/"]).(*v1.PodList)).Items { err := ks.gatherPod(*pod, acc) @@ -298,3 +311,243 @@ func TestPod(t *testing.T) { } } } + +func TestPodSelectorFilter(t *testing.T) { + cli := &client{} + now := time.Now() + started := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-1, 1, 36, 0, now.Location()) + created := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-2, 1, 36, 0, now.Location()) + cond1 := time.Date(now.Year(), 7, 5, 7, 53, 29, 0, now.Location()) + cond2 := time.Date(now.Year(), 7, 5, 7, 53, 31, 0, now.Location()) + + responseMap := map[string]interface{}{ + "/pods/": &v1.PodList{ + Items: []*v1.Pod{ + { + Spec: &v1.PodSpec{ + NodeName: toStrPtr("node1"), + Containers: []*v1.Container{ + { + Name: toStrPtr("forwarder"), + Image: toStrPtr("image1"), + Ports: []*v1.ContainerPort{ + { + ContainerPort: toInt32Ptr(8080), + Protocol: toStrPtr("TCP"), + }, + }, + Resources: &v1.ResourceRequirements{ + Limits: map[string]*resource.Quantity{ + "cpu": {String_: toStrPtr("100m")}, + }, + Requests: map[string]*resource.Quantity{ + "cpu": {String_: toStrPtr("100m")}, + }, + }, + }, + }, + Volumes: []*v1.Volume{ + { + Name: toStrPtr("vol1"), + VolumeSource: &v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: toStrPtr("pc1"), + ReadOnly: toBoolPtr(true), + }, + }, + }, + { + Name: toStrPtr("vol2"), + }, + }, + NodeSelector: map[string]string{ + "select1": "s1", + "select2": "s2", + }, + }, + Status: &v1.PodStatus{ + Phase: toStrPtr("Running"), + HostIP: toStrPtr("180.12.10.18"), + PodIP: toStrPtr("10.244.2.15"), + StartTime: &metav1.Time{Seconds: toInt64Ptr(started.Unix())}, + Conditions: []*v1.PodCondition{ + { + Type: toStrPtr("Initialized"), + Status: toStrPtr("True"), + LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond1.Unix())}, + }, + { + Type: toStrPtr("Ready"), + Status: toStrPtr("True"), + LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())}, + }, + { + Type: toStrPtr("Scheduled"), + Status: toStrPtr("True"), + LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond1.Unix())}, + }, + }, + ContainerStatuses: []*v1.ContainerStatus{ + { + Name: toStrPtr("forwarder"), + State: &v1.ContainerState{ + Running: &v1.ContainerStateRunning{ + StartedAt: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())}, + }, + }, + Ready: toBoolPtr(true), + RestartCount: toInt32Ptr(3), + Image: toStrPtr("image1"), + ImageID: toStrPtr("image_id1"), + 
ContainerID: toStrPtr("docker://54abe32d0094479d3d"), + }, + }, + }, + Metadata: &metav1.ObjectMeta{ + OwnerReferences: []*metav1.OwnerReference{ + { + ApiVersion: toStrPtr("apps/v1"), + Kind: toStrPtr("DaemonSet"), + Name: toStrPtr("forwarder"), + Controller: toBoolPtr(true), + }, + }, + Generation: toInt64Ptr(11232), + Namespace: toStrPtr("ns1"), + Name: toStrPtr("pod1"), + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(created.Unix())}, + }, + }, + }, + }, + } + + tests := []struct { + name string + handler *mockHandler + hasError bool + include []string + exclude []string + expected map[string]string + }{ + { + name: "nil filters equals all selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: nil, + exclude: nil, + expected: map[string]string{ + "node_selector_select1": "s1", + "node_selector_select2": "s2", + }, + }, + { + name: "empty filters equals all selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{}, + exclude: []string{}, + expected: map[string]string{ + "node_selector_select1": "s1", + "node_selector_select2": "s2", + }, + }, + { + name: "include filter equals only include-matched selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{"select1"}, + exclude: []string{}, + expected: map[string]string{ + "node_selector_select1": "s1", + }, + }, + { + name: "exclude filter equals only non-excluded selectors (overrides include filter)", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{}, + exclude: []string{"select2"}, + expected: map[string]string{ + "node_selector_select1": "s1", + }, + }, + { + name: "include glob filter equals only include-matched selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{"*1"}, + exclude: []string{}, + expected: map[string]string{ + "node_selector_select1": "s1", + }, + }, + { + name: "exclude glob filter equals only non-excluded selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{}, + exclude: []string{"*2"}, + expected: map[string]string{ + "node_selector_select1": "s1", + }, + }, + { + name: "exclude glob filter equals only non-excluded selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{}, + exclude: []string{"*2"}, + expected: map[string]string{ + "node_selector_select1": "s1", + }, + }, + } + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + ks.SelectorInclude = v.include + ks.SelectorExclude = v.exclude + ks.createSelectorFilters() + acc := new(testutil.Accumulator) + for _, pod := range ((v.handler.responseMap["/pods/"]).(*v1.PodList)).Items { + err := ks.gatherPod(*pod, acc) + if err != nil { + t.Errorf("Failed to gather pod - %s", err.Error()) + } + } + + // Grab selector tags + actual := map[string]string{} + for _, metric := range acc.Metrics { + for key, val := range metric.Tags { + if strings.Contains(key, "node_selector_") { + actual[key] = val + } + } + } + + if !reflect.DeepEqual(v.expected, actual) { + t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) + } + } +} diff --git a/plugins/inputs/kube_inventory/service.go b/plugins/inputs/kube_inventory/service.go index 4b0cc0845..0c749ea8a 100644 --- 
a/plugins/inputs/kube_inventory/service.go +++ b/plugins/inputs/kube_inventory/service.go @@ -38,6 +38,12 @@ func (ki *KubernetesInventory) gatherService(s v1.Service, acc telegraf.Accumula "namespace": s.Metadata.GetNamespace(), } + for key, val := range s.GetSpec().GetSelector() { + if ki.selectorFilter.Match(key) { + tags["selector_"+key] = val + } + } + var getPorts = func() { for _, port := range s.GetSpec().GetPorts() { fields["port"] = port.GetPort() diff --git a/plugins/inputs/kube_inventory/service_test.go b/plugins/inputs/kube_inventory/service_test.go index 6c0c8787a..3b1089130 100644 --- a/plugins/inputs/kube_inventory/service_test.go +++ b/plugins/inputs/kube_inventory/service_test.go @@ -1,17 +1,20 @@ package kube_inventory import ( + "reflect" + "testing" "time" "github.com/ericchiang/k8s/apis/core/v1" metav1 "github.com/ericchiang/k8s/apis/meta/v1" "github.com/influxdata/telegraf/testutil" + + "strings" ) func TestService(t *testing.T) { cli := &client{} - now := time.Now() now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) @@ -20,6 +23,8 @@ func TestService(t *testing.T) { handler *mockHandler output *testutil.Accumulator hasError bool + include []string + exclude []string }{ { name: "no service", @@ -48,6 +53,10 @@ func TestService(t *testing.T) { }, ExternalIPs: []string{"1.0.0.127"}, ClusterIP: toStrPtr("127.0.0.1"), + Selector: map[string]string{ + "select1": "s1", + "select2": "s2", + }, }, Metadata: &metav1.ObjectMeta{ Generation: toInt64Ptr(12), @@ -71,12 +80,14 @@ func TestService(t *testing.T) { "created": now.UnixNano(), }, Tags: map[string]string{ - "service_name": "checker", - "namespace": "ns1", - "port_name": "diagnostic", - "port_protocol": "TCP", - "cluster_ip": "127.0.0.1", - "ip": "1.0.0.127", + "service_name": "checker", + "namespace": "ns1", + "port_name": "diagnostic", + "port_protocol": "TCP", + "cluster_ip": "127.0.0.1", + "ip": "1.0.0.127", + "selector_select1": "s1", + "selector_select2": "s2", }, }, }, @@ -89,6 +100,9 @@ func TestService(t *testing.T) { ks := &KubernetesInventory{ client: cli, } + ks.SelectorInclude = v.include + ks.SelectorExclude = v.exclude + ks.createSelectorFilters() acc := new(testutil.Accumulator) for _, service := range ((v.handler.responseMap["/service/"]).(*v1.ServiceList)).Items { err := ks.gatherService(*service, acc) @@ -121,3 +135,165 @@ func TestService(t *testing.T) { } } } + +func TestServiceSelectorFilter(t *testing.T) { + cli := &client{} + now := time.Now() + now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) + + responseMap := map[string]interface{}{ + "/service/": &v1.ServiceList{ + Items: []*v1.Service{ + { + Spec: &v1.ServiceSpec{ + Ports: []*v1.ServicePort{ + { + Port: toInt32Ptr(8080), + TargetPort: toIntStrPtrI(1234), + Name: toStrPtr("diagnostic"), + Protocol: toStrPtr("TCP"), + }, + }, + ExternalIPs: []string{"1.0.0.127"}, + ClusterIP: toStrPtr("127.0.0.1"), + Selector: map[string]string{ + "select1": "s1", + "select2": "s2", + }, + }, + Metadata: &metav1.ObjectMeta{ + Generation: toInt64Ptr(12), + Namespace: toStrPtr("ns1"), + Name: toStrPtr("checker"), + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + }, + }, + }, + }, + } + + tests := []struct { + name string + handler *mockHandler + hasError bool + include []string + exclude []string + expected map[string]string + }{ + { + name: "nil filters equals all selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: 
nil, + exclude: nil, + expected: map[string]string{ + "selector_select1": "s1", + "selector_select2": "s2", + }, + }, + { + name: "empty filters equals all selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{}, + exclude: []string{}, + expected: map[string]string{ + "selector_select1": "s1", + "selector_select2": "s2", + }, + }, + { + name: "include filter equals only include-matched selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{"select1"}, + exclude: []string{}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + { + name: "exclude filter equals only non-excluded selectors (overrides include filter)", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{}, + exclude: []string{"select2"}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + { + name: "include glob filter equals only include-matched selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{"*1"}, + exclude: []string{}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + { + name: "exclude glob filter equals only non-excluded selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{}, + exclude: []string{"*2"}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + { + name: "exclude glob filter equals only non-excluded selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{}, + exclude: []string{"*2"}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + } + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + ks.SelectorInclude = v.include + ks.SelectorExclude = v.exclude + ks.createSelectorFilters() + acc := new(testutil.Accumulator) + for _, service := range ((v.handler.responseMap["/service/"]).(*v1.ServiceList)).Items { + err := ks.gatherService(*service, acc) + if err != nil { + t.Errorf("Failed to gather service - %s", err.Error()) + } + } + + // Grab selector tags + actual := map[string]string{} + for _, metric := range acc.Metrics { + for key, val := range metric.Tags { + if strings.Contains(key, "selector_") { + actual[key] = val + } + } + } + + if !reflect.DeepEqual(v.expected, actual) { + t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) + } + } +} diff --git a/plugins/inputs/kube_inventory/statefulset.go b/plugins/inputs/kube_inventory/statefulset.go index c95e566c2..fe25f19f0 100644 --- a/plugins/inputs/kube_inventory/statefulset.go +++ b/plugins/inputs/kube_inventory/statefulset.go @@ -39,6 +39,11 @@ func (ki *KubernetesInventory) gatherStatefulSet(s v1.StatefulSet, acc telegraf. 
"statefulset_name": *s.Metadata.Name, "namespace": *s.Metadata.Namespace, } + for key, val := range s.GetSpec().GetSelector().GetMatchLabels() { + if ki.selectorFilter.Match(key) { + tags["selector_"+key] = val + } + } acc.AddFields(statefulSetMeasurement, fields, tags) diff --git a/plugins/inputs/kube_inventory/statefulset_test.go b/plugins/inputs/kube_inventory/statefulset_test.go index 1a971b7b6..689cbadbc 100644 --- a/plugins/inputs/kube_inventory/statefulset_test.go +++ b/plugins/inputs/kube_inventory/statefulset_test.go @@ -1,6 +1,8 @@ package kube_inventory import ( + "reflect" + "strings" "testing" "time" @@ -12,6 +14,8 @@ import ( func TestStatefulSet(t *testing.T) { cli := &client{} + selectInclude := []string{} + selectExclude := []string{} now := time.Now() now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) tests := []struct { @@ -45,6 +49,12 @@ func TestStatefulSet(t *testing.T) { }, Spec: &v1.StatefulSetSpec{ Replicas: toInt32Ptr(3), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "select1": "s1", + "select2": "s2", + }, + }, }, Metadata: &metav1.ObjectMeta{ Generation: toInt64Ptr(332), @@ -77,6 +87,8 @@ func TestStatefulSet(t *testing.T) { Tags: map[string]string{ "namespace": "ns1", "statefulset_name": "sts1", + "selector_select1": "s1", + "selector_select2": "s2", }, }, }, @@ -87,8 +99,11 @@ func TestStatefulSet(t *testing.T) { for _, v := range tests { ks := &KubernetesInventory{ - client: cli, + client: cli, + SelectorInclude: selectInclude, + SelectorExclude: selectExclude, } + ks.createSelectorFilters() acc := new(testutil.Accumulator) for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items { err := ks.gatherStatefulSet(*ss, acc) @@ -121,3 +136,169 @@ func TestStatefulSet(t *testing.T) { } } } + +func TestStatefulSetSelectorFilter(t *testing.T) { + cli := &client{} + now := time.Now() + now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) + + responseMap := map[string]interface{}{ + "/statefulsets/": &v1.StatefulSetList{ + Items: []*v1.StatefulSet{ + { + Status: &v1.StatefulSetStatus{ + Replicas: toInt32Ptr(2), + CurrentReplicas: toInt32Ptr(4), + ReadyReplicas: toInt32Ptr(1), + UpdatedReplicas: toInt32Ptr(3), + ObservedGeneration: toInt64Ptr(119), + }, + Spec: &v1.StatefulSetSpec{ + Replicas: toInt32Ptr(3), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "select1": "s1", + "select2": "s2", + }, + }, + }, + Metadata: &metav1.ObjectMeta{ + Generation: toInt64Ptr(332), + Namespace: toStrPtr("ns1"), + Name: toStrPtr("sts1"), + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + }, + }, + }, + }, + } + + tests := []struct { + name string + handler *mockHandler + hasError bool + include []string + exclude []string + expected map[string]string + }{ + { + name: "nil filters equals all selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: nil, + exclude: nil, + expected: map[string]string{ + "selector_select1": "s1", + "selector_select2": "s2", + }, + }, + { + name: "empty filters equals all selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{}, + exclude: []string{}, + expected: map[string]string{ + "selector_select1": "s1", + "selector_select2": "s2", + }, + }, + { + name: "include filter equals only include-matched selectors", + handler: 
&mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{"select1"}, + exclude: []string{}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + { + name: "exclude filter equals only non-excluded selectors (overrides include filter)", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{}, + exclude: []string{"select2"}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + { + name: "include glob filter equals only include-matched selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{"*1"}, + exclude: []string{}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + { + name: "exclude glob filter equals only non-excluded selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{}, + exclude: []string{"*2"}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + { + name: "exclude glob filter equals only non-excluded selectors", + handler: &mockHandler{ + responseMap: responseMap, + }, + hasError: false, + include: []string{}, + exclude: []string{"*2"}, + expected: map[string]string{ + "selector_select1": "s1", + }, + }, + } + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + ks.SelectorInclude = v.include + ks.SelectorExclude = v.exclude + ks.createSelectorFilters() + acc := new(testutil.Accumulator) + for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items { + err := ks.gatherStatefulSet(*ss, acc) + if err != nil { + t.Errorf("Failed to gather ss - %s", err.Error()) + } + } + + // Grab selector tags + actual := map[string]string{} + for _, metric := range acc.Metrics { + for key, val := range metric.Tags { + if strings.Contains(key, "selector_") { + actual[key] = val + } + } + } + + if !reflect.DeepEqual(v.expected, actual) { + t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) + } + } +}
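
For reference, here is a minimal configuration sketch showing how the `selector_include` / `selector_exclude` options introduced by this patch might be used. The `url` value and the `app` selector key are illustrative assumptions, not part of the patch; only the two selector options and their semantics come from the change itself.

```toml
[[inputs.kube_inventory]]
  ## Kubernetes API endpoint (illustrative placeholder)
  url = "https://127.0.0.1"

  ## Emit only the "app" selector label as a tag; all other selector keys
  ## are dropped. selector_exclude overrides selector_include if both are
  ## set, so it is left empty here to let the include list take effect.
  selector_include = ["app"]
  selector_exclude = []
```

With the shipped defaults (`selector_include = []`, `selector_exclude = ["*"]`) no selector tags are emitted at all, which keeps the default behavior in line with the README's note about avoiding cardinality issues.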