From e6f887d3f0b39bda46a8303fa2092e635a1749b6 Mon Sep 17 00:00:00 2001
From: yoyosir
Date: Fri, 8 Nov 2024 10:46:00 -0500
Subject: [PATCH] Support kubernetes_role argument for prometheus.operator.servicemonitors (#2023)

* Support kubernetes_role argument for prometheus.operator.servicemonitors

* Update docs/sources/reference/components/prometheus/prometheus.operator.servicemonitors.md

Co-authored-by: Clayton Cornell <131809008+clayton-cornell@users.noreply.github.com>

* Use DefaultArguments to handle default

* Fix doc

* Update docs/sources/reference/components/prometheus/prometheus.operator.servicemonitors.md

Co-authored-by: Piotr <17101802+thampiotr@users.noreply.github.com>

* Add validation

---------

Co-authored-by: Clayton Cornell <131809008+clayton-cornell@users.noreply.github.com>
Co-authored-by: Piotr <17101802+thampiotr@users.noreply.github.com>
---
 CHANGELOG.md                                  |  2 +
 .../prometheus.operator.servicemonitors.md    |  9 ++-
 .../prometheus/operator/common/crdmanager.go  |  3 +-
 .../configgen/config_gen_servicemonitor.go    | 13 ++-
 .../config_gen_servicemonitor_test.go         | 80 ++++++++++++++++++-
 .../component/prometheus/operator/types.go    |  8 ++
 6 files changed, 105 insertions(+), 10 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3436e24eb9..0c8019e348 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -39,6 +39,8 @@ v1.5.0-rc.0
 
 - Add support to `loki.source.api` to be able to extract the tenant from the HTTP `X-Scope-OrgID` header (@QuentinBisson)
 
+- Add support to `prometheus.operator.servicemonitors` to allow the `endpointslice` role. (@yoyosir)
+
 - (_Experimental_) Add a `loki.secretfilter` component to redact secrets from collected logs.
 
 - Add `otelcol.exporter.splunkhec` allowing to export otel data to Splunk HEC (@adlotsof)
diff --git a/docs/sources/reference/components/prometheus/prometheus.operator.servicemonitors.md b/docs/sources/reference/components/prometheus/prometheus.operator.servicemonitors.md
index c5182b7171..67b2fcc4d3 100644
--- a/docs/sources/reference/components/prometheus/prometheus.operator.servicemonitors.md
+++ b/docs/sources/reference/components/prometheus/prometheus.operator.servicemonitors.md
@@ -32,10 +32,11 @@ prometheus.operator.servicemonitors "LABEL" {
 
 The following arguments are supported:
 
-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`forward_to` | `list(MetricsReceiver)` | List of receivers to send scraped metrics to. | | yes
-`namespaces` | `list(string)` | List of namespaces to search for ServiceMonitor resources. If not specified, all namespaces will be searched. || no
+Name | Type | Description | Default | Required
+---- | ---- | ----------- | ------- | --------
+`forward_to` | `list(MetricsReceiver)` | List of receivers to send scraped metrics to. | | yes
+`namespaces` | `list(string)` | List of namespaces to search for ServiceMonitor resources. If not specified, all namespaces will be searched. | | no
+`kubernetes_role` | `string` | The Kubernetes role used for discovery. Supported values are `endpoints` and `endpointslice`. | `endpoints` | no
 
 ## Blocks
 
diff --git a/internal/component/prometheus/operator/common/crdmanager.go b/internal/component/prometheus/operator/common/crdmanager.go
index f5c13da577..e24b99159b 100644
--- a/internal/component/prometheus/operator/common/crdmanager.go
+++ b/internal/component/prometheus/operator/common/crdmanager.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	promk8s "github.com/prometheus/prometheus/discovery/kubernetes"
 	"sort"
 	"strings"
 	"sync"
@@ -486,7 +487,7 @@ func (c *crdManager) addServiceMonitor(sm *promopv1.ServiceMonitor) {
 	mapKeys := []string{}
 	for i, ep := range sm.Spec.Endpoints {
 		var scrapeConfig *config.ScrapeConfig
-		scrapeConfig, err = gen.GenerateServiceMonitorConfig(sm, ep, i)
+		scrapeConfig, err = gen.GenerateServiceMonitorConfig(sm, ep, i, promk8s.Role(c.args.KubernetesRole))
 		if err != nil {
 			// TODO(jcreixell): Generate Kubernetes event to inform of this error when running `kubectl get `.
 			level.Error(c.logger).Log("name", sm.Name, "err", err, "msg", "error generating scrapeconfig from serviceMonitor")
diff --git a/internal/component/prometheus/operator/configgen/config_gen_servicemonitor.go b/internal/component/prometheus/operator/configgen/config_gen_servicemonitor.go
index 5893f07646..542f3c8ab7 100644
--- a/internal/component/prometheus/operator/configgen/config_gen_servicemonitor.go
+++ b/internal/component/prometheus/operator/configgen/config_gen_servicemonitor.go
@@ -16,7 +16,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-func (cg *ConfigGenerator) GenerateServiceMonitorConfig(m *promopv1.ServiceMonitor, ep promopv1.Endpoint, i int) (cfg *config.ScrapeConfig, err error) {
+func (cg *ConfigGenerator) GenerateServiceMonitorConfig(m *promopv1.ServiceMonitor, ep promopv1.Endpoint, i int, role promk8s.Role) (cfg *config.ScrapeConfig, err error) {
 	cfg = cg.generateDefaultScrapeConfig()
 
 	cfg.JobName = fmt.Sprintf("serviceMonitor/%s/%s/%d", m.Namespace, m.Name, i)
@@ -24,7 +24,7 @@ func (cg *ConfigGenerator) GenerateServiceMonit
 	if ep.HonorTimestamps != nil {
 		cfg.HonorTimestamps = *ep.HonorTimestamps
 	}
-	dConfig := cg.generateK8SSDConfig(m.Spec.NamespaceSelector, m.Namespace, promk8s.RoleEndpoint, m.Spec.AttachMetadata)
+	dConfig := cg.generateK8SSDConfig(m.Spec.NamespaceSelector, m.Namespace, role, m.Spec.AttachMetadata)
 	cfg.ServiceDiscoveryConfigs = append(cfg.ServiceDiscoveryConfigs, dConfig)
 
 	if ep.Interval != "" {
@@ -153,6 +153,10 @@ func (cg *ConfigGenerator) GenerateServiceMonit
 		}
 	}
 
+	labelPortName := "__meta_kubernetes_endpoint_port_name"
+	if role == promk8s.RoleEndpointSlice {
+		labelPortName = "__meta_kubernetes_endpointslice_port_name"
+	}
 	// Filter targets based on correct port for the endpoint.
 	if ep.Port != "" {
 		regex, err := relabel.NewRegexp(ep.Port)
@@ -160,7 +164,7 @@ func (cg *ConfigGenerator) GenerateServiceMonit
 			return nil, fmt.Errorf("parsing Port as regex: %w", err)
 		}
 		relabels.add(&relabel.Config{
-			SourceLabels: model.LabelNames{"__meta_kubernetes_endpoint_port_name"},
+			SourceLabels: model.LabelNames{model.LabelName(labelPortName)},
 			Action:       "keep",
 			Regex:        regex,
 		})
@@ -191,6 +195,9 @@ func (cg *ConfigGenerator) GenerateServiceMonit
 	}
 
 	sourceLabels := model.LabelNames{"__meta_kubernetes_endpoint_address_target_kind", "__meta_kubernetes_endpoint_address_target_name"}
+	if role == promk8s.RoleEndpointSlice {
+		sourceLabels = model.LabelNames{"__meta_kubernetes_endpointslice_address_target_kind", "__meta_kubernetes_endpointslice_address_target_name"}
+	}
 	// Relabel namespace and pod and service labels into proper labels.
 	// Relabel node labels with meta labels available with Prometheus >= v2.3.
 	relabels.add(&relabel.Config{
diff --git a/internal/component/prometheus/operator/configgen/config_gen_servicemonitor_test.go b/internal/component/prometheus/operator/configgen/config_gen_servicemonitor_test.go
index 17d4791abf..2b772b3e3a 100644
--- a/internal/component/prometheus/operator/configgen/config_gen_servicemonitor_test.go
+++ b/internal/component/prometheus/operator/configgen/config_gen_servicemonitor_test.go
@@ -32,6 +32,7 @@ func TestGenerateServiceMonitorConfig(t *testing.T) {
 		name                   string
 		m                      *promopv1.ServiceMonitor
 		ep                     promopv1.Endpoint
+		role                   promk8s.Role
 		expectedRelabels       string
 		expectedMetricRelabels string
 		expected               *config.ScrapeConfig
@@ -44,7 +45,8 @@ func TestGenerateServiceMonitorConfig(t *testing.T) {
 				Name:      "svcmonitor",
 			},
 		},
-		ep: promopv1.Endpoint{},
+		ep:   promopv1.Endpoint{},
+		role: promk8s.RoleEndpoint,
 		expectedRelabels: util.Untab(`
 			- target_label: __meta_foo
 			  replacement: bar
@@ -110,6 +112,7 @@ func TestGenerateServiceMonitorConfig(t *testing.T) {
 			ep: promopv1.Endpoint{
 				TargetPort: &intstr.IntOrString{StrVal: "http_metrics", Type: intstr.String},
 			},
+			role: promk8s.RoleEndpoint,
 			expectedRelabels: util.Untab(`
 				- target_label: __meta_foo
 				  replacement: bar
@@ -180,6 +183,7 @@ func TestGenerateServiceMonitorConfig(t *testing.T) {
 			ep: promopv1.Endpoint{
 				TargetPort: &intstr.IntOrString{IntVal: 4242, Type: intstr.Int},
 			},
+			role: promk8s.RoleEndpoint,
 			expectedRelabels: util.Untab(`
 				- target_label: __meta_foo
 				  replacement: bar
@@ -239,6 +243,77 @@ func TestGenerateServiceMonitorConfig(t *testing.T) {
 				},
 			},
 		},
+		{
+			name: "role_endpointslice",
+			m: &promopv1.ServiceMonitor{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: "operator",
+					Name:      "svcmonitor",
+				},
+			},
+			ep: promopv1.Endpoint{
+				TargetPort: &intstr.IntOrString{IntVal: 4242, Type: intstr.Int},
+			},
+			role: promk8s.RoleEndpointSlice,
+			expectedRelabels: util.Untab(`
+				- target_label: __meta_foo
+				  replacement: bar
+				- source_labels: [job]
+				  target_label: __tmp_prometheus_job_name
+				- source_labels: ["__meta_kubernetes_pod_container_port_number"]
+				  regex: "4242"
+				  action: "keep"
+				- source_labels: [__meta_kubernetes_endpointslice_address_target_kind, __meta_kubernetes_endpointslice_address_target_name]
+				  regex: Node;(.*)
+				  target_label: node
+				  replacement: ${1}
+				- source_labels: [__meta_kubernetes_endpointslice_address_target_kind, __meta_kubernetes_endpointslice_address_target_name]
+				  regex: Pod;(.*)
+				  target_label: pod
+				  action: replace
+				  replacement: ${1}
+				- source_labels: [__meta_kubernetes_namespace]
+				  target_label: namespace
+				- source_labels: [__meta_kubernetes_service_name]
+				  target_label: service
+				- source_labels: [__meta_kubernetes_pod_container_name]
+				  target_label: container
+				- source_labels: [__meta_kubernetes_pod_name]
+				  target_label: pod
+				- source_labels: [__meta_kubernetes_pod_phase]
+				  regex: (Failed|Succeeded)
+				  action: drop
+				- source_labels: [__meta_kubernetes_service_name]
+				  target_label: job
+				  replacement: ${1}
+				- target_label: endpoint
+				  replacement: "4242"
+			`),
+			expected: &config.ScrapeConfig{
+				JobName:           "serviceMonitor/operator/svcmonitor/1",
+				HonorTimestamps:   true,
+				ScrapeInterval:    model.Duration(time.Minute),
+				ScrapeTimeout:     model.Duration(10 * time.Second),
+				ScrapeProtocols:   config.DefaultScrapeProtocols,
+				EnableCompression: true,
+				MetricsPath:       "/metrics",
+				Scheme:            "http",
+				HTTPClientConfig: commonConfig.HTTPClientConfig{
+					FollowRedirects: true,
+					EnableHTTP2:     true,
+				},
+				ServiceDiscoveryConfigs: discovery.Configs{
+					&promk8s.SDConfig{
+						Role: "endpointslice",
+
+						NamespaceDiscovery: promk8s.NamespaceDiscovery{
+							IncludeOwnNamespace: false,
+							Names:               []string{"operator"},
+						},
+					},
+				},
+			},
+		},
 		{
 			name: "everything",
 			m: &promopv1.ServiceMonitor{
@@ -308,6 +383,7 @@ func TestGenerateServiceMonitorConfig(t *testing.T) {
 				},
 			},
 		},
+			role: promk8s.RoleEndpoint,
 			expectedRelabels: util.Untab(`
 				- target_label: __meta_foo
 				  replacement: bar
@@ -427,7 +503,7 @@ func TestGenerateServiceMonitorConfig(t *testing.T) {
 				{TargetLabel: "__meta_foo", Replacement: "bar"},
 			},
 		}
-		cfg, err := cg.GenerateServiceMonitorConfig(tc.m, tc.ep, 1)
+		cfg, err := cg.GenerateServiceMonitorConfig(tc.m, tc.ep, 1, tc.role)
 		require.NoError(t, err)
 		// check relabel configs separately
 		rlcs := cfg.RelabelConfigs
diff --git a/internal/component/prometheus/operator/types.go b/internal/component/prometheus/operator/types.go
index 8ed8cc4149..ed5be3d6e0 100644
--- a/internal/component/prometheus/operator/types.go
+++ b/internal/component/prometheus/operator/types.go
@@ -1,6 +1,8 @@
 package operator
 
 import (
+	"fmt"
+	promk8s "github.com/prometheus/prometheus/discovery/kubernetes"
 	"time"
 
 	"github.com/grafana/alloy/internal/component/common/config"
@@ -24,6 +26,8 @@ type Arguments struct {
 	// Namespaces to search for monitor resources. Empty implies All namespaces
 	Namespaces []string `alloy:"namespaces,attr,optional"`
 
+	KubernetesRole string `alloy:"kubernetes_role,attr,optional"`
+
 	// LabelSelector allows filtering discovered monitor resources by labels
 	LabelSelector *config.LabelSelector `alloy:"selector,block,optional"`
 
@@ -54,6 +58,7 @@ var DefaultArguments = Arguments{
 	Client: kubernetes.ClientArguments{
 		HTTPClientConfig: config.DefaultHTTPClientConfig,
 	},
+	KubernetesRole: string(promk8s.RoleEndpoint),
 }
 
 // SetToDefault implements syntax.Defaulter.
@@ -66,6 +71,9 @@ func (args *Arguments) Validate() error {
 	if len(args.Namespaces) == 0 {
 		args.Namespaces = []string{apiv1.NamespaceAll}
 	}
+	if args.KubernetesRole != string(promk8s.RoleEndpointSlice) && args.KubernetesRole != string(promk8s.RoleEndpoint) {
+		return fmt.Errorf("kubernetes_role must be either %q or %q, got %q", promk8s.RoleEndpoint, promk8s.RoleEndpointSlice, args.KubernetesRole)
+	}
 	return nil
 }
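
Example usage: the snippet below is a minimal sketch of the new argument, not part of the patch itself. The component label "example" and the `prometheus.remote_write.default.receiver` forward target are assumed for illustration.

```alloy
// Discover ServiceMonitor resources and scrape their targets through the
// EndpointSlice API rather than the legacy Endpoints API. EndpointSlice
// (stable since Kubernetes v1.21) scales better for Services with many
// endpoints.
prometheus.operator.servicemonitors "example" {
  forward_to = [prometheus.remote_write.default.receiver]

  // New optional argument introduced by this patch. Defaults to "endpoints";
  // any other value is rejected by Arguments.Validate.
  kubernetes_role = "endpointslice"
}
```

Setting `kubernetes_role = "endpointslice"` switches both the service discovery role and the `__meta_kubernetes_endpointslice_*` source labels used for port filtering and node/pod relabeling, as exercised by the `role_endpointslice` test case above.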