Skip to content

Commit

Permalink
Support kubernetes_role argument for prometheus.operator.servicemonitors (#2023)
Browse files Browse the repository at this point in the history

* Support kubernetes_role argument for prometheus.operator.servicemonitors

* Update docs/sources/reference/components/prometheus/prometheus.operator.servicemonitors.md

Co-authored-by: Clayton Cornell <[email protected]>

* Use DefaultArguments to handle default

* Fix doc

* Update docs/sources/reference/components/prometheus/prometheus.operator.servicemonitors.md

Co-authored-by: Piotr <[email protected]>

* Add validation

---------

Co-authored-by: Clayton Cornell <[email protected]>
Co-authored-by: Piotr <[email protected]>
  • Loading branch information
3 people authored Nov 8, 2024
1 parent 395e8cd commit e6f887d
Show file tree
Hide file tree
Showing 6 changed files with 105 additions and 10 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,8 @@ v1.5.0-rc.0

- Add support to `loki.source.api` to be able to extract the tenant from the HTTP `X-Scope-OrgID` header (@QuentinBisson)

- Add support to `prometheus.operator.servicemonitors` to allow `endpointslice` role. (@yoyosir)

- (_Experimental_) Add a `loki.secretfilter` component to redact secrets from collected logs.
- Add `otelcol.exporter.splunkhec` allowing to export otel data to Splunk HEC (@adlotsof)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,10 +32,11 @@ prometheus.operator.servicemonitors "LABEL" {

The following arguments are supported:

Name | Type | Description | Default | Required
---- | ---- | ----------- | ------- | --------
`forward_to` | `list(MetricsReceiver)` | List of receivers to send scraped metrics to. | | yes
`namespaces` | `list(string)` | List of namespaces to search for ServiceMonitor resources. If not specified, all namespaces will be searched. || no
Name | Type | Description | Default | Required
---- | ---- |----------------------------------------------------------------------------------------------------------------------------|-----------| --------
`forward_to` | `list(MetricsReceiver)` | List of receivers to send scraped metrics to. | | yes
`namespaces` | `list(string)` | List of namespaces to search for ServiceMonitor resources. If not specified, all namespaces will be searched. | | no
`kubernetes_role` | `string` | The Kubernetes role used for discovery. Supports `endpoints` or `endpointslice`. | `endpoints` | no

## Blocks

Expand Down
3 changes: 2 additions & 1 deletion internal/component/prometheus/operator/common/crdmanager.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
promk8s "github.com/prometheus/prometheus/discovery/kubernetes"
"sort"
"strings"
"sync"
Expand Down Expand Up @@ -486,7 +487,7 @@ func (c *crdManager) addServiceMonitor(sm *promopv1.ServiceMonitor) {
mapKeys := []string{}
for i, ep := range sm.Spec.Endpoints {
var scrapeConfig *config.ScrapeConfig
scrapeConfig, err = gen.GenerateServiceMonitorConfig(sm, ep, i)
scrapeConfig, err = gen.GenerateServiceMonitorConfig(sm, ep, i, promk8s.Role(c.args.KubernetesRole))
if err != nil {
// TODO(jcreixell): Generate Kubernetes event to inform of this error when running `kubectl get <servicemonitor>`.
level.Error(c.logger).Log("name", sm.Name, "err", err, "msg", "error generating scrapeconfig from serviceMonitor")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,15 +16,15 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func (cg *ConfigGenerator) GenerateServiceMonitorConfig(m *promopv1.ServiceMonitor, ep promopv1.Endpoint, i int) (cfg *config.ScrapeConfig, err error) {
func (cg *ConfigGenerator) GenerateServiceMonitorConfig(m *promopv1.ServiceMonitor, ep promopv1.Endpoint, i int, role promk8s.Role) (cfg *config.ScrapeConfig, err error) {
cfg = cg.generateDefaultScrapeConfig()

cfg.JobName = fmt.Sprintf("serviceMonitor/%s/%s/%d", m.Namespace, m.Name, i)
cfg.HonorLabels = ep.HonorLabels
if ep.HonorTimestamps != nil {
cfg.HonorTimestamps = *ep.HonorTimestamps
}
dConfig := cg.generateK8SSDConfig(m.Spec.NamespaceSelector, m.Namespace, promk8s.RoleEndpoint, m.Spec.AttachMetadata)
dConfig := cg.generateK8SSDConfig(m.Spec.NamespaceSelector, m.Namespace, role, m.Spec.AttachMetadata)
cfg.ServiceDiscoveryConfigs = append(cfg.ServiceDiscoveryConfigs, dConfig)

if ep.Interval != "" {
Expand Down Expand Up @@ -153,14 +153,18 @@ func (cg *ConfigGenerator) GenerateServiceMonitorConfig(m *promopv1.ServiceMonit
}
}

labelPortName := "__meta_kubernetes_endpoint_port_name"
if role == promk8s.RoleEndpointSlice {
labelPortName = "__meta_kubernetes_endpointslice_port_name"
}
// Filter targets based on correct port for the endpoint.
if ep.Port != "" {
regex, err := relabel.NewRegexp(ep.Port)
if err != nil {
return nil, fmt.Errorf("parsing Port as regex: %w", err)
}
relabels.add(&relabel.Config{
SourceLabels: model.LabelNames{"__meta_kubernetes_endpoint_port_name"},
SourceLabels: model.LabelNames{model.LabelName(labelPortName)},
Action: "keep",
Regex: regex,
})
Expand Down Expand Up @@ -191,6 +195,9 @@ func (cg *ConfigGenerator) GenerateServiceMonitorConfig(m *promopv1.ServiceMonit
}

sourceLabels := model.LabelNames{"__meta_kubernetes_endpoint_address_target_kind", "__meta_kubernetes_endpoint_address_target_name"}
if role == promk8s.RoleEndpointSlice {
sourceLabels = model.LabelNames{"__meta_kubernetes_endpointslice_address_target_kind", "__meta_kubernetes_endpointslice_address_target_name"}
}
// Relabel namespace and pod and service labels into proper labels.
// Relabel node labels with meta labels available with Prometheus >= v2.3.
relabels.add(&relabel.Config{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ func TestGenerateServiceMonitorConfig(t *testing.T) {
name string
m *promopv1.ServiceMonitor
ep promopv1.Endpoint
role promk8s.Role
expectedRelabels string
expectedMetricRelabels string
expected *config.ScrapeConfig
Expand All @@ -44,7 +45,8 @@ func TestGenerateServiceMonitorConfig(t *testing.T) {
Name: "svcmonitor",
},
},
ep: promopv1.Endpoint{},
ep: promopv1.Endpoint{},
role: promk8s.RoleEndpoint,
expectedRelabels: util.Untab(`
- target_label: __meta_foo
replacement: bar
Expand Down Expand Up @@ -110,6 +112,7 @@ func TestGenerateServiceMonitorConfig(t *testing.T) {
ep: promopv1.Endpoint{
TargetPort: &intstr.IntOrString{StrVal: "http_metrics", Type: intstr.String},
},
role: promk8s.RoleEndpoint,
expectedRelabels: util.Untab(`
- target_label: __meta_foo
replacement: bar
Expand Down Expand Up @@ -180,6 +183,7 @@ func TestGenerateServiceMonitorConfig(t *testing.T) {
ep: promopv1.Endpoint{
TargetPort: &intstr.IntOrString{IntVal: 4242, Type: intstr.Int},
},
role: promk8s.RoleEndpoint,
expectedRelabels: util.Untab(`
- target_label: __meta_foo
replacement: bar
Expand Down Expand Up @@ -239,6 +243,77 @@ func TestGenerateServiceMonitorConfig(t *testing.T) {
},
},
},
{
name: "role_endpointslice",
m: &promopv1.ServiceMonitor{
ObjectMeta: metav1.ObjectMeta{
Namespace: "operator",
Name: "svcmonitor",
},
},
ep: promopv1.Endpoint{
TargetPort: &intstr.IntOrString{IntVal: 4242, Type: intstr.Int},
},
role: promk8s.RoleEndpointSlice,
expectedRelabels: util.Untab(`
- target_label: __meta_foo
replacement: bar
- source_labels: [job]
target_label: __tmp_prometheus_job_name
- source_labels: ["__meta_kubernetes_pod_container_port_number"]
regex: "4242"
action: "keep"
- source_labels: [__meta_kubernetes_endpointslice_address_target_kind, __meta_kubernetes_endpointslice_address_target_name]
regex: Node;(.*)
target_label: node
replacement: ${1}
- source_labels: [__meta_kubernetes_endpointslice_address_target_kind, __meta_kubernetes_endpointslice_address_target_name]
regex: Pod;(.*)
target_label: pod
action: replace
replacement: ${1}
- source_labels: [__meta_kubernetes_namespace]
target_label: namespace
- source_labels: [__meta_kubernetes_service_name]
target_label: service
- source_labels: [__meta_kubernetes_pod_container_name]
target_label: container
- source_labels: [__meta_kubernetes_pod_name]
target_label: pod
- source_labels: [__meta_kubernetes_pod_phase]
regex: (Failed|Succeeded)
action: drop
- source_labels: [__meta_kubernetes_service_name]
target_label: job
replacement: ${1}
- target_label: endpoint
replacement: "4242"
`),
expected: &config.ScrapeConfig{
JobName: "serviceMonitor/operator/svcmonitor/1",
HonorTimestamps: true,
ScrapeInterval: model.Duration(time.Minute),
ScrapeTimeout: model.Duration(10 * time.Second),
ScrapeProtocols: config.DefaultScrapeProtocols,
EnableCompression: true,
MetricsPath: "/metrics",
Scheme: "http",
HTTPClientConfig: commonConfig.HTTPClientConfig{
FollowRedirects: true,
EnableHTTP2: true,
},
ServiceDiscoveryConfigs: discovery.Configs{
&promk8s.SDConfig{
Role: "endpointslice",

NamespaceDiscovery: promk8s.NamespaceDiscovery{
IncludeOwnNamespace: false,
Names: []string{"operator"},
},
},
},
},
},
{
name: "everything",
m: &promopv1.ServiceMonitor{
Expand Down Expand Up @@ -308,6 +383,7 @@ func TestGenerateServiceMonitorConfig(t *testing.T) {
},
},
},
role: promk8s.RoleEndpoint,
expectedRelabels: util.Untab(`
- target_label: __meta_foo
replacement: bar
Expand Down Expand Up @@ -427,7 +503,7 @@ func TestGenerateServiceMonitorConfig(t *testing.T) {
{TargetLabel: "__meta_foo", Replacement: "bar"},
},
}
cfg, err := cg.GenerateServiceMonitorConfig(tc.m, tc.ep, 1)
cfg, err := cg.GenerateServiceMonitorConfig(tc.m, tc.ep, 1, tc.role)
require.NoError(t, err)
// check relabel configs separately
rlcs := cfg.RelabelConfigs
Expand Down
8 changes: 8 additions & 0 deletions internal/component/prometheus/operator/types.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
package operator

import (
"fmt"
promk8s "github.com/prometheus/prometheus/discovery/kubernetes"
"time"

"github.com/grafana/alloy/internal/component/common/config"
Expand All @@ -24,6 +26,8 @@ type Arguments struct {
// Namespaces to search for monitor resources. Empty implies All namespaces
Namespaces []string `alloy:"namespaces,attr,optional"`

KubernetesRole string `alloy:"kubernetes_role,attr,optional"`

// LabelSelector allows filtering discovered monitor resources by labels
LabelSelector *config.LabelSelector `alloy:"selector,block,optional"`

Expand Down Expand Up @@ -54,6 +58,7 @@ var DefaultArguments = Arguments{
Client: kubernetes.ClientArguments{
HTTPClientConfig: config.DefaultHTTPClientConfig,
},
KubernetesRole: string(promk8s.RoleEndpoint),
}

// SetToDefault implements syntax.Defaulter.
Expand All @@ -66,6 +71,9 @@ func (args *Arguments) Validate() error {
if len(args.Namespaces) == 0 {
args.Namespaces = []string{apiv1.NamespaceAll}
}
if args.KubernetesRole != string(promk8s.RoleEndpointSlice) && args.KubernetesRole != string(promk8s.RoleEndpoint) {
return fmt.Errorf("only endpoints and endpointslice are supported")
}
return nil
}

Expand Down

0 comments on commit e6f887d

Please sign in to comment.