-
Currently, I'm deploying prometheus-operator using the Helm charts with the default Grafana dashboards and Prometheus alerts/rules disabled. I am now using the kube-prometheus repo to build and compile my alerts, rules, and dashboards. The alerts and rules for Prometheus worked great — I managed to modify them to include my custom labels for each alert. However, the Grafana dashboards are proving problematic. Currently, my Grafana is configured to automatically pull in any dashboard ConfigMap carrying a specific metadata label. This way, I don't need to constantly list every dashboard ConfigMap in the Grafana deployment as a volume mount. I need to be able to add custom metadata labels (e.g. grafana_datasource or custom_label) to all of my Grafana dashboard ConfigMaps:
Below is my current libsonnet file:
It doesn't seem like there's a way to do this. Does anyone have any ideas?
Beta Was this translation helpful? Give feedback.
Replies: 2 comments 4 replies
-
Without having tested this, something like this should work (see the `dashboardDefinitions` mapping near the end):
local kp =
(import 'kube-prometheus/kube-prometheus.libsonnet') +
// Uncomment the following imports to enable its patches
// (import 'kube-prometheus/kube-prometheus-anti-affinity.libsonnet') +
// (import 'kube-prometheus/kube-prometheus-managed-cluster.libsonnet') +
// (import 'kube-prometheus/kube-prometheus-node-ports.libsonnet') +
// (import 'kube-prometheus/kube-prometheus-static-etcd.libsonnet') +
// (import 'kube-prometheus/kube-prometheus-thanos-sidecar.libsonnet') +
// (import 'kube-prometheus/kube-prometheus-custom-metrics.libsonnet') +
{
  _config+:: {
    namespace: 'monitoring',
  },

  prometheusAlerts+:: {
    // Rewrite the labels on every upstream alerting rule so alerts carry the
    // organisation's priority scheme (warning -> P4, critical -> P3) plus the
    // Spark routing fields.
    // NOTE(review): `labels:` (not `labels+:`) deliberately REPLACES the
    // upstream label set on matched rules — confirm no other upstream labels
    // are relied on downstream.
    groups: std.map(
      function(group)
        group {
          rules: std.map(
            function(rule)
              if std.objectHas(rule, 'labels') && std.objectHas(rule.labels, 'severity') && rule.labels.severity == "warning" then
                rule {
                  labels: {
                    application: "{{ if $labels.service }} {{ $labels.service }} {{ else }} UKISS GKE {{ end }}",
                    severity: "P4",
                    sparkGroup: "UK IS DOST Containerisation Support",
                    affectedCi: "{{ $labels.sparkCi }}",
                  },
                }
              else if std.objectHas(rule, 'labels') && std.objectHas(rule.labels, 'severity') && rule.labels.severity == "critical" then
                rule {
                  labels: {
                    application: "{{ if $labels.service }} {{ $labels.service }} {{ else }} UKISS GKE {{ end }}",
                    severity: "P3",
                    sparkGroup: "UK IS DOST Containerisation Support",
                    affectedCi: "{{ $labels.sparkCi }}",
                  },
                }
              else
                // Rules without a recognised severity label pass through untouched.
                rule,
            group.rules
          ),
        },
      super.groups
    ),
  },

  grafanaDashboards+:: {  // monitoring-mixin compatibility
    'multi-cluster.json': (import 'grafana-dashboards/multi-cluster-dashboard.json'),
  },

  grafana+:: {
    dashboards+:: {  // use this method to import your dashboards to Grafana
      'multi-cluster.json': (import 'grafana-dashboards/multi-cluster-dashboard.json'),
    },
    // Attach a discovery label to every generated dashboard ConfigMap so the
    // Grafana sidecar can pick them up automatically.
    // BUG FIX: the original wrote `{label: value}` — `value` is an unbound
    // identifier and fails jsonnet evaluation. Both the label key and value
    // must be concrete strings; adjust them to whatever your sidecar watches.
    dashboardDefinitions: std.map(
      function(cm) cm { metadata+: { labels+: { custom_label: 'true' } } },
      super.dashboardDefinitions
    ),
  },
};

{ ['setup/0namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{
  ['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
  for name in std.filter((function(name) name != 'serviceMonitor'), std.objectFields(kp.prometheusOperator))
} +
// serviceMonitor is separated so that it can be created after the CRDs are ready
// { 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
// { ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
// { ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
// { ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
// { ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
Beta Was this translation helpful? Give feedback.
-
My apologies, I think I missed the `items` array:
local kp =
(import 'kube-prometheus/kube-prometheus.libsonnet') +
// Uncomment the following imports to enable its patches
// (import 'kube-prometheus/kube-prometheus-anti-affinity.libsonnet') +
// (import 'kube-prometheus/kube-prometheus-managed-cluster.libsonnet') +
// (import 'kube-prometheus/kube-prometheus-node-ports.libsonnet') +
// (import 'kube-prometheus/kube-prometheus-static-etcd.libsonnet') +
// (import 'kube-prometheus/kube-prometheus-thanos-sidecar.libsonnet') +
// (import 'kube-prometheus/kube-prometheus-custom-metrics.libsonnet') +
{
  _config+:: {
    namespace: 'monitoring',
  },

  prometheusAlerts+:: {
    // Rewrite the labels on every upstream alerting rule so alerts carry the
    // organisation's priority scheme (warning -> P4, critical -> P3) plus the
    // Spark routing fields.
    // NOTE(review): `labels:` (not `labels+:`) deliberately REPLACES the
    // upstream label set on matched rules — confirm no other upstream labels
    // are relied on downstream.
    groups: std.map(
      function(group)
        group {
          rules: std.map(
            function(rule)
              if std.objectHas(rule, 'labels') && std.objectHas(rule.labels, 'severity') && rule.labels.severity == "warning" then
                rule {
                  labels: {
                    application: "{{ if $labels.service }} {{ $labels.service }} {{ else }} UKISS GKE {{ end }}",
                    severity: "P4",
                    sparkGroup: "UK IS DOST Containerisation Support",
                    affectedCi: "{{ $labels.sparkCi }}",
                  },
                }
              else if std.objectHas(rule, 'labels') && std.objectHas(rule.labels, 'severity') && rule.labels.severity == "critical" then
                rule {
                  labels: {
                    application: "{{ if $labels.service }} {{ $labels.service }} {{ else }} UKISS GKE {{ end }}",
                    severity: "P3",
                    sparkGroup: "UK IS DOST Containerisation Support",
                    affectedCi: "{{ $labels.sparkCi }}",
                  },
                }
              else
                // Rules without a recognised severity label pass through untouched.
                rule,
            group.rules
          ),
        },
      super.groups
    ),
  },

  grafanaDashboards+:: {  // monitoring-mixin compatibility
    'multi-cluster.json': (import 'grafana-dashboards/multi-cluster-dashboard.json'),
  },

  grafana+:: {
    dashboards+:: {  // use this method to import your dashboards to Grafana
      'multi-cluster.json': (import 'grafana-dashboards/multi-cluster-dashboard.json'),
    },
    // dashboardDefinitions is a List object, so map over its `items` array and
    // attach a discovery label to each ConfigMap so the Grafana sidecar can
    // pick them up automatically.
    // BUG FIX: the original wrote `{label: value}` — `value` is an unbound
    // identifier and fails jsonnet evaluation. Both the label key and value
    // must be concrete strings; adjust them to whatever your sidecar watches.
    dashboardDefinitions+: {
      items: std.map(
        function(cm) cm { metadata+: { labels+: { custom_label: 'true' } } },
        super.items
      ),
    },
  },
};

{ ['setup/0namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{
  ['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
  for name in std.filter((function(name) name != 'serviceMonitor'), std.objectFields(kp.prometheusOperator))
} +
// serviceMonitor is separated so that it can be created after the CRDs are ready
// { 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
// { ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
// { ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
// { ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
// { ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
Beta Was this translation helpful? Give feedback.
My apologies, I think I missed the `items` array: