From 2e36c66ecedd6cc92717e20ba88db18656dfe8e3 Mon Sep 17 00:00:00 2001
From: Darkfella91
Date: Wed, 27 Nov 2024 21:18:29 +0000
Subject: [PATCH] Initial commit

---
 .devcontainer/Dockerfile | 94 ++
 .devcontainer/devcontainer.json | 78 ++
 .devcontainer/version.txt | 1 +
 .editorconfig | 23 +
 .envrc | 8 +
 .gitattributes | 2 +
 .github/labeler.yaml | 22 +
 .github/labels.yaml | 38 +
 .github/renovate.json5 | 46 +
 .github/renovate/allowedVersions.json5 | 10 +
 .github/renovate/autoMerge.json5 | 21 +
 .github/renovate/clusters.json5 | 10 +
 .github/renovate/commitMessage.json5 | 16 +
 .github/renovate/customManagers.json5 | 35 +
 .github/renovate/devcontainer.json5 | 18 +
 .github/renovate/grafanaDashboards.json5 | 34 +
 .github/renovate/groups.json5 | 66 +
 .github/renovate/labels.json5 | 37 +
 .github/renovate/packageRules.json5 | 17 +
 .github/renovate/semanticCommits.json5 | 105 ++
 .github/workflows/build-dev-container.yaml | 47 +
 .github/workflows/flux-diff.yaml | 125 ++
 .github/workflows/flux-hr-sync.yaml | 98 ++
 .github/workflows/flux-image-test.yaml | 152 +++
 .github/workflows/label-sync.yaml | 31 +
 .github/workflows/labeler.yaml | 21 +
 .github/workflows/release.yaml | 52 +
 .github/workflows/renovate.yaml | 63 +
 .gitignore | 11 +
 .sops.yaml | 26 +
 .taskfiles/VolSync/Taskfile.yaml | 221 +++
 .taskfiles/VolSync/templates/list.tmpl.yaml | 20 +
 .../replicationdestination.tmpl.yaml | 29 +
 .taskfiles/VolSync/templates/unlock.tmpl.yaml | 27 +
 .taskfiles/VolSync/templates/wipe.tmpl.yaml | 26 +
 .taskfiles/bootstrap/Taskfile.yaml | 90 ++
 LICENSE | 21 +
 README.md | 1 +
 Taskfile.yaml | 20 +
 .../app/externalsecret.yaml | 24 +
 .../app/helmrelease.yaml | 28 +
 .../app/kustomization.yaml | 7 +
 .../gha-runner-scale-set-controller/ks.yaml | 22 +
 .../gha-runner-scale-set/app/helmrelease.yaml | 57 +
 .../app/kustomization.yaml | 7 +
 .../gha-runner-scale-set/ks.yaml | 20 +
 .../actions-runner-system/kustomization.yaml | 10 +
 .../apps/actions-runner-system/namespace.yaml | 38 +
 .../cert-manager/app/helm-values.yaml | 7 +
 .../cert-manager/app/helmrelease.yaml | 27 +
 .../cert-manager/app/kustomization.yaml | 13 +
 .../cert-manager/app/kustomizeconfig.yaml | 7 +
 .../cert-manager/app/prometheusrule.yaml | 68 +
 .../cert-manager/issuers/externalsecret.yaml | 23 +
 .../cert-manager/issuers/issuers.yaml | 22 +
 .../cert-manager/issuers/kustomization.yaml | 7 +
 .../apps/cert-manager/cert-manager/ks.yaml | 43 +
 .../certificates/app/certificates.yaml | 15 +
 .../certificates/app/kustomization.yaml | 7 +
 .../certificates/app/pushsecret.yaml | 29 +
 .../import/clusterexternalsecret.yaml | 43 +
 .../certificates/import/kustomization.yaml | 6 +
 .../apps/cert-manager/certificates/ks.yaml | 46 +
 .../main/apps/cert-manager/kustomization.yaml | 10 +
 .../main/apps/cert-manager/namespace.yaml | 38 +
 .../cloudnative-pg/app/externalsecret.yaml | 34 +
 .../cloudnative-pg/app/helmrelease.yaml | 31 +
 .../cloudnative-pg/app/kustomization.yaml | 7 +
 .../cloudnative-pg/cluster/cluster17.yaml | 79 ++
 .../cloudnative-pg/cluster/gatus.yaml | 21 +
 .../cloudnative-pg/cluster/kustomization.yaml | 9 +
 .../cluster/prometheusrule.yaml | 67 +
 .../cluster/scheduledbackup.yaml | 12 +
 .../main/apps/database/cloudnative-pg/ks.yaml | 44 +
 .../main/apps/database/kustomization.yaml | 10 +
 kubernetes/main/apps/database/namespace.yaml | 38 +
 .../database/redis/app/externalsecret.yaml | 19 +
 .../apps/database/redis/app/helmrelease.yaml | 39 +
 .../database/redis/app/kustomization.yaml | 16 +
 .../redis/app/resources/gatus-ep.yaml | 12 +
 kubernetes/main/apps/database/redis/ks.yaml | 22 +
 .../external-secrets/app/helmrelease.yaml | 55 +
 .../external-secrets/app/kustomization.yaml | 6 +
 .../external-secrets/external-secrets/ks.yaml | 42 +
 .../stores/clustersecretstore.yaml | 19 +
 .../stores/kustomization.yaml | 7 +
 .../apps/external-secrets/kustomization.yaml | 9 +
 .../main/apps/external-secrets/namespace.yaml | 37 +
 .../flux-system/addons/app/kustomization.yaml | 8 +
 .../addons/app/monitoring/kustomization.yaml | 8 +
 .../addons/app/monitoring/podmonitor.yaml | 30 +
 .../addons/app/monitoring/prometheusrule.yaml | 32 +
 .../notifications/github/externalsecret.yaml | 19 +
 .../notifications/github/kustomization.yaml | 7 +
 .../notifications/github/notification.yaml | 24 +
 .../app/notifications/kustomization.yaml | 6 +
 .../app/webhooks/github/externalsecret.yaml | 19 +
 .../addons/app/webhooks/github/ingress.yaml | 28 +
 .../app/webhooks/github/kustomization.yaml | 8 +
 .../addons/app/webhooks/github/receiver.yaml | 26 +
 .../addons/app/webhooks/kustomization.yaml | 6 +
 .../main/apps/flux-system/addons/ks.yaml | 22 +
 .../main/apps/flux-system/kustomization.yaml | 9 +
 .../main/apps/flux-system/namespace.yaml | 46 +
 .../apps/keycloak/crds/kustomization.yaml | 6 +
 .../main/apps/keycloak/deployment/cr.yaml | 94 ++
 .../keycloak/deployment/externalsecret.yaml | 25 +
 .../apps/keycloak/deployment/ingress.yaml | 30 +
 .../keycloak/deployment/kustomization.yaml | 8 +
 kubernetes/main/apps/keycloak/ks.yaml | 66 +
 .../main/apps/keycloak/kustomization.yaml | 8 +
 kubernetes/main/apps/keycloak/namespace.yaml | 5 +
 .../apps/keycloak/operator/kustomization.yaml | 5 +
 .../kube-system/cilium/app/helm-values.yaml | 62 +
 .../kube-system/cilium/app/helmrelease.yaml | 91 ++
 .../kube-system/cilium/app/kustomization.yaml | 12 +
 .../cilium/app/kustomizeconfig.yaml | 7 +
 .../cilium/config/kustomization.yaml | 6 +
 .../apps/kube-system/cilium/config/l2.yaml | 25 +
 .../main/apps/kube-system/cilium/ks.yaml | 42 +
 .../kube-system/coredns/app/helm-values.yaml | 72 +
 .../kube-system/coredns/app/helmrelease.yaml | 27 +
 .../coredns/app/kustomization.yaml | 12 +
 .../coredns/app/kustomizeconfig.yaml | 7 +
 .../main/apps/kube-system/coredns/ks.yaml | 20 +
 .../kube-system/generic-device-plugin.yaml | 58 +
 .../kubelet-csr-approver/app/helm-values.yaml | 4 +
 .../kubelet-csr-approver/app/helmrelease.yaml | 32 +
 .../app/kustomization.yaml | 11 +
 .../app/kustomizeconfig.yaml | 7 +
 .../kube-system/kubelet-csr-approver/ks.yaml | 19 +
 .../main/apps/kube-system/kustomization.yaml | 18 +
 .../metrics-server/app/helmrelease.yaml | 33 +
 .../metrics-server/app/kustomization.yaml | 6 +
 .../apps/kube-system/metrics-server/ks.yaml | 20 +
 .../kube-system/multus/app/kustomization.yaml | 8 +
 .../apps/kube-system/multus/app/patch.yaml | 33 +
 .../multus/config/kustomization.yaml | 5 +
 .../config/network-attachment-definition.yaml | 75 ++
 .../main/apps/kube-system/multus/ks.yaml | 40 +
 .../main/apps/kube-system/namespace.yaml | 38 +
 .../nvidia-device-plugin/app/helmrelease.yaml | 35 +
 .../app/kustomization.yaml | 5 +
 .../config/kustomization.yaml | 5 +
 .../nvidia-device-plugin/config/runtime.yaml | 6 +
 .../kube-system/nvidia-device-plugin/ks.yaml | 40 +
 .../kube-system/reloader/app/helmrelease.yaml | 33 +
 .../reloader/app/kustomization.yaml | 6 +
 .../main/apps/kube-system/reloader/ks.yaml | 20 +
 .../main/apps/kube-system/vfio-binding.yaml | 116 ++
 .../zfs-localpv/app/helm-values.yaml | 117 ++
 .../zfs-localpv/app/helmrelease.yaml | 28 +
 .../zfs-localpv/app/kustomization.yaml | 12 +
 .../zfs-localpv/app/kustomizeconfig.yaml | 7 +
 .../zfs-localpv/config/kustomization.yaml | 7 +
 .../zfs-localpv/config/snapshotclass.yaml | 9 +
 .../zfs-localpv/config/storageclass.yaml | 43 +
 .../main/apps/kube-system/zfs-localpv/ks.yaml | 40 +
 .../main/apps/kyverno/kustomization.yaml | 9 +
 .../apps/kyverno/kyverno/app/helmrelease.yaml | 80 ++
 .../kyverno/kyverno/app/kustomization.yaml | 6 +
 kubernetes/main/apps/kyverno/kyverno/ks.yaml | 42 +
 .../kyverno/policies/default-deny.yaml | 38 +
 .../kyverno/kyverno/policies/dns-config.yaml | 30 +
 .../kyverno/policies/hostpath-readonly.yaml | 52 +
 .../kyverno/policies/kustomization.yaml | 8 +
 kubernetes/main/apps/kyverno/namespace.yaml | 38 +
 .../media/autobrr/app/externalsecret.yaml | 32 +
 .../apps/media/autobrr/app/helmrelease.yaml | 118 ++
 .../apps/media/autobrr/app/kustomization.yaml | 22 +
 .../media/autobrr/app/resources/gatus-ep.yaml | 15 +
 .../media/autobrr/app/resources/lokirule.yaml | 14 +
 kubernetes/main/apps/media/autobrr/ks.yaml | 27 +
 .../apps/media/bazarr/app/externalsecret.yaml | 357 +++++
 .../apps/media/bazarr/app/helmrelease.yaml | 201 +++
 .../apps/media/bazarr/app/kustomization.yaml | 18 +
 .../bazarr/app/resources/connectionpool.py | 1182 +++++++++++++++++
 .../media/bazarr/app/resources/subcleaner.sh | 18 +
 kubernetes/main/apps/media/bazarr/ks.yaml | 22 +
 .../media/flaresolverr/app/helmrelease.yaml | 83 ++
 .../media/flaresolverr/app/kustomization.yaml | 6 +
 .../main/apps/media/flaresolverr/ks.yaml | 23 +
 .../media/jellyseerr/app/helmrelease.yaml | 120 ++
 .../media/jellyseerr/app/kustomization.yaml | 16 +
 .../jellyseerr/app/resources/gatus-ep.yaml | 12 +
 .../media/jellyseerr/app/volsync-dst.yaml | 18 +
 .../media/jellyseerr/app/volsync-src.yaml | 47 +
 kubernetes/main/apps/media/jellyseerr/ks.yaml | 25 +
 kubernetes/main/apps/media/kustomization.yaml | 21 +
 kubernetes/main/apps/media/namespace.yaml | 38 +
 .../media/notifiarr/app/externalsecret.yaml | 33 +
 .../apps/media/notifiarr/app/helmrelease.yaml | 112 ++
 kubernetes/main/apps/media/notifiarr/ks.yaml | 25 +
 .../media/omegabrr/app/externalsecret.yaml | 40 +
 .../apps/media/omegabrr/app/helmrelease.yaml | 67 +
 .../media/omegabrr/app/kustomization.yaml | 8 +
 kubernetes/main/apps/media/omegabrr/ks.yaml | 25 +
 .../main/apps/media/plex/app/helmrelease.yaml | 152 +++
 .../apps/media/plex/app/kustomization.yaml | 21 +
 .../media/plex/app/resources/gatus-ep.yaml | 11 +
 .../media/plex/app/resources/lokirule.yaml | 14 +
 kubernetes/main/apps/media/plex/ks.yaml | 22 +
 .../media/prowlarr/app/externalsecret.yaml | 24 +
 .../apps/media/prowlarr/app/helmrelease.yaml | 174 +++
 .../media/prowlarr/app/kustomization.yaml | 16 +
 .../prowlarr/app/resources/gatus-ep.yaml | 15 +
 kubernetes/main/apps/media/prowlarr/ks.yaml | 23 +
 .../media/qbittorrent/app/externalsecret.yaml | 50 +
 .../media/qbittorrent/app/helmrelease.yaml | 319 +++++
 .../media/qbittorrent/app/kustomization.yaml | 30 +
 .../qbittorrent/app/resources/gatus-ep.yaml | 14 +
 .../qbittorrent/app/resources/healthcheck.sh | 10 +
 .../qbittorrent/app/resources/lokirule.yaml | 14 +
 .../qbittorrent/app/resources/post-process.sh | 118 ++
 .../app/resources/qbitmanage-config.yaml | 304 +++++
 .../media/qbittorrent/app/volsync-dst.yaml | 18 +
 .../media/qbittorrent/app/volsync-src.yaml | 48 +
 .../main/apps/media/qbittorrent/ks.yaml | 22 +
 .../apps/media/radarr/app/externalsecret.yaml | 35 +
 .../apps/media/radarr/app/helmrelease.yaml | 167 +++
 .../apps/media/radarr/app/kustomization.yaml | 19 +
 .../media/radarr/app/resources/gatus-ep.yaml | 15 +
 .../radarr/app/resources/pushover-notify.sh | 85 ++
 kubernetes/main/apps/media/radarr/ks.yaml | 23 +
 .../media/sabnzbd/app/externalsecret.yaml | 25 +
 .../apps/media/sabnzbd/app/helmrelease.yaml | 161 +++
 .../apps/media/sabnzbd/app/kustomization.yaml | 20 +
 .../media/sabnzbd/app/resources/gatus-ep.yaml | 15 +
 .../sabnzbd/app/resources/post-process.sh | 118 ++
 .../apps/media/sabnzbd/app/volsync-dst.yaml | 18 +
 .../main/apps/media/sabnzbd/app/volsync.yaml | 48 +
 kubernetes/main/apps/media/sabnzbd/ks.yaml | 26 +
 .../apps/media/sonarr/app/externalsecret.yaml | 35 +
 .../apps/media/sonarr/app/helmrelease.yaml | 164 +++
 .../apps/media/sonarr/app/kustomization.yaml | 20 +
 .../media/sonarr/app/resources/gatus-ep.yaml | 15 +
 .../sonarr/app/resources/pushover-notify.sh | 85 ++
 .../sonarr/app/resources/refresh-series.sh | 21 +
 kubernetes/main/apps/media/sonarr/ks.yaml | 23 +
 .../media/unpackerr/app/externalsecret.yaml | 20 +
 .../apps/media/unpackerr/app/helmrelease.yaml | 103 ++
 .../media/unpackerr/app/kustomization.yaml | 7 +
 kubernetes/main/apps/media/unpackerr/ks.yaml | 22 +
 .../network/cloudflared/app/dnsendpoint.yaml | 11 +
 .../cloudflared/app/externalsecret.yaml | 24 +
 .../network/cloudflared/app/helmrelease.yaml | 117 ++
 .../cloudflared/app/kustomization.yaml | 14 +
 .../cloudflared/app/resources/config.yaml | 10 +
 .../main/apps/network/cloudflared/ks.yaml | 23 +
 .../network/crowdsec/app/externalsecret.yaml | 33 +
 .../network/crowdsec/app/helmrelease.yaml | 280 ++++
 .../network/crowdsec/app/ingress-appsec.yaml | 29 +
 .../network/crowdsec/app/kustomization.yaml | 8 +
 kubernetes/main/apps/network/crowdsec/ks.yaml | 23 +
 .../network/external-dns/RFC3645/config.yaml | 32 +
 .../external-dns/RFC3645/helmrelease.yaml | 57 +
 .../external-dns/RFC3645/kustomization.yaml | 7 +
 .../cloudflare/externalsecret.yaml | 20 +
 .../external-dns/cloudflare/helmrelease.yaml | 58 +
 .../cloudflare/kustomization.yaml | 7 +
 .../main/apps/network/external-dns/ks.yaml | 44 +
 .../main/apps/network/kustomization.yaml | 12 +
 kubernetes/main/apps/network/namespace.yaml | 38 +
 .../nginx/external/externalsecret.yaml | 39 +
 .../network/nginx/external/helmrelease.yaml | 172 +++
 .../network/nginx/external/kustomization.yaml | 7 +
 .../nginx/internal/externalsecret.yaml | 17 +
 .../network/nginx/internal/helmrelease.yaml | 88 ++
 .../network/nginx/internal/kustomization.yaml | 7 +
 kubernetes/main/apps/network/nginx/ks.yaml | 42 +
 .../apps/oauth2-proxy/app/externalsecret.yaml | 25 +
 .../apps/oauth2-proxy/app/helmrelease.yaml | 459 +++++++
 .../oauth2-proxy/app/ingress-external.yaml | 28 +
 .../oauth2-proxy/app/ingress-internal.yaml | 74 ++
 .../apps/oauth2-proxy/app/kustomization.yaml | 18 +
 .../oauth2-proxy/app/resources/gatus-ep.yaml | 15 +
 kubernetes/main/apps/oauth2-proxy/ks.yaml | 20 +
 .../main/apps/oauth2-proxy/kustomization.yaml | 8 +
 .../main/apps/oauth2-proxy/namespace.yaml | 5 +
 .../gatus/app/externalsecret.yaml | 32 +
 .../observability/gatus/app/helmrelease.yaml | 146 ++
 .../gatus/app/kustomization.yaml | 14 +
 .../apps/observability/gatus/app/rbac.yaml | 22 +
 .../gatus/app/resources/config.yaml | 57 +
 .../main/apps/observability/gatus/ks.yaml | 23 +
 .../grafana/app/externalsecret.yaml | 20 +
 .../grafana/app/helmrelease.yaml | 258 ++++
 .../grafana/app/kustomization.yaml | 7 +
 .../main/apps/observability/grafana/ks.yaml | 22 +
 .../app/externalsecret.yaml | 90 ++
 .../app/helmrelease.yaml | 147 ++
 .../app/kustomization.yaml | 8 +
 .../app/prometheusrule.yaml | 25 +
 .../kube-prometheus-stack/ks.yaml | 22 +
 .../apps/observability/kustomization.yaml | 14 +
 .../observability/loki/app/helmrelease.yaml | 83 ++
 .../observability/loki/app/kustomization.yaml | 6 +
 .../main/apps/observability/loki/ks.yaml | 20 +
 .../main/apps/observability/namespace.yaml | 37 +
 .../app/helmrelease.yaml | 23 +
 .../app/kustomization.yaml | 6 +
 .../prometheus-operator-crds/ks.yaml | 20 +
 .../promtail/app/helmrelease.yaml | 30 +
 .../promtail/app/kustomization.yaml | 6 +
 .../main/apps/observability/promtail/ks.yaml | 20 +
 .../apps/system-upgrade/kustomization.yaml | 9 +
 .../main/apps/system-upgrade/namespace.yaml | 38 +
 .../app/helmrelease.yaml | 101 ++
 .../app/kustomization.yaml | 7 +
 .../system-upgrade-controller/app/rbac.yaml | 21 +
 .../system-upgrade-controller/ks.yaml | 49 +
 .../plans/kubernetes.yaml | 45 +
 .../plans/kustomization.yaml | 7 +
 .../plans/talos.yaml | 48 +
 kubernetes/main/apps/vault/kustomization.yaml | 9 +
 kubernetes/main/apps/vault/namespace.yaml | 5 +
 .../apps/vault/vault/app/helmrelease.yaml | 455 +++++++
 .../apps/vault/vault/app/kustomization.yaml | 15 +
 .../vault/vault/app/resources/gatus-ep.yaml | 16 +
 kubernetes/main/apps/vault/vault/ks.yaml | 23 +
 .../apps/vaultwarden/app/externalsecret.yaml | 32 +
 .../apps/vaultwarden/app/helmrelease.yaml | 228 ++++
 kubernetes/main/apps/vaultwarden/app/ks.yaml | 23 +
 .../apps/vaultwarden/app/kustomization.yaml | 15 +
 .../vaultwarden/app/resources/gatus-ep.yaml | 11 +
 .../main/apps/vaultwarden/kustomization.yaml | 9 +
 .../main/apps/vaultwarden/namespace.yaml | 35 +
 .../main/apps/virtualization/cdi/app/cr.yaml | 15 +
 .../virtualization/cdi/app/kustomization.yaml | 6 +
 .../main/apps/virtualization/cdi/ks.yaml | 19 +
 .../kubevirt-manager/app/ingress.yaml | 0
 .../kubevirt-manager/app/kustomization.yaml | 5 +
 .../virtualization/kubevirt-manager/ks.yaml | 20 +
 .../apps/virtualization/kubevirt/app/cr.yaml | 17 +
 .../kubevirt/app/kustomization.yaml | 6 +
 .../main/apps/virtualization/kubevirt/ks.yaml | 17 +
 .../apps/virtualization/kustomization.yaml | 10 +
 .../main/apps/virtualization/namespace.yaml | 28 +
 .../truenas-scale/app/kustomization.yaml | 6 +
 .../truenas-scale/app/pvc.yaml | 28 +
 .../truenas-scale/app/virtualmachine.yaml | 68 +
 .../virtual-machines/truenas-scale/ks.yaml | 20 +
 .../windows-server/app/kustomization.yaml | 6 +
 .../windows-server/app/pvc.yaml | 29 +
 .../windows-server/app/virtualmachine.yaml | 58 +
 .../virtual-machines/windows-server/ks.yaml | 20 +
 .../apps/volsync-system/kustomization.yaml | 9 +
 .../main/apps/volsync-system/namespace.yaml | 38 +
 .../volsync/app/helmrelease.yaml | 28 +
 .../volsync/app/kustomization.yaml | 7 +
 .../volsync/app/prometheusrule.yaml | 28 +
 .../main/apps/volsync-system/volsync/ks.yaml | 20 +
 kubernetes/main/apps/zfs/kustomization.yaml | 6 +
 kubernetes/main/apps/zfs/namespace.yaml | 38 +
 .../zfs/zfs-scrubber/app/externalsecret.yaml | 20 +
 .../zfs/zfs-scrubber/app/helmrelease.yaml | 56 +
 .../zfs/zfs-scrubber/app/kustomization.yaml | 7 +
 kubernetes/main/apps/zfs/zfs-scrubber/ks.yaml | 24 +
 .../bootstrap/flux/age-key.secret.sops.yaml | 28 +
 .../flux/deploy-key.secret.sops.yaml | 30 +
 .../main/bootstrap/flux/kustomization.yaml | 135 ++
 kubernetes/main/bootstrap/helmfile.yaml | 78 +
 .../bootstrap/talos/k8s-0.secret.sops.yaml | 219 +++
 kubernetes/main/flux/apps.yaml | 41 +
 kubernetes/main/flux/config/cluster.yaml | 43 +
 kubernetes/main/flux/config/flux.yaml | 111 ++
 .../main/flux/config/kustomization.yaml | 7 +
 .../helm/actions-runner-controller.yaml | 11 +
 .../main/flux/repositories/helm/backube.yaml | 10 +
 .../main/flux/repositories/helm/bitnami.yaml | 11 +
 .../main/flux/repositories/helm/bjw-s.yaml | 11 +
 .../main/flux/repositories/helm/cilium.yaml | 10 +
 .../repositories/helm/cloudnative-pg.yaml | 10 +
 .../main/flux/repositories/helm/coredns.yaml | 10 +
 .../main/flux/repositories/helm/crowdsec.yaml | 10 +
 .../repositories/helm/csi-driver-nfs.yaml | 10 +
 .../flux/repositories/helm/descheduler.yaml | 10 +
 .../flux/repositories/helm/emberstack.yaml | 10 +
 .../flux/repositories/helm/external-dns.yaml | 10 +
 .../repositories/helm/external-secrets.yaml | 10 +
 .../main/flux/repositories/helm/grafana.yaml | 10 +
 .../flux/repositories/helm/ingress-nginx.yaml | 10 +
 .../main/flux/repositories/helm/jetstack.yaml | 10 +
 .../flux/repositories/helm/kustomization.yaml | 33 +
 .../main/flux/repositories/helm/kyverno.yaml | 11 +
 .../repositories/helm/metrics-server.yaml | 10 +
 .../helm/node-feature-discovery.yaml | 10 +
 .../helm/nvidia-device-plugin.yaml | 10 +
 .../flux/repositories/helm/oauth2-proxy.yaml | 10 +
 .../main/flux/repositories/helm/piraeus.yaml | 10 +
 .../flux/repositories/helm/postfinance.yaml | 10 +
 .../helm/prometheus-community.yaml | 11 +
 .../main/flux/repositories/helm/stakater.yaml | 11 +
 .../main/flux/repositories/helm/vault.yaml | 10 +
 .../flux/repositories/helm/vaultwarden.yaml | 10 +
 .../flux/repositories/helm/zfs-localpv.yaml | 10 +
 .../main/flux/repositories/kustomization.yaml | 8 +
 .../vars/cluster-secrets.secret.sops.yaml | 36 +
 398 files changed, 16586 insertions(+)
 create mode 100755 .devcontainer/Dockerfile
 create mode 100755 .devcontainer/devcontainer.json
 create mode 100644 .devcontainer/version.txt
 create mode 100755 .editorconfig
 create mode 100755 .envrc
 create mode 100755 .gitattributes
 create mode 100755 .github/labeler.yaml
 create mode 100755 .github/labels.yaml
 create mode 100755 .github/renovate.json5
 create mode 100755 .github/renovate/allowedVersions.json5
 create mode 100755 .github/renovate/autoMerge.json5
 create mode 100755 .github/renovate/clusters.json5
 create mode 100755 .github/renovate/commitMessage.json5
 create mode 100755 .github/renovate/customManagers.json5
 create mode 100644 .github/renovate/devcontainer.json5
 create mode 100755 .github/renovate/grafanaDashboards.json5
 create mode 100755 .github/renovate/groups.json5
 create mode 100755 .github/renovate/labels.json5
 create mode 100755 .github/renovate/packageRules.json5
 create mode 100755 .github/renovate/semanticCommits.json5
 create mode 100755 .github/workflows/build-dev-container.yaml
 create mode 100755 .github/workflows/flux-diff.yaml
 create mode 100755 .github/workflows/flux-hr-sync.yaml
 create mode 100755 .github/workflows/flux-image-test.yaml
 create mode 100755 .github/workflows/label-sync.yaml
 create mode 100755 .github/workflows/labeler.yaml
 create mode 100755 .github/workflows/release.yaml
 create mode 100755 .github/workflows/renovate.yaml
 create mode 100755 .gitignore
 create mode 100755 .sops.yaml
 create mode 100755 .taskfiles/VolSync/Taskfile.yaml
 create mode 100755 .taskfiles/VolSync/templates/list.tmpl.yaml
 create mode 100755 .taskfiles/VolSync/templates/replicationdestination.tmpl.yaml
 create mode 100755 .taskfiles/VolSync/templates/unlock.tmpl.yaml
 create mode 100755 .taskfiles/VolSync/templates/wipe.tmpl.yaml
 create mode 100755 .taskfiles/bootstrap/Taskfile.yaml
 create mode 100755 LICENSE
 create mode 100755 README.md
 create mode 100755 Taskfile.yaml
 create mode 100755 kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/ks.yaml
 create mode 100755 kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/ks.yaml
 create mode 100755 kubernetes/main/apps/actions-runner-system/kustomization.yaml
 create mode 100755 kubernetes/main/apps/actions-runner-system/namespace.yaml
 create mode 100755 kubernetes/main/apps/cert-manager/cert-manager/app/helm-values.yaml
 create mode 100755 kubernetes/main/apps/cert-manager/cert-manager/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/cert-manager/cert-manager/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/cert-manager/cert-manager/app/kustomizeconfig.yaml
 create mode 100755 kubernetes/main/apps/cert-manager/cert-manager/app/prometheusrule.yaml
 create mode 100755 kubernetes/main/apps/cert-manager/cert-manager/issuers/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/cert-manager/cert-manager/issuers/issuers.yaml
 create mode 100755 kubernetes/main/apps/cert-manager/cert-manager/issuers/kustomization.yaml
 create mode 100755 kubernetes/main/apps/cert-manager/cert-manager/ks.yaml
 create mode 100755 kubernetes/main/apps/cert-manager/certificates/app/certificates.yaml
 create mode 100755 kubernetes/main/apps/cert-manager/certificates/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/cert-manager/certificates/app/pushsecret.yaml
 create mode 100755 kubernetes/main/apps/cert-manager/certificates/import/clusterexternalsecret.yaml
 create mode 100755 kubernetes/main/apps/cert-manager/certificates/import/kustomization.yaml
 create mode 100755 kubernetes/main/apps/cert-manager/certificates/ks.yaml
 create mode 100755 kubernetes/main/apps/cert-manager/kustomization.yaml
 create mode 100755 kubernetes/main/apps/cert-manager/namespace.yaml
 create mode 100755 kubernetes/main/apps/database/cloudnative-pg/app/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/database/cloudnative-pg/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/database/cloudnative-pg/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/database/cloudnative-pg/cluster/cluster17.yaml
 create mode 100755 kubernetes/main/apps/database/cloudnative-pg/cluster/gatus.yaml
 create mode 100755 kubernetes/main/apps/database/cloudnative-pg/cluster/kustomization.yaml
 create mode 100755 kubernetes/main/apps/database/cloudnative-pg/cluster/prometheusrule.yaml
 create mode 100755 kubernetes/main/apps/database/cloudnative-pg/cluster/scheduledbackup.yaml
 create mode 100755 kubernetes/main/apps/database/cloudnative-pg/ks.yaml
 create mode 100755 kubernetes/main/apps/database/kustomization.yaml
 create mode 100755 kubernetes/main/apps/database/namespace.yaml
 create mode 100755 kubernetes/main/apps/database/redis/app/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/database/redis/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/database/redis/app/kustomization.yaml
 create mode 100644 kubernetes/main/apps/database/redis/app/resources/gatus-ep.yaml
 create mode 100755 kubernetes/main/apps/database/redis/ks.yaml
 create mode 100755 kubernetes/main/apps/external-secrets/external-secrets/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/external-secrets/external-secrets/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/external-secrets/external-secrets/ks.yaml
 create mode 100755 kubernetes/main/apps/external-secrets/external-secrets/stores/clustersecretstore.yaml
 create mode 100755 kubernetes/main/apps/external-secrets/external-secrets/stores/kustomization.yaml
 create mode 100755 kubernetes/main/apps/external-secrets/kustomization.yaml
 create mode 100755 kubernetes/main/apps/external-secrets/namespace.yaml
 create mode 100755 kubernetes/main/apps/flux-system/addons/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/flux-system/addons/app/monitoring/kustomization.yaml
 create mode 100755 kubernetes/main/apps/flux-system/addons/app/monitoring/podmonitor.yaml
 create mode 100755 kubernetes/main/apps/flux-system/addons/app/monitoring/prometheusrule.yaml
 create mode 100755 kubernetes/main/apps/flux-system/addons/app/notifications/github/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/flux-system/addons/app/notifications/github/kustomization.yaml
 create mode 100755 kubernetes/main/apps/flux-system/addons/app/notifications/github/notification.yaml
 create mode 100755 kubernetes/main/apps/flux-system/addons/app/notifications/kustomization.yaml
 create mode 100755 kubernetes/main/apps/flux-system/addons/app/webhooks/github/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/flux-system/addons/app/webhooks/github/ingress.yaml
 create mode 100755 kubernetes/main/apps/flux-system/addons/app/webhooks/github/kustomization.yaml
 create mode 100755 kubernetes/main/apps/flux-system/addons/app/webhooks/github/receiver.yaml
 create mode 100755 kubernetes/main/apps/flux-system/addons/app/webhooks/kustomization.yaml
 create mode 100755 kubernetes/main/apps/flux-system/addons/ks.yaml
 create mode 100755 kubernetes/main/apps/flux-system/kustomization.yaml
 create mode 100755 kubernetes/main/apps/flux-system/namespace.yaml
 create mode 100755 kubernetes/main/apps/keycloak/crds/kustomization.yaml
 create mode 100755 kubernetes/main/apps/keycloak/deployment/cr.yaml
 create mode 100755 kubernetes/main/apps/keycloak/deployment/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/keycloak/deployment/ingress.yaml
 create mode 100755 kubernetes/main/apps/keycloak/deployment/kustomization.yaml
 create mode 100755 kubernetes/main/apps/keycloak/ks.yaml
 create mode 100755 kubernetes/main/apps/keycloak/kustomization.yaml
 create mode 100755 kubernetes/main/apps/keycloak/namespace.yaml
 create mode 100755 kubernetes/main/apps/keycloak/operator/kustomization.yaml
 create mode 100755 kubernetes/main/apps/kube-system/cilium/app/helm-values.yaml
 create mode 100755 kubernetes/main/apps/kube-system/cilium/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/kube-system/cilium/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/kube-system/cilium/app/kustomizeconfig.yaml
 create mode 100755 kubernetes/main/apps/kube-system/cilium/config/kustomization.yaml
 create mode 100755 kubernetes/main/apps/kube-system/cilium/config/l2.yaml
 create mode 100755 kubernetes/main/apps/kube-system/cilium/ks.yaml
 create mode 100755 kubernetes/main/apps/kube-system/coredns/app/helm-values.yaml
 create mode 100755 kubernetes/main/apps/kube-system/coredns/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/kube-system/coredns/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/kube-system/coredns/app/kustomizeconfig.yaml
 create mode 100755 kubernetes/main/apps/kube-system/coredns/ks.yaml
 create mode 100755 kubernetes/main/apps/kube-system/generic-device-plugin.yaml
 create mode 100755 kubernetes/main/apps/kube-system/kubelet-csr-approver/app/helm-values.yaml
 create mode 100755 kubernetes/main/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/kube-system/kubelet-csr-approver/app/kustomizeconfig.yaml
 create mode 100755 kubernetes/main/apps/kube-system/kubelet-csr-approver/ks.yaml
 create mode 100755 kubernetes/main/apps/kube-system/kustomization.yaml
 create mode 100755 kubernetes/main/apps/kube-system/metrics-server/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/kube-system/metrics-server/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/kube-system/metrics-server/ks.yaml
 create mode 100644 kubernetes/main/apps/kube-system/multus/app/kustomization.yaml
 create mode 100644 kubernetes/main/apps/kube-system/multus/app/patch.yaml
 create mode 100644 kubernetes/main/apps/kube-system/multus/config/kustomization.yaml
 create mode 100644 kubernetes/main/apps/kube-system/multus/config/network-attachment-definition.yaml
 create mode 100644 kubernetes/main/apps/kube-system/multus/ks.yaml
 create mode 100755 kubernetes/main/apps/kube-system/namespace.yaml
 create mode 100755 kubernetes/main/apps/kube-system/nvidia-device-plugin/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/kube-system/nvidia-device-plugin/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/kube-system/nvidia-device-plugin/config/kustomization.yaml
 create mode 100755 kubernetes/main/apps/kube-system/nvidia-device-plugin/config/runtime.yaml
 create mode 100755 kubernetes/main/apps/kube-system/nvidia-device-plugin/ks.yaml
 create mode 100755 kubernetes/main/apps/kube-system/reloader/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/kube-system/reloader/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/kube-system/reloader/ks.yaml
 create mode 100644 kubernetes/main/apps/kube-system/vfio-binding.yaml
 create mode 100644 kubernetes/main/apps/kube-system/zfs-localpv/app/helm-values.yaml
 create mode 100644 kubernetes/main/apps/kube-system/zfs-localpv/app/helmrelease.yaml
 create mode 100644 kubernetes/main/apps/kube-system/zfs-localpv/app/kustomization.yaml
 create mode 100644 kubernetes/main/apps/kube-system/zfs-localpv/app/kustomizeconfig.yaml
 create mode 100644 kubernetes/main/apps/kube-system/zfs-localpv/config/kustomization.yaml
 create mode 100644 kubernetes/main/apps/kube-system/zfs-localpv/config/snapshotclass.yaml
 create mode 100644 kubernetes/main/apps/kube-system/zfs-localpv/config/storageclass.yaml
 create mode 100644 kubernetes/main/apps/kube-system/zfs-localpv/ks.yaml
 create mode 100755 kubernetes/main/apps/kyverno/kustomization.yaml
 create mode 100755 kubernetes/main/apps/kyverno/kyverno/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/kyverno/kyverno/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/kyverno/kyverno/ks.yaml
 create mode 100644 kubernetes/main/apps/kyverno/kyverno/policies/default-deny.yaml
 create mode 100755 kubernetes/main/apps/kyverno/kyverno/policies/dns-config.yaml
 create mode 100755 kubernetes/main/apps/kyverno/kyverno/policies/hostpath-readonly.yaml
 create mode 100755 kubernetes/main/apps/kyverno/kyverno/policies/kustomization.yaml
 create mode 100755 kubernetes/main/apps/kyverno/namespace.yaml
 create mode 100755 kubernetes/main/apps/media/autobrr/app/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/media/autobrr/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/media/autobrr/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/media/autobrr/app/resources/gatus-ep.yaml
 create mode 100755 kubernetes/main/apps/media/autobrr/app/resources/lokirule.yaml
 create mode 100755 kubernetes/main/apps/media/autobrr/ks.yaml
 create mode 100755 kubernetes/main/apps/media/bazarr/app/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/media/bazarr/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/media/bazarr/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/media/bazarr/app/resources/connectionpool.py
 create mode 100755 kubernetes/main/apps/media/bazarr/app/resources/subcleaner.sh
 create mode 100755 kubernetes/main/apps/media/bazarr/ks.yaml
 create mode 100755 kubernetes/main/apps/media/flaresolverr/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/media/flaresolverr/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/media/flaresolverr/ks.yaml
 create mode 100755 kubernetes/main/apps/media/jellyseerr/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/media/jellyseerr/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/media/jellyseerr/app/resources/gatus-ep.yaml
 create mode 100644 kubernetes/main/apps/media/jellyseerr/app/volsync-dst.yaml
 create mode 100644 kubernetes/main/apps/media/jellyseerr/app/volsync-src.yaml
 create mode 100755 kubernetes/main/apps/media/jellyseerr/ks.yaml
 create mode 100755 kubernetes/main/apps/media/kustomization.yaml
 create mode 100755 kubernetes/main/apps/media/namespace.yaml
 create mode 100755 kubernetes/main/apps/media/notifiarr/app/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/media/notifiarr/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/media/notifiarr/ks.yaml
 create mode 100755 kubernetes/main/apps/media/omegabrr/app/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/media/omegabrr/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/media/omegabrr/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/media/omegabrr/ks.yaml
 create mode 100755 kubernetes/main/apps/media/plex/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/media/plex/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/media/plex/app/resources/gatus-ep.yaml
 create mode 100755 kubernetes/main/apps/media/plex/app/resources/lokirule.yaml
 create mode 100755 kubernetes/main/apps/media/plex/ks.yaml
 create mode 100755 kubernetes/main/apps/media/prowlarr/app/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/media/prowlarr/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/media/prowlarr/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/media/prowlarr/app/resources/gatus-ep.yaml
 create mode 100755 kubernetes/main/apps/media/prowlarr/ks.yaml
 create mode 100755 kubernetes/main/apps/media/qbittorrent/app/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/media/qbittorrent/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/media/qbittorrent/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/media/qbittorrent/app/resources/gatus-ep.yaml
 create mode 100755 kubernetes/main/apps/media/qbittorrent/app/resources/healthcheck.sh
 create mode 100755 kubernetes/main/apps/media/qbittorrent/app/resources/lokirule.yaml
 create mode 100755 kubernetes/main/apps/media/qbittorrent/app/resources/post-process.sh
 create mode 100755 kubernetes/main/apps/media/qbittorrent/app/resources/qbitmanage-config.yaml
 create mode 100644 kubernetes/main/apps/media/qbittorrent/app/volsync-dst.yaml
 create mode 100644 kubernetes/main/apps/media/qbittorrent/app/volsync-src.yaml
 create mode 100755 kubernetes/main/apps/media/qbittorrent/ks.yaml
 create mode 100755 kubernetes/main/apps/media/radarr/app/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/media/radarr/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/media/radarr/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/media/radarr/app/resources/gatus-ep.yaml
 create mode 100755 kubernetes/main/apps/media/radarr/app/resources/pushover-notify.sh
 create mode 100755 kubernetes/main/apps/media/radarr/ks.yaml
 create mode 100755 kubernetes/main/apps/media/sabnzbd/app/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/media/sabnzbd/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/media/sabnzbd/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/media/sabnzbd/app/resources/gatus-ep.yaml
 create mode 100755 kubernetes/main/apps/media/sabnzbd/app/resources/post-process.sh
 create mode 100644 kubernetes/main/apps/media/sabnzbd/app/volsync-dst.yaml
 create mode 100755 kubernetes/main/apps/media/sabnzbd/app/volsync.yaml
 create mode 100755 kubernetes/main/apps/media/sabnzbd/ks.yaml
 create mode 100755 kubernetes/main/apps/media/sonarr/app/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/media/sonarr/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/media/sonarr/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/media/sonarr/app/resources/gatus-ep.yaml
 create mode 100755 kubernetes/main/apps/media/sonarr/app/resources/pushover-notify.sh
 create mode 100755 kubernetes/main/apps/media/sonarr/app/resources/refresh-series.sh
 create mode 100755 kubernetes/main/apps/media/sonarr/ks.yaml
 create mode 100755 kubernetes/main/apps/media/unpackerr/app/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/media/unpackerr/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/media/unpackerr/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/media/unpackerr/ks.yaml
 create mode 100755 kubernetes/main/apps/network/cloudflared/app/dnsendpoint.yaml
 create mode 100755 kubernetes/main/apps/network/cloudflared/app/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/network/cloudflared/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/network/cloudflared/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/network/cloudflared/app/resources/config.yaml
 create mode 100755 kubernetes/main/apps/network/cloudflared/ks.yaml
 create mode 100755 kubernetes/main/apps/network/crowdsec/app/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/network/crowdsec/app/helmrelease.yaml
 create mode 100644 kubernetes/main/apps/network/crowdsec/app/ingress-appsec.yaml
 create mode 100755 kubernetes/main/apps/network/crowdsec/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/network/crowdsec/ks.yaml
 create mode 100755 kubernetes/main/apps/network/external-dns/RFC3645/config.yaml
 create mode 100755 kubernetes/main/apps/network/external-dns/RFC3645/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/network/external-dns/RFC3645/kustomization.yaml
 create mode 100755 kubernetes/main/apps/network/external-dns/cloudflare/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/network/external-dns/cloudflare/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/network/external-dns/cloudflare/kustomization.yaml
 create mode 100755 kubernetes/main/apps/network/external-dns/ks.yaml
 create mode 100755 kubernetes/main/apps/network/kustomization.yaml
 create mode 100755 kubernetes/main/apps/network/namespace.yaml
 create mode 100755 kubernetes/main/apps/network/nginx/external/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/network/nginx/external/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/network/nginx/external/kustomization.yaml
 create mode 100644 kubernetes/main/apps/network/nginx/internal/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/network/nginx/internal/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/network/nginx/internal/kustomization.yaml
 create mode 100755 kubernetes/main/apps/network/nginx/ks.yaml
 create mode 100755 kubernetes/main/apps/oauth2-proxy/app/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/oauth2-proxy/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/oauth2-proxy/app/ingress-external.yaml
 create mode 100755 kubernetes/main/apps/oauth2-proxy/app/ingress-internal.yaml
 create mode 100755 kubernetes/main/apps/oauth2-proxy/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/oauth2-proxy/app/resources/gatus-ep.yaml
 create mode 100755 kubernetes/main/apps/oauth2-proxy/ks.yaml
 create mode 100755 kubernetes/main/apps/oauth2-proxy/kustomization.yaml
 create mode 100755 kubernetes/main/apps/oauth2-proxy/namespace.yaml
 create mode 100755 kubernetes/main/apps/observability/gatus/app/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/observability/gatus/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/observability/gatus/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/observability/gatus/app/rbac.yaml
 create mode 100755 kubernetes/main/apps/observability/gatus/app/resources/config.yaml
 create mode 100755 kubernetes/main/apps/observability/gatus/ks.yaml
 create mode 100755 kubernetes/main/apps/observability/grafana/app/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/observability/grafana/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/observability/grafana/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/observability/grafana/ks.yaml
 create mode 100755 kubernetes/main/apps/observability/kube-prometheus-stack/app/externalsecret.yaml
 create mode 100755 kubernetes/main/apps/observability/kube-prometheus-stack/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/observability/kube-prometheus-stack/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/observability/kube-prometheus-stack/app/prometheusrule.yaml
 create mode 100755 kubernetes/main/apps/observability/kube-prometheus-stack/ks.yaml
 create mode 100755 kubernetes/main/apps/observability/kustomization.yaml
 create mode 100755 kubernetes/main/apps/observability/loki/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/observability/loki/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/observability/loki/ks.yaml
 create mode 100755 kubernetes/main/apps/observability/namespace.yaml
 create mode 100755 kubernetes/main/apps/observability/prometheus-operator-crds/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/observability/prometheus-operator-crds/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/observability/prometheus-operator-crds/ks.yaml
 create mode 100755 kubernetes/main/apps/observability/promtail/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/observability/promtail/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/observability/promtail/ks.yaml
 create mode 100755 kubernetes/main/apps/system-upgrade/kustomization.yaml
 create mode 100755 kubernetes/main/apps/system-upgrade/namespace.yaml
 create mode 100755 kubernetes/main/apps/system-upgrade/system-upgrade-controller/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/system-upgrade/system-upgrade-controller/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/system-upgrade/system-upgrade-controller/app/rbac.yaml
 create mode 100755 kubernetes/main/apps/system-upgrade/system-upgrade-controller/ks.yaml
 create mode 100755 kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans/kubernetes.yaml
 create mode 100755 kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans/kustomization.yaml
 create mode 100755 kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans/talos.yaml
 create mode 100755 kubernetes/main/apps/vault/kustomization.yaml
 create mode 100755 kubernetes/main/apps/vault/namespace.yaml
 create mode 100755 kubernetes/main/apps/vault/vault/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/vault/vault/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/vault/vault/app/resources/gatus-ep.yaml
 create mode 100755 kubernetes/main/apps/vault/vault/ks.yaml
 create mode 100644 kubernetes/main/apps/vaultwarden/app/externalsecret.yaml
 create mode 100644 kubernetes/main/apps/vaultwarden/app/helmrelease.yaml
 create mode 100644 kubernetes/main/apps/vaultwarden/app/ks.yaml
 create mode 100644 kubernetes/main/apps/vaultwarden/app/kustomization.yaml
 create mode 100644 kubernetes/main/apps/vaultwarden/app/resources/gatus-ep.yaml
 create mode 100755 kubernetes/main/apps/vaultwarden/kustomization.yaml
 create mode 100755 kubernetes/main/apps/vaultwarden/namespace.yaml
 create mode 100644 kubernetes/main/apps/virtualization/cdi/app/cr.yaml
 create mode 100644 kubernetes/main/apps/virtualization/cdi/app/kustomization.yaml
 create mode 100644 kubernetes/main/apps/virtualization/cdi/ks.yaml
 create mode 100644 kubernetes/main/apps/virtualization/kubevirt-manager/app/ingress.yaml
 create mode 100644 kubernetes/main/apps/virtualization/kubevirt-manager/app/kustomization.yaml
 create mode 100644 kubernetes/main/apps/virtualization/kubevirt-manager/ks.yaml
 create mode 100644 kubernetes/main/apps/virtualization/kubevirt/app/cr.yaml
 create mode 100644 kubernetes/main/apps/virtualization/kubevirt/app/kustomization.yaml
 create mode 100644 kubernetes/main/apps/virtualization/kubevirt/ks.yaml
 create mode 100644 kubernetes/main/apps/virtualization/kustomization.yaml
 create mode 100644 kubernetes/main/apps/virtualization/namespace.yaml
 create mode 100644 kubernetes/main/apps/virtualization/virtual-machines/truenas-scale/app/kustomization.yaml
 create mode 100644 kubernetes/main/apps/virtualization/virtual-machines/truenas-scale/app/pvc.yaml
 create mode 100644 kubernetes/main/apps/virtualization/virtual-machines/truenas-scale/app/virtualmachine.yaml
 create mode 100644 kubernetes/main/apps/virtualization/virtual-machines/truenas-scale/ks.yaml
 create mode 100644 kubernetes/main/apps/virtualization/virtual-machines/windows-server/app/kustomization.yaml
 create mode 100644 kubernetes/main/apps/virtualization/virtual-machines/windows-server/app/pvc.yaml
 create mode 100644 kubernetes/main/apps/virtualization/virtual-machines/windows-server/app/virtualmachine.yaml
 create mode 100644 kubernetes/main/apps/virtualization/virtual-machines/windows-server/ks.yaml
 create mode 100755 kubernetes/main/apps/volsync-system/kustomization.yaml
 create mode 100755 kubernetes/main/apps/volsync-system/namespace.yaml
 create mode 100755 kubernetes/main/apps/volsync-system/volsync/app/helmrelease.yaml
 create mode 100755 kubernetes/main/apps/volsync-system/volsync/app/kustomization.yaml
 create mode 100755 kubernetes/main/apps/volsync-system/volsync/app/prometheusrule.yaml
 create mode 100755 kubernetes/main/apps/volsync-system/volsync/ks.yaml
 create mode 100644 kubernetes/main/apps/zfs/kustomization.yaml
 create mode 100644 kubernetes/main/apps/zfs/namespace.yaml
 create mode 100644 kubernetes/main/apps/zfs/zfs-scrubber/app/externalsecret.yaml
 create mode 100644 kubernetes/main/apps/zfs/zfs-scrubber/app/helmrelease.yaml
 create mode 100644 kubernetes/main/apps/zfs/zfs-scrubber/app/kustomization.yaml
 create mode 100644 kubernetes/main/apps/zfs/zfs-scrubber/ks.yaml
 create mode 100755 kubernetes/main/bootstrap/flux/age-key.secret.sops.yaml
 create mode 100755 kubernetes/main/bootstrap/flux/deploy-key.secret.sops.yaml
 create mode 100755 kubernetes/main/bootstrap/flux/kustomization.yaml
 create mode 100755 kubernetes/main/bootstrap/helmfile.yaml
 create mode 100644 kubernetes/main/bootstrap/talos/k8s-0.secret.sops.yaml
 create mode 100755 kubernetes/main/flux/apps.yaml
 create mode 100755 kubernetes/main/flux/config/cluster.yaml
 create mode 100755 kubernetes/main/flux/config/flux.yaml
 create mode 100755 kubernetes/main/flux/config/kustomization.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/actions-runner-controller.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/backube.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/bitnami.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/bjw-s.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/cilium.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/cloudnative-pg.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/coredns.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/crowdsec.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/csi-driver-nfs.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/descheduler.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/emberstack.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/external-dns.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/external-secrets.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/grafana.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/ingress-nginx.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/jetstack.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/kustomization.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/kyverno.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/metrics-server.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/node-feature-discovery.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/nvidia-device-plugin.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/oauth2-proxy.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/piraeus.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/postfinance.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/prometheus-community.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/stakater.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/vault.yaml
 create mode 100755 kubernetes/main/flux/repositories/helm/vaultwarden.yaml
 create mode 100644 kubernetes/main/flux/repositories/helm/zfs-localpv.yaml
 create mode 100755 kubernetes/main/flux/repositories/kustomization.yaml
 create mode 100755 kubernetes/main/flux/vars/cluster-secrets.secret.sops.yaml

diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
new file mode 100755
index 000000000..39c765cab
--- /dev/null
+++ b/.devcontainer/Dockerfile
@@ -0,0 +1,94 @@
+FROM alpine:edge@sha256:8431297eedca8df8f1e6144803c6d7e057ecff2408aa6861213cb9e507acadf8
+
+ARG USERNAME=vscode
+ARG USER_UID=1000
+ARG USER_GID=$USER_UID
+# renovate: depName=fluxcd/flux2
+ARG FLUX_VERSION=v2.4.0
+# renovate: depName=getsops/sops
+ARG SOPS_VERSION=v3.9.1
+# renovate: depName=kubernetes/kubernetes
+ARG KUBECTL_VERSION=v1.31.3
+# renovate: depName=helmfile/helmfile
+ARG HELMFILE_VERSION=v0.169.1
+# renovate: depName=helm/helm
+ARG HELM_VERSION=v3.16.3
+# renovate: depName=go-task/task
+ARG TASK_VERSION=v3.40.0
+# renovate: depName=siderolabs/talos
+ARG TALOSCTL_VERSION=v1.8.3
+# renovate: depName=kubevirt/kubevirt
+ARG VIRTCTL_VERSION=v1.4.0
+
+ENV SHELL=/bin/bash
+
+WORKDIR /tmp
+
+RUN echo "**** Installing packages ****" && \
+    apk add --no-cache \
+        coreutils \
+        curl \
+        cosign \
+        jq \
+        nano \
+        bash \
+        openssl \
+        ca-certificates \
+        git \
+        github-cli \
+        libstdc++ \
+        direnv \
+        yq \
+        unzip && \
+    echo "**** Creating user and group ****" && \
+    addgroup -g $USER_GID $USERNAME && \
+    adduser -u $USER_UID -G $USERNAME -s /bin/sh -D $USERNAME && \
+    echo "**** Adding direnv hook ****" && \
+    echo 'eval "$(direnv hook bash)"' >> /home/$USERNAME/.bashrc && \
+    echo "**** Installing binaries ****" && \
+    curl -fsSLO "https://dl.k8s.io/release/$KUBECTL_VERSION/bin/linux/amd64/kubectl" && \
+    curl -fsSLO "https://dl.k8s.io/release/$KUBECTL_VERSION/bin/linux/amd64/kubectl.sha256" && \
+    echo "$(cat kubectl.sha256) kubectl" | sha256sum --check --strict && \
+    chmod +x ./kubectl && \
+    mv ./kubectl /usr/local/bin/kubectl && \
+    curl -fsSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash -s -- -v $HELM_VERSION && \
+    curl -fsSLO https://github.com/getsops/sops/releases/download/$SOPS_VERSION/sops-$SOPS_VERSION.linux.amd64 && \
+    curl -fsSLO https://github.com/getsops/sops/releases/download/$SOPS_VERSION/sops-$SOPS_VERSION.checksums.txt && \
+    curl -fsSLO https://github.com/getsops/sops/releases/download/$SOPS_VERSION/sops-$SOPS_VERSION.checksums.pem && \
+    curl -fsSLO https://github.com/getsops/sops/releases/download/$SOPS_VERSION/sops-$SOPS_VERSION.checksums.sig && \
+    cosign verify-blob sops-$SOPS_VERSION.checksums.txt \
+        --certificate sops-$SOPS_VERSION.checksums.pem \
+        --signature sops-$SOPS_VERSION.checksums.sig \
+        --certificate-identity-regexp=https://github.com/getsops \
+        --certificate-oidc-issuer=https://token.actions.githubusercontent.com && \
+    grep "sops-$SOPS_VERSION.linux.amd64" sops-$SOPS_VERSION.checksums.txt > checksum-linux-amd64.txt && \
+    echo "$(cat checksum-linux-amd64.txt)" | sha256sum --check --strict && \
+    mv sops-$SOPS_VERSION.linux.amd64 /usr/local/bin/sops && \
+    chmod +x /usr/local/bin/sops && \
+    curl -fsSLO https://github.com/siderolabs/talos/releases/download/$TALOSCTL_VERSION/talosctl-linux-amd64 && \
+    curl -fsSL -o talosctl_sha256sum.txt https://github.com/siderolabs/talos/releases/download/$TALOSCTL_VERSION/sha256sum.txt && \
+    grep "talosctl-linux-amd64" talosctl_sha256sum.txt > talosctl_amd64_sha256sum.txt && \
+    echo "$(cat talosctl_amd64_sha256sum.txt)" | sha256sum --check --strict && \
+    chmod +x ./talosctl-linux-amd64 && mv ./talosctl-linux-amd64 /usr/local/bin/talosctl && \
+    curl -fsSLO https://github.com/helmfile/helmfile/releases/download/$HELMFILE_VERSION/helmfile_$(echo $HELMFILE_VERSION | sed 's/^v//')_checksums.txt && \
+    curl -fsSLO https://github.com/helmfile/helmfile/releases/download/$HELMFILE_VERSION/helmfile_$(echo $HELMFILE_VERSION | sed 's/^v//')_linux_amd64.tar.gz && \
+    grep "helmfile_$(echo $HELMFILE_VERSION | sed 's/^v//')_linux_amd64.tar.gz" helmfile_$(echo $HELMFILE_VERSION | sed 's/^v//')_checksums.txt > checksum-helmfile-linux-amd64.txt && \
+    echo "$(cat checksum-helmfile-linux-amd64.txt)" | sha256sum --check --strict && \
+    tar -xvzf helmfile_$(echo $HELMFILE_VERSION | sed 's/^v//')_linux_amd64.tar.gz && \
+    chmod +x ./helmfile && \
+    mv helmfile /usr/local/bin/helmfile && \
+    curl -fsSL https://taskfile.dev/install.sh | bash -s -- -d $TASK_VERSION -b /usr/local/bin && \
+    curl -fsSL https://fluxcd.io/install.sh | FLUX_VERSION=$(echo $FLUX_VERSION | sed 's/^v//') bash && \
+    curl -fsSLO https://github.com/kubevirt/kubevirt/releases/download/$VIRTCTL_VERSION/virtctl-$VIRTCTL_VERSION-linux-amd64 && \
+    chmod +x ./virtctl-$VIRTCTL_VERSION-linux-amd64 && mv ./virtctl-$VIRTCTL_VERSION-linux-amd64 /usr/local/bin/virtctl && \
+    curl -fsSLO https://releases.pagure.org/virt-viewer/virt-viewer-11.0.tar.xz && \
+    tar -xJf virt-viewer-11.0.tar.xz && \
+    chmod +x ./virt-viewer-11.0 && mv ./virt-viewer-11.0 /usr/local/bin/virt-viewer && \
+    echo "**** Cleaning up ****" && \
+    rm -rf /root/.cache /tmp/*
+
+WORKDIR /project
+
+USER $USERNAME
+
+CMD ["/bin/bash"]
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100755
index 000000000..a3600d994
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,78 @@
+//devcontainer.json
+{
+  "name": "Kubernetes Management",
+
+  "image": "ghcr.io/darkfella91/devcontainer:v1.0.0@sha256:a14b98f82f1ac17d530156e84b77a53c4e68112ed39016641a0158d418a97986",
+
+  "postStartCommand": "/usr/bin/direnv allow /project/.envrc",
+
+  "workspaceFolder": "/project",
+  "workspaceMount": "source=${localWorkspaceFolder},target=/project,type=bind,consistency=consistent",
+
+  "customizations": {
+    "vscode": {
+      "settings": {
+        "editor.bracketPairColorization.enabled": true,
+        "editor.fontFamily": "FiraCode Nerd Font, monospace",
+        "editor.fontLigatures": true,
+        "editor.guides.bracketPairs": true,
+        "editor.guides.bracketPairsHorizontal": true,
+        "editor.guides.highlightActiveBracketPair": true,
+        "editor.hover.delay": 1500,
+        "editor.rulers": [
+          100
+        ],
+        "editor.stickyScroll.enabled": false,
+        "explorer.autoReveal": false,
+
+        "files.associations": {
+          "**/*.json5": "jsonc"
+        },
+        "files.trimTrailingWhitespace": true,
+
+        "material-icon-theme.files.associations": {
+          "*.secret.sops.env": "lock",
+          "*.secret.sops.yaml": "lock"
+        },
+        "material-icon-theme.activeIconPack": "angular",
+        "material-icon-theme.folders.associations": {
+          ".archive": "archive",
+          ".github/workflows": "robot",
+          "actions-runner-system": "github",
+          "cert-manager": "guard",
+          "digester-system": "hook",
+          "external-secrets": "secure",
+          "flux-system": "pipe",
+          "flux": "pipe",
+          "kube-system": "kubernetes",
+          "network": "connection",
+          "observability": "event",
"rook-ceph": "base", + "storage": "dump", + "system-upgrade": "update", + "tools": "tools", + "volsync": "aws", + "cloudflared": "cloudflare", + "talos": "linux" + }, + "sops.defaults.ageKeyFile": "./age.key", + "sops.creationEnabled": true, + "yaml.schemaStore.enable": true, + "yaml.schemas": { + "kubernetes": "./kubernetes/**/*.yaml" + } + }, + "extensions": [ + "signageos.signageos-vscode-sops", + "BriteSnow.vscode-toggle-quotes", + "redhat.vscode-yaml", + "mitchdenny.ecdc", + "mikestead.dotenv", + "fcrespo82.markdown-table-formatter", + "albert.TabOut", + "PKief.material-icon-theme", + "PKief.material-product-icons" + ] + } + } +} \ No newline at end of file diff --git a/.devcontainer/version.txt b/.devcontainer/version.txt new file mode 100644 index 000000000..0ec25f750 --- /dev/null +++ b/.devcontainer/version.txt @@ -0,0 +1 @@ +v1.0.0 diff --git a/.editorconfig b/.editorconfig new file mode 100755 index 000000000..6e40cb65c --- /dev/null +++ b/.editorconfig @@ -0,0 +1,23 @@ +; https://editorconfig.org/ + +root = true + +[*] +indent_style = space +indent_size = 2 +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true + +[{Makefile,go.mod,go.sum,*.go,.gitmodules}] +indent_style = tab +indent_size = 4 + +[*.md] +indent_size = 4 +trim_trailing_whitespace = false + +[{Dockerfile,*.bash,*.sh}] +indent_style = space +indent_size = 4 diff --git a/.envrc b/.envrc new file mode 100755 index 000000000..9291d771d --- /dev/null +++ b/.envrc @@ -0,0 +1,8 @@ +#shellcheck disable=SC2148,SC2155 +# Kubernetes +export KUBECONFIG="$(expand_path ./kubernetes/main/kubeconfig)" +export SOPS_AGE_KEY_FILE="$(expand_path ./age.key)" +export TALOSCONFIG="$(expand_path ./kubernetes/main/talosconfig)" +# Taskfile +export TASK_X_ENV_PRECEDENCE=1 +export TASK_X_MAP_VARIABLES=0 diff --git a/.gitattributes b/.gitattributes new file mode 100755 index 000000000..3f5563f4c --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +* text=auto eol=lf +*.sops.* diff=sopsdiffer diff --git a/.github/labeler.yaml b/.github/labeler.yaml new file mode 100755 index 000000000..af6ba72e0 --- /dev/null +++ b/.github/labeler.yaml @@ -0,0 +1,22 @@ +--- +# Areas +area/docs: + - changed-files: + - any-glob-to-any-file: + - "docs/**/*" + - "README.md" +area/github: + - changed-files: + - any-glob-to-any-file: ".github/**/*" +area/kubernetes: + - changed-files: + - any-glob-to-any-file: "kubernetes/**/*" +area/taskfile: + - changed-files: + - any-glob-to-any-file: + - ".taskfiles/**/*" + - "Taskfile.yaml" +# Clusters +cluster/main: + - changed-files: + - any-glob-to-any-file: "kubernetes/main/**/*" diff --git a/.github/labels.yaml b/.github/labels.yaml new file mode 100755 index 000000000..86f42d1d9 --- /dev/null +++ b/.github/labels.yaml @@ -0,0 +1,38 @@ +--- +# Areas +- name: area/docs + color: "0e8a16" +- name: area/github + color: "0e8a16" +- name: area/kubernetes + color: "0e8a16" +- name: area/taskfile + color: "0e8a16" +# Clusters +- name: cluster/main + color: "ffc300" +# Renovate Types +- name: renovate/container + color: "027fa0" +- name: renovate/github-action + color: "027fa0" +- name: renovate/grafana-dashboard + color: "027fa0" +- name: renovate/github-release + color: "027fa0" +- name: renovate/helm + color: "027fa0" +# Semantic Types +- name: type/digest + color: "ffeC19" +- name: type/patch + color: "ffeC19" +- name: type/minor + color: "ff9800" +- name: type/major + color: "f6412d" +# Uncategorized +- name: community + color: "370fb2" +- name: hold + color: "ee0701" diff --git 
a/.github/renovate.json5 b/.github/renovate.json5 new file mode 100755 index 000000000..552a78267 --- /dev/null +++ b/.github/renovate.json5 @@ -0,0 +1,46 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "config:recommended", + "docker:enableMajor", + "replacements:k8s-registry-move", + ":automergeBranch", + ":disableRateLimiting", + ":dependencyDashboard", + ":semanticCommits", + ":skipStatusChecks", + ":timezone(Europe/Sofia)", + "github>Darkfella91/home-ops//.github/renovate/allowedVersions.json5", + "github>Darkfella91/home-ops//.github/renovate/autoMerge.json5", + "github>Darkfella91/home-ops//.github/renovate/clusters.json5", + "github>Darkfella91/home-ops//.github/renovate/commitMessage.json5", + "github>Darkfella91/home-ops//.github/renovate/customManagers.json5", + "github>Darkfella91/home-ops//.github/renovate/grafanaDashboards.json5", + "github>Darkfella91/home-ops//.github/renovate/groups.json5", + "github>Darkfella91/home-ops//.github/renovate/labels.json5", + "github>Darkfella91/home-ops//.github/renovate/packageRules.json5", + "github>Darkfella91/home-ops//.github/renovate/semanticCommits.json5", + "github>Darkfella91/home-ops//.github/renovate/devcontainer.json5" + ], + "dependencyDashboardTitle": "Renovate Dashboard 🤖", + "suppressNotifications": ["prEditedNotification", "prIgnoreNotification"], + "onboarding": false, + "requireConfig": "ignored", + "ignorePaths": ["**/*.sops.*", "**/.archive/**", "**/resources/**"], + "flux": { + "fileMatch": [ + "(^|/)kubernetes/.+\\.ya?ml(?:\\.j2)?$" + ] + }, + "helm-values": { + "fileMatch": [ + "(^|/)kubernetes/.+\\.ya?ml(?:\\.j2)?$" + ] + }, + "kubernetes": { + "fileMatch": [ + "(^|/)\\.taskfiles/.+\\.ya?ml(?:\\.j2)?$", + "(^|/)kubernetes/.+\\.ya?ml(?:\\.j2)?$" + ] + } +} diff --git a/.github/renovate/allowedVersions.json5 b/.github/renovate/allowedVersions.json5 new file mode 100755 index 000000000..f02bf47a5 --- /dev/null +++ b/.github/renovate/allowedVersions.json5 @@ -0,0 +1,10 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "packageRules": [ + { + "matchDatasources": ["docker"], + "matchPackagePatterns": ["postgresql"], + "allowedVersions": "<18" + } + ] +} diff --git a/.github/renovate/autoMerge.json5 b/.github/renovate/autoMerge.json5 new file mode 100755 index 000000000..73d3cdc17 --- /dev/null +++ b/.github/renovate/autoMerge.json5 @@ -0,0 +1,21 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "packageRules": [ + { + "description": ["Auto-merge container digests updates for trusted containers"], + "matchDatasources": ["docker"], + "automerge": true, + "automergeType": "branch", + "matchUpdateTypes": ["digest"], + "matchPackagePatterns": ["ghcr.io/bjw-s", "ghcr.io/onedr0p"] + }, + { + "description": ["Auto-merge GitHub Actions for minor and patch"], + "matchManagers": ["github-actions"], + "matchDatasources": ["github-tags"], + "automerge": true, + "automergeType": "branch", + "matchUpdateTypes": ["minor", "patch"] + } + ] +} diff --git a/.github/renovate/clusters.json5 b/.github/renovate/clusters.json5 new file mode 100755 index 000000000..7ceb227b0 --- /dev/null +++ b/.github/renovate/clusters.json5 @@ -0,0 +1,10 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "packageRules": [ + { + "description": ["Separate PRs for main cluster"], + "matchFileNames": ["**/kubernetes/main/**"], + "additionalBranchPrefix": "main-" + } + ] +} diff --git a/.github/renovate/commitMessage.json5 
b/.github/renovate/commitMessage.json5
new file mode 100755
index 000000000..3fea62872
--- /dev/null
+++ b/.github/renovate/commitMessage.json5
@@ -0,0 +1,16 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "commitMessageTopic": "{{depName}}",
+  "commitMessageExtra": "to {{newVersion}}",
+  "commitMessageSuffix": "",
+  "packageRules": [
+    {
+      "matchDatasources": ["helm"],
+      "commitMessageTopic": "chart {{depName}}"
+    },
+    {
+      "matchDatasources": ["docker"],
+      "commitMessageTopic": "image {{depName}}"
+    }
+  ]
+}
diff --git a/.github/renovate/customManagers.json5 b/.github/renovate/customManagers.json5
new file mode 100755
index 000000000..47da2e311
--- /dev/null
+++ b/.github/renovate/customManagers.json5
@@ -0,0 +1,35 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "customManagers": [
+    {
+      "customType": "regex",
+      "description": ["Process YAML custom dependencies"],
+      "fileMatch": [
+        "(^|/)kubernetes/.+\\.env$",
+        "(^|/)kubernetes/.+\\.ya?ml$"
+      ],
+      "matchStrings": [
+        // # renovate: datasource=github-releases depName=k3s-io/k3s
+        // k3s_release_version: &version v1.29.0+k3s1
+        // # renovate: datasource=helm depName=cilium repository=https://helm.cilium.io
+        // version: 1.15.1
+        // # renovate: datasource=docker depName=ghcr.io/siderolabs/kubelet
+        // KUBERNETES_VERSION=v1.31.1
+        "datasource=(?<datasource>\\S+) depName=(?<depName>\\S+)( repository=(?<registryUrl>\\S+))?\\n.+(:\\s|=)(&\\S+\\s)?(?<currentValue>\\S+)",
+        // # renovate: datasource=github-releases depName=rancher/system-upgrade-controller
+        // https://github.com/rancher/system-upgrade-controller/releases/download/v0.13.2/crd.yaml
+        "datasource=(?<datasource>\\S+) depName=(?<depName>\\S+)\\n.+/(?<currentValue>(v|\\d)[^/]+)",
+        "datasource=(?<datasource>\\S+) depName=(?<depName>\\S+)( repository=(?<registryUrl>\\S+))?\n.+?\"(?<currentValue>\\S+)\""
+      ],
+      "datasourceTemplate": "{{#if datasource}}{{{datasource}}}{{else}}github-releases{{/if}}"
+    },
+    {
+      "customType": "regex",
+      "description": ["Process CloudnativePG Postgresql version"],
+      "fileMatch": ["(^|/)kubernetes/.+\\.ya?ml$"],
+      "matchStrings": ["imageName: (?<depName>\\S+):(?<currentValue>.*\\-.*)"],
+      "datasourceTemplate": "docker",
+      "versioningTemplate": "redhat"
+    }
+  ]
+}
diff --git a/.github/renovate/devcontainer.json5 b/.github/renovate/devcontainer.json5
new file mode 100644
index 000000000..549627cea
--- /dev/null
+++ b/.github/renovate/devcontainer.json5
@@ -0,0 +1,18 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "customManagers": [
+    {
+      "customType": "regex",
+      "description": ["Process ARG versions in Dockerfile"],
+      "fileMatch": ["^\\.devcontainer/Dockerfile$"],
+      "matchStrings": [
+        "# renovate: depName=(?<depName>\\S+)\\s*\\nARG (?<argName>\\S+)=v(?<currentValue>\\S+)"
+      ],
+      "datasourceTemplate": "github-releases",
+      "versioningTemplate": "semver",
+      "depNameTemplate": "{{depName}}",
+      "packageNameTemplate": "{{depName}}",
+      "lookupNameTemplate": "{{depName}}"
+    }
+  ]
+}
diff --git a/.github/renovate/grafanaDashboards.json5 b/.github/renovate/grafanaDashboards.json5
new file mode 100755
index 000000000..580d288db
--- /dev/null
+++ b/.github/renovate/grafanaDashboards.json5
@@ -0,0 +1,34 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "customDatasources": {
+    "grafana-dashboards": {
+      "defaultRegistryUrlTemplate": "https://grafana.com/api/dashboards/{{packageName}}",
+      "format": "json",
+      "transformTemplates": ["{\"releases\":[{\"version\": $string(revision)}]}"]
+    }
+  },
+  "customManagers": [
+    {
+      "customType": "regex",
+      "description": ["Process Grafana dashboards"],
+      "fileMatch": ["(^|/)kubernetes/.+\\.ya?ml$"],
+      "matchStrings": ["depName=\"(?<depName>.*)\"\\n(?<indentation>\\s+)gnetId: (?<packageName>\\d+)\\n.+revision: (?<currentValue>\\d+)"],
+      "autoReplaceStringTemplate": "depName=\"{{{depName}}}\"\n{{{indentation}}}gnetId: {{{packageName}}}\n{{{indentation}}}revision: {{{newValue}}}",
+      "datasourceTemplate": "custom.grafana-dashboards",
+      "versioningTemplate": "regex:^(?<major>\\d+)$"
+    }
+  ],
+  "packageRules": [
+    {
+      "addLabels": ["renovate/grafana-dashboard"],
+      "automerge": true,
+      "automergeType": "branch",
+      "matchDatasources": ["custom.grafana-dashboards"],
+      "matchUpdateTypes": ["major"],
+      "semanticCommitType": "chore",
+      "semanticCommitScope": "grafana-dashboards",
+      "commitMessageTopic": "dashboard {{depName}}",
+      "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )"
+    }
+  ]
+}
diff --git a/.github/renovate/groups.json5 b/.github/renovate/groups.json5
new file mode 100755
index 000000000..79a05f8e7
--- /dev/null
+++ b/.github/renovate/groups.json5
@@ -0,0 +1,66 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "packageRules": [
+    {
+      "description": ["1Password Connect Group"],
+      "groupName": "1Password Connect",
+      "matchPackagePatterns": ["1password/connect"],
+      "matchDatasources": ["docker"],
+      "group": {
+        "commitMessageTopic": "{{{groupName}}} group"
+      },
+      "separateMinorPatch": true
+    },
+    {
+      "description": ["Actions Runner Controller Group"],
+      "groupName": "Actions Runner Controller",
+      "matchPackagePatterns": ["gha-runner-scale-set"],
+      "matchDatasources": ["docker", "helm"],
+      "group": {
+        "commitMessageTopic": "{{{groupName}}} group"
+      },
+      "separateMinorPatch": true
+    },
+    {
+      "description": ["Flux Group"],
+      "groupName": "Flux",
+      "matchPackagePatterns": ["fluxcd"],
+      "matchDatasources": ["docker", "github-tags"],
+      "versioning": "semver",
+      "group": {
+        "commitMessageTopic": "{{{groupName}}} group"
+      },
+      "separateMinorPatch": true
+    },
+    {
+      "description": ["Intel Device Plugins Group"],
+      "groupName": "Intel-Device-Plugins",
+      "matchPackagePatterns": ["intel-device-plugins"],
+      "matchDatasources": ["helm"],
+      "group": {
+        "commitMessageTopic": "{{{groupName}}} group"
+      },
+      "separateMinorPatch": true
+    },
+    {
+      "description": ["Rook-Ceph Group"],
+      "groupName": "Rook-Ceph",
+      "matchPackagePatterns": ["rook.ceph"],
+      "matchDatasources": ["helm"],
+      "group": {
+        "commitMessageTopic": "{{{groupName}}} group"
+      },
+      "separateMinorPatch": true
+    },
+    {
+      "description": ["Talos Group"],
+      "groupName": "Talos",
+      "matchPackagePatterns": ["siderolabs/talosctl", "siderolabs/installer"],
+      "matchDatasources": ["docker"],
+      "group": {
+        "commitMessageTopic": "{{{groupName}}} group"
+      },
+      "separateMinorPatch": true
+    }
+  ]
+}
diff --git a/.github/renovate/labels.json5 b/.github/renovate/labels.json5
new file mode 100755
index 000000000..641ea6e98
--- /dev/null
+++ b/.github/renovate/labels.json5
@@ -0,0 +1,37 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "packageRules": [
+    {
+      "matchUpdateTypes": ["major"],
+      "labels": ["type/major"]
+    },
+    {
+      "matchUpdateTypes": ["minor"],
+      "labels": ["type/minor"]
+    },
+    {
+      "matchUpdateTypes": ["patch"],
+      "labels": ["type/patch"]
+    },
+    {
+      "matchUpdateTypes": ["digest"],
+      "labels": ["type/digest"]
+    },
+    {
+      "matchDatasources": ["docker"],
+      "addLabels": ["renovate/container"]
+    },
+    {
+      "matchDatasources": ["helm"],
+      "addLabels": ["renovate/helm"]
+    },
+    {
+      "matchDatasources": ["github-releases", "github-tags"],
+      "addLabels": ["renovate/github-release"]
+    },
+    {
+      "matchManagers": ["github-actions"],
+      "addLabels": ["renovate/github-action"]
+    }
+  ]
+}
diff --git a/.github/renovate/packageRules.json5 b/.github/renovate/packageRules.json5
new file mode 100755
index 000000000..8ccd48652
--- /dev/null
+++ b/.github/renovate/packageRules.json5
@@ -0,0 +1,17 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "packageRules": [
+    {
+      "description": ["Loose versioning for non-semver packages"],
+      "matchDatasources": ["docker"],
+      "matchPackagePatterns": ["cross-seed", "plex"],
+      "versioning": "loose"
+    },
+    {
+      "description": ["Custom schedule for frequently updated packages"],
+      "matchDatasources": ["docker", "helm"],
+      "matchPackagePatterns": ["postgresql", "reloader"],
+      "schedule": ["on the first day of the month"]
+    }
+  ]
+}
diff --git a/.github/renovate/semanticCommits.json5 b/.github/renovate/semanticCommits.json5
new file mode 100755
index 000000000..0d88d8db6
--- /dev/null
+++ b/.github/renovate/semanticCommits.json5
@@ -0,0 +1,105 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "packageRules": [
+    {
+      "matchDatasources": ["docker"],
+      "matchUpdateTypes": ["major"],
+      "commitMessagePrefix": "feat(container)!: ",
+      "commitMessageTopic": "{{depName}}",
+      "commitMessageExtra": " ( {{currentVersion}} → {{newVersion}} )"
+    },
+    {
+      "matchDatasources": ["docker"],
+      "matchUpdateTypes": ["minor"],
+      "semanticCommitType": "feat",
+      "semanticCommitScope": "container",
+      "commitMessageTopic": "{{depName}}",
+      "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )"
+    },
+    {
+      "matchDatasources": ["docker"],
+      "matchUpdateTypes": ["patch"],
+      "semanticCommitType": "fix",
+      "semanticCommitScope": "container",
+      "commitMessageTopic": "{{depName}}",
+      "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )"
+    },
+    {
+      "matchDatasources": ["docker"],
+      "matchUpdateTypes": ["digest"],
+      "semanticCommitType": "chore",
+      "semanticCommitScope": "container",
+      "commitMessageTopic": "{{depName}}",
+      "commitMessageExtra": "( {{currentDigestShort}} → {{newDigestShort}} )"
+    },
+    {
+      "matchDatasources": ["helm"],
+      "matchUpdateTypes": ["major"],
+      "commitMessagePrefix": "feat(helm)!: ",
+      "commitMessageTopic": "{{depName}}",
+      "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )"
+    },
+    {
+      "matchDatasources": ["helm"],
+      "matchUpdateTypes": ["minor"],
+      "semanticCommitType": "feat",
+      "semanticCommitScope": "helm",
+      "commitMessageTopic": "{{depName}}",
+      "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )"
+    },
+    {
+      "matchDatasources": ["helm"],
+      "matchUpdateTypes": ["patch"],
+      "semanticCommitType": "fix",
+      "semanticCommitScope": "helm",
+      "commitMessageTopic": "{{depName}}",
+      "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )"
+    },
+    {
+      "matchDatasources": ["github-releases", "github-tags"],
+      "matchUpdateTypes": ["major"],
+      "commitMessagePrefix": "feat(github-release)!: ",
+      "commitMessageTopic": "{{depName}}",
+      "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )"
+    },
+    {
+      "matchDatasources": ["github-releases", "github-tags"],
+      "matchUpdateTypes": ["minor"],
+      "semanticCommitType": "feat",
+      "semanticCommitScope": "github-release",
+      "commitMessageTopic": "{{depName}}",
+      "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )"
+    },
+    {
+      "matchDatasources": ["github-releases", "github-tags"],
+      "matchUpdateTypes": ["patch"],
+      "semanticCommitType": "fix",
+      "semanticCommitScope": "github-release",
+      "commitMessageTopic": "{{depName}}",
+      "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )"
+    },
+    {
"matchManagers": ["github-actions"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": "feat(github-action)!: ", + "commitMessageTopic": "{{depName}}", + "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )" + }, + { + "matchManagers": ["github-actions"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": "feat", + "semanticCommitScope": "github-action", + "commitMessageTopic": "{{depName}}", + "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )" + }, + { + "matchManagers": ["github-actions"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": "fix", + "semanticCommitScope": "github-action", + "commitMessageTopic": "{{depName}}", + "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )" + } + ] +} diff --git a/.github/workflows/build-dev-container.yaml b/.github/workflows/build-dev-container.yaml new file mode 100755 index 000000000..5857de94d --- /dev/null +++ b/.github/workflows/build-dev-container.yaml @@ -0,0 +1,47 @@ +name: Docker Image CI + +on: + push: + branches: + - main + paths: + - '.devcontainer/Dockerfile' + +jobs: + build: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Image version + shell: bash + run: echo "VERSION=$(cat .devcontainer/version.txt)" >> $GITHUB_ENV + + - name: Lowercase repository owner + shell: bash + run: echo "LOWERCASE_REPO_OWNER=${GITHUB_REPOSITORY_OWNER,,}" >> $GITHUB_ENV + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to GitHub Container Registry (GHCR) + uses: docker/login-action@v3 + with: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + registry: ghcr.io + + - name: Build and Push Docker Image + uses: docker/build-push-action@v6 + with: + push: true + tags: | + ghcr.io/${{ env.LOWERCASE_REPO_OWNER }}/devcontainer:latest + ghcr.io/${{ env.LOWERCASE_REPO_OWNER }}/devcontainer:${{ env.VERSION }} + platforms: linux/amd64 + file: .devcontainer/Dockerfile + context: . 
diff --git a/.github/workflows/flux-diff.yaml b/.github/workflows/flux-diff.yaml
new file mode 100755
index 000000000..1877c56e6
--- /dev/null
+++ b/.github/workflows/flux-diff.yaml
@@ -0,0 +1,125 @@
+---
+# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json
+name: "Flux Diff"
+
+on:
+  pull_request:
+    branches: ["main"]
+    paths: ["kubernetes/**"]
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.number || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  changed-clusters:
+    name: Changed Clusters
+    runs-on: ubuntu-latest
+    outputs:
+      matrix: ${{ steps.changed-clusters.outputs.all_changed_and_modified_files }}
+    steps:
+      - name: Generate Token
+        uses: actions/create-github-app-token@v1
+        id: app-token
+        with:
+          app-id: "${{ secrets.BOT_APP_ID }}"
+          private-key: "${{ secrets.BOT_APP_PRIVATE_KEY }}"
+
+      - name: Checkout Default Branch
+        uses: actions/checkout@v4
+        with:
+          token: "${{ steps.app-token.outputs.token }}"
+          fetch-depth: 0
+
+      - name: Get Changed Clusters
+        id: changed-clusters
+        uses: tj-actions/changed-files@v45
+        with:
+          files: kubernetes/**
+          dir_names: true
+          dir_names_max_depth: 2
+          matrix: true
+
+      - name: List All Changed Clusters
+        run: echo "${{ steps.changed-clusters.outputs.all_changed_and_modified_files }}"
+
+  flux-diff:
+    name: Flux Diff
+    runs-on: ubuntu-latest
+    needs: ["changed-clusters"]
+    permissions:
+      pull-requests: write
+    strategy:
+      matrix:
+        paths: ${{ fromJSON(needs.changed-clusters.outputs.matrix) }}
+        resources: ["helmrelease", "kustomization"]
+      max-parallel: 4
+      fail-fast: false
+    steps:
+      - name: Generate Token
+        uses: actions/create-github-app-token@v1
+        id: app-token
+        with:
+          app-id: "${{ secrets.BOT_APP_ID }}"
+          private-key: "${{ secrets.BOT_APP_PRIVATE_KEY }}"
+
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          token: "${{ steps.app-token.outputs.token }}"
+          path: pull
+
+      - name: Checkout Default Branch
+        uses: actions/checkout@v4
+        with:
+          token: "${{ steps.app-token.outputs.token }}"
+          ref: "${{ github.event.repository.default_branch }}"
+          path: default
+
+      - name: Diff Resources
+        uses: docker://ghcr.io/allenporter/flux-local:v6.0.2
+        with:
+          args: >-
+            diff ${{ matrix.resources }}
+            --unified 6
+            --path /github/workspace/pull/${{ matrix.paths }}/flux
+            --path-orig /github/workspace/default/${{ matrix.paths }}/flux
+            --strip-attrs "helm.sh/chart,checksum/config,app.kubernetes.io/version,chart"
+            --limit-bytes 10000
+            --all-namespaces
+            --sources "home-kubernetes"
+            --output-file diff.patch
+
+      - name: Generate Diff
+        id: diff
+        run: |
+          echo "diff<<EOF" >> $GITHUB_OUTPUT
+          cat diff.patch >> $GITHUB_OUTPUT
+          echo "EOF" >> $GITHUB_OUTPUT
+          echo "### Diff" >> $GITHUB_STEP_SUMMARY
+          echo '```diff' >> $GITHUB_STEP_SUMMARY
+          cat diff.patch >> $GITHUB_STEP_SUMMARY
+          echo '```' >> $GITHUB_STEP_SUMMARY
+
+      - if: ${{ steps.diff.outputs.diff != '' }}
+        name: Add comment
+        uses: mshick/add-pr-comment@v2
+        with:
+          repo-token: "${{ steps.app-token.outputs.token }}"
+          message-id: "${{ github.event.pull_request.number }}/${{ matrix.paths }}/${{ matrix.resources }}"
+          message-failure: Diff was not successful
+          message: |
+            ```diff
+            ${{ steps.diff.outputs.diff }}
+            ```
+
+  # Summarize matrix https://github.community/t/status-check-for-a-matrix-jobs/127354/7
+  flux-diff-success:
+    if: ${{ always() }}
+    needs: ["flux-diff"]
+    name: Flux Diff Successful
+    runs-on: ubuntu-latest
+    steps:
+      - if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }}
+        name: Check matrix status
+        run: exit
1 diff --git a/.github/workflows/flux-hr-sync.yaml b/.github/workflows/flux-hr-sync.yaml new file mode 100755 index 000000000..52390d93b --- /dev/null +++ b/.github/workflows/flux-hr-sync.yaml @@ -0,0 +1,98 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Flux Helm Repository Sync" + +on: + workflow_dispatch: + inputs: + clusterName: + description: Cluster Name + default: main + required: true + helmRepoNamespace: + description: Helm Repository Namespace + default: flux-system + required: true + helmRepoName: + description: Helm Repository Name + required: true + pull_request: + branches: ["main"] + paths: ["kubernetes/**/helmrelease.yaml"] + +jobs: + sync: + name: Flux Helm Repository Sync + runs-on: ["gha-runner-scale-set"] + steps: + - name: Generate Token + uses: actions/create-github-app-token@v1 + id: app-token + with: + app-id: "${{ secrets.BOT_APP_ID }}" + private-key: "${{ secrets.BOT_APP_PRIVATE_KEY }}" + + - name: Checkout + uses: actions/checkout@v4 + with: + token: "${{ steps.app-token.outputs.token }}" + fetch-depth: 0 + + - name: Setup Homebrew + uses: Homebrew/actions/setup-homebrew@master + + - name: Setup Workflow Tools + shell: bash + run: brew install fluxcd/tap/flux yq + + - name: Write kubeconfig + id: kubeconfig + uses: timheuer/base64-to-file@v1 + with: + encodedString: "${{ secrets.KUBECONFIG }}" + fileName: kubeconfig + + - if: ${{ github.event.inputs.clusterName == '' && github.event.inputs.helmRepoNamespace == '' && github.event.inputs.helmRepoName == '' }} + name: Get Changed Files + id: changed-files + uses: tj-actions/changed-files@v45 + with: + files: kubernetes/**/helmrelease.yaml + safe_output: false + + - if: ${{ github.event.inputs.clusterName == '' && github.event.inputs.helmRepoNamespace == '' && github.event.inputs.helmRepoName == '' }} + name: List All Changed Files + run: echo "${{ steps.changed-files.outputs.all_changed_and_modified_files }}" + + - if: ${{ github.event.inputs.clusterName == '' && github.event.inputs.helmRepoNamespace == '' && github.event.inputs.helmRepoName == '' }} + name: Sync Helm Repository + env: + KUBECONFIG: "${{ steps.kubeconfig.outputs.filePath }}" + shell: bash + run: | + declare -a repos=() + for f in ${{ steps.changed-files.outputs.all_changed_and_modified_files }}; do + cluster_name=$(echo "${f}" | awk -F'/' '{print $2}') + repo_namespace="$(yq -r '.spec.chart.spec.sourceRef.namespace' "${f}")" + repo_name="$(yq -r '.spec.chart.spec.sourceRef.name' "${f}")" + repos+=("${cluster_name}:${repo_namespace}:${repo_name}") + done + mapfile -t repos < <(printf "%s\n" "${repos[@]}" | sort -u) + for r in "${repos[@]}"; do + IFS=':' read -r cluster_name repo_namespace repo_name <<< "${r}" + flux \ + --context admin@${cluster_name} \ + --namespace ${repo_namespace} \ + reconcile source helm ${repo_name} + done + + - if: ${{ github.event.inputs.clusterName != '' && github.event.inputs.helmRepoNamespace != '' && github.event.inputs.helmRepoName != '' }} + name: Sync Helm Repository + env: + KUBECONFIG: ${{ steps.kubeconfig.outputs.filePath }} + shell: bash + run: | + flux \ + --context ${{ github.event.inputs.clusterName }} \ + --namespace ${{ github.event.inputs.helmRepoNamespace }} \ + reconcile source helm ${{ github.event.inputs.helmRepoName }} diff --git a/.github/workflows/flux-image-test.yaml b/.github/workflows/flux-image-test.yaml new file mode 100755 index 000000000..e00efac8d --- /dev/null +++ b/.github/workflows/flux-image-test.yaml @@ -0,0 +1,152 @@ +--- +# 
yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Flux Image Test" + +on: + pull_request: + branches: ["main"] + paths: ["kubernetes/**"] + +concurrency: + group: ${{ github.workflow }}-${{ github.event.number || github.ref }} + cancel-in-progress: true + +jobs: + changed-clusters: + name: Changed Clusters + runs-on: ubuntu-latest + outputs: + matrix: ${{ steps.changed-clusters.outputs.all_changed_and_modified_files }} + steps: + - name: Generate Token + uses: actions/create-github-app-token@v1 + id: app-token + with: + app-id: "${{ secrets.BOT_APP_ID }}" + private-key: "${{ secrets.BOT_APP_PRIVATE_KEY }}" + + - name: Checkout + uses: actions/checkout@v4 + with: + token: "${{ steps.app-token.outputs.token }}" + fetch-depth: 0 + + - name: Get Changed Clusters + id: changed-clusters + uses: tj-actions/changed-files@v45 + with: + files: kubernetes/** + dir_names: true + dir_names_max_depth: 2 + matrix: true + + - name: List All Changed Clusters + run: echo "${{ steps.changed-clusters.outputs.all_changed_and_modified_files }}" + + extract-images: + name: Extract Images + runs-on: ubuntu-latest + needs: ["changed-clusters"] + permissions: + pull-requests: write + strategy: + matrix: + paths: ${{ fromJSON(needs.changed-clusters.outputs.matrix) }} + max-parallel: 4 + fail-fast: false + outputs: + matrix: ${{ steps.extract-images.outputs.images }} + steps: + - name: Generate Token + uses: actions/create-github-app-token@v1 + id: app-token + with: + app-id: "${{ secrets.BOT_APP_ID }}" + private-key: "${{ secrets.BOT_APP_PRIVATE_KEY }}" + + - name: Setup Homebrew + uses: Homebrew/actions/setup-homebrew@master + + - name: Setup Workflow Tools + shell: bash + run: brew install jo yq + + - name: Checkout Default Branch + uses: actions/checkout@v4 + with: + token: "${{ steps.app-token.outputs.token }}" + ref: "${{ github.event.repository.default_branch }}" + path: default + + - name: Checkout Pull Request Branch + uses: actions/checkout@v4 + with: + token: "${{ steps.app-token.outputs.token }}" + path: pull + + - name: Gather Images in Default Branch + uses: docker://ghcr.io/allenporter/flux-local:v6.0.2 + with: + args: >- + get cluster + --path /github/workspace/default/${{ matrix.paths }}/flux + --enable-images + --output yaml + --output-file default.yaml + + - name: Gather Images in Pull Request Branch + uses: docker://ghcr.io/allenporter/flux-local:v6.0.2 + with: + args: >- + get cluster + --path /github/workspace/pull/${{ matrix.paths }}/flux + --enable-images + --output yaml + --output-file pull.yaml + + - name: Filter Default Branch Results + shell: bash + run: | + yq -r '[.. | .images? | select(. != null)] | flatten | sort | unique | .[]' \ + default.yaml > default.txt + + - name: Filter Pull Request Branch Results + shell: bash + run: | + yq -r '[.. | .images? | select(. 
!= null)] | flatten | sort | unique | .[]' \ + pull.yaml > pull.txt + + - name: Compare Default and Pull Request Images + id: extract-images + shell: bash + run: | + images=$(jo -a $(grep -vf default.txt pull.txt)) + echo "images=${images}" >> $GITHUB_OUTPUT + echo "${images}" + echo "### Images" >> $GITHUB_STEP_SUMMARY + echo "${images}" | jq -r 'to_entries[] | "* \(.value)"' >> $GITHUB_STEP_SUMMARY + + test-images: + if: ${{ needs.extract-images.outputs.matrix != '[]' }} + name: Test images + runs-on: ubuntu-latest + needs: ["extract-images"] + strategy: + matrix: + images: ${{ fromJSON(needs.extract-images.outputs.matrix) }} + max-parallel: 4 + fail-fast: false + steps: + - name: Inspect Image + run: docker buildx imagetools inspect ${{ matrix.images }} + + # Summarize matrix https://github.community/t/status-check-for-a-matrix-jobs/127354/7 + test-images-success: + if: ${{ always() }} + needs: ["test-images"] + name: Test Images Successful + runs-on: ubuntu-latest + steps: + - if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }} + name: Check matrix status + run: exit 1 diff --git a/.github/workflows/label-sync.yaml b/.github/workflows/label-sync.yaml new file mode 100755 index 000000000..d1eab2683 --- /dev/null +++ b/.github/workflows/label-sync.yaml @@ -0,0 +1,31 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Label Sync" + +on: + workflow_dispatch: + push: + branches: ["main"] + paths: [".github/labels.yaml"] + schedule: + - cron: "0 0 * * *" # Every day at midnight + +permissions: + issues: write + contents: read + +jobs: + label-sync: + name: Label Sync + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + sparse-checkout: .github/labels.yaml + + - name: Sync Labels + uses: EndBug/label-sync@v2 + with: + config-file: .github/labels.yaml + delete-other-labels: true diff --git a/.github/workflows/labeler.yaml b/.github/workflows/labeler.yaml new file mode 100755 index 000000000..d658c1d96 --- /dev/null +++ b/.github/workflows/labeler.yaml @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Labeler" + +on: + workflow_dispatch: + pull_request_target: + branches: ["main"] + +jobs: + labeler: + name: Labeler + runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: write + steps: + - name: Labeler + uses: actions/labeler@v5 + with: + configuration-path: .github/labeler.yaml diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100755 index 000000000..ab809acf3 --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,52 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Release" + +on: + workflow_dispatch: + schedule: + - cron: "0 0 1 * *" # 1st of every month at midnight + +jobs: + release: + name: Release + runs-on: ubuntu-latest + steps: + - name: Generate Token + uses: actions/create-github-app-token@v1 + id: app-token + with: + app-id: "${{ secrets.BOT_APP_ID }}" + private-key: "${{ secrets.BOT_APP_PRIVATE_KEY }}" + + - name: Checkout + uses: actions/checkout@v4 + with: + token: "${{ steps.app-token.outputs.token }}" + + - name: Create Release + shell: bash + env: + GITHUB_TOKEN: "${{ steps.app-token.outputs.token }}" + run: | + # Retrieve previous release tag + previous_tag="$(gh release list --limit 1 | awk '{ print $1 }')" + previous_major="${previous_tag%%\.*}" + 
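+          # previous_tag has the form <year>.<month>.<patch>; the lines below split
+          # it into its components with shell parameter expansion.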
previous_minor="${previous_tag#*.}"
+          previous_minor="${previous_minor%.*}"
+          previous_patch="${previous_tag##*.}"
+          # Determine next release tag
+          next_major_minor="$(date +'%Y').$(date +'%-m')"
+          if [[ "${previous_major}.${previous_minor}" == "${next_major_minor}" ]]; then
+              echo "Month release already exists for year, incrementing patch number by 1"
+              next_patch="$((previous_patch + 1))"
+          else
+              echo "Month release does not exist for year, setting patch number to 0"
+              next_patch="0"
+          fi
+          # Create release
+          release_tag="${next_major_minor}.${next_patch}"
+          gh release create "${release_tag}" \
+              --repo="${GITHUB_REPOSITORY}" \
+              --title="${release_tag}" \
+              --generate-notes
diff --git a/.github/workflows/renovate.yaml b/.github/workflows/renovate.yaml
new file mode 100755
index 000000000..622845727
--- /dev/null
+++ b/.github/workflows/renovate.yaml
@@ -0,0 +1,63 @@
+---
+# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json
+name: "Renovate"
+
+on:
+  workflow_dispatch:
+    inputs:
+      dryRun:
+        description: Dry Run
+        default: "false"
+        required: false
+      logLevel:
+        description: Log Level
+        default: debug
+        required: false
+      version:
+        description: Renovate version
+        default: latest
+        required: false
+  schedule:
+    - cron: "0 * * * *" # Every hour
+  push:
+    branches: ["main"]
+    paths:
+      - .github/renovate.json5
+      - .github/renovate/**.json5
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.number || github.ref }}
+  cancel-in-progress: true
+
+env:
+  LOG_LEVEL: "${{ inputs.logLevel || 'debug' }}"
+  RENOVATE_AUTODISCOVER: true
+  RENOVATE_AUTODISCOVER_FILTER: "${{ github.repository }}"
+  RENOVATE_DRY_RUN: "${{ inputs.dryRun == 'true' }}"
+  RENOVATE_PLATFORM: github
+  RENOVATE_PLATFORM_COMMIT: true
+  WORKFLOW_RENOVATE_VERSION: "${{ inputs.version || 'latest' }}"
+
+jobs:
+  renovate:
+    name: Renovate
+    runs-on: ubuntu-latest
+    steps:
+      - name: Generate Token
+        uses: actions/create-github-app-token@v1
+        id: app-token
+        with:
+          app-id: "${{ secrets.BOT_APP_ID }}"
+          private-key: "${{ secrets.BOT_APP_PRIVATE_KEY }}"
+
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          token: "${{ steps.app-token.outputs.token }}"
+
+      - name: Renovate
+        uses: renovatebot/github-action@v41.0.4
+        with:
+          configurationFile: .github/renovate.json5
+          token: "${{ steps.app-token.outputs.token }}"
+          renovate-version: "${{ env.WORKFLOW_RENOVATE_VERSION }}"
diff --git a/.gitignore b/.gitignore
new file mode 100755
index 000000000..fadd74e4a
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,11 @@
+.private/
+.task/
+scripts/
+*.secret.env
+*.secret.yaml
+*.key
+.DS_Store
+Thumbs.db
+.decrypted~*
+kubeconfig
+talosconfig
diff --git a/.sops.yaml b/.sops.yaml
new file mode 100755
index 000000000..1dfc27c9c
--- /dev/null
+++ b/.sops.yaml
@@ -0,0 +1,26 @@
+---
+creation_rules:
+  # IMPORTANT: Keep this rule first
+  - path_regex: talos/.+\.secret(\.sops)?\.ya?ml
+    input_type: yaml
+    encrypted_regex: ^(token|crt|key|id|secret|secretboxEncryptionSecret|ca|urls|extraManifests)$
+    mac_only_encrypted: true
+    key_groups:
+      - age:
+          - age1frhtpr8u3u99pvcuq5mjevxdq9agjfpkd8fjtnpj9qymzh5v845q53f37d
+  - path_regex: kubernetes/.+\.secret(\.sops)?\.ya?ml
+    input_type: yaml
+    encrypted_regex: ^(data|stringData)$
+    mac_only_encrypted: true
+    key_groups:
+      - age:
+          - age1frhtpr8u3u99pvcuq5mjevxdq9agjfpkd8fjtnpj9qymzh5v845q53f37d
+  - path_regex: kubernetes/.+\.secret(\.sops)?\.env
+    input_type: env
+    mac_only_encrypted: true
+    key_groups:
+      - age:
+          -
age1frhtpr8u3u99pvcuq5mjevxdq9agjfpkd8fjtnpj9qymzh5v845q53f37d +stores: + yaml: + indent: 2 diff --git a/.taskfiles/VolSync/Taskfile.yaml b/.taskfiles/VolSync/Taskfile.yaml new file mode 100755 index 000000000..987bf04b5 --- /dev/null +++ b/.taskfiles/VolSync/Taskfile.yaml @@ -0,0 +1,221 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: "3" + +# This taskfile is used to manage certain VolSync tasks for a given application, limitations are described below. +# 1. Fluxtomization, HelmRelease, PVC, ReplicationSource all have the same name (e.g. plex) +# 2. ReplicationSource and ReplicationDestination are a Restic repository +# 3. Applications are deployed as either a Kubernetes Deployment or StatefulSet +# 4. Each application only has one PVC that is being replicated + +x-env: &env + app: "{{.app}}" + claim: "{{.claim}}" + controller: "{{.controller}}" + job: "{{.job}}" + ns: "{{.ns}}" + pgid: "{{.pgid}}" + previous: "{{.previous}}" + puid: "{{.puid}}" + +vars: + VOLSYNC_SCRIPTS_DIR: "{{.ROOT_DIR}}/.taskfiles/VolSync/scripts" + VOLSYNC_TEMPLATES_DIR: "{{.ROOT_DIR}}/.taskfiles/VolSync/templates" + +tasks: + + suspend-controller: + desc: Suspends Volsync + summary: | + Args: + cluster: Cluster to run command against (required) + cmds: + - flux --context {{.cluster}} suspend ks volsync + - flux --context {{.cluster}} suspend hr -n volsync-system volsync + - kubectl --context {{.cluster}} -n volsync-system scale deployment volsync --replicas 0 + env: *env + requires: + vars: ["cluster"] + + list: + desc: List snapshots for an application + summary: | + Args: + cluster: Cluster to run command against (required) + ns: Namespace the PVC is in (default: default) + app: Application to list snapshots for (required) + cmds: + - envsubst < <(cat {{.VOLSYNC_TEMPLATES_DIR}}/list.tmpl.yaml) | kubectl --context {{.cluster}} apply -f - + - bash {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh {{.job}} {{.ns}} {{.cluster}} + - kubectl --context {{.cluster}} -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=1m + - kubectl --context {{.cluster}} -n {{.ns}} logs job/{{.job}} --container main + - kubectl --context {{.cluster}} -n {{.ns}} delete job {{.job}} + env: *env + requires: + vars: ["cluster", "app"] + vars: + ns: '{{.ns | default "default"}}' + job: volsync-list-{{.app}} + preconditions: + - test -f {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh + - test -f {{.VOLSYNC_TEMPLATES_DIR}}/list.tmpl.yaml + silent: true + + unlock: + desc: Unlock a Restic repository for an application + summary: | + Args: + cluster: Cluster to run command against (required) + ns: Namespace the PVC is in (default: default) + app: Application to unlock (required) + cmds: + - envsubst < <(cat {{.VOLSYNC_TEMPLATES_DIR}}/unlock.tmpl.yaml) | kubectl --context {{.cluster}} apply -f - + - bash {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh {{.job}} {{.ns}} {{.cluster}} + - kubectl --context {{.cluster}} -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=1m + - kubectl --context {{.cluster}} -n {{.ns}} logs job/{{.job}} --container minio + - kubectl --context {{.cluster}} -n {{.ns}} logs job/{{.job}} --container r2 + - kubectl --context {{.cluster}} -n {{.ns}} delete job {{.job}} + env: *env + requires: + vars: ["cluster", "app"] + vars: + ns: '{{.ns | default "default"}}' + job: volsync-unlock-{{.app}} + preconditions: + - test -f {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh + - test -f {{.VOLSYNC_TEMPLATES_DIR}}/unlock.tmpl.yaml + silent: true + + # To run backup jobs in parallel for all 
replicationsources: + # - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:snapshot app=$0 ns=$1' + snapshot: + desc: Snapshot a PVC for an application + summary: | + Args: + cluster: Cluster to run command against (required) + ns: Namespace the PVC is in (default: default) + app: Application to snapshot (required) + cmds: + - kubectl --context {{.cluster}} -n {{.ns}} patch replicationsources {{.app}} --type merge -p '{"spec":{"trigger":{"manual":"{{.now}}"}}}' + - bash {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh {{.job}} {{.ns}} {{.cluster}} + - kubectl --context {{.cluster}} -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=120m + env: *env + requires: + vars: ["cluster", "app"] + vars: + now: '{{now | date "150405"}}' + ns: '{{.ns | default "default"}}' + job: volsync-src-{{.app}} + controller: + sh: true && {{.VOLSYNC_SCRIPTS_DIR}}/which-controller.sh {{.app}} {{.ns}} {{.cluster}} + preconditions: + - test -f {{.VOLSYNC_SCRIPTS_DIR}}/which-controller.sh + - test -f {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh + - kubectl --context {{.cluster}} -n {{.ns}} get replicationsources {{.app}} + + # To run restore jobs in parallel for all replicationdestinations: + # - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:restore app=$0 ns=$1' + restore: + desc: Restore a PVC for an application + summary: | + Args: + cluster: Cluster to run command against (required) + ns: Namespace the PVC is in (default: default) + app: Application to restore (required) + previous: Previous number of snapshots to restore (default: 2) + cmds: + - { task: .suspend, vars: *env } + - { task: .wipe, vars: *env } + - { task: .restore, vars: *env } + - { task: .resume, vars: *env } + env: *env + requires: + vars: ["cluster", "app"] + vars: + ns: '{{.ns | default "default"}}' + previous: '{{.previous | default 2}}' + controller: + sh: "{{.VOLSYNC_SCRIPTS_DIR}}/which-controller.sh {{.app}} {{.ns}}" + claim: + sh: kubectl --context {{.cluster}} -n {{.ns}} get replicationsources/{{.app}} -o jsonpath="{.spec.sourcePVC}" + puid: + sh: kubectl --context {{.cluster}} -n {{.ns}} get replicationsources/{{.app}} -o jsonpath="{.spec.restic.moverSecurityContext.runAsUser}" + pgid: + sh: kubectl --context {{.cluster}} -n {{.ns}} get replicationsources/{{.app}} -o jsonpath="{.spec.restic.moverSecurityContext.runAsGroup}" + preconditions: + - test -f {{.VOLSYNC_SCRIPTS_DIR}}/which-controller.sh + - test -f {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh + - test -f {{.VOLSYNC_TEMPLATES_DIR}}/replicationdestination.tmpl.yaml + - test -f {{.VOLSYNC_TEMPLATES_DIR}}/wipe.tmpl.yaml + + cleanup: + desc: Delete volume populator PVCs in all namespaces + summary: | + Args: + cluster: Cluster to run command against (required) + cmds: + - for: { var: dest } + cmd: | + {{- $items := (split "/" .ITEM) }} + kubectl --context {{.cluster}} delete pvc -n {{ $items._0 }} {{ $items._1 }} + - for: { var: cache } + cmd: | + {{- $items := (split "/" .ITEM) }} + kubectl --context {{.cluster}} delete pvc -n {{ $items._0 }} {{ $items._1 }} + - for: { var: snaps } + cmd: | + {{- $items := (split "/" .ITEM) }} + kubectl --context {{.cluster}} delete volumesnapshot -n {{ $items._0 }} {{ $items._1 }} + env: *env + requires: + vars: ["cluster"] + vars: + dest: + sh: kubectl --context {{.cluster}} get pvc --all-namespaces --no-headers | grep "dst-dest" | awk '{print $1 "/" $2}' + cache: + sh: kubectl 
--context {{.cluster}} get pvc --all-namespaces --no-headers | grep "dst-cache" | awk '{print $1 "/" $2}'
+      snaps:
+        sh: kubectl --context {{.cluster}} get volumesnapshot --all-namespaces --no-headers | grep "dst-dest" | awk '{print $1 "/" $2}'
+
+  # Suspend the Flux ks and hr
+  .suspend:
+    internal: true
+    cmds:
+      - flux --context {{.cluster}} -n flux-system suspend kustomization {{.app}}
+      - flux --context {{.cluster}} -n {{.ns}} suspend helmrelease {{.app}}
+      - kubectl --context {{.cluster}} -n {{.ns}} scale {{.controller}} --replicas 0
+      - kubectl --context {{.cluster}} -n {{.ns}} wait pod --for delete --selector="app.kubernetes.io/name={{.app}}" --timeout=2m
+    env: *env
+
+  # Wipe the PVC of all data
+  .wipe:
+    internal: true
+    cmds:
+      - envsubst < <(cat {{.VOLSYNC_TEMPLATES_DIR}}/wipe.tmpl.yaml) | kubectl --context {{.cluster}} apply -f -
+      - bash {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh {{.job}} {{.ns}} {{.cluster}}
+      - kubectl --context {{.cluster}} -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=120m
+      - kubectl --context {{.cluster}} -n {{.ns}} logs job/{{.job}} --container main
+      - kubectl --context {{.cluster}} -n {{.ns}} delete job {{.job}}
+    env: *env
+    vars:
+      job: volsync-wipe-{{.app}}
+
+  # Create VolSync replicationdestination CR to restore data
+  .restore:
+    internal: true
+    cmds:
+      - envsubst < <(cat {{.VOLSYNC_TEMPLATES_DIR}}/replicationdestination.tmpl.yaml) | kubectl --context {{.cluster}} apply -f -
+      - bash {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh {{.job}} {{.ns}} {{.cluster}}
+      - kubectl --context {{.cluster}} -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=120m
+      - kubectl --context {{.cluster}} -n {{.ns}} delete replicationdestination {{.app}}
+    env: *env
+    vars:
+      job: volsync-dst-{{.app}}
+
+  # Resume Flux ks and hr
+  .resume:
+    internal: true
+    cmds:
+      - flux --context {{.cluster}} -n {{.ns}} resume helmrelease {{.app}}
+      - flux --context {{.cluster}} -n flux-system resume kustomization {{.app}}
+    env: *env
diff --git a/.taskfiles/VolSync/templates/list.tmpl.yaml b/.taskfiles/VolSync/templates/list.tmpl.yaml
new file mode 100755
index 000000000..201e0ea24
--- /dev/null
+++ b/.taskfiles/VolSync/templates/list.tmpl.yaml
@@ -0,0 +1,20 @@
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: ${job}
+  namespace: ${ns}
+spec:
+  ttlSecondsAfterFinished: 3600
+  template:
+    spec:
+      automountServiceAccountToken: false
+      restartPolicy: OnFailure
+      containers:
+        - name: main
+          image: docker.io/restic/restic:0.17.3
+          args: ["snapshots"]
+          envFrom:
+            - secretRef:
+                name: ${app}-volsync-secret
+          resources: {}
diff --git a/.taskfiles/VolSync/templates/replicationdestination.tmpl.yaml b/.taskfiles/VolSync/templates/replicationdestination.tmpl.yaml
new file mode 100755
index 000000000..b1c7d4360
--- /dev/null
+++ b/.taskfiles/VolSync/templates/replicationdestination.tmpl.yaml
@@ -0,0 +1,29 @@
+---
+apiVersion: volsync.backube/v1alpha1
+kind: ReplicationDestination
+metadata:
+  name: ${app}
+  namespace: ${ns}
+spec:
+  trigger:
+    manual: restore-once
+  restic:
+    repository: ${app}-volsync-secret
+    destinationPVC: ${claim}
+    copyMethod: Direct
+    accessModes: ["ReadWriteOnce"]
+    # IMPORTANT NOTE:
+    #   Set to the last X number of snapshots to restore from
+    previous: ${previous}
+    # OR:
+    # IMPORTANT NOTE:
+    #   On bootstrap set `restoreAsOf` to the time the old cluster was destroyed.
+    #   This will essentially prevent volsync from trying to restore a backup
+    #   from an application that started with default data in the PVC.
+ # Do not restore snapshots made after the following RFC3339 Timestamp. + # date --rfc-3339=seconds (--utc) + # restoreAsOf: "2022-12-10T16:00:00-05:00" + moverSecurityContext: + runAsUser: ${puid} + runAsGroup: ${pgid} + fsGroup: ${pgid} diff --git a/.taskfiles/VolSync/templates/unlock.tmpl.yaml b/.taskfiles/VolSync/templates/unlock.tmpl.yaml new file mode 100755 index 000000000..bf2bb9e89 --- /dev/null +++ b/.taskfiles/VolSync/templates/unlock.tmpl.yaml @@ -0,0 +1,27 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: ${job} + namespace: ${ns} +spec: + ttlSecondsAfterFinished: 3600 + template: + spec: + automountServiceAccountToken: false + restartPolicy: OnFailure + containers: + - name: minio + image: docker.io/restic/restic:0.17.3 + args: ["unlock", "--remove-all"] + envFrom: + - secretRef: + name: ${app}-volsync-secret + resources: {} + - name: r2 + image: docker.io/restic/restic:0.17.3 + args: ["unlock", "--remove-all"] + envFrom: + - secretRef: + name: ${app}-volsync-r2-secret + resources: {} diff --git a/.taskfiles/VolSync/templates/wipe.tmpl.yaml b/.taskfiles/VolSync/templates/wipe.tmpl.yaml new file mode 100755 index 000000000..ffc1cc75a --- /dev/null +++ b/.taskfiles/VolSync/templates/wipe.tmpl.yaml @@ -0,0 +1,26 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: ${job} + namespace: ${ns} +spec: + ttlSecondsAfterFinished: 3600 + template: + spec: + automountServiceAccountToken: false + restartPolicy: OnFailure + containers: + - name: main + image: docker.io/library/alpine:latest + command: ["/bin/sh", "-c", "cd /config; find . -delete"] + volumeMounts: + - name: config + mountPath: /config + securityContext: + privileged: true + resources: {} + volumes: + - name: config + persistentVolumeClaim: + claimName: ${claim} diff --git a/.taskfiles/bootstrap/Taskfile.yaml b/.taskfiles/bootstrap/Taskfile.yaml new file mode 100755 index 000000000..7f3a920cf --- /dev/null +++ b/.taskfiles/bootstrap/Taskfile.yaml @@ -0,0 +1,90 @@ + +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: '3' + +vars: + KUBERNETES_VERSION: + sh: yq 'select(document_index == 1).spec.postBuild.substitute.KUBERNETES_VERSION' {{.CLUSTER_DIR}}/apps/system-upgrade/system-upgrade-controller/ks.yaml + TALOS_CONTROLLER: + sh: talosctl config info --output json | jq --raw-output '.endpoints[]' | shuf -n 1 + TALOS_SCHEMATIC_ID: + sh: yq 'select(document_index == 1).spec.postBuild.substitute.TALOS_SCHEMATIC_ID' {{.CLUSTER_DIR}}/apps/system-upgrade/system-upgrade-controller/ks.yaml + TALOS_VERSION: + sh: yq 'select(document_index == 1).spec.postBuild.substitute.TALOS_VERSION' {{.CLUSTER_DIR}}/apps/system-upgrade/system-upgrade-controller/ks.yaml + +tasks: + + kubernetes: + desc: Bootstrap a Talos Kubernetes cluster backed by flux and sops + prompt: Bootstrap a Talos Kubernetes cluster ... continue? 
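+    # The steps below run in order: apply the Talos config, bootstrap etcd, fetch the
+    # kubeconfig, install core apps via helmfile, then hand reconciliation over to Flux.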
+ summary: | + CLUSTER: Cluster to run command against (default: main) + vars: &vars + CLUSTER: '{{.CLUSTER}}' + cmds: + - { task: apply-config, vars: *vars } + - { task: etcd, vars: *vars } + - { task: conf, vars: *vars } + - { task: apps, vars: *vars } + - { task: flux, vars: *vars } + requires: + vars: ['CLUSTER'] + preconditions: + - talosctl config info &>/dev/null + - test -f {{.CLUSTER_DIR}}/talosconfig + apply-config: + internal: true + cmd: | + export TALOS_VERSION={{.TALOS_VERSION}} TALOS_SCHEMATIC_ID={{.TALOS_SCHEMATIC_ID}} KUBERNETES_VERSION={{.KUBERNETES_VERSION}} + sops --decrypt {{.CLUSTER_DIR}}/bootstrap/talos/assets/{{.HOSTNAME}}.secret.sops.yaml | \ + /usr/local/bin/envsubst | \ + talosctl apply-config --insecure --nodes {{.HOSTNAME}} --file /dev/stdin + env: *vars + requires: + vars: ['CLUSTER', 'HOSTNAME'] + preconditions: + - test -f {{.CLUSTER_DIR}}/talosconfig + - test -f {{.CLUSTER_DIR}}/bootstrap/talos/assets/{{.HOSTNAME}}.secret.sops.yaml + + etcd: + internal: true + cmd: until talosctl --nodes {{.TALOS_CONTROLLER}} bootstrap; do sleep 10; done + preconditions: + - test -f {{.CLUSTER_DIR}}/talosconfig + - talosctl config info &>/dev/null + + conf: + internal: true + cmd: talosctl kubeconfig --nodes {{.TALOS_CONTROLLER}} --force --force-context-name {{.CLUSTER}} {{.CLUSTER_DIR}} + preconditions: + - test -f {{.CLUSTER_DIR}}/talosconfig + - talosctl config info &>/dev/null + + apps: + internal: true + cmds: + - until kubectl wait --for=condition=Ready=False nodes --all --timeout=10m; do sleep 10; done + - helmfile --quiet --file {{.CLUSTER_DIR}}/bootstrap/helmfile.yaml apply --skip-diff-on-install --suppress-diff + - until kubectl wait --for=condition=Ready nodes --all --timeout=10m; do sleep 10; done + preconditions: + - test -f {{.CLUSTER_DIR}}/talosconfig + - test -f {{.CLUSTER_DIR}}/bootstrap/helmfile.yaml + - talosctl config info &>/dev/null + + flux: + internal: true + cmds: + - kubectl apply --server-side --kustomize {{.CLUSTER_DIR}}/bootstrap/flux + - sops --decrypt {{.CLUSTER_DIR}}/bootstrap/flux/age-key.secret.sops.yaml | kubectl apply --server-side --filename - + - sops --decrypt {{.CLUSTER_DIR}}/bootstrap/flux/deploy-key.secret.sops.yaml | kubectl apply --server-side --filename - + - sops --decrypt {{.CLUSTER_DIR}}/flux/vars/cluster-secrets.secret.sops.yaml | kubectl apply --server-side --filename - + - kubectl apply --server-side --kustomize {{.CLUSTER_DIR}}/flux/config + preconditions: + - test -f {{.ROOT_DIR}}/age.key + - test -f {{.CLUSTER_DIR}}/bootstrap/flux/age-key.secret.sops.yaml + - test -f {{.CLUSTER_DIR}}/bootstrap/flux/deploy-key.secret.sops.yaml + - test -f {{.CLUSTER_DIR}}/flux/vars/cluster-secrets.secret.sops.yaml + - sops filestatus {{.CLUSTER_DIR}}/bootstrap/flux/age-key.secret.sops.yaml | jq --exit-status '.encrypted' + - sops filestatus {{.CLUSTER_DIR}}/bootstrap/flux/deploy-key.secret.sops.yaml | jq --exit-status '.encrypted' + - sops filestatus {{.CLUSTER_DIR}}/flux/vars/cluster-secrets.secret.sops.yaml | jq --exit-status '.encrypted' diff --git a/LICENSE b/LICENSE new file mode 100755 index 000000000..d919bd04f --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Georgi Panov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the 
Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md new file mode 100755 index 000000000..069fecc23 --- /dev/null +++ b/README.md @@ -0,0 +1 @@ +# home-ops \ No newline at end of file diff --git a/Taskfile.yaml b/Taskfile.yaml new file mode 100755 index 000000000..c7ee45817 --- /dev/null +++ b/Taskfile.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: '3' + +vars: + CLUSTER: '{{.CLUSTER | default "main"}}' + CLUSTER_DIR: '{{.ROOT_DIR}}/kubernetes/{{.CLUSTER}}' + +env: + KUBECONFIG: '{{.CLUSTER_DIR}}/kubeconfig' + TALOSCONFIG: '{{.CLUSTER_DIR}}/talosconfig' + SOPS_AGE_KEY_FILE: '{{.ROOT_DIR}}/age.key' + +includes: + bootstrap: .taskfiles/bootstrap + volsync: .taskfiles/VolSync/Taskfile.yaml + +tasks: + + default: task --list diff --git a/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app/externalsecret.yaml b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app/externalsecret.yaml new file mode 100755 index 000000000..986ef1f80 --- /dev/null +++ b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app/externalsecret.yaml @@ -0,0 +1,24 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: actions-runner-controller-auth +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: actions-runner-controller-auth-secret + template: + engineVersion: v2 + data: + ACTION_RUNNER_CONTROLLER_GITHUB_APP_ID: |- + {{ .ACTION_RUNNER_CONTROLLER_GITHUB_APP_ID }} + ACTION_RUNNER_CONTROLLER_GITHUB_INSTALLATION_ID: |- + {{ .ACTION_RUNNER_CONTROLLER_GITHUB_INSTALLATION_ID }} + ACTION_RUNNER_CONTROLLER_GITHUB_PRIVATE_KEY: |- + {{ .ACTION_RUNNER_CONTROLLER_GITHUB_PRIVATE_KEY }} + dataFrom: + - extract: + key: secrets/actions-runner-controller diff --git a/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app/helmrelease.yaml b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app/helmrelease.yaml new file mode 100755 index 000000000..9979dd8dc --- /dev/null +++ b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app/helmrelease.yaml @@ -0,0 +1,28 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: gha-runner-scale-set-controller +spec: + interval: 30m + chart: + spec: + chart: gha-runner-scale-set-controller + version: 0.9.3 + sourceRef: + kind: HelmRepository + name: actions-runner-controller + namespace: flux-system + install: + crds: CreateReplace + remediation: + retries: 3 + upgrade: + 
cleanupOnFail: true + crds: CreateReplace + remediation: + strategy: rollback + retries: 3 + values: + fullnameOverride: gha-runner-scale-set-controller diff --git a/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app/kustomization.yaml b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app/kustomization.yaml new file mode 100755 index 000000000..4eed917b9 --- /dev/null +++ b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/ks.yaml b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/ks.yaml new file mode 100755 index 000000000..28039c9a0 --- /dev/null +++ b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/ks.yaml @@ -0,0 +1,22 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app gha-runner-scale-set-controller + namespace: flux-system +spec: + targetNamespace: actions-runner-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/app/helmrelease.yaml b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/app/helmrelease.yaml new file mode 100755 index 000000000..d6c7afc65 --- /dev/null +++ b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/app/helmrelease.yaml @@ -0,0 +1,57 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: gha-runner-scale-set +spec: + interval: 30m + chart: + spec: + chart: gha-runner-scale-set + version: 0.9.3 + sourceRef: + kind: HelmRepository + name: actions-runner-controller + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + dependsOn: + - name: gha-runner-scale-set-controller + namespace: actions-runner-system + valuesFrom: + - targetPath: githubConfigSecret.github_app_id + kind: Secret + name: actions-runner-controller-auth-secret + valuesKey: ACTION_RUNNER_CONTROLLER_GITHUB_APP_ID + - targetPath: githubConfigSecret.github_app_installation_id + kind: Secret + name: actions-runner-controller-auth-secret + valuesKey: ACTION_RUNNER_CONTROLLER_GITHUB_INSTALLATION_ID + - targetPath: githubConfigSecret.github_app_private_key + kind: Secret + name: actions-runner-controller-auth-secret + valuesKey: ACTION_RUNNER_CONTROLLER_GITHUB_PRIVATE_KEY + values: + nameOverride: gha-runner-scale-set + runnerScaleSetName: gha-runner-scale-set + githubConfigUrl: https://github.com/Darkfella91/home-ops + minRunners: 1 + maxRunners: 6 + containerMode: + type: dind + template: + spec: + containers: + - name: runner + image: 
ghcr.io/onedr0p/actions-runner:2.321.0@sha256:d968199e3772ef831c34eb8edd495ef9eb99339a2f7176d4f1774f252f7903fb + command: ["/home/runner/run.sh"] + controllerServiceAccount: + name: gha-runner-scale-set-controller + namespace: actions-runner-system diff --git a/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/app/kustomization.yaml b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/app/kustomization.yaml new file mode 100755 index 000000000..ce84014a3 --- /dev/null +++ b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml + diff --git a/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/ks.yaml b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/ks.yaml new file mode 100755 index 000000000..533dc04e4 --- /dev/null +++ b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/ks.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app gha-runner-scale-set + namespace: flux-system +spec: + targetNamespace: actions-runner-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/actions-runner-system/kustomization.yaml b/kubernetes/main/apps/actions-runner-system/kustomization.yaml new file mode 100755 index 000000000..98183e38e --- /dev/null +++ b/kubernetes/main/apps/actions-runner-system/kustomization.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./gha-runner-scale-set-controller/ks.yaml + - ./gha-runner-scale-set/ks.yaml diff --git a/kubernetes/main/apps/actions-runner-system/namespace.yaml b/kubernetes/main/apps/actions-runner-system/namespace.yaml new file mode 100755 index 000000000..7bdef02e2 --- /dev/null +++ b/kubernetes/main/apps/actions-runner-system/namespace.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: actions-runner-system + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled + volsync.backube/privileged-movers: "true" +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: actions-runner-system +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: actions-runner-system +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - 
"error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/cert-manager/cert-manager/app/helm-values.yaml b/kubernetes/main/apps/cert-manager/cert-manager/app/helm-values.yaml new file mode 100755 index 000000000..8e3e5ac23 --- /dev/null +++ b/kubernetes/main/apps/cert-manager/cert-manager/app/helm-values.yaml @@ -0,0 +1,7 @@ +installCRDs: true +dns01RecursiveNameservers: https://1.1.1.1:443/dns-query,https://1.0.0.1:443/dns-query +dns01RecursiveNameserversOnly: true +prometheus: + enabled: true + servicemonitor: + enabled: true diff --git a/kubernetes/main/apps/cert-manager/cert-manager/app/helmrelease.yaml b/kubernetes/main/apps/cert-manager/cert-manager/app/helmrelease.yaml new file mode 100755 index 000000000..7e1d7f30c --- /dev/null +++ b/kubernetes/main/apps/cert-manager/cert-manager/app/helmrelease.yaml @@ -0,0 +1,27 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: cert-manager +spec: + interval: 30m + chart: + spec: + chart: cert-manager + version: v1.16.2 + sourceRef: + kind: HelmRepository + name: jetstack + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + valuesFrom: + - kind: ConfigMap + name: cert-manager-helm-values diff --git a/kubernetes/main/apps/cert-manager/cert-manager/app/kustomization.yaml b/kubernetes/main/apps/cert-manager/cert-manager/app/kustomization.yaml new file mode 100755 index 000000000..8bb7bd3fb --- /dev/null +++ b/kubernetes/main/apps/cert-manager/cert-manager/app/kustomization.yaml @@ -0,0 +1,13 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml + - ./prometheusrule.yaml +configMapGenerator: + - name: cert-manager-helm-values + files: + - values.yaml=./helm-values.yaml +configurations: + - kustomizeconfig.yaml diff --git a/kubernetes/main/apps/cert-manager/cert-manager/app/kustomizeconfig.yaml b/kubernetes/main/apps/cert-manager/cert-manager/app/kustomizeconfig.yaml new file mode 100755 index 000000000..58f92ba15 --- /dev/null +++ b/kubernetes/main/apps/cert-manager/cert-manager/app/kustomizeconfig.yaml @@ -0,0 +1,7 @@ +--- +nameReference: + - kind: ConfigMap + version: v1 + fieldSpecs: + - path: spec/valuesFrom/name + kind: HelmRelease diff --git a/kubernetes/main/apps/cert-manager/cert-manager/app/prometheusrule.yaml b/kubernetes/main/apps/cert-manager/cert-manager/app/prometheusrule.yaml new file mode 100755 index 000000000..ae08bb147 --- /dev/null +++ b/kubernetes/main/apps/cert-manager/cert-manager/app/prometheusrule.yaml @@ -0,0 +1,68 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/monitoring.coreos.com/prometheusrule_v1.json +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: cert-manager-rules +spec: + groups: + - name: cert-manager + rules: + - alert: CertManagerAbsent + expr: | + absent(up{job="cert-manager"}) + for: 15m + labels: + severity: critical + annotations: + description: + "New certificates will not be able to be minted, and existing + ones can't be renewed until cert-manager is back." 
+ runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagerabsent + summary: "Cert Manager has disappeared from Prometheus service discovery." + - name: certificates + rules: + - alert: CertManagerCertExpirySoon + expr: | + avg by (exported_namespace, namespace, name) ( + certmanager_certificate_expiration_timestamp_seconds - time()) + < (21 * 24 * 3600) + for: 15m + labels: + severity: warning + annotations: + description: + "The domain that this cert covers will be unavailable after + {{ $value | humanizeDuration }}. Clients using endpoints that this cert + protects will start to fail in {{ $value | humanizeDuration }}." + runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagercertexpirysoon + summary: + "The cert {{ $labels.name }} is {{ $value | humanizeDuration }} + from expiry; it should have renewed over a week ago." + - alert: CertManagerCertNotReady + expr: | + max by (name, exported_namespace, namespace, condition) ( + certmanager_certificate_ready_status{condition!="True"} == 1) + for: 15m + labels: + severity: critical + annotations: + description: + "This certificate has not been ready to serve traffic for at least + 15m. If the cert is being renewed or there is another valid cert, the ingress + controller _may_ be able to serve that instead." + runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagercertnotready + summary: "The cert {{ $labels.name }} is not ready to serve traffic." + - alert: CertManagerHittingRateLimits + expr: | + sum by (host) (rate(certmanager_http_acme_client_request_count{status="429"}[5m])) + > 0 + for: 15m + labels: + severity: critical + annotations: + description: + "Depending on the rate limit, cert-manager may be unable to generate + certificates for up to a week." + runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagerhittingratelimits + summary: "Cert manager is hitting ACME provider rate limits." 
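+ # Editor's note on CertManagerCertExpirySoon above: 21 * 24 * 3600 = 1,814,400s,
+ # i.e. the alert fires once a cert has under 21 days of validity left. Assuming
+ # 90-day ACME certs and cert-manager's default renewal at one third of lifetime
+ # remaining (about 30 days), a cert crossing this threshold is roughly a week
+ # overdue for renewal, which matches the alert summary.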
diff --git a/kubernetes/main/apps/cert-manager/cert-manager/issuers/externalsecret.yaml b/kubernetes/main/apps/cert-manager/cert-manager/issuers/externalsecret.yaml new file mode 100755 index 000000000..d2751201c --- /dev/null +++ b/kubernetes/main/apps/cert-manager/cert-manager/issuers/externalsecret.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: zerossl-credentials +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: zerossl-credentials + template: + engineVersion: v2 + data: + - secretKey: CF_API_TOKEN + remoteRef: + key: secrets/cloudflare + property: CF_API_TOKEN + - secretKey: EAB_HMAC_KEY + remoteRef: + key: secrets/zerossl + property: EAB_HMAC_KEY diff --git a/kubernetes/main/apps/cert-manager/cert-manager/issuers/issuers.yaml b/kubernetes/main/apps/cert-manager/cert-manager/issuers/issuers.yaml new file mode 100755 index 000000000..aa4992068 --- /dev/null +++ b/kubernetes/main/apps/cert-manager/cert-manager/issuers/issuers.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: zerossl-prod +spec: + acme: + server: https://acme.zerossl.com/v2/DV90 + privateKeySecretRef: + name: zerossl-prod + externalAccountBinding: + keyID: jnTUJ0UFn6YhlBZUrjmrfA + keySecretRef: + name: &secret zerossl-credentials + key: EAB_HMAC_KEY + keyAlgorithm: HS256 + solvers: + - dns01: + cloudflare: + apiTokenSecretRef: + name: *secret + key: CF_API_TOKEN diff --git a/kubernetes/main/apps/cert-manager/cert-manager/issuers/kustomization.yaml b/kubernetes/main/apps/cert-manager/cert-manager/issuers/kustomization.yaml new file mode 100755 index 000000000..d6ac943fc --- /dev/null +++ b/kubernetes/main/apps/cert-manager/cert-manager/issuers/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./issuers.yaml diff --git a/kubernetes/main/apps/cert-manager/cert-manager/ks.yaml b/kubernetes/main/apps/cert-manager/cert-manager/ks.yaml new file mode 100755 index 000000000..af9134e15 --- /dev/null +++ b/kubernetes/main/apps/cert-manager/cert-manager/ks.yaml @@ -0,0 +1,43 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cert-manager + namespace: flux-system +spec: + targetNamespace: cert-manager + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/cert-manager/cert-manager/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cert-manager-issuers + namespace: flux-system +spec: + targetNamespace: cert-manager + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: cert-manager + - name: external-secrets-stores + path: ./kubernetes/main/apps/cert-manager/cert-manager/issuers + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff 
--git a/kubernetes/main/apps/cert-manager/certificates/app/certificates.yaml b/kubernetes/main/apps/cert-manager/certificates/app/certificates.yaml new file mode 100755 index 000000000..1a87e6d85 --- /dev/null +++ b/kubernetes/main/apps/cert-manager/certificates/app/certificates.yaml @@ -0,0 +1,15 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/cert-manager.io/certificate_v1.json +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: darkfellanet +spec: + secretName: darkfellanet-tls + issuerRef: + name: zerossl-prod + kind: ClusterIssuer + commonName: ${PUBLIC_DOMAIN} + dnsNames: + - ${PUBLIC_DOMAIN} + - "*.${PUBLIC_DOMAIN}" diff --git a/kubernetes/main/apps/cert-manager/certificates/app/kustomization.yaml b/kubernetes/main/apps/cert-manager/certificates/app/kustomization.yaml new file mode 100755 index 000000000..a2b5d2050 --- /dev/null +++ b/kubernetes/main/apps/cert-manager/certificates/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./certificates.yaml + - ./pushsecret.yaml diff --git a/kubernetes/main/apps/cert-manager/certificates/app/pushsecret.yaml b/kubernetes/main/apps/cert-manager/certificates/app/pushsecret.yaml new file mode 100755 index 000000000..581bda72c --- /dev/null +++ b/kubernetes/main/apps/cert-manager/certificates/app/pushsecret.yaml @@ -0,0 +1,29 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/pushsecret_v1alpha1.json +apiVersion: external-secrets.io/v1alpha1 +kind: PushSecret +metadata: + name: darkfellanet-tls +spec: + secretStoreRefs: + - name: vault-backend + kind: ClusterSecretStore + selector: + secret: + name: darkfellanet-tls + template: + engineVersion: v2 + data: + tls.crt: '{{ index . "tls.crt" | b64enc }}' + tls.key: '{{ index . 
"tls.key" | b64enc }}' + data: + - match: + secretKey: &key tls.crt + remoteRef: + remoteKey: certificates/darkfellanet + property: *key + - match: + secretKey: &key tls.key + remoteRef: + remoteKey: certificates/darkfellanet + property: *key diff --git a/kubernetes/main/apps/cert-manager/certificates/import/clusterexternalsecret.yaml b/kubernetes/main/apps/cert-manager/certificates/import/clusterexternalsecret.yaml new file mode 100755 index 000000000..21f8705e8 --- /dev/null +++ b/kubernetes/main/apps/cert-manager/certificates/import/clusterexternalsecret.yaml @@ -0,0 +1,43 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/clusterexternalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ClusterExternalSecret +metadata: + name: darkfellanet-tls +spec: + externalSecretName: darkfellanet-tls + namespaceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: In + values: + - network + - vault + refreshTime: 5m + externalSecretSpec: + refreshInterval: 5m + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: darkfellanet-tls + creationPolicy: Orphan + template: + engineVersion: v2 + type: kubernetes.io/tls + metadata: + annotations: + cert-manager.io/alt-names: '*.${PUBLIC_DOMAIN},${PUBLIC_DOMAIN}' + cert-manager.io/certificate-name: darkfellanet + cert-manager.io/common-name: ${PUBLIC_DOMAIN} + cert-manager.io/ip-sans: "" + cert-manager.io/issuer-group: "" + cert-manager.io/issuer-kind: ClusterIssuer + cert-manager.io/issuer-name: zerossl-prod + cert-manager.io/uri-sans: "" + labels: + controller.cert-manager.io/fao: "true" + dataFrom: + - extract: + key: secrets/certificates/darkfellanet + decodingStrategy: Auto diff --git a/kubernetes/main/apps/cert-manager/certificates/import/kustomization.yaml b/kubernetes/main/apps/cert-manager/certificates/import/kustomization.yaml new file mode 100755 index 000000000..fb80e7cda --- /dev/null +++ b/kubernetes/main/apps/cert-manager/certificates/import/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./clusterexternalsecret.yaml diff --git a/kubernetes/main/apps/cert-manager/certificates/ks.yaml b/kubernetes/main/apps/cert-manager/certificates/ks.yaml new file mode 100755 index 000000000..3ad003b6c --- /dev/null +++ b/kubernetes/main/apps/cert-manager/certificates/ks.yaml @@ -0,0 +1,46 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app certificates-import + namespace: flux-system +spec: + targetNamespace: cert-manager + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/cert-manager/certificates/import + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app certificates + namespace: flux-system +spec: + targetNamespace: cert-manager + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: certificates-import + - name: cert-manager-issuers + - name: 
external-secrets-stores + path: ./kubernetes/main/apps/cert-manager/certificates/app + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/cert-manager/kustomization.yaml b/kubernetes/main/apps/cert-manager/kustomization.yaml new file mode 100755 index 000000000..890b1baa9 --- /dev/null +++ b/kubernetes/main/apps/cert-manager/kustomization.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./cert-manager/ks.yaml + - ./certificates/ks.yaml diff --git a/kubernetes/main/apps/cert-manager/namespace.yaml b/kubernetes/main/apps/cert-manager/namespace.yaml new file mode 100755 index 000000000..9e6a66025 --- /dev/null +++ b/kubernetes/main/apps/cert-manager/namespace.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: cert-manager + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled + volsync.backube/privileged-movers: "true" +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: cert-manager +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: cert-manager +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/database/cloudnative-pg/app/externalsecret.yaml b/kubernetes/main/apps/database/cloudnative-pg/app/externalsecret.yaml new file mode 100755 index 000000000..c6711b6e1 --- /dev/null +++ b/kubernetes/main/apps/database/cloudnative-pg/app/externalsecret.yaml @@ -0,0 +1,34 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: cloudnative-pg-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: cloudnative-pg-secret + template: + engineVersion: v2 + metadata: + labels: + cnpg.io/reload: "true" + data: + - secretKey: username + remoteRef: + key: secrets/cloudnative-pg + property: POSTGRES_SUPER_USER + - secretKey: password + remoteRef: + key: secrets/cloudnative-pg + property: POSTGRES_SUPER_PASS + - secretKey: CF_ACCESS_KEY_ID + remoteRef: + key: secrets/cloudflare + property: CF_ACCESS_KEY_ID + - secretKey: CF_SECRET_ACCESS_KEY + remoteRef: + key: secrets/cloudflare + property: CF_SECRET_ACCESS_KEY diff --git a/kubernetes/main/apps/database/cloudnative-pg/app/helmrelease.yaml b/kubernetes/main/apps/database/cloudnative-pg/app/helmrelease.yaml new file mode 100755 index 000000000..44439a68e --- /dev/null +++ b/kubernetes/main/apps/database/cloudnative-pg/app/helmrelease.yaml @@ -0,0 +1,31 @@ +--- +# yaml-language-server: 
$schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: cloudnative-pg +spec: + interval: 30m + chart: + spec: + chart: cloudnative-pg + version: 0.22.1 + sourceRef: + kind: HelmRepository + name: cloudnative-pg + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + crds: + create: true + monitoring: + podMonitorEnabled: false + grafanaDashboard: + create: true diff --git a/kubernetes/main/apps/database/cloudnative-pg/app/kustomization.yaml b/kubernetes/main/apps/database/cloudnative-pg/app/kustomization.yaml new file mode 100755 index 000000000..4eed917b9 --- /dev/null +++ b/kubernetes/main/apps/database/cloudnative-pg/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/database/cloudnative-pg/cluster/cluster17.yaml b/kubernetes/main/apps/database/cloudnative-pg/cluster/cluster17.yaml new file mode 100755 index 000000000..a65f4dd8a --- /dev/null +++ b/kubernetes/main/apps/database/cloudnative-pg/cluster/cluster17.yaml @@ -0,0 +1,79 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/postgresql.cnpg.io/cluster_v1.json +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: postgres17 +spec: + instances: 3 + imageName: ghcr.io/cloudnative-pg/postgresql:17.2-5@sha256:423ad68f6bba1020b6ae5f26aada7e00e2cea18ed1bb2386841a46c40041e808 + primaryUpdateStrategy: unsupervised + storage: + pvcTemplate: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + storageClassName: openebs-zfs-32k + volumeMode: Filesystem + resizeInUseVolumes: true + walStorage: + pvcTemplate: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + storageClassName: openebs-zfs-32k + volumeMode: Filesystem + superuserSecret: + name: &secret cloudnative-pg-secret + enableSuperuserAccess: true + postgresql: + parameters: + max_connections: "400" + shared_buffers: 256MB + nodeMaintenanceWindow: + inProgress: false + reusePVC: true + resources: + requests: + cpu: 500m + limits: + memory: 4Gi + monitoring: + enablePodMonitor: true + backup: + retentionPolicy: 30d + barmanObjectStore: &barmanObjectStore + data: + compression: bzip2 + encryption: AES256 + wal: + compression: bzip2 + encryption: AES256 + maxParallel: 8 + destinationPath: s3://backups/cloudnative-pg/ + endpointURL: ${S3URL} + # Note: the serverName version suffix needs to be incremented + # when recovering from an existing cnpg cluster + serverName: &currentCluster postgres17-v4 + s3Credentials: + accessKeyId: + name: *secret + key: CF_ACCESS_KEY_ID + secretAccessKey: + name: *secret + key: CF_SECRET_ACCESS_KEY + # Note: previousCluster needs to be set to the name of the previous + # cluster when recovering from an existing cnpg cluster + bootstrap: + recovery: + source: &previousCluster postgres17-v2 + # Note: externalClusters is needed when recovering from an existing cnpg cluster + externalClusters: + - name: *previousCluster + barmanObjectStore: + <<: *barmanObjectStore + serverName: *previousCluster diff --git a/kubernetes/main/apps/database/cloudnative-pg/cluster/gatus.yaml b/kubernetes/main/apps/database/cloudnative-pg/cluster/gatus.yaml new file mode 100755 index 
000000000..3a0723a5a --- /dev/null +++ b/kubernetes/main/apps/database/cloudnative-pg/cluster/gatus.yaml @@ -0,0 +1,21 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: postgres-gatus-ep + labels: + gatus.io/enabled: "true" +data: + config.yaml: | + endpoints: + - name: postgres17 + group: infrastructure + url: tcp://postgres17-rw.database.svc.cluster.local:5432 + interval: 1m + ui: + hide-url: true + hide-hostname: true + conditions: + - "[CONNECTED] == true" + alerts: + - type: pushover diff --git a/kubernetes/main/apps/database/cloudnative-pg/cluster/kustomization.yaml b/kubernetes/main/apps/database/cloudnative-pg/cluster/kustomization.yaml new file mode 100755 index 000000000..4bbea0d6c --- /dev/null +++ b/kubernetes/main/apps/database/cloudnative-pg/cluster/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./scheduledbackup.yaml + - ./prometheusrule.yaml + - ./cluster17.yaml + - ./gatus.yaml diff --git a/kubernetes/main/apps/database/cloudnative-pg/cluster/prometheusrule.yaml b/kubernetes/main/apps/database/cloudnative-pg/cluster/prometheusrule.yaml new file mode 100755 index 000000000..9c1d6a8db --- /dev/null +++ b/kubernetes/main/apps/database/cloudnative-pg/cluster/prometheusrule.yaml @@ -0,0 +1,67 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/monitoring.coreos.com/prometheusrule_v1.json +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: cloudnative-pg-rules + labels: + prometheus: k8s + role: alert-rules +spec: + groups: + - name: cloudnative-pg.rules + rules: + - alert: LongRunningTransaction + annotations: + description: Pod {{ $labels.pod }} is taking more than 5 minutes (300 seconds) for a query. + summary: A query is taking longer than 5 minutes. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + - alert: BackendsWaiting + annotations: + description: Pod {{ $labels.pod }} has been waiting for longer than 5 minutes + summary: A backend has been waiting for longer than 5 minutes. + expr: |- + cnpg_backends_waiting_total > 300 + for: 1m + labels: + severity: warning + - alert: PGDatabase + annotations: + description: Over 150,000,000 transactions from frozen xid on pod {{ $labels.pod }} + summary: Number of transactions from the frozen XID to the current one + expr: |- + cnpg_pg_database_xid_age > 150000000 + for: 1m + labels: + severity: warning + - alert: PGReplication + annotations: + description: Standby is lagging behind by over 300 seconds (5 minutes) + summary: The standby is lagging behind the primary + expr: |- + cnpg_pg_replication_lag > 300 + for: 1m + labels: + severity: warning + - alert: LastFailedArchiveTime + annotations: + description: Archiving failed for {{ $labels.pod }} + summary: Checks the last time archiving failed. Will be < 0 when it has not failed. 
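+            # Editor's note: pg_stat_archiver exposes both timestamps, so the
+            # difference below is positive only while the most recent archive
+            # attempt failed after the last success, i.e. during an ongoing
+            # WAL-archiving failure.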
+ expr: |- + (cnpg_pg_stat_archiver_last_failed_time - cnpg_pg_stat_archiver_last_archived_time) > 1 + for: 1m + labels: + severity: warning + - alert: DatabaseDeadlockConflicts + annotations: + description: There are over 10 deadlock conflicts in {{ $labels.pod }} + summary: Checks the number of database conflicts + expr: |- + cnpg_pg_stat_database_deadlocks > 10 + for: 1m + labels: + severity: warning diff --git a/kubernetes/main/apps/database/cloudnative-pg/cluster/scheduledbackup.yaml b/kubernetes/main/apps/database/cloudnative-pg/cluster/scheduledbackup.yaml new file mode 100755 index 000000000..622733b8f --- /dev/null +++ b/kubernetes/main/apps/database/cloudnative-pg/cluster/scheduledbackup.yaml @@ -0,0 +1,12 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/postgresql.cnpg.io/scheduledbackup_v1.json +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: postgres17 +spec: + schedule: "@daily" + immediate: true + backupOwnerReference: self + cluster: + name: postgres17 diff --git a/kubernetes/main/apps/database/cloudnative-pg/ks.yaml b/kubernetes/main/apps/database/cloudnative-pg/ks.yaml new file mode 100755 index 000000000..c987243db --- /dev/null +++ b/kubernetes/main/apps/database/cloudnative-pg/ks.yaml @@ -0,0 +1,44 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cloudnative-pg + namespace: flux-system +spec: + targetNamespace: database + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/database/cloudnative-pg/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cloudnative-pg-cluster + namespace: flux-system +spec: + targetNamespace: database + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: cloudnative-pg + path: ./kubernetes/main/apps/database/cloudnative-pg/cluster + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/database/kustomization.yaml b/kubernetes/main/apps/database/kustomization.yaml new file mode 100755 index 000000000..fd1bf3475 --- /dev/null +++ b/kubernetes/main/apps/database/kustomization.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./cloudnative-pg/ks.yaml + - ./redis/ks.yaml diff --git a/kubernetes/main/apps/database/namespace.yaml b/kubernetes/main/apps/database/namespace.yaml new file mode 100755 index 000000000..70a0cf101 --- /dev/null +++ b/kubernetes/main/apps/database/namespace.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: database + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled + volsync.backube/privileged-movers: "true" +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: 
notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: database +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: database +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/database/redis/app/externalsecret.yaml b/kubernetes/main/apps/database/redis/app/externalsecret.yaml new file mode 100755 index 000000000..b7f7fb9be --- /dev/null +++ b/kubernetes/main/apps/database/redis/app/externalsecret.yaml @@ -0,0 +1,19 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret redis-credentials +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + AUTHENTIK_REDIS__PASSWORD: "{{ .REDIS_PASSWORD }}" + dataFrom: + - extract: + key: secrets/redis diff --git a/kubernetes/main/apps/database/redis/app/helmrelease.yaml b/kubernetes/main/apps/database/redis/app/helmrelease.yaml new file mode 100755 index 000000000..0f3929d40 --- /dev/null +++ b/kubernetes/main/apps/database/redis/app/helmrelease.yaml @@ -0,0 +1,39 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: redis +spec: + interval: 30m + chart: + spec: + chart: redis + version: 20.3.0 + sourceRef: + kind: HelmRepository + name: bitnami + namespace: flux-system + maxHistory: 3 + install: + createNamespace: true + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + uninstall: + keepHistory: false + values: + master: + persistence: + storageClass: openebs-zfs-128k + configuration: | + databases 32 + architecture: standalone + auth: + enabled: true + sentinel: false + existingSecret: redis-credentials + existingSecretPasswordKey: AUTHENTIK_REDIS__PASSWORD diff --git a/kubernetes/main/apps/database/redis/app/kustomization.yaml b/kubernetes/main/apps/database/redis/app/kustomization.yaml new file mode 100755 index 000000000..79e214cfc --- /dev/null +++ b/kubernetes/main/apps/database/redis/app/kustomization.yaml @@ -0,0 +1,16 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml +configMapGenerator: + - name: redis-gatus-ep + options: + labels: + gatus.io/enabled: "true" + files: + - config.yaml=./resources/gatus-ep.yaml +generatorOptions: + disableNameSuffixHash: true diff --git a/kubernetes/main/apps/database/redis/app/resources/gatus-ep.yaml b/kubernetes/main/apps/database/redis/app/resources/gatus-ep.yaml new file mode 100644 index 000000000..ff89b2231 --- /dev/null +++ b/kubernetes/main/apps/database/redis/app/resources/gatus-ep.yaml @@ -0,0 +1,12 @@ +endpoints: + - 
name: redis + group: infrastructure + url: tcp://redis-master.database.svc.cluster.local:6379 + interval: 1m + ui: + hide-url: true + hide-hostname: true + conditions: + - "[CONNECTED] == true" + alerts: + - type: pushover diff --git a/kubernetes/main/apps/database/redis/ks.yaml b/kubernetes/main/apps/database/redis/ks.yaml new file mode 100755 index 000000000..69ecd0283 --- /dev/null +++ b/kubernetes/main/apps/database/redis/ks.yaml @@ -0,0 +1,22 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app redis + namespace: flux-system +spec: + targetNamespace: database + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/database/redis/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/external-secrets/external-secrets/app/helmrelease.yaml b/kubernetes/main/apps/external-secrets/external-secrets/app/helmrelease.yaml new file mode 100755 index 000000000..8db99522f --- /dev/null +++ b/kubernetes/main/apps/external-secrets/external-secrets/app/helmrelease.yaml @@ -0,0 +1,55 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: external-secrets +spec: + interval: 30m + chart: + spec: + chart: external-secrets + version: 0.10.7 + sourceRef: + kind: HelmRepository + name: external-secrets + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + installCRDs: true + image: + repository: ghcr.io/external-secrets/external-secrets + webhook: + image: + repository: ghcr.io/external-secrets/external-secrets + serviceMonitor: + enabled: true + interval: 1m + certController: + image: + repository: ghcr.io/external-secrets/external-secrets + serviceMonitor: + enabled: true + interval: 1m + serviceMonitor: + enabled: true + interval: 1m + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + enabled: true + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + seccompProfile: + type: RuntimeDefault diff --git a/kubernetes/main/apps/external-secrets/external-secrets/app/kustomization.yaml b/kubernetes/main/apps/external-secrets/external-secrets/app/kustomization.yaml new file mode 100755 index 000000000..17cbc72b2 --- /dev/null +++ b/kubernetes/main/apps/external-secrets/external-secrets/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/external-secrets/external-secrets/ks.yaml b/kubernetes/main/apps/external-secrets/external-secrets/ks.yaml new file mode 100755 index 000000000..5cf51becc --- /dev/null +++ b/kubernetes/main/apps/external-secrets/external-secrets/ks.yaml @@ -0,0 +1,42 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app external-secrets + namespace: flux-system +spec: + 
targetNamespace: external-secrets + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/external-secrets/external-secrets/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app external-secrets-stores + namespace: flux-system +spec: + targetNamespace: external-secrets + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets + path: ./kubernetes/main/apps/external-secrets/external-secrets/stores + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/external-secrets/external-secrets/stores/clustersecretstore.yaml b/kubernetes/main/apps/external-secrets/external-secrets/stores/clustersecretstore.yaml new file mode 100755 index 000000000..3c07d0a42 --- /dev/null +++ b/kubernetes/main/apps/external-secrets/external-secrets/stores/clustersecretstore.yaml @@ -0,0 +1,19 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/clustersecretstore_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ClusterSecretStore +metadata: + name: vault-backend +spec: + provider: + vault: + server: https://vault.${PUBLIC_DOMAIN}:8200 + path: secrets + version: v2 + auth: + kubernetes: + mountPath: kubernetes + role: external-secrets-operator + serviceAccountRef: + name: external-secrets + namespace: external-secrets diff --git a/kubernetes/main/apps/external-secrets/external-secrets/stores/kustomization.yaml b/kubernetes/main/apps/external-secrets/external-secrets/stores/kustomization.yaml new file mode 100755 index 000000000..87f419341 --- /dev/null +++ b/kubernetes/main/apps/external-secrets/external-secrets/stores/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./clustersecretstore.yaml + diff --git a/kubernetes/main/apps/external-secrets/kustomization.yaml b/kubernetes/main/apps/external-secrets/kustomization.yaml new file mode 100755 index 000000000..8b5a7e346 --- /dev/null +++ b/kubernetes/main/apps/external-secrets/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./external-secrets/ks.yaml diff --git a/kubernetes/main/apps/external-secrets/namespace.yaml b/kubernetes/main/apps/external-secrets/namespace.yaml new file mode 100755 index 000000000..2a7689896 --- /dev/null +++ b/kubernetes/main/apps/external-secrets/namespace.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: external-secrets + labels: + kustomize.toolkit.fluxcd.io/prune: disabled +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: external-secrets +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- 
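+# Editor's note: same pattern as the other namespaces in this patch: the Alert
+# below forwards HelmRelease error events to the Provider above, while the
+# exclusionList filters transient DNS-lookup and socket-timeout noise.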
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: external-secrets +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/flux-system/addons/app/kustomization.yaml b/kubernetes/main/apps/flux-system/addons/app/kustomization.yaml new file mode 100755 index 000000000..feb053584 --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./monitoring + - ./notifications + - ./webhooks diff --git a/kubernetes/main/apps/flux-system/addons/app/monitoring/kustomization.yaml b/kubernetes/main/apps/flux-system/addons/app/monitoring/kustomization.yaml new file mode 100755 index 000000000..247c03744 --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/monitoring/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: flux-system +resources: + - ./podmonitor.yaml + - ./prometheusrule.yaml diff --git a/kubernetes/main/apps/flux-system/addons/app/monitoring/podmonitor.yaml b/kubernetes/main/apps/flux-system/addons/app/monitoring/podmonitor.yaml new file mode 100755 index 000000000..bc68a6a45 --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/monitoring/podmonitor.yaml @@ -0,0 +1,30 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/monitoring.coreos.com/podmonitor_v1.json +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: flux-system + namespace: flux-system + labels: + app.kubernetes.io/part-of: flux + app.kubernetes.io/component: monitoring +spec: + namespaceSelector: + matchNames: + - flux-system + selector: + matchExpressions: + - key: app + operator: In + values: + - helm-controller + - source-controller + - kustomize-controller + - notification-controller + podMetricsEndpoints: + - port: http-prom + relabelings: + # Ref: https://github.com/prometheus-operator/prometheus-operator/issues/4816 + - sourceLabels: [__meta_kubernetes_pod_phase] + action: keep + regex: Running diff --git a/kubernetes/main/apps/flux-system/addons/app/monitoring/prometheusrule.yaml b/kubernetes/main/apps/flux-system/addons/app/monitoring/prometheusrule.yaml new file mode 100755 index 000000000..4257e56de --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/monitoring/prometheusrule.yaml @@ -0,0 +1,32 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/monitoring.coreos.com/prometheusrule_v1.json +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: flux-rules + namespace: flux-system +spec: + groups: + - name: flux.rules + rules: + - alert: FluxComponentAbsent + annotations: + summary: Flux component has disappeared from Prometheus target discovery. 
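+          # Editor's note: absent(up{...} == 1) returns 1 only when no flux-system
+          # target currently reports up == 1, i.e. the controllers have dropped out
+          # of service discovery entirely or none of them are healthy.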
+ expr: | + absent(up{job=~".*flux-system.*"} == 1) + for: 15m + labels: + severity: critical + - alert: FluxReconciliationFailure + annotations: + summary: >- + {{ $labels.kind }} {{ $labels.namespace }}/{{ $labels.name }} reconciliation + has been failing for more than 15 minutes. + expr: | + max(gotk_reconcile_condition{status="False",type="Ready"}) by (namespace, name, kind) + + + on(namespace, name, kind) (max(gotk_reconcile_condition{status="Deleted"}) + by (namespace, name, kind)) * 2 == 1 + for: 15m + labels: + severity: critical diff --git a/kubernetes/main/apps/flux-system/addons/app/notifications/github/externalsecret.yaml b/kubernetes/main/apps/flux-system/addons/app/notifications/github/externalsecret.yaml new file mode 100755 index 000000000..a141a5b43 --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/notifications/github/externalsecret.yaml @@ -0,0 +1,19 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: github-token +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: github-token-secret + template: + engineVersion: v2 + data: + token: "{{ .FLUX_GITHUB_TOKEN }}" + dataFrom: + - extract: + key: secrets/flux diff --git a/kubernetes/main/apps/flux-system/addons/app/notifications/github/kustomization.yaml b/kubernetes/main/apps/flux-system/addons/app/notifications/github/kustomization.yaml new file mode 100755 index 000000000..c6052dbc2 --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/notifications/github/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./notification.yaml diff --git a/kubernetes/main/apps/flux-system/addons/app/notifications/github/notification.yaml b/kubernetes/main/apps/flux-system/addons/app/notifications/github/notification.yaml new file mode 100755 index 000000000..183dce81a --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/notifications/github/notification.yaml @@ -0,0 +1,24 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: github +spec: + type: github + address: https://github.com/Darkfella91/home-ops + secretRef: + name: github-token-secret +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: github +spec: + providerRef: + name: github + eventSeverity: info + eventSources: + - kind: Kustomization + name: "*" diff --git a/kubernetes/main/apps/flux-system/addons/app/notifications/kustomization.yaml b/kubernetes/main/apps/flux-system/addons/app/notifications/kustomization.yaml new file mode 100755 index 000000000..08c1780f0 --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/notifications/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./github diff --git a/kubernetes/main/apps/flux-system/addons/app/webhooks/github/externalsecret.yaml 
b/kubernetes/main/apps/flux-system/addons/app/webhooks/github/externalsecret.yaml new file mode 100755 index 000000000..5364853ca --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/webhooks/github/externalsecret.yaml @@ -0,0 +1,19 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: github-webhook-token +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: github-webhook-token-secret + template: + engineVersion: v2 + data: + token: "{{ .FLUX_GITHUB_WEBHOOK_TOKEN }}" + dataFrom: + - extract: + key: flux diff --git a/kubernetes/main/apps/flux-system/addons/app/webhooks/github/ingress.yaml b/kubernetes/main/apps/flux-system/addons/app/webhooks/github/ingress.yaml new file mode 100755 index 000000000..547885a98 --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/webhooks/github/ingress.yaml @@ -0,0 +1,28 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: webhook-receiver + annotations: + external-dns.alpha.kubernetes.io/target: external.${PUBLIC_DOMAIN} + cert-manager.io/cluster-issuer: zerossl-prod + cert-manager.io/private-key-rotation-policy: Always + cert-manager.io/private-key-algorithm: ECDSA + cert-manager.io/private-key-size: "384" +spec: + ingressClassName: external + tls: + - hosts: + - &host "flux-webhook.${PUBLIC_DOMAIN}" + secretName: flux-webhook-tls + rules: + - host: *host + http: + paths: + - path: /hook/ + pathType: Prefix + backend: + service: + name: webhook-receiver + port: + number: 80 diff --git a/kubernetes/main/apps/flux-system/addons/app/webhooks/github/kustomization.yaml b/kubernetes/main/apps/flux-system/addons/app/webhooks/github/kustomization.yaml new file mode 100755 index 000000000..58532a27c --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/webhooks/github/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./ingress.yaml + - ./receiver.yaml diff --git a/kubernetes/main/apps/flux-system/addons/app/webhooks/github/receiver.yaml b/kubernetes/main/apps/flux-system/addons/app/webhooks/github/receiver.yaml new file mode 100755 index 000000000..fd67703a2 --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/webhooks/github/receiver.yaml @@ -0,0 +1,26 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/receiver_v1.json +apiVersion: notification.toolkit.fluxcd.io/v1 +kind: Receiver +metadata: + name: home-ops +spec: + type: github + events: + - ping + - push + secretRef: + name: github-webhook-token-secret + resources: + - apiVersion: source.toolkit.fluxcd.io/v1 + kind: GitRepository + name: home-kubernetes + namespace: flux-system + - apiVersion: kustomize.toolkit.fluxcd.io/v1 + kind: Kustomization + name: cluster + namespace: flux-system + - apiVersion: kustomize.toolkit.fluxcd.io/v1 + kind: Kustomization + name: cluster-apps + namespace: flux-system diff --git a/kubernetes/main/apps/flux-system/addons/app/webhooks/kustomization.yaml b/kubernetes/main/apps/flux-system/addons/app/webhooks/kustomization.yaml new file mode 100755 index 000000000..08c1780f0 --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/webhooks/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# 
yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./github diff --git a/kubernetes/main/apps/flux-system/addons/ks.yaml b/kubernetes/main/apps/flux-system/addons/ks.yaml new file mode 100755 index 000000000..8a2780483 --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/ks.yaml @@ -0,0 +1,22 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app flux-addons + namespace: flux-system +spec: + targetNamespace: flux-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/flux-system/addons/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/flux-system/kustomization.yaml b/kubernetes/main/apps/flux-system/kustomization.yaml new file mode 100755 index 000000000..95df4db76 --- /dev/null +++ b/kubernetes/main/apps/flux-system/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./addons/ks.yaml diff --git a/kubernetes/main/apps/flux-system/namespace.yaml b/kubernetes/main/apps/flux-system/namespace.yaml new file mode 100755 index 000000000..38c6a2adf --- /dev/null +++ b/kubernetes/main/apps/flux-system/namespace.yaml @@ -0,0 +1,46 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: flux-system + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled + volsync.backube/privileged-movers: "true" +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: flux-system +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: flux-system +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: GitRepository + name: "*" + - kind: HelmRelease + name: "*" + - kind: HelmRepository + name: "*" + - kind: Kustomization + name: "*" + - kind: OCIRepository + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/keycloak/crds/kustomization.yaml b/kubernetes/main/apps/keycloak/crds/kustomization.yaml new file mode 100755 index 000000000..09dfd524a --- /dev/null +++ b/kubernetes/main/apps/keycloak/crds/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - https://raw.githubusercontent.com/keycloak/keycloak-k8s-resources/26.0.5/kubernetes/keycloaks.k8s.keycloak.org-v1.yml + - https://raw.githubusercontent.com/keycloak/keycloak-k8s-resources/26.0.5/kubernetes/keycloakrealmimports.k8s.keycloak.org-v1.yml diff 
--git a/kubernetes/main/apps/keycloak/deployment/cr.yaml b/kubernetes/main/apps/keycloak/deployment/cr.yaml new file mode 100755 index 000000000..339d74b07 --- /dev/null +++ b/kubernetes/main/apps/keycloak/deployment/cr.yaml @@ -0,0 +1,94 @@ +--- +apiVersion: k8s.keycloak.org/v2alpha1 +kind: Keycloak +metadata: + name: keycloak +spec: + unsupported: + podTemplate: + spec: + containers: + - securityContext: + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: false + capabilities: { drop: ["ALL"] } + + initContainers: + - name: init-db + image: "ghcr.io/onedr0p/postgres-init:16.6@sha256:35353a77777ee8f634d0f3945f495b4a40065134b8619e0d18bd49b0ee9c855b" + imagePullPolicy: IfNotPresent + env: + - name: INIT_POSTGRES_HOST + value: &dbHost postgres17-rw.database.svc.cluster.local + - name: INIT_POSTGRES_PORT + value: '5432' + - name: INIT_POSTGRES_DBNAME + value: &dbName keycloak + - name: INIT_POSTGRES_SUPER_PASS + valueFrom: + secretKeyRef: + name: &secret keycloak-secret + key: POSTGRES_SUPER_PASS + - name: INIT_POSTGRES_USER + valueFrom: + secretKeyRef: + name: *secret + key: POSTGRES_USER + - name: INIT_POSTGRES_PASS + valueFrom: + secretKeyRef: + name: *secret + key: POSTGRES_PASS + securityContext: + runAsUser: 10002 + runAsGroup: 10002 + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + + securityContext: + runAsNonRoot: true + seccompProfile: { type: RuntimeDefault } + + db: + vendor: postgres + usernameSecret: + name: *secret + key: POSTGRES_USER + passwordSecret: + name: *secret + key: POSTGRES_PASS + host: *dbHost + database: *dbName + port: 5432 + schema: public + + instances: 1 + + image: ghcr.io/darkfella91/keycloak-image:26.0.6@sha256:84cdcb3044af445d6fd6a3c18e88c7957955eb670d2802f42040d8e9c1deda80 + + startOptimized: true + + additionalOptions: + - name: https-protocols + value: "TLSv1.3" + + bootstrapAdmin: + user: + secret: *secret + + proxy: + headers: xforwarded + + hostname: + hostname: https://accounts.${PUBLIC_DOMAIN} + + http: + httpEnabled: false + httpsPort: 443 + tlsSecret: keycloak-tls + + ingress: + enabled: false diff --git a/kubernetes/main/apps/keycloak/deployment/externalsecret.yaml b/kubernetes/main/apps/keycloak/deployment/externalsecret.yaml new file mode 100755 index 000000000..1f395b78f --- /dev/null +++ b/kubernetes/main/apps/keycloak/deployment/externalsecret.yaml @@ -0,0 +1,25 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret keycloak-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + POSTGRES_USER: "{{ .KEYCLOAK_POSTGRES_USER }}" + POSTGRES_PASS: "{{ .KEYCLOAK_POSTGRES_PASS }}" + POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}" + username: "{{ .ADMIN_USERNAME }}" + password: "{{ .ADMIN_PASSWORD }}" + dataFrom: + - extract: + key: secrets/cloudnative-pg + - extract: + key: secrets/keycloak diff --git a/kubernetes/main/apps/keycloak/deployment/ingress.yaml b/kubernetes/main/apps/keycloak/deployment/ingress.yaml new file mode 100755 index 000000000..e9114f9a2 --- /dev/null +++ b/kubernetes/main/apps/keycloak/deployment/ingress.yaml @@ -0,0 +1,30 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: keycloak-ingress + namespace: idp + annotations: + 
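+    # Editor's note: backend-protocol "HTTPS" below makes ingress-nginx connect to
+    # the backend over TLS; this is required because the Keycloak CR above disables
+    # plain HTTP (httpEnabled: false) and serves only on the TLS port.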
external-dns.alpha.kubernetes.io/target: external.${PUBLIC_DOMAIN} + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + cert-manager.io/cluster-issuer: zerossl-prod + cert-manager.io/private-key-rotation-policy: Always + cert-manager.io/private-key-algorithm: ECDSA + cert-manager.io/private-key-size: "384" +spec: + ingressClassName: external + tls: + - hosts: + - &host accounts.${PUBLIC_DOMAIN} + secretName: keycloak-tls + rules: + - host: *host + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: keycloak-service + port: + number: 443 diff --git a/kubernetes/main/apps/keycloak/deployment/kustomization.yaml b/kubernetes/main/apps/keycloak/deployment/kustomization.yaml new file mode 100755 index 000000000..1af901a21 --- /dev/null +++ b/kubernetes/main/apps/keycloak/deployment/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./ingress.yaml + - ./externalsecret.yaml + - ./cr.yaml diff --git a/kubernetes/main/apps/keycloak/ks.yaml b/kubernetes/main/apps/keycloak/ks.yaml new file mode 100755 index 000000000..c77ec8201 --- /dev/null +++ b/kubernetes/main/apps/keycloak/ks.yaml @@ -0,0 +1,66 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app keycloak-crds + namespace: flux-system +spec: + targetNamespace: idp + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/keycloak/crds + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app keycloak-operator + namespace: flux-system +spec: + dependsOn: + - name: keycloak-crds + targetNamespace: idp + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/keycloak/operator + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app keycloak-deployment + namespace: flux-system +spec: + dependsOn: + - name: external-secrets-stores + - name: cloudnative-pg-cluster + - name: keycloak-operator + targetNamespace: idp + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/keycloak/deployment + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/keycloak/kustomization.yaml b/kubernetes/main/apps/keycloak/kustomization.yaml new file mode 100755 index 000000000..ad2040382 --- /dev/null +++ b/kubernetes/main/apps/keycloak/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + - ./ks.yaml diff --git a/kubernetes/main/apps/keycloak/namespace.yaml b/kubernetes/main/apps/keycloak/namespace.yaml new file mode 100755 
index 000000000..8c452403a --- /dev/null +++ b/kubernetes/main/apps/keycloak/namespace.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: idp diff --git a/kubernetes/main/apps/keycloak/operator/kustomization.yaml b/kubernetes/main/apps/keycloak/operator/kustomization.yaml new file mode 100755 index 000000000..9018944b8 --- /dev/null +++ b/kubernetes/main/apps/keycloak/operator/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - https://raw.githubusercontent.com/keycloak/keycloak-k8s-resources/26.0.5/kubernetes/kubernetes.yml diff --git a/kubernetes/main/apps/kube-system/cilium/app/helm-values.yaml b/kubernetes/main/apps/kube-system/cilium/app/helm-values.yaml new file mode 100755 index 000000000..8e3258cb9 --- /dev/null +++ b/kubernetes/main/apps/kube-system/cilium/app/helm-values.yaml @@ -0,0 +1,62 @@ +--- +cni: + exclusive: false +enableIPv4BIGTCP: true +autoDirectNodeRoutes: true +bandwidthManager: + enabled: true + bbr: true +bpf: + masquerade: true + preallocateMaps: true + tproxy: true +cgroup: + automount: + enabled: false + hostRoot: /sys/fs/cgroup +devices: br0 +cluster: + id: 1 + name: main +endpointRoutes: + enabled: true +envoy: + enabled: false +hubble: + enabled: false +ipam: + mode: kubernetes +ipv4NativeRoutingCIDR: 172.16.0.0/16 +k8sServiceHost: 127.0.0.1 +k8sServicePort: 7445 +kubeProxyReplacement: true +kubeProxyReplacementHealthzBindAddr: 0.0.0.0:10256 +l2announcements: + enabled: true +loadBalancer: + algorithm: maglev + mode: dsr +localRedirectPolicy: false +operator: + replicas: 1 + rollOutPods: true +rollOutCiliumPods: true +routingMode: native +securityContext: + capabilities: + ciliumAgent: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + cleanCiliumState: + - NET_ADMIN + - SYS_ADMIN + - SYS_RESOURCE diff --git a/kubernetes/main/apps/kube-system/cilium/app/helmrelease.yaml b/kubernetes/main/apps/kube-system/cilium/app/helmrelease.yaml new file mode 100755 index 000000000..b0efc68e9 --- /dev/null +++ b/kubernetes/main/apps/kube-system/cilium/app/helmrelease.yaml @@ -0,0 +1,91 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: cilium +spec: + interval: 30m + chart: + spec: + chart: cilium + version: 1.16.4 + sourceRef: + kind: HelmRepository + name: cilium + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + + valuesFrom: + - kind: ConfigMap + name: cilium-helm-values + + values: + hubble: + enabled: true + metrics: + enabled: + - dns:query + - drop + - tcp + - flow + - port-distribution + - icmp + - http + serviceMonitor: + enabled: true + dashboards: + enabled: true + annotations: + grafana_folder: Cilium + relay: + enabled: true + rollOutPods: true + prometheus: + serviceMonitor: + enabled: true + + ui: + enabled: true + rollOutPods: true + ingress: + enabled: true + annotations: + external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN} + cert-manager.io/cluster-issuer: zerossl-prod + cert-manager.io/private-key-rotation-policy: Always + cert-manager.io/private-key-algorithm: ECDSA + cert-manager.io/private-key-size: "384" + className: internal + tls: + - hosts: &host ["hubble.${PUBLIC_DOMAIN}"] + secretName: hubble-tls + hosts: 
*host + + operator: + prometheus: + enabled: true + serviceMonitor: + enabled: true + dashboards: + enabled: true + annotations: + grafana_folder: Cilium + + prometheus: + enabled: true + serviceMonitor: + enabled: true + trustCRDsExist: true + + dashboards: + enabled: true + annotations: + grafana_folder: Cilium diff --git a/kubernetes/main/apps/kube-system/cilium/app/kustomization.yaml b/kubernetes/main/apps/kube-system/cilium/app/kustomization.yaml new file mode 100755 index 000000000..25781ef11 --- /dev/null +++ b/kubernetes/main/apps/kube-system/cilium/app/kustomization.yaml @@ -0,0 +1,12 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml +configMapGenerator: + - name: cilium-helm-values + files: + - values.yaml=./helm-values.yaml +configurations: + - kustomizeconfig.yaml diff --git a/kubernetes/main/apps/kube-system/cilium/app/kustomizeconfig.yaml b/kubernetes/main/apps/kube-system/cilium/app/kustomizeconfig.yaml new file mode 100755 index 000000000..58f92ba15 --- /dev/null +++ b/kubernetes/main/apps/kube-system/cilium/app/kustomizeconfig.yaml @@ -0,0 +1,7 @@ +--- +nameReference: + - kind: ConfigMap + version: v1 + fieldSpecs: + - path: spec/valuesFrom/name + kind: HelmRelease diff --git a/kubernetes/main/apps/kube-system/cilium/config/kustomization.yaml b/kubernetes/main/apps/kube-system/cilium/config/kustomization.yaml new file mode 100755 index 000000000..89773e64e --- /dev/null +++ b/kubernetes/main/apps/kube-system/cilium/config/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./l2.yaml diff --git a/kubernetes/main/apps/kube-system/cilium/config/l2.yaml b/kubernetes/main/apps/kube-system/cilium/config/l2.yaml new file mode 100755 index 000000000..b266fdfbb --- /dev/null +++ b/kubernetes/main/apps/kube-system/cilium/config/l2.yaml @@ -0,0 +1,25 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/cilium.io/ciliuml2announcementpolicy_v2alpha1.json +apiVersion: cilium.io/v2alpha1 +kind: CiliumL2AnnouncementPolicy +metadata: + name: l2-policy +spec: + loadBalancerIPs: true + interfaces: + - br0 + nodeSelector: + matchLabels: + kubernetes.io/os: linux +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/cilium.io/ciliumloadbalancerippool_v2alpha1.json +apiVersion: cilium.io/v2alpha1 +kind: CiliumLoadBalancerIPPool +metadata: + name: l2-pool +spec: + allowFirstLastIPs: "Yes" + blocks: + - # Controller VIP: 192.168.91.21 + start: 192.168.91.80 + stop: 192.168.91.99 diff --git a/kubernetes/main/apps/kube-system/cilium/ks.yaml b/kubernetes/main/apps/kube-system/cilium/ks.yaml new file mode 100755 index 000000000..e416f8e66 --- /dev/null +++ b/kubernetes/main/apps/kube-system/cilium/ks.yaml @@ -0,0 +1,42 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cilium + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/kube-system/cilium/app + prune: false # never should be deleted + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m +--- +# 
yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cilium-config + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: cilium + path: ./kubernetes/main/apps/kube-system/cilium/config + prune: false # never should be deleted + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/kube-system/coredns/app/helm-values.yaml b/kubernetes/main/apps/kube-system/coredns/app/helm-values.yaml new file mode 100755 index 000000000..5308ec98e --- /dev/null +++ b/kubernetes/main/apps/kube-system/coredns/app/helm-values.yaml @@ -0,0 +1,72 @@ +--- +fullnameOverride: coredns +replicaCount: 2 +k8sAppLabelOverride: kube-dns +serviceAccount: + create: true +service: + name: kube-dns + clusterIP: 172.17.0.10 +servers: + - zones: + - zone: . + scheme: dns:// + use_tcp: true + port: 53 + plugins: + - name: errors + - name: health + configBlock: |- + lameduck 5s + - name: ready + - name: log + configBlock: |- + class error + - name: prometheus + parameters: 0.0.0.0:9153 + - name: kubernetes + parameters: cluster.local in-addr.arpa ip6.arpa + configBlock: |- + pods insecure + fallthrough in-addr.arpa ip6.arpa + - name: forward + parameters: . 192.168.91.1 + - name: cache + parameters: 30 + - name: loop + - name: reload + - name: loadbalance + - zones: + - zone: ${PUBLIC_DOMAIN} + scheme: dns:// + use_tcp: true + port: 53 + plugins: + - name: errors + - name: forward + parameters: . 192.168.91.41 + - name: cache + parameters: 86400 + - name: loop + - name: reload + - name: loadbalance + - name: ready + - name: health + configBlock: |- + lameduck 5s + - name: log + configBlock: |- + class error +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists +tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule diff --git a/kubernetes/main/apps/kube-system/coredns/app/helmrelease.yaml b/kubernetes/main/apps/kube-system/coredns/app/helmrelease.yaml new file mode 100755 index 000000000..ce31f06de --- /dev/null +++ b/kubernetes/main/apps/kube-system/coredns/app/helmrelease.yaml @@ -0,0 +1,27 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: coredns +spec: + interval: 30m + chart: + spec: + chart: coredns + version: 1.36.1 + sourceRef: + kind: HelmRepository + name: coredns + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + valuesFrom: + - kind: ConfigMap + name: coredns-helm-values diff --git a/kubernetes/main/apps/kube-system/coredns/app/kustomization.yaml b/kubernetes/main/apps/kube-system/coredns/app/kustomization.yaml new file mode 100755 index 000000000..39444bbd4 --- /dev/null +++ b/kubernetes/main/apps/kube-system/coredns/app/kustomization.yaml @@ -0,0 +1,12 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml 
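+# Note on this pattern (standard kustomize behaviour): the generated ConfigMap
+# name gets a content-hash suffix, and the nameReference rule in
+# kustomizeconfig.yaml rewrites spec.valuesFrom[].name in the HelmRelease to the
+# hashed name, so every edit to helm-values.yaml produces a new ConfigMap and
+# triggers a Helm upgrade.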
+configMapGenerator: + - name: coredns-helm-values + files: + - values.yaml=./helm-values.yaml +configurations: + - kustomizeconfig.yaml diff --git a/kubernetes/main/apps/kube-system/coredns/app/kustomizeconfig.yaml b/kubernetes/main/apps/kube-system/coredns/app/kustomizeconfig.yaml new file mode 100755 index 000000000..58f92ba15 --- /dev/null +++ b/kubernetes/main/apps/kube-system/coredns/app/kustomizeconfig.yaml @@ -0,0 +1,7 @@ +--- +nameReference: + - kind: ConfigMap + version: v1 + fieldSpecs: + - path: spec/valuesFrom/name + kind: HelmRelease diff --git a/kubernetes/main/apps/kube-system/coredns/ks.yaml b/kubernetes/main/apps/kube-system/coredns/ks.yaml new file mode 100755 index 000000000..269f52ede --- /dev/null +++ b/kubernetes/main/apps/kube-system/coredns/ks.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app coredns + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/kube-system/coredns/app + prune: false # never should be deleted + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/kube-system/generic-device-plugin.yaml b/kubernetes/main/apps/kube-system/generic-device-plugin.yaml new file mode 100755 index 000000000..d77d349b5 --- /dev/null +++ b/kubernetes/main/apps/kube-system/generic-device-plugin.yaml @@ -0,0 +1,58 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: generic-device-plugin + namespace: kube-system + labels: + app.kubernetes.io/name: generic-device-plugin +spec: + selector: + matchLabels: + app.kubernetes.io/name: generic-device-plugin + template: + metadata: + labels: + app.kubernetes.io/name: generic-device-plugin + spec: + priorityClassName: system-node-critical + tolerations: + - operator: "Exists" + effect: "NoExecute" + - operator: "Exists" + effect: "NoSchedule" + containers: + - image: squat/generic-device-plugin + args: + - --device + - | + name: tun + groups: + - count: 5 + paths: + - path: /dev/net/tun + name: generic-device-plugin + resources: + requests: + cpu: 50m + memory: 10Mi + limits: + memory: 100Mi + ports: + - containerPort: 8080 + name: http + securityContext: + privileged: true + volumeMounts: + - name: device-plugin + mountPath: /var/lib/kubelet/device-plugins + - name: dev + mountPath: /dev + volumes: + - name: device-plugin + hostPath: + path: /var/lib/kubelet/device-plugins + - name: dev + hostPath: + path: /dev + updateStrategy: + type: RollingUpdate diff --git a/kubernetes/main/apps/kube-system/kubelet-csr-approver/app/helm-values.yaml b/kubernetes/main/apps/kube-system/kubelet-csr-approver/app/helm-values.yaml new file mode 100755 index 000000000..c737caff1 --- /dev/null +++ b/kubernetes/main/apps/kube-system/kubelet-csr-approver/app/helm-values.yaml @@ -0,0 +1,4 @@ +--- +replicas: 1 +providerRegex: ^k8s-\d$ +bypassDnsResolution: true diff --git a/kubernetes/main/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml b/kubernetes/main/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml new file mode 100755 index 000000000..b8146b0bb --- /dev/null +++ b/kubernetes/main/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml @@ -0,0 +1,32 @@ +--- +# yaml-language-server: 
$schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: kubelet-csr-approver +spec: + interval: 30m + chart: + spec: + chart: kubelet-csr-approver + version: 1.2.3 + sourceRef: + kind: HelmRepository + name: postfinance + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + valuesFrom: + - kind: ConfigMap + name: kubelet-csr-approver-helm-values + values: + metrics: + enable: true + serviceMonitor: + enabled: true diff --git a/kubernetes/main/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml b/kubernetes/main/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml new file mode 100755 index 000000000..30dddafcb --- /dev/null +++ b/kubernetes/main/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml +configMapGenerator: + - name: kubelet-csr-approver-helm-values + files: + - values.yaml=./helm-values.yaml +configurations: + - kustomizeconfig.yaml diff --git a/kubernetes/main/apps/kube-system/kubelet-csr-approver/app/kustomizeconfig.yaml b/kubernetes/main/apps/kube-system/kubelet-csr-approver/app/kustomizeconfig.yaml new file mode 100755 index 000000000..58f92ba15 --- /dev/null +++ b/kubernetes/main/apps/kube-system/kubelet-csr-approver/app/kustomizeconfig.yaml @@ -0,0 +1,7 @@ +--- +nameReference: + - kind: ConfigMap + version: v1 + fieldSpecs: + - path: spec/valuesFrom/name + kind: HelmRelease diff --git a/kubernetes/main/apps/kube-system/kubelet-csr-approver/ks.yaml b/kubernetes/main/apps/kube-system/kubelet-csr-approver/ks.yaml new file mode 100755 index 000000000..507320ba6 --- /dev/null +++ b/kubernetes/main/apps/kube-system/kubelet-csr-approver/ks.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app kubelet-csr-approver + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/kube-system/kubelet-csr-approver/app + prune: false # never should be deleted + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/kube-system/kustomization.yaml b/kubernetes/main/apps/kube-system/kustomization.yaml new file mode 100755 index 000000000..b8fd8f21a --- /dev/null +++ b/kubernetes/main/apps/kube-system/kustomization.yaml @@ -0,0 +1,18 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./coredns/ks.yaml + - ./cilium/ks.yaml + - ./kubelet-csr-approver/ks.yaml + - ./metrics-server/ks.yaml + - ./reloader/ks.yaml + - ./generic-device-plugin.yaml + - ./nvidia-device-plugin/ks.yaml + - ./zfs-localpv/ks.yaml + - ./multus/ks.yaml + - ./vfio-binding.yaml diff --git a/kubernetes/main/apps/kube-system/metrics-server/app/helmrelease.yaml b/kubernetes/main/apps/kube-system/metrics-server/app/helmrelease.yaml new file mode 100755 index 000000000..78b197d29 --- /dev/null +++ b/kubernetes/main/apps/kube-system/metrics-server/app/helmrelease.yaml @@ -0,0 +1,33 @@ +--- +# yaml-language-server: 
$schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: metrics-server +spec: + interval: 30m + chart: + spec: + chart: metrics-server + version: 3.12.2 + sourceRef: + kind: HelmRepository + name: metrics-server + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + args: + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + - --metric-resolution=15s + metrics: + enabled: true + serviceMonitor: + enabled: true diff --git a/kubernetes/main/apps/kube-system/metrics-server/app/kustomization.yaml b/kubernetes/main/apps/kube-system/metrics-server/app/kustomization.yaml new file mode 100755 index 000000000..17cbc72b2 --- /dev/null +++ b/kubernetes/main/apps/kube-system/metrics-server/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/kube-system/metrics-server/ks.yaml b/kubernetes/main/apps/kube-system/metrics-server/ks.yaml new file mode 100755 index 000000000..c0bd19d06 --- /dev/null +++ b/kubernetes/main/apps/kube-system/metrics-server/ks.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app metrics-server + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/kube-system/metrics-server/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/kube-system/multus/app/kustomization.yaml b/kubernetes/main/apps/kube-system/multus/app/kustomization.yaml new file mode 100644 index 000000000..5eb50565a --- /dev/null +++ b/kubernetes/main/apps/kube-system/multus/app/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - https://raw.githubusercontent.com/k8snetworkplumbingwg/multus-cni/master/deployments/multus-daemonset-thick.yml +patchesStrategicMerge: + - patch.yaml diff --git a/kubernetes/main/apps/kube-system/multus/app/patch.yaml b/kubernetes/main/apps/kube-system/multus/app/patch.yaml new file mode 100644 index 000000000..87a32d2f3 --- /dev/null +++ b/kubernetes/main/apps/kube-system/multus/app/patch.yaml @@ -0,0 +1,33 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-multus-ds + namespace: kube-system +spec: + template: + spec: + volumes: + - name: host-run-netns + hostPath: + path: /var/run/netns/ + initContainers: + - command: + - /install-cni.sh + image: ghcr.io/siderolabs/install-cni:v1.9.0-alpha.0-2-g78ba66b + name: install-cni + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/opt/cni/bin + mountPropagation: Bidirectional + name: cnibin + containers: + - name: kube-multus + resources: + limits: + cpu: 500m + memory: 2Gi + requests: + cpu: 100m + memory: 50Mi diff --git a/kubernetes/main/apps/kube-system/multus/config/kustomization.yaml 
b/kubernetes/main/apps/kube-system/multus/config/kustomization.yaml new file mode 100644 index 000000000..a4c3e9ac2 --- /dev/null +++ b/kubernetes/main/apps/kube-system/multus/config/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./network-attachment-definition.yaml diff --git a/kubernetes/main/apps/kube-system/multus/config/network-attachment-definition.yaml b/kubernetes/main/apps/kube-system/multus/config/network-attachment-definition.yaml new file mode 100644 index 000000000..0a52e0500 --- /dev/null +++ b/kubernetes/main/apps/kube-system/multus/config/network-attachment-definition.yaml @@ -0,0 +1,75 @@ +--- +apiVersion: "k8s.cni.cncf.io/v1" +kind: NetworkAttachmentDefinition +metadata: + name: bridge-truenas-1 + namespace: kube-system +spec: + config: '{ + "cniVersion": "0.3.1", + "name": "bridge-truenas-1", + "type": "bridge", + "bridge": "br0", + "ipam": { + "type": "static", + "addresses": [ + { + "address": "192.168.91.39/24", + "gateway": "192.168.91.1" + } + ], + "routes": [ + { "dst": "0.0.0.0/0", "gw": "192.168.91.1" } + ] + } + }' +--- +apiVersion: "k8s.cni.cncf.io/v1" +kind: NetworkAttachmentDefinition +metadata: + name: bridge-truenas-2 + namespace: kube-system +spec: + config: '{ + "cniVersion": "0.3.1", + "name": "bridge-truenas-2", + "type": "bridge", + "bridge": "br0", + "ipam": { + "type": "static", + "addresses": [ + { + "address": "192.168.91.40/24", + "gateway": "192.168.91.1" + } + ], + "routes": [ + { "dst": "0.0.0.0/0", "gw": "192.168.91.1" } + ] + } + }' +--- +apiVersion: "k8s.cni.cncf.io/v1" +kind: NetworkAttachmentDefinition +metadata: + name: bridge-windows-server + namespace: kube-system +spec: + config: '{ + "cniVersion": "0.3.1", + "name": "bridge-windows-server", + "type": "bridge", + "bridge": "br0", + "ipam": { + "type": "static", + "addresses": [ + { + "address": "192.168.91.41/24", + "gateway": "192.168.91.1" + } + ], + "routes": [ + { "dst": "0.0.0.0/0", "gw": "192.168.91.1" } + ] + } + }' diff --git a/kubernetes/main/apps/kube-system/multus/ks.yaml b/kubernetes/main/apps/kube-system/multus/ks.yaml new file mode 100644 index 000000000..9b82241f0 --- /dev/null +++ b/kubernetes/main/apps/kube-system/multus/ks.yaml @@ -0,0 +1,40 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app multus + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/kube-system/multus/app + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app multus-config + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/kube-system/multus/config + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/kube-system/namespace.yaml b/kubernetes/main/apps/kube-system/namespace.yaml new file mode 100755 index 000000000..c16492e39 --- /dev/null +++ b/kubernetes/main/apps/kube-system/namespace.yaml @@ -0,0 
+1,38 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: kube-system + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled + volsync.backube/privileged-movers: "true" +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: kube-system +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: kube-system +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/kube-system/nvidia-device-plugin/app/helmrelease.yaml b/kubernetes/main/apps/kube-system/nvidia-device-plugin/app/helmrelease.yaml new file mode 100755 index 000000000..522bbf24f --- /dev/null +++ b/kubernetes/main/apps/kube-system/nvidia-device-plugin/app/helmrelease.yaml @@ -0,0 +1,35 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: nvidia-device-plugin +spec: + interval: 30m + chart: + spec: + chart: nvidia-device-plugin + version: 0.17.0 + sourceRef: + kind: HelmRepository + name: nvdp + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + gfd: + enabled: true + nfd: + enableNodeFeatureApi: true + worker: + config: + sources: + pci: + deviceClassWhitelist: + - "03" diff --git a/kubernetes/main/apps/kube-system/nvidia-device-plugin/app/kustomization.yaml b/kubernetes/main/apps/kube-system/nvidia-device-plugin/app/kustomization.yaml new file mode 100755 index 000000000..5dd7baca7 --- /dev/null +++ b/kubernetes/main/apps/kube-system/nvidia-device-plugin/app/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/kube-system/nvidia-device-plugin/config/kustomization.yaml b/kubernetes/main/apps/kube-system/nvidia-device-plugin/config/kustomization.yaml new file mode 100755 index 000000000..4d166dc9a --- /dev/null +++ b/kubernetes/main/apps/kube-system/nvidia-device-plugin/config/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./runtime.yaml diff --git a/kubernetes/main/apps/kube-system/nvidia-device-plugin/config/runtime.yaml b/kubernetes/main/apps/kube-system/nvidia-device-plugin/config/runtime.yaml new file mode 100755 index 000000000..7ba6add19 --- /dev/null +++ b/kubernetes/main/apps/kube-system/nvidia-device-plugin/config/runtime.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: node.k8s.io/v1 +kind: RuntimeClass +metadata: + name: nvidia +handler: nvidia diff --git a/kubernetes/main/apps/kube-system/nvidia-device-plugin/ks.yaml b/kubernetes/main/apps/kube-system/nvidia-device-plugin/ks.yaml new file mode 100755 index 000000000..f23aa39c6 --- /dev/null +++ 
b/kubernetes/main/apps/kube-system/nvidia-device-plugin/ks.yaml @@ -0,0 +1,40 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app nvdp-config + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/kube-system/nvidia-device-plugin/config + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app nvdp + namespace: flux-system +spec: + dependsOn: + - name: nvdp-config + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/kube-system/nvidia-device-plugin/app + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/kube-system/reloader/app/helmrelease.yaml b/kubernetes/main/apps/kube-system/reloader/app/helmrelease.yaml new file mode 100755 index 000000000..135b7d469 --- /dev/null +++ b/kubernetes/main/apps/kube-system/reloader/app/helmrelease.yaml @@ -0,0 +1,33 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: reloader +spec: + interval: 30m + chart: + spec: + chart: reloader + version: 1.2.0 + sourceRef: + kind: HelmRepository + name: stakater + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + fullnameOverride: reloader + reloader: + reloadOnCreate: true + reloadOnDelete: true + readOnlyRootFileSystem: true + podMonitor: + enabled: true + namespace: "{{ .Release.Namespace }}" diff --git a/kubernetes/main/apps/kube-system/reloader/app/kustomization.yaml b/kubernetes/main/apps/kube-system/reloader/app/kustomization.yaml new file mode 100755 index 000000000..17cbc72b2 --- /dev/null +++ b/kubernetes/main/apps/kube-system/reloader/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/kube-system/reloader/ks.yaml b/kubernetes/main/apps/kube-system/reloader/ks.yaml new file mode 100755 index 000000000..2f7c7ba89 --- /dev/null +++ b/kubernetes/main/apps/kube-system/reloader/ks.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app reloader + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/kube-system/reloader/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/kube-system/vfio-binding.yaml b/kubernetes/main/apps/kube-system/vfio-binding.yaml new file mode 100644 index 000000000..d93644117 --- /dev/null +++ b/kubernetes/main/apps/kube-system/vfio-binding.yaml @@ -0,0 +1,116 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: pci-device-binder + namespace: kube-system + labels: + app: pci-binder +spec: + selector: + 
matchLabels: + app: pci-binder + template: + metadata: + labels: + app: pci-binder + spec: + initContainers: + - name: pci-bind + image: ubuntu:latest + command: + - bash + - -c + - | + # Function to install required tools + install_tools() { + echo "Installing required tools..." + apt-get update && apt-get install -y zfsutils-linux || { + echo "Error: Failed to install required packages." + return 1 + } + return 0 + } + + # Function to export the zpool + export_zpool() { + ZPOOL_NAME="exos20" + echo "Attempting to export zpool $ZPOOL_NAME..." + if zpool export $ZPOOL_NAME 2>/dev/null; then + echo "Zpool $ZPOOL_NAME exported successfully." + else + echo "Error: Failed to export zpool $ZPOOL_NAME." + return 1 + fi + return 0 + } + + # Function to bind devices to vfio-pci + check_and_bind() { + DEVICE=$1 + EXPECTED_DRIVER="vfio-pci" + CURRENT_DRIVER=$(basename $(readlink /sys/bus/pci/devices/$DEVICE/driver) 2>/dev/null || echo "none") + echo "Device $DEVICE current driver: $CURRENT_DRIVER" + + if [ "$CURRENT_DRIVER" != "$EXPECTED_DRIVER" ]; then + # Check if the zpool needs to be exported + ZPOOL_NAME="exos20" + ZPOOL_CHECKED=false + if zpool list $ZPOOL_NAME >/dev/null 2>&1; then + echo "Zpool $ZPOOL_NAME is imported. Proceeding to export..." + export_zpool || return 1 + ZPOOL_CHECKED=true + else + echo "Zpool $ZPOOL_NAME is already exported or not found." + ZPOOL_CHECKED=true + fi + + # Ensure zpool state was validated + if [ "$ZPOOL_CHECKED" != "true" ]; then + echo "Error: Zpool validation failed unexpectedly. Aborting." + return 1 + fi + + # Unbind from the current driver if necessary + if [ "$CURRENT_DRIVER" != "none" ]; then + echo "Unbinding device $DEVICE from driver $CURRENT_DRIVER" + echo $DEVICE > /sys/bus/pci/devices/$DEVICE/driver/unbind + fi + + # Set driver_override + echo "Setting driver_override for device $DEVICE to $EXPECTED_DRIVER" + echo "$EXPECTED_DRIVER" > /sys/bus/pci/devices/$DEVICE/driver_override + + # Bind the device to the new driver + echo "Binding device $DEVICE to $EXPECTED_DRIVER" + echo $DEVICE > /sys/bus/pci/drivers/$EXPECTED_DRIVER/bind + else + echo "Device $DEVICE is already bound to $EXPECTED_DRIVER" + fi + } + + # Execute steps + install_tools || exit 1 + check_and_bind "0000:05:00.0" + check_and_bind "0000:0a:00.0" + + volumeMounts: + - mountPath: /sys + name: sysfs + - mountPath: /dev + name: dev + securityContext: + privileged: true + + containers: + - name: placeholder + image: busybox:latest + command: ["sh", "-c", "echo 'DaemonSet active but idle'; sleep infinity"] + + restartPolicy: Always + volumes: + - name: sysfs + hostPath: + path: /sys + - name: dev + hostPath: + path: /dev diff --git a/kubernetes/main/apps/kube-system/zfs-localpv/app/helm-values.yaml b/kubernetes/main/apps/kube-system/zfs-localpv/app/helm-values.yaml new file mode 100644 index 000000000..84898cdbc --- /dev/null +++ b/kubernetes/main/apps/kube-system/zfs-localpv/app/helm-values.yaml @@ -0,0 +1,117 @@ +feature: + storageCapacity: true + +rbac: + pspEnabled: false + +zfsNode: + componentName: openebs-zfs-node + driverRegistrar: + name: "csi-node-driver-registrar" + image: + registry: registry.k8s.io/ + repository: sig-storage/csi-node-driver-registrar + pullPolicy: IfNotPresent + tag: v2.12.0 + updateStrategy: + type: RollingUpdate + annotations: {} + podAnnotations: {} + resources: {} + kubeletDir: "/var/lib/kubelet/" + encrKeysDir: "/var/openebs/keys" + podLabels: {} + nodeSelector: {} + tolerations: [] + securityContext: {} + labels: {} + priorityClass: + create: true + 
name: zfs-csi-node-critical + allowedTopologyKeys: "All" + initContainers: {} + additionalVolumes: {} + +zfsController: + componentName: openebs-zfs-controller + initContainers: {} + additionalVolumes: {} + replicas: 1 + resizer: + name: "csi-resizer" + image: + registry: registry.k8s.io/ + repository: sig-storage/csi-resizer + pullPolicy: IfNotPresent + tag: v1.12.0 + extraArgs: [] + snapshotter: + name: "csi-snapshotter" + image: + registry: registry.k8s.io/ + repository: sig-storage/csi-snapshotter + pullPolicy: IfNotPresent + tag: v8.1.0 + extraArgs: [] + snapshotController: + name: "snapshot-controller" + image: + registry: registry.k8s.io/ + repository: sig-storage/snapshot-controller + pullPolicy: IfNotPresent + tag: v8.1.0 + extraArgs: [] + provisioner: + name: "csi-provisioner" + image: + registry: registry.k8s.io/ + repository: sig-storage/csi-provisioner + pullPolicy: IfNotPresent + tag: v5.1.0 + extraArgs: [] + updateStrategy: + type: RollingUpdate + annotations: {} + podAnnotations: {} + resources: {} + podLabels: + name: openebs-zfs-controller + nodeSelector: {} + tolerations: [] + securityContext: {} + priorityClass: + create: true + name: zfs-csi-controller-critical + +zfsPlugin: + name: "openebs-zfs-plugin" + image: + registry: + repository: openebs/zfs-driver + pullPolicy: IfNotPresent + tag: 2.7.0-develop + +role: openebs-zfs + +serviceAccount: + zfsController: + create: true + name: openebs-zfs-controller-sa + zfsNode: + create: true + name: openebs-zfs-node-sa + +analytics: + enabled: true + installerType: "zfs-localpv-helm" +zfs: + bin: zfs + +crds: + zfsLocalPv: + enabled: true + csi: + volumeSnapshots: + enabled: true + +enableHelmMetaLabels: true diff --git a/kubernetes/main/apps/kube-system/zfs-localpv/app/helmrelease.yaml b/kubernetes/main/apps/kube-system/zfs-localpv/app/helmrelease.yaml new file mode 100644 index 000000000..cdf4676a2 --- /dev/null +++ b/kubernetes/main/apps/kube-system/zfs-localpv/app/helmrelease.yaml @@ -0,0 +1,28 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: zfs-localpv +spec: + interval: 30m + chart: + spec: + chart: zfs-localpv + version: 2.7.0-develop + sourceRef: + kind: HelmRepository + name: zfs-localpv + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + + valuesFrom: + - kind: ConfigMap + name: zfs-localpv-helm-values diff --git a/kubernetes/main/apps/kube-system/zfs-localpv/app/kustomization.yaml b/kubernetes/main/apps/kube-system/zfs-localpv/app/kustomization.yaml new file mode 100644 index 000000000..990fb0d26 --- /dev/null +++ b/kubernetes/main/apps/kube-system/zfs-localpv/app/kustomization.yaml @@ -0,0 +1,12 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml +configMapGenerator: + - name: zfs-localpv-helm-values + files: + - values.yaml=./helm-values.yaml +configurations: + - kustomizeconfig.yaml diff --git a/kubernetes/main/apps/kube-system/zfs-localpv/app/kustomizeconfig.yaml b/kubernetes/main/apps/kube-system/zfs-localpv/app/kustomizeconfig.yaml new file mode 100644 index 000000000..58f92ba15 --- /dev/null +++ b/kubernetes/main/apps/kube-system/zfs-localpv/app/kustomizeconfig.yaml @@ -0,0 +1,7 @@ +--- +nameReference: + - kind: ConfigMap + version: v1 + 
fieldSpecs: + - path: spec/valuesFrom/name + kind: HelmRelease diff --git a/kubernetes/main/apps/kube-system/zfs-localpv/config/kustomization.yaml b/kubernetes/main/apps/kube-system/zfs-localpv/config/kustomization.yaml new file mode 100644 index 000000000..6645bd343 --- /dev/null +++ b/kubernetes/main/apps/kube-system/zfs-localpv/config/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./storageclass.yaml + - ./snapshotclass.yaml diff --git a/kubernetes/main/apps/kube-system/zfs-localpv/config/snapshotclass.yaml b/kubernetes/main/apps/kube-system/zfs-localpv/config/snapshotclass.yaml new file mode 100644 index 000000000..6a6fe6aa2 --- /dev/null +++ b/kubernetes/main/apps/kube-system/zfs-localpv/config/snapshotclass.yaml @@ -0,0 +1,9 @@ +--- +kind: VolumeSnapshotClass +apiVersion: snapshot.storage.k8s.io/v1 +metadata: + name: zfspv-snapclass + annotations: + snapshot.storage.kubernetes.io/is-default-class: "true" +driver: zfs.csi.openebs.io +deletionPolicy: Delete diff --git a/kubernetes/main/apps/kube-system/zfs-localpv/config/storageclass.yaml b/kubernetes/main/apps/kube-system/zfs-localpv/config/storageclass.yaml new file mode 100644 index 000000000..bd785d914 --- /dev/null +++ b/kubernetes/main/apps/kube-system/zfs-localpv/config/storageclass.yaml @@ -0,0 +1,43 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: openebs-zfs-1m +parameters: + thinprovision: "yes" + recordsize: "1m" + compression: "lz4" + dedup: "off" + fstype: "zfs" + shared: "yes" + poolname: "ssd_pool" +provisioner: zfs.csi.openebs.io + +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: openebs-zfs-128k +parameters: + thinprovision: "yes" + recordsize: "128k" + compression: "lz4" + dedup: "off" + fstype: "zfs" + shared: "yes" + poolname: "ssd_pool" +provisioner: zfs.csi.openebs.io +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: openebs-zfs-32k +parameters: + thinprovision: "yes" + recordsize: "32k" + compression: "lz4" + dedup: "off" + fstype: "zfs" + shared: "yes" + poolname: "ssd_pool" +provisioner: zfs.csi.openebs.io diff --git a/kubernetes/main/apps/kube-system/zfs-localpv/ks.yaml b/kubernetes/main/apps/kube-system/zfs-localpv/ks.yaml new file mode 100644 index 000000000..bd1ad38e8 --- /dev/null +++ b/kubernetes/main/apps/kube-system/zfs-localpv/ks.yaml @@ -0,0 +1,40 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app zfs-localpv + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/kube-system/zfs-localpv/app + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app zfs-localpv-config + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/kube-system/zfs-localpv/config + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m 
+ timeout: 5m diff --git a/kubernetes/main/apps/kyverno/kustomization.yaml b/kubernetes/main/apps/kyverno/kustomization.yaml new file mode 100755 index 000000000..10b5d06cd --- /dev/null +++ b/kubernetes/main/apps/kyverno/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./kyverno/ks.yaml diff --git a/kubernetes/main/apps/kyverno/kyverno/app/helmrelease.yaml b/kubernetes/main/apps/kyverno/kyverno/app/helmrelease.yaml new file mode 100755 index 000000000..a177c8e86 --- /dev/null +++ b/kubernetes/main/apps/kyverno/kyverno/app/helmrelease.yaml @@ -0,0 +1,80 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app kyverno +spec: + interval: 30m + chart: + spec: + chart: kyverno + version: 3.3.3 + sourceRef: + kind: HelmRepository + name: kyverno + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + crds: + install: true + grafana: + enabled: true + admissionController: + replicas: 1 + rbac: + clusterRole: + extraResources: + - apiGroups: + - "" + resources: + - pods + verbs: + - create + - update + - delete + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + app.kubernetes.io/instance: *app + app.kubernetes.io/component: admission-controller + serviceMonitor: + enabled: true + backgroundController: + rbac: + clusterRole: + extraResources: + - apiGroups: + - "" + resources: + - pods + verbs: + - create + - update + - patch + - delete + - get + - list + resources: + requests: + cpu: 100m + limits: + memory: 1Gi + serviceMonitor: + enabled: true + cleanupController: + serviceMonitor: + enabled: true + reportsController: + serviceMonitor: + enabled: true diff --git a/kubernetes/main/apps/kyverno/kyverno/app/kustomization.yaml b/kubernetes/main/apps/kyverno/kyverno/app/kustomization.yaml new file mode 100755 index 000000000..17cbc72b2 --- /dev/null +++ b/kubernetes/main/apps/kyverno/kyverno/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/kyverno/kyverno/ks.yaml b/kubernetes/main/apps/kyverno/kyverno/ks.yaml new file mode 100755 index 000000000..e4b225062 --- /dev/null +++ b/kubernetes/main/apps/kyverno/kyverno/ks.yaml @@ -0,0 +1,42 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app kyverno + namespace: flux-system +spec: + targetNamespace: kyverno + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/kyverno/kyverno/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: 
&app kyverno-policies
+  namespace: flux-system
+spec:
+  targetNamespace: kyverno
+  commonMetadata:
+    labels:
+      app.kubernetes.io/name: *app
+  dependsOn:
+    - name: kyverno
+  path: ./kubernetes/main/apps/kyverno/kyverno/policies
+  prune: true
+  sourceRef:
+    kind: GitRepository
+    name: home-kubernetes
+  wait: true
+  interval: 30m
+  timeout: 5m
diff --git a/kubernetes/main/apps/kyverno/kyverno/policies/default-deny.yaml b/kubernetes/main/apps/kyverno/kyverno/policies/default-deny.yaml
new file mode 100644
index 000000000..9da170582
--- /dev/null
+++ b/kubernetes/main/apps/kyverno/kyverno/policies/default-deny.yaml
@@ -0,0 +1,38 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kyverno.io/clusterpolicy_v1.json
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: add-networkpolicy
+  annotations:
+    policies.kyverno.io/title: Add Network Policy
+    policies.kyverno.io/category: Multi-Tenancy, EKS Best Practices
+    policies.kyverno.io/subject: NetworkPolicy
+    policies.kyverno.io/minversion: 1.6.0
+    policies.kyverno.io/description: >-
+      By default, Kubernetes allows communications across all Pods within a cluster.
+      The NetworkPolicy resource and a CNI plug-in that supports NetworkPolicy must be used to restrict
+      communications. A default NetworkPolicy should be configured for each Namespace to
+      default deny all ingress and egress traffic to the Pods in the Namespace. Application
+      teams can then configure additional NetworkPolicy resources to allow desired traffic
+      to application Pods from select sources. This policy will create a new NetworkPolicy resource
+      named `default-deny` which will deny all traffic anytime a new Namespace is created.
+spec:
+  rules:
+    - name: default-deny
+      match:
+        any:
+          - resources:
+              kinds:
+                - Namespace
+      generate:
+        apiVersion: networking.k8s.io/v1
+        kind: NetworkPolicy
+        name: default-deny
+        namespace: "{{request.object.metadata.name}}"
+        synchronize: true
+        data:
+          spec:
+            podSelector: {}
+            policyTypes:
+              - Egress
diff --git a/kubernetes/main/apps/kyverno/kyverno/policies/dns-config.yaml b/kubernetes/main/apps/kyverno/kyverno/policies/dns-config.yaml
new file mode 100755
index 000000000..83d70af64
--- /dev/null
+++ b/kubernetes/main/apps/kyverno/kyverno/policies/dns-config.yaml
@@ -0,0 +1,30 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kyverno.io/clusterpolicy_v1.json
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: add-ndots
+  annotations:
+    policies.kyverno.io/title: Add ndots
+    policies.kyverno.io/category: dnsConfig
+    policies.kyverno.io/subject: Pod
+    policies.kyverno.io/minversion: 1.6.0
+    policies.kyverno.io/description: >-
+      The ndots value controls where DNS lookups are first performed in a cluster
+      and needs to be set to a lower value than the default of 5 in some cases.
+      This policy mutates all Pods to add the ndots option with a value of 1.
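+# Illustrative sketch (hypothetical Pod, not part of the policy itself): after the
+# mutation below, an admitted Pod's spec carries
+#   dnsConfig:
+#     options:
+#       - name: ndots
+#         value: "1"
+# so a name containing at least one dot is tried as an absolute name first instead
+# of being expanded through the cluster's DNS search domains.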
+spec:
+  rules:
+    - name: add-ndots
+      match:
+        any:
+          - resources:
+              kinds:
+                - Pod
+      mutate:
+        patchStrategicMerge:
+          spec:
+            dnsConfig:
+              options:
+                - name: ndots
+                  value: "1"
diff --git a/kubernetes/main/apps/kyverno/kyverno/policies/hostpath-readonly.yaml b/kubernetes/main/apps/kyverno/kyverno/policies/hostpath-readonly.yaml
new file mode 100755
index 000000000..005f9da58
--- /dev/null
+++ b/kubernetes/main/apps/kyverno/kyverno/policies/hostpath-readonly.yaml
@@ -0,0 +1,52 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kyverno.io/clusterpolicy_v1.json
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: ensure-readonly-hostpath
+  annotations:
+    policies.kyverno.io/title: Ensure Read Only hostPath
+    policies.kyverno.io/category: Other
+    policies.kyverno.io/severity: medium
+    policies.kyverno.io/minversion: 1.6.0
+    policies.kyverno.io/subject: Pod
+    policies.kyverno.io/description: >-
+      Pods which are allowed to mount hostPath volumes in read/write mode pose a security risk
+      even if confined to a "safe" file system on the host and may escape those confines (see
+      https://blog.aquasec.com/kubernetes-security-pod-escape-log-mounts). The only true way
+      to ensure safety is to enforce that all Pods mounting hostPath volumes do so in read only
+      mode. This policy checks all containers for any hostPath volumes and ensures they are
+      explicitly mounted in readOnly mode.
+spec:
+  background: false
+  validationFailureAction: audit
+  rules:
+    - name: ensure-hostpaths-readonly
+      match:
+        any:
+          - resources:
+              kinds:
+                - Pod
+      preconditions:
+        all:
+          - key: "{{ request.operation || 'BACKGROUND' }}"
+            operator: AnyIn
+            value:
+              - CREATE
+              - UPDATE
+      validate:
+        message: All hostPath volumes must be mounted as readOnly.
+        foreach:
+          # Fetch all volumes in the Pod which are a hostPath. Store the names in an array. There could be multiple in a Pod so can't assume just one.
+          - list: "request.object.spec.volumes[?hostPath][]"
+            deny:
+              conditions:
+                # For every name found for a hostPath volume (stored as `{{element}}`), check all containers, initContainers, and ephemeralContainers which mount this volume and
+                # total up the number of them. Compare that to the ones with that same name which explicitly specify that `readOnly: true`. If these two
+                # counts aren't equal, deny the Pod because at least one is attempting to mount that hostPath in read/write mode. Note that the absence of
+                # the `readOnly: true` field implies read/write access. Therefore, every hostPath volume must explicitly specify that it should be mounted
+                # in readOnly mode, regardless of where that occurs in a Pod.
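+                # Worked example (hypothetical, added for illustration): a Pod
+                # mounts hostPath volume "host-logs" from two containers but only
+                # one volumeMount sets readOnly: true. The first expression below
+                # counts 2 mounts, the second counts 1, NotEquals matches, and
+                # the request is denied.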
+ any: + - key: "{{ request.object.spec.[containers, initContainers, ephemeralContainers][].volumeMounts[?name == '{{element.name}}'][] | length(@) }}" + operator: NotEquals + value: "{{ request.object.spec.[containers, initContainers, ephemeralContainers][].volumeMounts[?name == '{{element.name}}' && readOnly] [] | length(@) }}" diff --git a/kubernetes/main/apps/kyverno/kyverno/policies/kustomization.yaml b/kubernetes/main/apps/kyverno/kyverno/policies/kustomization.yaml new file mode 100755 index 000000000..75f09898a --- /dev/null +++ b/kubernetes/main/apps/kyverno/kyverno/policies/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + #- ./dns-config.yaml + - ./hostpath-readonly.yaml + #- ./default-deny.yaml diff --git a/kubernetes/main/apps/kyverno/namespace.yaml b/kubernetes/main/apps/kyverno/namespace.yaml new file mode 100755 index 000000000..2ca250077 --- /dev/null +++ b/kubernetes/main/apps/kyverno/namespace.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: kyverno + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled + volsync.backube/privileged-movers: "true" +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: kyverno +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: kyverno +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/media/autobrr/app/externalsecret.yaml b/kubernetes/main/apps/media/autobrr/app/externalsecret.yaml new file mode 100755 index 000000000..d6a1c7145 --- /dev/null +++ b/kubernetes/main/apps/media/autobrr/app/externalsecret.yaml @@ -0,0 +1,32 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret autobrr-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + AUTOBRR__DATABASE_TYPE: postgres + AUTOBRR__POSTGRES_DATABASE: &dbName autobrr + AUTOBRR__POSTGRES_HOST: &dbHost postgres17-rw.database.svc.cluster.local + AUTOBRR__POSTGRES_USER: &dbUser "{{ .AUTOBRR_POSTGRES_USER }}" + AUTOBRR__POSTGRES_PASS: "{{ .AUTOBRR_POSTGRESS_ENCODED_PASS }}" + AUTOBRR__POSTGRES_PORT: "5432" + AUTOBRR__SESSION_SECRET: "{{ .AUTOBRR_SESSION_SECRET }}" + INIT_POSTGRES_DBNAME: *dbName + INIT_POSTGRES_HOST: *dbHost + INIT_POSTGRES_USER: *dbUser + INIT_POSTGRES_PASS: "{{ .AUTOBRR_POSTGRES_PASS }}" + INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}" + dataFrom: + - extract: + key: secrets/autobrr + - extract: + key: secrets/cloudnative-pg diff --git a/kubernetes/main/apps/media/autobrr/app/helmrelease.yaml 
b/kubernetes/main/apps/media/autobrr/app/helmrelease.yaml new file mode 100755 index 000000000..a7c8fdfb6 --- /dev/null +++ b/kubernetes/main/apps/media/autobrr/app/helmrelease.yaml @@ -0,0 +1,118 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: autobrr +spec: + interval: 30m + chart: + spec: + verify: + provider: cosign + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + controllers: + autobrr: + annotations: + reloader.stakater.com/auto: "true" + initContainers: + init-db: + image: + repository: ghcr.io/onedr0p/postgres-init + tag: 16.6@sha256:35353a77777ee8f634d0f3945f495b4a40065134b8619e0d18bd49b0ee9c855b + envFrom: &envFrom + - secretRef: + name: autobrr-secret + containers: + app: + image: + repository: ghcr.io/autobrr/autobrr + tag: v1.51.1@sha256:747c682d8d59e72a202ee4239bafbd7cfa10b0fc3a6220b61446de73dbd0c956 + env: + AUTOBRR__CHECK_FOR_UPDATES: "false" + AUTOBRR__HOST: 0.0.0.0 + AUTOBRR__PORT: &port 80 + AUTOBRR__LOG_LEVEL: INFO + envFrom: *envFrom + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /api/healthz/liveness + port: *port + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: + enabled: true + custom: true + spec: + httpGet: + path: /api/healthz/readiness + port: *port + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 10m + limits: + memory: 256Mi + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + seccompProfile: { type: RuntimeDefault } + service: + app: + controller: autobrr + ports: + http: + port: *port + ingress: + app: + annotations: + external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN} + cert-manager.io/cluster-issuer: zerossl-prod + cert-manager.io/private-key-rotation-policy: Always + cert-manager.io/private-key-algorithm: ECDSA + cert-manager.io/private-key-size: "384" + className: internal + tls: + - hosts: + - &host "{{ .Release.Name }}.${PUBLIC_DOMAIN}" + secretName: autobrr-tls + hosts: + - host: *host + paths: + - path: / + service: + identifier: app + port: http + + persistence: + tmp: + type: emptyDir diff --git a/kubernetes/main/apps/media/autobrr/app/kustomization.yaml b/kubernetes/main/apps/media/autobrr/app/kustomization.yaml new file mode 100755 index 000000000..7233accee --- /dev/null +++ b/kubernetes/main/apps/media/autobrr/app/kustomization.yaml @@ -0,0 +1,22 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml +configMapGenerator: + - name: autobrr-loki-rules + files: + - autobrr.yaml=./resources/lokirule.yaml + options: + labels: + loki_rule: "true" + - name: autobrr-gatus-ep + options: + labels: + gatus.io/enabled: "true" + files: + - config.yaml=./resources/gatus-ep.yaml +generatorOptions: + disableNameSuffixHash: true diff --git 
a/kubernetes/main/apps/media/autobrr/app/resources/gatus-ep.yaml b/kubernetes/main/apps/media/autobrr/app/resources/gatus-ep.yaml new file mode 100755 index 000000000..fbde44a9c --- /dev/null +++ b/kubernetes/main/apps/media/autobrr/app/resources/gatus-ep.yaml @@ -0,0 +1,15 @@ +endpoints: + - name: "Autobrr" + group: guarded + url: "https://autobrr.${PUBLIC_DOMAIN}/api/healthz/liveness" + interval: 1m + ui: + hide-hostname: true + hide-url: true + client: + dns-resolver: tcp://172.17.0.10:53 + conditions: + - "[STATUS] == 200" + - "[BODY] == OK" + alerts: + - type: pushover diff --git a/kubernetes/main/apps/media/autobrr/app/resources/lokirule.yaml b/kubernetes/main/apps/media/autobrr/app/resources/lokirule.yaml new file mode 100755 index 000000000..e478ebbb8 --- /dev/null +++ b/kubernetes/main/apps/media/autobrr/app/resources/lokirule.yaml @@ -0,0 +1,14 @@ +--- +groups: + - name: autobrr + rules: + - alert: AutobrrNetworkUnhealthy + expr: | + sum by (app) (count_over_time({app="autobrr"} |~ "(?i)network unhealthy"[2m])) > 0 + for: 2m + labels: + severity: critical + category: logs + annotations: + app: "{{ $labels.app }}" + summary: "{{ $labels.app }} has an unhealthy network" diff --git a/kubernetes/main/apps/media/autobrr/ks.yaml b/kubernetes/main/apps/media/autobrr/ks.yaml new file mode 100755 index 000000000..347c40c72 --- /dev/null +++ b/kubernetes/main/apps/media/autobrr/ks.yaml @@ -0,0 +1,27 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app autobrr + namespace: flux-system +spec: + targetNamespace: media + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: cloudnative-pg-cluster + - name: external-secrets-stores + path: ./kubernetes/main/apps/media/autobrr/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m + postBuild: + substitute: + APP: *app + GATUS_PATH: /api/healthz/liveness diff --git a/kubernetes/main/apps/media/bazarr/app/externalsecret.yaml b/kubernetes/main/apps/media/bazarr/app/externalsecret.yaml new file mode 100755 index 000000000..aeb5f220f --- /dev/null +++ b/kubernetes/main/apps/media/bazarr/app/externalsecret.yaml @@ -0,0 +1,357 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret bazarr-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + PLEX_TOKEN: "{{ .PLEX_TOKEN }}" + POSTGRES_USER: "{{ .BAZARR_POSTGRES_USER }}" + POSTGRES_PASS: "{{ .BAZARR_POSTGRES_PASS }}" + POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}" + dataFrom: + - extract: + key: secrets/plex + - extract: + key: secrets/cloudnative-pg +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret bazarr-config +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + config.yaml: | + addic7ed: + cookies: '' + password: '' + user_agent: '' + username: '' + vip: false + analytics: + enabled: false + anidb: + api_client: '' + api_client_ver: 1 + animetosho:
+ anidb_api_client: '' + anidb_api_client_ver: 1 + search_threshold: 6 + anticaptcha: + anti_captcha_key: '' + assrt: + token: '' + auth: + apikey: "{{ .BAZARR_API_KEY}}" + password: '' + type: null + username: '' + avistaz: + cookies: '' + user_agent: '' + backup: + day: 6 + folder: /config/backup + frequency: Weekly + hour: 3 + retention: 31 + betaseries: + token: '' + cinemaz: + cookies: '' + user_agent: '' + cors: + enabled: false + deathbycaptcha: + password: '' + username: '' + embeddedsubtitles: + fallback_lang: en + hi_fallback: true + included_codecs: [] + timeout: 600 + unknown_as_fallback: false + general: + adaptive_searching: true + adaptive_searching_delay: 3w + adaptive_searching_delta: 1w + anti_captcha_provider: null + auto_update: false + base_url: '' + branch: master + chmod: '0640' + chmod_enabled: false + days_to_upgrade_subs: 7 + debug: false + default_und_audio_lang: '' + default_und_embedded_subtitles_lang: '' + dont_notify_manual_actions: false + embedded_subs_show_desired: true + embedded_subtitles_parser: ffprobe + enabled_integrations: [] + enabled_providers: + - subssabbz + - subsunacs + - yavkanet + - embeddedsubtitles + - opensubtitlescom + flask_secret_key: "{{ .FLASK_SECRET_KEY }}" + hi_extension: hi + ignore_ass_subs: false + ignore_pgs_subs: false + ignore_vobsub_subs: false + ip: '*' + language_equals: [] + minimum_score: 90 + minimum_score_movie: 70 + movie_default_enabled: true + movie_default_profile: 1 + movie_tag_enabled: false + multithreading: true + page_size: 25 + parse_embedded_audio_track: false + path_mappings: [] + path_mappings_movie: [] + port: 6767 + postprocessing_cmd: /scripts/subcleaner.sh {{"{{"}}subtitles{{"}}"}} + postprocessing_threshold: 90 + postprocessing_threshold_movie: 70 + remove_profile_tags: [] + serie_default_enabled: true + serie_default_profile: 1 + serie_tag_enabled: false + single_language: false + skip_hashing: false + subfolder: current + subfolder_custom: '' + subzero_mods: remove_HI + theme: auto + upgrade_frequency: 12 + upgrade_manual: true + upgrade_subs: true + use_embedded_subs: false + use_postprocessing: true + use_postprocessing_threshold: false + use_postprocessing_threshold_movie: false + use_radarr: true + use_scenename: true + use_sonarr: true + utf8_encode: true + wanted_search_frequency: 6 + wanted_search_frequency_movie: 6 + hdbits: + passkey: '' + username: '' + jimaku: + api_key: '' + enable_ai_subs: false + enable_archives_download: false + enable_name_search_fallback: true + karagarga: + f_password: '' + f_username: '' + password: '' + username: '' + ktuvit: + email: '' + hashed_password: '' + legendasdivx: + password: '' + skip_wrong_fps: false + username: '' + legendasnet: + password: '' + username: '' + log: + exclude_filter: '' + ignore_case: false + include_filter: '' + use_regex: false + movie_scores: + audio_codec: 3 + edition: 1 + hash: 119 + hearing_impaired: 1 + release_group: 13 + resolution: 2 + source: 7 + streaming_service: 1 + title: 60 + video_codec: 2 + year: 30 + napisy24: + password: '' + username: '' + opensubtitles: + password: '' + skip_wrong_fps: false + ssl: false + timeout: 15 + use_tag_search: false + username: '' + vip: false + opensubtitlescom: + include_ai_translated: false + password: "{{ .OPENSUBTITLES_PASS }}" + use_hash: true + username: "{{ .OPENSUBTITLES_USER }}" + podnapisi: + verify_ssl: true + postgresql: + database: '' + enabled: false + host: localhost + password: '' + port: 5432 + username: '' + proxy: + exclude: + - localhost + - 127.0.0.1 + password: '' 
+ port: '' + type: null + url: '' + username: '' + radarr: + apikey: "{{ .RADARR_API_KEY }}" + base_url: '' + defer_search_signalr: false + excluded_tags: [] + full_update: Daily + full_update_day: 6 + full_update_hour: 4 + http_timeout: 60 + ip: radarr.${PUBLIC_DOMAIN} + movies_sync: 60 + only_monitored: false + port: 443 + ssl: true + sync_only_monitored_movies: false + use_ffprobe_cache: true + series_scores: + audio_codec: 3 + episode: 30 + hash: 359 + hearing_impaired: 1 + release_group: 14 + resolution: 2 + season: 30 + series: 180 + source: 7 + streaming_service: 1 + video_codec: 2 + year: 90 + sonarr: + apikey: "{{ .SONARR_API_KEY }}" + base_url: '' + defer_search_signalr: false + exclude_season_zero: false + excluded_series_types: [] + excluded_tags: [] + full_update: Daily + full_update_day: 6 + full_update_hour: 4 + http_timeout: 60 + ip: sonarr.${PUBLIC_DOMAIN} + only_monitored: false + port: 443 + series_sync: 60 + ssl: true + sync_only_monitored_episodes: false + sync_only_monitored_series: false + use_ffprobe_cache: true + subdl: + api_key: '' + subf2m: + user_agent: '' + verify_ssl: true + subsync: + checker: + blacklisted_languages: [] + blacklisted_providers: [] + debug: false + force_audio: false + gss: true + max_offset_seconds: 60 + no_fix_framerate: false + subsync_movie_threshold: 99 + subsync_threshold: 99 + use_subsync: true + use_subsync_movie_threshold: true + use_subsync_threshold: true + titlovi: + password: '' + username: '' + titulky: + approved_only: false + password: '' + username: '' + whisperai: + endpoint: http://127.0.0.1:9000 + loglevel: INFO + response: 5 + timeout: 3600 + xsubs: + password: '' + username: '' + dataFrom: + - extract: + key: secrets/opensubtitles + - extract: + key: secrets/api-keys +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret bazarr-gatus-ep +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + metadata: + labels: + gatus.io/enabled: "true" + engineVersion: v2 + data: + config.yaml: | + endpoints: + - name: "Bazarr" + group: guarded + url: "https://bazarr.${PUBLIC_DOMAIN}/api/system/status" + interval: 1m + ui: + hide-hostname: true + hide-url: true + client: + dns-resolver: tcp://172.17.0.10:53 + conditions: + - "[STATUS] == 200" + - "[BODY].data.bazarr_directory == /app/bin" + alerts: + - type: pushover + headers: + X-API-KEY: {{ .BAZARR_API_KEY }} + dataFrom: + - extract: + key: secrets/api-keys diff --git a/kubernetes/main/apps/media/bazarr/app/helmrelease.yaml b/kubernetes/main/apps/media/bazarr/app/helmrelease.yaml new file mode 100755 index 000000000..28546c50d --- /dev/null +++ b/kubernetes/main/apps/media/bazarr/app/helmrelease.yaml @@ -0,0 +1,201 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: bazarr +spec: + interval: 30m + chart: + spec: + verify: + provider: cosign + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + controllers: + bazarr: + annotations: + reloader.stakater.com/auto: "true" + initContainers: + init-db: 
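+            # Runs ghcr.io/onedr0p/postgres-init (below) to create the bazarr role and database on the CloudNativePG cluster before the app container starts.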
+ image: + repository: ghcr.io/onedr0p/postgres-init + tag: 16.6@sha256:35353a77777ee8f634d0f3945f495b4a40065134b8619e0d18bd49b0ee9c855b + securityContext: &securityContext + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + env: + INIT_POSTGRES_HOST: &dbHost postgres17-rw.database.svc.cluster.local + INIT_POSTGRES_PORT: &dbPort 5432 + INIT_POSTGRES_DBNAME: &dbName bazarr + INIT_POSTGRES_SUPER_PASS: + valueFrom: + secretKeyRef: + name: &secret bazarr-secret + key: POSTGRES_SUPER_PASS + INIT_POSTGRES_USER: + valueFrom: + secretKeyRef: + name: *secret + key: POSTGRES_USER + INIT_POSTGRES_PASS: + valueFrom: + secretKeyRef: + name: *secret + key: POSTGRES_PASS + containers: + app: + image: + repository: ghcr.io/darkfella91/bazarr + tag: 1.4.5@sha256:1e3ca601b51bb061e342ab8da9ee79062a51f254b50fcc43756299a29647d3aa + env: + TZ: Europe/Sofia + POSTGRES_ENABLED: "true" + POSTGRES_HOST: *dbHost + POSTGRES_PORT: *dbPort + POSTGRES_DATABASE: *dbName + POSTGRES_USERNAME: + valueFrom: + secretKeyRef: + name: *secret + key: POSTGRES_USER + POSTGRES_PASSWORD: + valueFrom: + secretKeyRef: + name: *secret + key: POSTGRES_PASS + PLEX_TOKEN: + valueFrom: + secretKeyRef: + name: *secret + key: PLEX_TOKEN + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /health + port: &port 6767 + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + securityContext: *securityContext + lifecycle: + postStart: + exec: + command: ["/bin/sh", "-c", "mkdir -p /config/config && cp /secret/config.yaml /config/config/config.yaml"] + resources: + requests: + cpu: 10m + limits: + memory: 1Gi + subcleaner: + image: + repository: registry.k8s.io/git-sync/git-sync + tag: v4.3.0@sha256:5813a7da0ccd58f6dfb9d5e48480e2877355e6bb3d7d81c8908eb1adc3a23b6e + env: + GITSYNC_REPO: https://github.com/KBlixt/subcleaner + GITSYNC_REF: master + GITSYNC_PERIOD: 24h + GITSYNC_ROOT: /add-ons + resources: + requests: + cpu: 10m + limits: + memory: 128Mi + securityContext: *securityContext + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 + fsGroupChangePolicy: OnRootMismatch + supplementalGroups: [10000] + seccompProfile: { type: RuntimeDefault } + service: + app: + controller: bazarr + ports: + http: + port: *port + ingress: + app: + annotations: + external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN} + cert-manager.io/cluster-issuer: zerossl-prod + cert-manager.io/private-key-rotation-policy: Always + cert-manager.io/private-key-algorithm: ECDSA + cert-manager.io/private-key-size: "384" + nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth" + nginx.ingress.kubernetes.io/auth-signin: "https://$host/oauth2/start?rd=$escaped_request_uri" + nginx.ingress.kubernetes.io/auth-snippet: | + if ($request_uri ~* "^/api(/|$)") { + return 200; + } + className: internal + hosts: + - host: &host "{{ .Release.Name }}.${PUBLIC_DOMAIN}" + paths: + - path: / + service: + identifier: app + port: http + tls: + - hosts: + - *host + secretName: bazarr-tls + persistence: + add-ons: + type: emptyDir + config: + type: emptyDir + secret-file: + type: secret + name: bazarr-config + globalMounts: + - readOnly: true + path: /secret/config.yaml + subPath: config.yaml + media: + type: nfs + server: 192.168.91.40 + path: /mnt/exos20/data + globalMounts: + - path: /data/media + subPath: media + scripts: + type: configMap + name: bazarr-scripts + defaultMode: 0775
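+        # defaultMode 0775 keeps the ConfigMap-mounted scripts (subcleaner.sh) executable inside the container.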
+ globalMounts: + - readOnly: true + tmp: + type: emptyDir + + test: + type: configMap + name: connectionpool + advancedMounts: + bazarr: + app: + - readOnly: true + path: /app/bin/libs/urllib3/connectionpool.py + subPath: connectionpool.py diff --git a/kubernetes/main/apps/media/bazarr/app/kustomization.yaml b/kubernetes/main/apps/media/bazarr/app/kustomization.yaml new file mode 100755 index 000000000..cc82034b7 --- /dev/null +++ b/kubernetes/main/apps/media/bazarr/app/kustomization.yaml @@ -0,0 +1,18 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml +configMapGenerator: + - name: bazarr-scripts + files: + - subcleaner.sh=./resources/subcleaner.sh + - name: connectionpool + files: + - connectionpool.py=./resources/connectionpool.py +generatorOptions: + disableNameSuffixHash: true + annotations: + kustomize.toolkit.fluxcd.io/substitute: disabled diff --git a/kubernetes/main/apps/media/bazarr/app/resources/connectionpool.py b/kubernetes/main/apps/media/bazarr/app/resources/connectionpool.py new file mode 100755 index 000000000..6ab8ecdfb --- /dev/null +++ b/kubernetes/main/apps/media/bazarr/app/resources/connectionpool.py @@ -0,0 +1,1182 @@ +from __future__ import annotations + +import errno +import logging +import queue +import sys +import typing +import warnings +import weakref +import ssl +from socket import timeout as SocketTimeout +from types import TracebackType + +from ._base_connection import _TYPE_BODY +from ._collections import HTTPHeaderDict +from ._request_methods import RequestMethods +from .connection import ( + BaseSSLError, + BrokenPipeError, + DummyConnection, + HTTPConnection, + HTTPException, + HTTPSConnection, + ProxyConfig, + _wrap_proxy_error, +) +from .connection import port_by_scheme as port_by_scheme +from .exceptions import ( + ClosedPoolError, + EmptyPoolError, + FullPoolError, + HostChangedError, + InsecureRequestWarning, + LocationValueError, + MaxRetryError, + NewConnectionError, + ProtocolError, + ProxyError, + ReadTimeoutError, + SSLError, + TimeoutError, +) +from .response import BaseHTTPResponse +from .util.connection import is_connection_dropped +from .util.proxy import connection_requires_http_tunnel +from .util.request import _TYPE_BODY_POSITION, set_file_position +from .util.retry import Retry +from .util.ssl_match_hostname import CertificateError +from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_DEFAULT, Timeout +from .util.url import Url, _encode_target +from .util.url import _normalize_host as normalize_host +from .util.url import parse_url +from .util.util import to_str + +if typing.TYPE_CHECKING: + import ssl + from typing import Literal + + from ._base_connection import BaseHTTPConnection, BaseHTTPSConnection + +log = logging.getLogger(__name__) + +_TYPE_TIMEOUT = typing.Union[Timeout, float, _TYPE_DEFAULT, None] + +_SelfT = typing.TypeVar("_SelfT") + + +# Pool objects +class ConnectionPool: + """ + Base class for all connection pools, such as + :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. + + .. note:: + ConnectionPool.urlopen() does not normalize or percent-encode target URIs + which is useful if your target server doesn't support percent-encoded + target URIs. 
+ """ + + scheme: str | None = None + QueueCls = queue.LifoQueue + + def __init__(self, host: str, port: int | None = None) -> None: + if not host: + raise LocationValueError("No host specified.") + + self.host = _normalize_host(host, scheme=self.scheme) + self.port = port + + # This property uses 'normalize_host()' (not '_normalize_host()') + # to avoid removing square braces around IPv6 addresses. + # This value is sent to `HTTPConnection.set_tunnel()` if called + # because square braces are required for HTTP CONNECT tunneling. + self._tunnel_host = normalize_host(host, scheme=self.scheme).lower() + + def __str__(self) -> str: + return f"{type(self).__name__}(host={self.host!r}, port={self.port!r})" + + def __enter__(self: _SelfT) -> _SelfT: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> Literal[False]: + self.close() + # Return False to re-raise any potential exceptions + return False + + def close(self) -> None: + """ + Close all pooled connections and disable the pool. + """ + + +# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 +_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK} + + +class HTTPConnectionPool(ConnectionPool, RequestMethods): + """ + Thread-safe connection pool for one host. + + :param host: + Host used for this HTTP Connection (e.g. "localhost"), passed into + :class:`http.client.HTTPConnection`. + + :param port: + Port used for this HTTP Connection (None is equivalent to 80), passed + into :class:`http.client.HTTPConnection`. + + :param timeout: + Socket timeout in seconds for each individual connection. This can + be a float or integer, which sets the timeout for the HTTP request, + or an instance of :class:`urllib3.util.Timeout` which gives you more + fine-grained control over request timeouts. After the constructor has + been parsed, this is always a `urllib3.util.Timeout` object. + + :param maxsize: + Number of connections to save that can be reused. More than 1 is useful + in multithreaded situations. If ``block`` is set to False, more + connections will be created but they will not be saved once they've + been used. + + :param block: + If set to True, no more than ``maxsize`` connections will be used at + a time. When no free connections are available, the call will block + until a connection has been released. This is a useful side effect for + particular multithreaded situations where one does not want to use more + than maxsize connections per host to prevent flooding. + + :param headers: + Headers to include with all requests, unless other headers are given + explicitly. + + :param retries: + Retry configuration to use by default with requests in this pool. + + :param _proxy: + Parsed proxy URL, should not be used directly, instead, see + :class:`urllib3.ProxyManager` + + :param _proxy_headers: + A dictionary with proxy headers, should not be used directly, + instead, see :class:`urllib3.ProxyManager` + + :param \\**conn_kw: + Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`, + :class:`urllib3.connection.HTTPSConnection` instances. 
+ """ + + scheme = "http" + ConnectionCls: ( + type[BaseHTTPConnection] | type[BaseHTTPSConnection] + ) = HTTPConnection + + def __init__( + self, + host: str, + port: int | None = None, + timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT, + maxsize: int = 1, + block: bool = False, + headers: typing.Mapping[str, str] | None = None, + retries: Retry | bool | int | None = None, + _proxy: Url | None = None, + _proxy_headers: typing.Mapping[str, str] | None = None, + _proxy_config: ProxyConfig | None = None, + **conn_kw: typing.Any, + ): + ConnectionPool.__init__(self, host, port) + RequestMethods.__init__(self, headers) + + if not isinstance(timeout, Timeout): + timeout = Timeout.from_float(timeout) + + if retries is None: + retries = Retry.DEFAULT + + self.timeout = timeout + self.retries = retries + + self.pool: queue.LifoQueue[typing.Any] | None = self.QueueCls(maxsize) + self.block = block + + self.proxy = _proxy + self.proxy_headers = _proxy_headers or {} + self.proxy_config = _proxy_config + + # Fill the queue up so that doing get() on it will block properly + for _ in range(maxsize): + self.pool.put(None) + + # These are mostly for testing and debugging purposes. + self.num_connections = 0 + self.num_requests = 0 + self.conn_kw = conn_kw + + if self.proxy: + # Enable Nagle's algorithm for proxies, to avoid packet fragmentation. + # We cannot know if the user has added default socket options, so we cannot replace the + # list. + self.conn_kw.setdefault("socket_options", []) + + self.conn_kw["proxy"] = self.proxy + self.conn_kw["proxy_config"] = self.proxy_config + + # Do not pass 'self' as callback to 'finalize'. + # Then the 'finalize' would keep an endless living (leak) to self. + # By just passing a reference to the pool allows the garbage collector + # to free self if nobody else has a reference to it. + pool = self.pool + + # Close all the HTTPConnections in the pool before the + # HTTPConnectionPool object is garbage collected. + weakref.finalize(self, _close_pool_connections, pool) + + def _new_conn(self) -> BaseHTTPConnection: + """ + Return a fresh :class:`HTTPConnection`. + """ + self.num_connections += 1 + log.debug( + "Starting new HTTP connection (%d): %s:%s", + self.num_connections, + self.host, + self.port or "80", + ) + + conn = self.ConnectionCls( + host=self.host, + port=self.port, + timeout=self.timeout.connect_timeout, + **self.conn_kw, + ) + return conn + + def _get_conn(self, timeout: float | None = None) -> BaseHTTPConnection: + """ + Get a connection. Will return a pooled connection if one is available. + + If no connections are available and :prop:`.block` is ``False``, then a + fresh connection is returned. + + :param timeout: + Seconds to wait before giving up and raising + :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and + :prop:`.block` is ``True``. 
+ """ + conn = None + + if self.pool is None: + raise ClosedPoolError(self, "Pool is closed.") + + try: + conn = self.pool.get(block=self.block, timeout=timeout) + + except AttributeError: # self.pool is None + raise ClosedPoolError(self, "Pool is closed.") from None # Defensive: + + except queue.Empty: + if self.block: + raise EmptyPoolError( + self, + "Pool is empty and a new connection can't be opened due to blocking mode.", + ) from None + pass # Oh well, we'll create a new connection then + + # If this is a persistent connection, check if it got disconnected + if conn and is_connection_dropped(conn): + log.debug("Resetting dropped connection: %s", self.host) + conn.close() + + return conn or self._new_conn() + + def _put_conn(self, conn: BaseHTTPConnection | None) -> None: + """ + Put a connection back into the pool. + + :param conn: + Connection object for the current host and port as returned by + :meth:`._new_conn` or :meth:`._get_conn`. + + If the pool is already full, the connection is closed and discarded + because we exceeded maxsize. If connections are discarded frequently, + then maxsize should be increased. + + If the pool is closed, then the connection will be closed and discarded. + """ + if self.pool is not None: + try: + self.pool.put(conn, block=False) + return # Everything is dandy, done. + except AttributeError: + # self.pool is None. + pass + except queue.Full: + # Connection never got put back into the pool, close it. + if conn: + conn.close() + + if self.block: + # This should never happen if you got the conn from self._get_conn + raise FullPoolError( + self, + "Pool reached maximum size and no more connections are allowed.", + ) from None + + log.warning( + "Connection pool is full, discarding connection: %s. Connection pool size: %s", + self.host, + self.pool.qsize(), + ) + + # Connection never got put back into the pool, close it. + if conn: + conn.close() + + def _validate_conn(self, conn: BaseHTTPConnection) -> None: + """ + Called right before a request is made, after the socket is created. + """ + + def _prepare_proxy(self, conn: BaseHTTPConnection) -> None: + # Nothing to do for HTTP connections. + pass + + def _get_timeout(self, timeout: _TYPE_TIMEOUT) -> Timeout: + """Helper that always returns a :class:`urllib3.util.Timeout`""" + if timeout is _DEFAULT_TIMEOUT: + return self.timeout.clone() + + if isinstance(timeout, Timeout): + return timeout.clone() + else: + # User passed us an int/float. This is for backwards compatibility, + # can be removed later + return Timeout.from_float(timeout) + + def _raise_timeout( + self, + err: BaseSSLError | OSError | SocketTimeout, + url: str, + timeout_value: _TYPE_TIMEOUT | None, + ) -> None: + """Is the error actually a timeout? Will raise a ReadTimeout or pass""" + + if isinstance(err, SocketTimeout): + raise ReadTimeoutError( + self, url, f"Read timed out. (read timeout={timeout_value})" + ) from err + + # See the above comment about EAGAIN in Python 3. + if hasattr(err, "errno") and err.errno in _blocking_errnos: + raise ReadTimeoutError( + self, url, f"Read timed out. 
(read timeout={timeout_value})" + ) from err + + def _make_request( + self, + conn: BaseHTTPConnection, + method: str, + url: str, + body: _TYPE_BODY | None = None, + headers: typing.Mapping[str, str] | None = None, + retries: Retry | None = None, + timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, + chunked: bool = False, + response_conn: BaseHTTPConnection | None = None, + preload_content: bool = True, + decode_content: bool = True, + enforce_content_length: bool = True, + ) -> BaseHTTPResponse: + """ + Perform a request on a given urllib connection object taken from our + pool. + + :param conn: + a connection from one of our connection pools + + :param method: + HTTP request method (such as GET, POST, PUT, etc.) + + :param url: + The URL to perform the request on. + + :param body: + Data to send in the request body, either :class:`str`, :class:`bytes`, + an iterable of :class:`str`/:class:`bytes`, or a file-like object. + + :param headers: + Dictionary of custom headers to send, such as User-Agent, + If-None-Match, etc. If None, pool headers are used. If provided, + these headers completely replace any pool-specific headers. + + :param retries: + Configure the number of retries to allow before raising a + :class:`~urllib3.exceptions.MaxRetryError` exception. + + Pass ``None`` to retry until you receive a response. Pass a + :class:`~urllib3.util.retry.Retry` object for fine-grained control + over different types of retries. + Pass an integer number to retry connection errors that many times, + but no other types of errors. Pass zero to never retry. + + If ``False``, then retries are disabled and any exception is raised + immediately. Also, instead of raising a MaxRetryError on redirects, + the redirect response will be returned. + + :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int. + + :param timeout: + If specified, overrides the default timeout for this one + request. It may be a float (in seconds) or an instance of + :class:`urllib3.util.Timeout`. + + :param chunked: + If True, urllib3 will send the body using chunked transfer + encoding. Otherwise, urllib3 will send the body using the standard + content-length form. Defaults to False. + + :param response_conn: + Set this to ``None`` if you will handle releasing the connection or + set the connection to have the response release it. + + :param preload_content: + If True, the response's body will be preloaded during construction. + + :param decode_content: + If True, will attempt to decode the body based on the + 'content-encoding' header. + + :param enforce_content_length: + Enforce content length checking. Body returned by server must match + value of Content-Length header, if present. Otherwise, raise error. + """ + self.num_requests += 1 + + timeout_obj = self._get_timeout(timeout) + timeout_obj.start_connect() + conn.timeout = Timeout.resolve_default_timeout(timeout_obj.connect_timeout) + + try: + # Trigger any extra validation we need to do. + try: + self._validate_conn(conn) + except (SocketTimeout, BaseSSLError) as e: + self._raise_timeout(err=e, url=url, timeout_value=conn.timeout) + raise + + # _validate_conn() starts the connection to an HTTPS proxy + # so we need to wrap errors with 'ProxyError' here too. 
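+            # BaseSSLError and CertificateError are normalized to urllib3's SSLError in the handler below, so callers only need to catch one TLS failure type.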
+        except ( + OSError, + NewConnectionError, + TimeoutError, + BaseSSLError, + CertificateError, + SSLError, + ) as e: + new_e: Exception = e + if isinstance(e, (BaseSSLError, CertificateError)): + new_e = SSLError(e) + # If the connection didn't successfully connect to its proxy, + # wrap the error so the failure is reported as a ProxyError. + if isinstance( + new_e, (OSError, NewConnectionError, TimeoutError, SSLError) + ) and (conn and conn.proxy and not conn.has_connected_to_proxy): + new_e = _wrap_proxy_error(new_e, conn.proxy.scheme) + raise new_e + + # conn.request() calls http.client.*.request, not the method in + # urllib3.request. It also calls makefile (recv) on the socket. + try: + conn.request( + method, + url, + body=body, + headers=headers, + chunked=chunked, + preload_content=preload_content, + decode_content=decode_content, + enforce_content_length=enforce_content_length, + ) + + # We are swallowing BrokenPipeError (errno.EPIPE) since the server is + # legitimately able to close the connection after sending a valid response. + # With this behaviour, the received response is still readable. + except BrokenPipeError: + pass + except OSError as e: + # MacOS/Linux + # EPROTOTYPE and ECONNRESET are needed on macOS + # https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/ + # Condition changed later to emit ECONNRESET instead of only EPROTOTYPE. + if e.errno != errno.EPROTOTYPE and e.errno != errno.ECONNRESET: + raise + + # Reset the timeout for the recv() on the socket + read_timeout = timeout_obj.read_timeout + + if not conn.is_closed: + # In Python 3 socket.py will catch EAGAIN and return None when you + # try and read into the file pointer created by http.client, which + # instead raises a BadStatusLine exception. Instead of catching + # the exception and assuming all BadStatusLine exceptions are read + # timeouts, check for a zero timeout before making the request. + if read_timeout == 0: + raise ReadTimeoutError( + self, url, f"Read timed out. (read timeout={read_timeout})" + ) + conn.timeout = read_timeout + + # Receive the response from the server + try: + response = conn.getresponse() + except (BaseSSLError, OSError) as e: + self._raise_timeout(err=e, url=url, timeout_value=read_timeout) + raise + + # Set properties that are used by the pooling layer. + response.retries = retries + response._connection = response_conn  # type: ignore[attr-defined] + response._pool = self  # type: ignore[attr-defined] + + # emscripten connection doesn't have _http_vsn_str + http_version = getattr(conn, "_http_vsn_str", "HTTP/?") + log.debug( + '%s://%s:%s "%s %s %s" %s %s', + self.scheme, + self.host, + self.port, + method, + url, + # HTTP version + http_version, + response.status, + response.length_remaining, + ) + + return response + + def close(self) -> None: + """ + Close all pooled connections and disable the pool. + """ + if self.pool is None: + return + # Disable access to the pool + old_pool, self.pool = self.pool, None + + # Close all the HTTPConnections in the pool. + _close_pool_connections(old_pool) + + def is_same_host(self, url: str) -> bool: + """ + Check if the given ``url`` is a member of the same host as this + connection pool. + """ + if url.startswith("/"): + return True + + # TODO: Add optional support for socket.gethostbyname checking.
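+        # e.g. a pool bound to http://example.com:80 treats both "/path" and "http://example.com/path" as the same host, since missing ports are normalized to the scheme default below.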
+ scheme, _, host, port, *_ = parse_url(url) + scheme = scheme or "http" + if host is not None: + host = _normalize_host(host, scheme=scheme) + + # Use explicit default port for comparison when none is given + if self.port and not port: + port = port_by_scheme.get(scheme) + elif not self.port and port == port_by_scheme.get(scheme): + port = None + + return (scheme, host, port) == (self.scheme, self.host, self.port) + + def urlopen( # type: ignore[override] + self, + method: str, + url: str, + body: _TYPE_BODY | None = None, + headers: typing.Mapping[str, str] | None = None, + retries: Retry | bool | int | None = None, + redirect: bool = True, + assert_same_host: bool = True, + timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, + pool_timeout: int | None = None, + release_conn: bool | None = None, + chunked: bool = False, + body_pos: _TYPE_BODY_POSITION | None = None, + preload_content: bool = True, + decode_content: bool = True, + **response_kw: typing.Any, + ) -> BaseHTTPResponse: + """ + Get a connection from the pool and perform an HTTP request. This is the + lowest level call for making a request, so you'll need to specify all + the raw details. + + .. note:: + + More commonly, it's appropriate to use a convenience method + such as :meth:`request`. + + .. note:: + + `release_conn` will only behave as expected if + `preload_content=False` because we want to make + `preload_content=False` the default behaviour someday soon without + breaking backwards compatibility. + + :param method: + HTTP request method (such as GET, POST, PUT, etc.) + + :param url: + The URL to perform the request on. + + :param body: + Data to send in the request body, either :class:`str`, :class:`bytes`, + an iterable of :class:`str`/:class:`bytes`, or a file-like object. + + :param headers: + Dictionary of custom headers to send, such as User-Agent, + If-None-Match, etc. If None, pool headers are used. If provided, + these headers completely replace any pool-specific headers. + + :param retries: + Configure the number of retries to allow before raising a + :class:`~urllib3.exceptions.MaxRetryError` exception. + + If ``None`` (default) will retry 3 times, see ``Retry.DEFAULT``. Pass a + :class:`~urllib3.util.retry.Retry` object for fine-grained control + over different types of retries. + Pass an integer number to retry connection errors that many times, + but no other types of errors. Pass zero to never retry. + + If ``False``, then retries are disabled and any exception is raised + immediately. Also, instead of raising a MaxRetryError on redirects, + the redirect response will be returned. + + :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int. + + :param redirect: + If True, automatically handle redirects (status codes 301, 302, + 303, 307, 308). Each redirect counts as a retry. Disabling retries + will disable redirect, too. + + :param assert_same_host: + If ``True``, will make sure that the host of the pool requests is + consistent else will raise HostChangedError. When ``False``, you can + use the pool on an HTTP proxy and request foreign hosts. + + :param timeout: + If specified, overrides the default timeout for this one + request. It may be a float (in seconds) or an instance of + :class:`urllib3.util.Timeout`. + + :param pool_timeout: + If set and the pool is set to block=True, then this method will + block for ``pool_timeout`` seconds and raise EmptyPoolError if no + connection is available within the time period. 
+ + :param bool preload_content: + If True, the response's body will be preloaded into memory. + + :param bool decode_content: + If True, will attempt to decode the body based on the + 'content-encoding' header. + + :param release_conn: + If False, then the urlopen call will not release the connection + back into the pool once a response is received (but will release if + you read the entire contents of the response such as when + `preload_content=True`). This is useful if you're not preloading + the response's content immediately. You will need to call + ``r.release_conn()`` on the response ``r`` to return the connection + back into the pool. If None, it takes the value of ``preload_content`` + which defaults to ``True``. + + :param bool chunked: + If True, urllib3 will send the body using chunked transfer + encoding. Otherwise, urllib3 will send the body using the standard + content-length form. Defaults to False. + + :param int body_pos: + Position to seek to in file-like body in the event of a retry or + redirect. Typically this won't need to be set because urllib3 will + auto-populate the value when needed. + """ + parsed_url = parse_url(url) + destination_scheme = parsed_url.scheme + + if headers is None: + headers = self.headers + + if not isinstance(retries, Retry): + retries = Retry.from_int(retries, redirect=redirect, default=self.retries) + + if release_conn is None: + release_conn = preload_content + + # Check host + if assert_same_host and not self.is_same_host(url): + raise HostChangedError(self, url, retries) + + # Ensure that the URL we're connecting to is properly encoded + if url.startswith("/"): + url = to_str(_encode_target(url)) + else: + url = to_str(parsed_url.url) + + conn = None + + # Track whether `conn` needs to be released before + # returning/raising/recursing. Update this variable if necessary, and + # leave `release_conn` constant throughout the function. That way, if + # the function recurses, the original value of `release_conn` will be + # passed down into the recursive call, and its value will be respected. + # + # See issue #651 [1] for details. + # + # [1] + release_this_conn = release_conn + + http_tunnel_required = connection_requires_http_tunnel( + self.proxy, self.proxy_config, destination_scheme + ) + + # Merge the proxy headers. Only done when not using HTTP CONNECT. We + # have to copy the headers dict so we can safely change it without those + # changes being reflected in anyone else's copy. + if not http_tunnel_required: + headers = headers.copy() # type: ignore[attr-defined] + headers.update(self.proxy_headers) # type: ignore[union-attr] + + # Must keep the exception bound to a separate variable or else Python 3 + # complains about UnboundLocalError. + err = None + + # Keep track of whether we cleanly exited the except block. This + # ensures we do proper cleanup in finally. + clean_exit = False + + # Rewind body position, if needed. Record current position + # for future rewinds in the event of a redirect/retry. + body_pos = set_file_position(body, body_pos) + + try: + # Request a connection from the queue. + timeout_obj = self._get_timeout(timeout) + conn = self._get_conn(timeout=pool_timeout) + + conn.timeout = timeout_obj.connect_timeout # type: ignore[assignment] + + # Is this a closed/new connection that requires CONNECT tunnelling? 
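+            # If so, _prepare_proxy() sends the CONNECT request (and, for HTTPS proxies, performs the TLS handshake) before the actual request is made over the tunnel.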
+ if self.proxy is not None and http_tunnel_required and conn.is_closed: + try: + self._prepare_proxy(conn) + except (BaseSSLError, OSError, SocketTimeout) as e: + self._raise_timeout( + err=e, url=self.proxy.url, timeout_value=conn.timeout + ) + raise + + # If we're going to release the connection in ``finally:``, then + # the response doesn't need to know about the connection. Otherwise + # it will also try to release it and we'll have a double-release + # mess. + response_conn = conn if not release_conn else None + + # Make the request on the HTTPConnection object + response = self._make_request( + conn, + method, + url, + timeout=timeout_obj, + body=body, + headers=headers, + chunked=chunked, + retries=retries, + response_conn=response_conn, + preload_content=preload_content, + decode_content=decode_content, + **response_kw, + ) + + # Everything went great! + clean_exit = True + + except EmptyPoolError: + # Didn't get a connection from the pool, no need to clean up + clean_exit = True + release_this_conn = False + raise + + except ( + TimeoutError, + HTTPException, + OSError, + ProtocolError, + BaseSSLError, + SSLError, + CertificateError, + ProxyError, + ) as e: + # Discard the connection for these exceptions. It will be + # replaced during the next _get_conn() call. + clean_exit = False + new_e: Exception = e + if isinstance(e, (BaseSSLError, CertificateError)): + new_e = SSLError(e) + if isinstance( + new_e, + ( + OSError, + NewConnectionError, + TimeoutError, + SSLError, + HTTPException, + ), + ) and (conn and conn.proxy and not conn.has_connected_to_proxy): + new_e = _wrap_proxy_error(new_e, conn.proxy.scheme) + elif isinstance(new_e, (OSError, HTTPException)): + new_e = ProtocolError("Connection aborted.", new_e) + + retries = retries.increment( + method, url, error=new_e, _pool=self, _stacktrace=sys.exc_info()[2] + ) + retries.sleep() + + # Keep track of the error for the retry warning. + err = e + + finally: + if not clean_exit: + # We hit some kind of exception, handled or otherwise. We need + # to throw the connection away unless explicitly told not to. + # Close the connection, set the variable to None, and make sure + # we put the None back in the pool to avoid leaking it. + if conn: + conn.close() + conn = None + release_this_conn = True + + if release_this_conn: + # Put the connection back to be reused. If the connection is + # expired then it will be None, which will get replaced with a + # fresh connection during _get_conn. + self._put_conn(conn) + + if not conn: + # Try again + log.warning( + "Retrying (%r) after connection broken by '%r': %s", retries, err, url + ) + return self.urlopen( + method, + url, + body, + headers, + retries, + redirect, + assert_same_host, + timeout=timeout, + pool_timeout=pool_timeout, + release_conn=release_conn, + chunked=chunked, + body_pos=body_pos, + preload_content=preload_content, + decode_content=decode_content, + **response_kw, + ) + + # Handle redirect? + redirect_location = redirect and response.get_redirect_location() + if redirect_location: + if response.status == 303: + # Change the method according to RFC 9110, Section 15.4.4. + method = "GET" + # And lose the body not to transfer anything sensitive. 
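+                # Per RFC 9110 Section 15.4.4 the redirect target is refetched with GET and no body; _prepare_for_method_change() strips request headers that only applied to the discarded body.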
+ body = None + headers = HTTPHeaderDict(headers)._prepare_for_method_change() + + try: + retries = retries.increment(method, url, response=response, _pool=self) + except MaxRetryError: + if retries.raise_on_redirect: + response.drain_conn() + raise + return response + + response.drain_conn() + retries.sleep_for_retry(response) + log.debug("Redirecting %s -> %s", url, redirect_location) + return self.urlopen( + method, + redirect_location, + body, + headers, + retries=retries, + redirect=redirect, + assert_same_host=assert_same_host, + timeout=timeout, + pool_timeout=pool_timeout, + release_conn=release_conn, + chunked=chunked, + body_pos=body_pos, + preload_content=preload_content, + decode_content=decode_content, + **response_kw, + ) + + # Check if we should retry the HTTP response. + has_retry_after = bool(response.headers.get("Retry-After")) + if retries.is_retry(method, response.status, has_retry_after): + try: + retries = retries.increment(method, url, response=response, _pool=self) + except MaxRetryError: + if retries.raise_on_status: + response.drain_conn() + raise + return response + + response.drain_conn() + retries.sleep(response) + log.debug("Retry: %s", url) + return self.urlopen( + method, + url, + body, + headers, + retries=retries, + redirect=redirect, + assert_same_host=assert_same_host, + timeout=timeout, + pool_timeout=pool_timeout, + release_conn=release_conn, + chunked=chunked, + body_pos=body_pos, + preload_content=preload_content, + decode_content=decode_content, + **response_kw, + ) + + return response + + +class HTTPSConnectionPool(HTTPConnectionPool): + """ + Same as :class:`.HTTPConnectionPool`, but HTTPS. + Always enables SSL certificate validation. + """ + + scheme = "https" + ConnectionCls: type[HTTPSConnection] = HTTPSConnection + + def __init__( + self, + host: str, + port: int | None = None, + timeout: int | None = None, + maxsize: int = 1, + block: bool = False, + headers: typing.Mapping[str, str] | None = None, + retries: Retry | bool | int | None = None, + _proxy: Url | None = None, + _proxy_headers: typing.Mapping[str, str] | None = None, + key_file: str | None = None, + cert_file: str | None = None, + cert_reqs: int | str | None = ssl.CERT_REQUIRED, # Always validate certs + key_password: str | None = None, + ca_certs: str | None = None, + ssl_version: int | str | None = None, + ssl_minimum_version: ssl.TLSVersion | None = None, + ssl_maximum_version: ssl.TLSVersion | None = None, + assert_hostname: bool | str | None = None, + assert_fingerprint: str | None = None, + ca_cert_dir: str | None = None, + **conn_kw: typing.Any, + ) -> None: + super().__init__( + host, + port, + timeout, + maxsize, + block, + headers, + retries, + _proxy, + _proxy_headers, + **conn_kw, + ) + + # Initialize SSL-related arguments + self.key_file = key_file + self.cert_file = cert_file + self.cert_reqs = cert_reqs + self.key_password = key_password + self.ca_certs = ca_certs + self.ca_cert_dir = ca_cert_dir + self.ssl_version = ssl_version + self.ssl_minimum_version = ssl_minimum_version + self.ssl_maximum_version = ssl_maximum_version + self.assert_hostname = assert_hostname + self.assert_fingerprint = assert_fingerprint + + def _prepare_proxy(self, conn: HTTPSConnection) -> None: # type: ignore[override] + """Establishes a tunnel connection through HTTP CONNECT.""" + if self.proxy and self.proxy.scheme == "https": + tunnel_scheme = "https" + else: + tunnel_scheme = "http" + + conn.set_tunnel( + scheme=tunnel_scheme, + host=self._tunnel_host, + port=self.port, + 
headers=self.proxy_headers, + ) + conn.connect() + + def _new_conn(self) -> HTTPSConnection: + """ + Return a fresh :class:`urllib3.connection.HTTPSConnection`. + Always enforces certificate validation. + """ + self.num_connections += 1 + log.debug( + "Starting new HTTPS connection (%d): %s:%s", + self.num_connections, + self.host, + self.port or "443", + ) + + if not self.ConnectionCls or self.ConnectionCls is DummyConnection: # type: ignore[comparison-overlap] + raise ImportError( + "Can't connect to HTTPS URL because the SSL module is not available." + ) + + actual_host: str = self.host + actual_port = self.port + if self.proxy is not None and self.proxy.host is not None: + actual_host = self.proxy.host + actual_port = self.proxy.port + + # Create the HTTPS connection, enforcing certificate validation + return self.ConnectionCls( + host=actual_host, + port=actual_port, + timeout=self.timeout.connect_timeout, + cert_file=self.cert_file, + key_file=self.key_file, + key_password=self.key_password, + cert_reqs=ssl.CERT_REQUIRED, # Ensure always verifying certificates + ca_certs=self.ca_certs, + ca_cert_dir=self.ca_cert_dir, + assert_hostname=self.assert_hostname, + assert_fingerprint=self.assert_fingerprint, + ssl_version=self.ssl_version, + ssl_minimum_version=self.ssl_minimum_version, + ssl_maximum_version=self.ssl_maximum_version, + **self.conn_kw, + ) + + def _validate_conn(self, conn: HTTPSConnection) -> None: + """ + Called right before a request is made, after the socket is created. + Always enforces SSL verification. + """ + super()._validate_conn(conn) + + # Force connection early to allow us to validate the connection. + if conn.is_closed: + conn.connect() + + # Always ensure certificate verification is done and show warning if not verified + if not conn.is_verified and not conn.proxy_is_verified: + warnings.warn( + ( + f"Unverified HTTPS request is being made to host '{conn.host}'. " + "Adding certificate verification is strongly advised. See: " + "https://urllib3.readthedocs.io/en/latest/advanced-usage.html" + "#tls-warnings" + ), + InsecureRequestWarning, + ) + +def connection_from_url(url: str, **kw: typing.Any) -> HTTPConnectionPool: + """ + Given a url, return an :class:`.ConnectionPool` instance of its host. + + This is a shortcut for not having to parse out the scheme, host, and port + of the url before creating an :class:`.ConnectionPool` instance. + + :param url: + Absolute URL string that must include the scheme. Port is optional. + + :param \\**kw: + Passes additional parameters to the constructor of the appropriate + :class:`.ConnectionPool`. Useful for specifying things like + timeout, maxsize, headers, etc. + + Example:: + + >>> conn = connection_from_url('http://google.com/') + >>> r = conn.request('GET', '/') + """ + scheme, _, host, port, *_ = parse_url(url) + scheme = scheme or "http" + port = port or port_by_scheme.get(scheme, 80) + if scheme == "https": + return HTTPSConnectionPool(host, port=port, **kw) # type: ignore[arg-type] + else: + return HTTPConnectionPool(host, port=port, **kw) # type: ignore[arg-type] + + +@typing.overload +def _normalize_host(host: None, scheme: str | None) -> None: + ... + + +@typing.overload +def _normalize_host(host: str, scheme: str | None) -> str: + ... + + +def _normalize_host(host: str | None, scheme: str | None) -> str | None: + """ + Normalize hosts for comparisons and use with sockets. 
+ """ + + host = normalize_host(host, scheme) + + # httplib doesn't like it when we include brackets in IPv6 addresses + # Specifically, if we include brackets but also pass the port then + # httplib crazily doubles up the square brackets on the Host header. + # Instead, we need to make sure we never pass ``None`` as the port. + # However, for backward compatibility reasons we can't actually + # *assert* that. See http://bugs.python.org/issue28539 + if host and host.startswith("[") and host.endswith("]"): + host = host[1:-1] + return host + + +def _url_from_pool( + pool: HTTPConnectionPool | HTTPSConnectionPool, path: str | None = None +) -> str: + """Returns the URL from a given connection pool. This is mainly used for testing and logging.""" + return Url(scheme=pool.scheme, host=pool.host, port=pool.port, path=path).url + + +def _close_pool_connections(pool: queue.LifoQueue[typing.Any]) -> None: + """Drains a queue of connections and closes each one.""" + try: + while True: + conn = pool.get(block=False) + if conn: + conn.close() + except queue.Empty: + pass # Done. \ No newline at end of file diff --git a/kubernetes/main/apps/media/bazarr/app/resources/subcleaner.sh b/kubernetes/main/apps/media/bazarr/app/resources/subcleaner.sh new file mode 100755 index 000000000..34f06a55b --- /dev/null +++ b/kubernetes/main/apps/media/bazarr/app/resources/subcleaner.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +printf "Cleaning subtitles for '%s' ...\n" "$1" +python3 /add-ons/subcleaner/subcleaner.py "$1" -s + +case $1 in + *movies*) section="1";; + *shows*) section="2";; +esac + +if [[ -n "$section" ]]; then + printf "Refreshing Plex section '%s' for '%s' ...\n" "$section" "$(dirname "$1")" + /usr/bin/curl -I -X GET -G \ + --data-urlencode "path=$(dirname "$1")" \ + --data-urlencode "X-Plex-Token=${PLEX_TOKEN}" \ + --no-progress-meter \ + "http://plex.media.svc.cluster.local:32400/library/sections/${section}/refresh" +fi diff --git a/kubernetes/main/apps/media/bazarr/ks.yaml b/kubernetes/main/apps/media/bazarr/ks.yaml new file mode 100755 index 000000000..3b71c9d8f --- /dev/null +++ b/kubernetes/main/apps/media/bazarr/ks.yaml @@ -0,0 +1,22 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app bazarr + namespace: flux-system +spec: + targetNamespace: media + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/media/bazarr/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/media/flaresolverr/app/helmrelease.yaml b/kubernetes/main/apps/media/flaresolverr/app/helmrelease.yaml new file mode 100755 index 000000000..85c62cee1 --- /dev/null +++ b/kubernetes/main/apps/media/flaresolverr/app/helmrelease.yaml @@ -0,0 +1,83 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: flaresolverr +spec: + interval: 30m + chart: + spec: + verify: + provider: cosign + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 6 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 
3 + values: + controllers: + flaresolverr: + containers: + app: + image: + repository: ghcr.io/flaresolverr/flaresolverr + tag: v3.3.21@sha256:f104ee51e5124d83cf3be9b37480649355d223f7d8f9e453d0d5ef06c6e3b31b + env: + TZ: Europe/Sofia + PORT: &port 80 + HOST: "0.0.0.0" + TEST_URL: https://www.google.com + BROWSER_TIMEOUT: 40000 + HEADLESS: true + LOG_LEVEL: info + LOG_HTML: false + CAPTCHA_SOLVER: none + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /health + port: *port + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: false + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 10m + limits: + memory: 1024Mi + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + seccompProfile: { type: RuntimeDefault } + service: + app: + controller: flaresolverr + ports: + http: + port: *port + persistence: + tmp: + type: emptyDir diff --git a/kubernetes/main/apps/media/flaresolverr/app/kustomization.yaml b/kubernetes/main/apps/media/flaresolverr/app/kustomization.yaml new file mode 100755 index 000000000..17cbc72b2 --- /dev/null +++ b/kubernetes/main/apps/media/flaresolverr/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/media/flaresolverr/ks.yaml b/kubernetes/main/apps/media/flaresolverr/ks.yaml new file mode 100755 index 000000000..57d915ffe --- /dev/null +++ b/kubernetes/main/apps/media/flaresolverr/ks.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app flaresolverr + namespace: flux-system +spec: + targetNamespace: media + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/media/flaresolverr/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m + postBuild: + substitute: + APP: *app diff --git a/kubernetes/main/apps/media/jellyseerr/app/helmrelease.yaml b/kubernetes/main/apps/media/jellyseerr/app/helmrelease.yaml new file mode 100755 index 000000000..c6d5cf726 --- /dev/null +++ b/kubernetes/main/apps/media/jellyseerr/app/helmrelease.yaml @@ -0,0 +1,120 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: jellyseerr +spec: + interval: 30m + chart: + spec: + verify: + provider: cosign + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + dependsOn: + - name: volsync + namespace: volsync-system + values: + controllers: + jellyseerr: + annotations: + reloader.stakater.com/auto: "true" + containers: + app: + image: + repository: ghcr.io/fallenbagel/jellyseerr + tag: develop@sha256:2ae0107aa00afc452ac556643a327e43c5b7bd50c23a39446c8afd6cac7b79a3 + env: + TZ: Europe/Sofia + 
LOG_LEVEL: "info" + PORT: &port 80 + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /api/v1/status + port: *port + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 10m + memory: 512Mi + limits: + memory: 2Gi + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 + fsGroupChangePolicy: OnRootMismatch + seccompProfile: { type: RuntimeDefault } + service: + app: + controller: jellyseerr + ports: + http: + port: *port + ingress: + app: + annotations: + external-dns.alpha.kubernetes.io/target: external.${PUBLIC_DOMAIN} + nginx.ingress.kubernetes.io/configuration-snippet: | + more_set_headers "access-control-allow-origin https://requests.${PUBLIC_DOMAIN}"; + proxy_set_header Accept-Encoding ""; + sub_filter '' ''; + sub_filter_once on; + cert-manager.io/cluster-issuer: zerossl-prod + cert-manager.io/private-key-rotation-policy: Always + cert-manager.io/private-key-algorithm: ECDSA + cert-manager.io/private-key-size: "384" + className: external + tls: + - hosts: + - &host requests.${PUBLIC_DOMAIN} + secretName: jellyseerr-tls + hosts: + - host: *host + paths: + - path: / + service: + identifier: app + port: http + persistence: + config: + enabled: true + type: persistentVolumeClaim + size: 20Gi + accessMode: ReadWriteOnce + storageClass: openebs-zfs-128k + globalMounts: + - path: /app/config + logs: + type: emptyDir + globalMounts: + - path: /app/config/logs + tmp: + type: emptyDir diff --git a/kubernetes/main/apps/media/jellyseerr/app/kustomization.yaml b/kubernetes/main/apps/media/jellyseerr/app/kustomization.yaml new file mode 100755 index 000000000..8ad877d6d --- /dev/null +++ b/kubernetes/main/apps/media/jellyseerr/app/kustomization.yaml @@ -0,0 +1,16 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml + - ./volsync-src.yaml +configMapGenerator: + - name: jellyseerr-gatus-ep + options: + labels: + gatus.io/enabled: "true" + files: + - config.yaml=./resources/gatus-ep.yaml +generatorOptions: + disableNameSuffixHash: true diff --git a/kubernetes/main/apps/media/jellyseerr/app/resources/gatus-ep.yaml b/kubernetes/main/apps/media/jellyseerr/app/resources/gatus-ep.yaml new file mode 100755 index 000000000..828e0909b --- /dev/null +++ b/kubernetes/main/apps/media/jellyseerr/app/resources/gatus-ep.yaml @@ -0,0 +1,12 @@ +endpoints: + - name: "Jellyseerr" + group: external + url: "https://requests.${PUBLIC_DOMAIN}/api/v1/status" + interval: 1m + client: + dns-resolver: tcp://1.1.1.1:53 + conditions: + - "[STATUS] == 200" + - "has([BODY].version) == true" + alerts: + - type: pushover diff --git a/kubernetes/main/apps/media/jellyseerr/app/volsync-dst.yaml b/kubernetes/main/apps/media/jellyseerr/app/volsync-dst.yaml new file mode 100644 index 000000000..3a9d164cf --- /dev/null +++ b/kubernetes/main/apps/media/jellyseerr/app/volsync-dst.yaml @@ -0,0 +1,18 @@ +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationDestination +metadata: + name: jellyseerr-dst + namespace: media +spec: + trigger: + manual: restore-once + restic: + repository: jellyseerr-volsync-secret + copyMethod: Direct + storageClassName: openebs-zfs-128k + destinationPVC: jellyseerr-config + 
enableFileDeletion: true + moverSecurityContext: + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 diff --git a/kubernetes/main/apps/media/jellyseerr/app/volsync-src.yaml b/kubernetes/main/apps/media/jellyseerr/app/volsync-src.yaml new file mode 100644 index 000000000..31e42c4a6 --- /dev/null +++ b/kubernetes/main/apps/media/jellyseerr/app/volsync-src.yaml @@ -0,0 +1,47 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.ok8.sh/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret jellyseerr-volsync-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + creationPolicy: Owner + template: + engineVersion: v2 + data: + RESTIC_REPOSITORY: '{{ .RESTIC_REPOSITORY }}/jellyseerr/volsync' + RESTIC_PASSWORD: '{{ .ENCRYPTION_KEY }}' + AWS_ACCESS_KEY_ID: '{{ .CF_ACCESS_KEY_ID }}' + AWS_SECRET_ACCESS_KEY: '{{ .CF_SECRET_ACCESS_KEY }}' + dataFrom: + - extract: + key: secrets/volsync + - extract: + key: secrets/cloudflare +--- +# yaml-language-server: $schema=https://kubernetes-schemas.ok8.sh/volsync.backube/replicationsource_v1alpha1.json +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: jellyseerr +spec: + sourcePVC: jellyseerr-config + trigger: + schedule: "0 7 * * *" + restic: + copyMethod: Snapshot + pruneIntervalDays: 7 + repository: jellyseerr-volsync-secret + cacheCapacity: 2Gi + moverSecurityContext: + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 + retain: + daily: 7 + within: 3d diff --git a/kubernetes/main/apps/media/jellyseerr/ks.yaml b/kubernetes/main/apps/media/jellyseerr/ks.yaml new file mode 100755 index 000000000..cad339495 --- /dev/null +++ b/kubernetes/main/apps/media/jellyseerr/ks.yaml @@ -0,0 +1,25 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app jellyseerr + namespace: flux-system +spec: + targetNamespace: media + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/media/jellyseerr/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m + postBuild: + substitute: + APP: *app + GATUS_SUBDOMAIN: requests + GATUS_PATH: /api/v1/status diff --git a/kubernetes/main/apps/media/kustomization.yaml b/kubernetes/main/apps/media/kustomization.yaml new file mode 100755 index 000000000..e413c3eb6 --- /dev/null +++ b/kubernetes/main/apps/media/kustomization.yaml @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./jellyseerr/ks.yaml + - ./radarr/ks.yaml + - ./sonarr/ks.yaml + - ./unpackerr/ks.yaml + - ./autobrr/ks.yaml + - ./bazarr/ks.yaml + - ./sabnzbd/ks.yaml + - ./qbittorrent/ks.yaml + - ./prowlarr/ks.yaml + - ./flaresolverr/ks.yaml + - ./notifiarr/ks.yaml + - ./omegabrr/ks.yaml + - ./plex/ks.yaml diff --git a/kubernetes/main/apps/media/namespace.yaml b/kubernetes/main/apps/media/namespace.yaml new file mode 100755 index 000000000..a9e74c9e5 --- /dev/null +++ b/kubernetes/main/apps/media/namespace.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: media + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled + 
volsync.backube/privileged-movers: "true" +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: media +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: media +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/media/notifiarr/app/externalsecret.yaml b/kubernetes/main/apps/media/notifiarr/app/externalsecret.yaml new file mode 100755 index 000000000..c04bbc5c3 --- /dev/null +++ b/kubernetes/main/apps/media/notifiarr/app/externalsecret.yaml @@ -0,0 +1,33 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret notifiarr-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + DN_API_KEY: "{{ .DN_API_KEY }}" + DN_UI_PASSWORD: "{{ .DN_UI_PASSWORD }}" + DN_UPSTREAMS_0: "{{ .DN_UPSTREAMS_0 }}" + TMPDIR: "{{ .TMPDIR }}" + DN_BIND_ADDR: "{{ .DN_BIND_ADDR }}" + DN_MODE: "{{ .DN_MODE }}" + DN_PLEX_TOKEN: "{{ .DN_PLEX_TOKEN }}" + DN_PLEX_URL: "{{ .DN_PLEX_URL }}" + DN_RADARR_0_API_KEY: "{{ .DN_RADARR_0_API_KEY }}" + DN_RADARR_0_NAME: "{{ .DN_RADARR_0_NAME }}" + DN_RADARR_0_URL: "{{ .DN_RADARR_0_URL }}" + DN_SONARR_0_API_KEY: "{{ .DN_SONARR_0_API_KEY }}" + DN_SONARR_0_NAME: "{{ .DN_SONARR_0_NAME }}" + DN_SONARR_0_URL: "{{ .DN_SONARR_0_URL }}" + + dataFrom: + - extract: + key: secrets/notifiarr diff --git a/kubernetes/main/apps/media/notifiarr/app/helmrelease.yaml b/kubernetes/main/apps/media/notifiarr/app/helmrelease.yaml new file mode 100755 index 000000000..009fe3804 --- /dev/null +++ b/kubernetes/main/apps/media/notifiarr/app/helmrelease.yaml @@ -0,0 +1,112 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: notifiarr +spec: + interval: 30m + chart: + spec: + verify: + provider: cosign + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + controllers: + notifiarr: + annotations: + reloader.stakater.com/auto: "true" + containers: + app: + image: + repository: golift/notifiarr + tag: 0.8.3@sha256:7922f7d0e0336ca0e91182820c5d4b2ddc2d86083fa847c5a1088b41d5b20903 + envFrom: + - secretRef: + name: notifiarr-secret + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: / + port: &port 80 + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + startup: + 
enabled: true + spec: + failureThreshold: 30 + periodSeconds: 10 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 100m + limits: + memory: 1Gi + defaultPodOptions: + hostname: def39d9a-546b-4b72-9466-a858e8aba5ff + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + seccompProfile: { type: RuntimeDefault } + service: + app: + controller: notifiarr + ports: + http: + port: *port + ingress: + app: + annotations: + external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN} + cert-manager.io/cluster-issuer: zerossl-prod + cert-manager.io/private-key-rotation-policy: Always + cert-manager.io/private-key-algorithm: ECDSA + cert-manager.io/private-key-size: "384" + className: internal + tls: + - hosts: + - &host "{{ .Release.Name }}.${PUBLIC_DOMAIN}" + secretName: notifiarr-tls + hosts: + - host: *host + paths: + - path: / + service: + identifier: app + port: http + persistence: + config: + enabled: true + type: persistentVolumeClaim + size: 2Gi + storageClass: openebs-zfs-128k + accessMode: ReadWriteOnce + + tmpdir: + type: emptyDir + medium: Memory diff --git a/kubernetes/main/apps/media/notifiarr/ks.yaml b/kubernetes/main/apps/media/notifiarr/ks.yaml new file mode 100755 index 000000000..e78eaf796 --- /dev/null +++ b/kubernetes/main/apps/media/notifiarr/ks.yaml @@ -0,0 +1,25 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app notifiarr + namespace: flux-system +spec: + targetNamespace: media + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/media/notifiarr/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m + postBuild: + substitute: + APP: *app diff --git a/kubernetes/main/apps/media/omegabrr/app/externalsecret.yaml b/kubernetes/main/apps/media/omegabrr/app/externalsecret.yaml new file mode 100755 index 000000000..349b9cdda --- /dev/null +++ b/kubernetes/main/apps/media/omegabrr/app/externalsecret.yaml @@ -0,0 +1,40 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret omegabrr-secret +spec: + refreshInterval: 5m + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + config.yaml: | + server: + host: 0.0.0.0 + port: 80 + apiToken: {{ .OMEGABRR_API_KEY }} + clients: + autobrr: + host: https://autobrr.${PUBLIC_DOMAIN} + apikey: {{ .AUTOBRR_API_KEY }} + arr: + - name: radarr + type: radarr + host: https://radarr.${PUBLIC_DOMAIN} + apikey: {{ .RADARR_API_KEY }} + filters: [5] + - name: sonarr + type: sonarr + host: https://sonarr.${PUBLIC_DOMAIN} + apikey: {{ .SONARR_API_KEY }} + filters: [6] + excludeAlternateTitles: true + dataFrom: + - extract: + key: secrets/api-keys diff --git a/kubernetes/main/apps/media/omegabrr/app/helmrelease.yaml b/kubernetes/main/apps/media/omegabrr/app/helmrelease.yaml new file mode 100755 index 000000000..6ea388356 --- /dev/null +++ b/kubernetes/main/apps/media/omegabrr/app/helmrelease.yaml @@ -0,0 +1,67 @@ +--- +# 
yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: omegabrr +spec: + interval: 30m + chart: + spec: + verify: + provider: cosign + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + controllers: + omegabrr: + annotations: + reloader.stakater.com/auto: "true" + containers: + app: + image: + repository: ghcr.io/autobrr/omegabrr + tag: v1.15.0@sha256:4f6099a76ff9d248e9f032e29c04a92b483f21456e46f3b01eb20399f4732ad0 + env: + TZ: Europe/Sofia + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 10m + limits: + memory: 256Mi + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + seccompProfile: { type: RuntimeDefault } + service: + app: + controller: omegabrr + ports: + http: + port: 80 + persistence: + config-file: + type: secret + name: omegabrr-secret + globalMounts: + - path: /config/config.yaml + subPath: config.yaml + readOnly: true diff --git a/kubernetes/main/apps/media/omegabrr/app/kustomization.yaml b/kubernetes/main/apps/media/omegabrr/app/kustomization.yaml new file mode 100755 index 000000000..85e530b33 --- /dev/null +++ b/kubernetes/main/apps/media/omegabrr/app/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: default +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/media/omegabrr/ks.yaml b/kubernetes/main/apps/media/omegabrr/ks.yaml new file mode 100755 index 000000000..d4006d805 --- /dev/null +++ b/kubernetes/main/apps/media/omegabrr/ks.yaml @@ -0,0 +1,25 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app omegabrr + namespace: flux-system +spec: + targetNamespace: media + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/media/omegabrr/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m + postBuild: + substitute: + APP: *app diff --git a/kubernetes/main/apps/media/plex/app/helmrelease.yaml b/kubernetes/main/apps/media/plex/app/helmrelease.yaml new file mode 100755 index 000000000..ac561db99 --- /dev/null +++ b/kubernetes/main/apps/media/plex/app/helmrelease.yaml @@ -0,0 +1,152 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: plex +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + + values: + controllers: + plex: + annotations: + reloader.stakater.com/auto: "true" 
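+    # The probes below poll Plex's /identity endpoint on port 32400. Assuming
+    # the LoadBalancer address pinned further down (lbipam.cilium.io/ips), a
+    # quick manual smoke test might be:
+    #   curl -fsS --max-time 5 http://192.168.91.98:32400/identity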
+ + containers: + app: + image: + repository: ghcr.io/onedr0p/plex + tag: 1.41.2.9200-c6bbc1b53@sha256:47c6f3d85f4e739210860934a0bb24126170fa2f6a602fb909467f17a035c311 + env: + TZ: Europe/Sofia + PLEX_ADVERTISE_URL: https://plex.${PUBLIC_DOMAIN}:443,http://192.168.91.98:32400 + PLEX_UID: 568 + PLEX_GID: 568 + NVIDIA_VISIBLE_DEVICES: all + NVIDIA_DRIVER_CAPABILITIES: all + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /identity + port: 32400 + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + startup: + enabled: true + spec: + failureThreshold: 30 + periodSeconds: 10 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 100m + limits: + memory: 3Gi + nvidia.com/gpu: 1 + + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 + fsGroupChangePolicy: OnRootMismatch + seccompProfile: { type: RuntimeDefault } + + service: + app: + controller: plex + type: LoadBalancer + allocateLoadBalancerNodePorts: false + annotations: + lbipam.cilium.io/ips: 192.168.91.98 + ports: + http: + port: 32400 + + ingress: + app: + annotations: + external-dns.alpha.kubernetes.io/target: external.${PUBLIC_DOMAIN} + nginx.ingress.kubernetes.io/backend-protocol: "HTTP" + nginx.ingress.kubernetes.io/configuration-snippet: | + add_header referrer-policy "same-origin" always; + add_header x-frame-options sameorigin; + add_header x-content-type-options "nosniff" always; + more_set_headers "access-control-allow-origin https://plex.${PUBLIC_DOMAIN}"; + cert-manager.io/cluster-issuer: zerossl-prod + cert-manager.io/private-key-rotation-policy: Always + cert-manager.io/private-key-algorithm: ECDSA + cert-manager.io/private-key-size: "384" + className: external + tls: + - hosts: + - &host "{{ .Release.Name }}.${PUBLIC_DOMAIN}" + secretName: plex-tls + hosts: + - host: *host + paths: + - path: / + pathType: ImplementationSpecific + service: + identifier: app + port: http + + persistence: + config: + type: persistentVolumeClaim + size: 50Gi + accessMode: ReadWriteOnce + storageClass: openebs-zfs-1m + globalMounts: + - path: /config/Library/Application Support/Plex Media Server + + logs: + type: emptyDir + globalMounts: + - path: /config/Library/Application Support/Plex Media Server/Logs + + tmp: + type: emptyDir + medium: Memory + + transcode: + type: persistentVolumeClaim + size: 150Gi + storageClass: openebs-zfs-1m + accessMode: ReadWriteOnce + + media: + type: nfs + server: 192.168.91.40 + path: /mnt/exos20/data + globalMounts: + - path: /data/media + readOnly: true + subPath: media diff --git a/kubernetes/main/apps/media/plex/app/kustomization.yaml b/kubernetes/main/apps/media/plex/app/kustomization.yaml new file mode 100755 index 000000000..61678f5de --- /dev/null +++ b/kubernetes/main/apps/media/plex/app/kustomization.yaml @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml +configMapGenerator: + - name: plex-loki-rules + files: + - plex.yaml=./resources/lokirule.yaml + options: + labels: + loki_rule: "true" + - name: plex-gatus-ep + options: + labels: + gatus.io/enabled: "true" + files: + - config.yaml=./resources/gatus-ep.yaml +generatorOptions: + disableNameSuffixHash: true diff --git 
a/kubernetes/main/apps/media/plex/app/resources/gatus-ep.yaml b/kubernetes/main/apps/media/plex/app/resources/gatus-ep.yaml new file mode 100755 index 000000000..d5e0e4381 --- /dev/null +++ b/kubernetes/main/apps/media/plex/app/resources/gatus-ep.yaml @@ -0,0 +1,11 @@ +endpoints: + - name: "Plex Media Server" + group: external + url: "https://plex.${PUBLIC_DOMAIN}/web" + interval: 1m + client: + dns-resolver: tcp://1.1.1.1:53 + conditions: + - "[STATUS] == 200" + alerts: + - type: pushover diff --git a/kubernetes/main/apps/media/plex/app/resources/lokirule.yaml b/kubernetes/main/apps/media/plex/app/resources/lokirule.yaml new file mode 100755 index 000000000..ed0c09a34 --- /dev/null +++ b/kubernetes/main/apps/media/plex/app/resources/lokirule.yaml @@ -0,0 +1,14 @@ +--- +groups: + - name: plex + rules: + - alert: PlexDatabaseBusy + expr: | + sum by (app) (count_over_time({app="plex"} |~ "(?i)retry busy DB"[2m])) > 0 + for: 2m + labels: + severity: critical + category: logs + annotations: + app: "{{ $labels.app }}" + summary: "{{ $labels.app }} is experiencing database issues" diff --git a/kubernetes/main/apps/media/plex/ks.yaml b/kubernetes/main/apps/media/plex/ks.yaml new file mode 100755 index 000000000..f97b3232f --- /dev/null +++ b/kubernetes/main/apps/media/plex/ks.yaml @@ -0,0 +1,22 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app plex + namespace: flux-system +spec: + targetNamespace: media + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/media/plex/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/media/prowlarr/app/externalsecret.yaml b/kubernetes/main/apps/media/prowlarr/app/externalsecret.yaml new file mode 100755 index 000000000..16b7cc592 --- /dev/null +++ b/kubernetes/main/apps/media/prowlarr/app/externalsecret.yaml @@ -0,0 +1,24 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret prowlarr-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + PROWLARR__AUTH__APIKEY: "{{ .PROWLARR_API_KEY }}" + INIT_POSTGRES_USER: "{{ .PROWLARR_POSTGRES_USER }}" + INIT_POSTGRES_PASS: "{{ .PROWLARR_POSTGRES_PASS }}" + INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}" + dataFrom: + - extract: + key: secrets/api-keys + - extract: + key: secrets/cloudnative-pg diff --git a/kubernetes/main/apps/media/prowlarr/app/helmrelease.yaml b/kubernetes/main/apps/media/prowlarr/app/helmrelease.yaml new file mode 100755 index 000000000..e0063ce69 --- /dev/null +++ b/kubernetes/main/apps/media/prowlarr/app/helmrelease.yaml @@ -0,0 +1,174 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app prowlarr +spec: + interval: 30m + chart: + spec: + verify: + provider: cosign + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + 
retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + + values: + controllers: + prowlarr: + annotations: + reloader.stakater.com/auto: "true" + + initContainers: + init-db: + image: + repository: ghcr.io/onedr0p/postgres-init + tag: 16.6@sha256:35353a77777ee8f634d0f3945f495b4a40065134b8619e0d18bd49b0ee9c855b + env: + INIT_POSTGRES_DBNAME: &dbName prowlarr + INIT_POSTGRES_HOST: &dbHost postgres17-rw.database.svc.cluster.local + INIT_POSTGRES_USER: + valueFrom: + secretKeyRef: + name: &secret prowlarr-secret + key: INIT_POSTGRES_USER + INIT_POSTGRES_PASS: + valueFrom: + secretKeyRef: + name: *secret + key: INIT_POSTGRES_PASS + INIT_POSTGRES_SUPER_PASS: + valueFrom: + secretKeyRef: + name: *secret + key: INIT_POSTGRES_SUPER_PASS + + containers: + app: + image: + repository: ghcr.io/onedr0p/prowlarr-develop + tag: 1.27.0.4852@sha256:6e1041a558cceee6356efe74cc9a9138909f4a8bac5eb714a420a37e4b478c77 + env: + PROWLARR__APP__INSTANCENAME: Prowlarr + PROWLARR__APP__THEME: dark + PROWLARR__AUTH__METHOD: External + PROWLARR__AUTH__REQUIRED: DisabledForLocalAddresses + PROWLARR__LOG__DBENABLED: "False" + PROWLARR__LOG__LEVEL: info + PROWLARR__SERVER__PORT: &port 80 + PROWLARR__UPDATE__BRANCH: develop + PROWLARR__AUTH__APIKEY: + valueFrom: + secretKeyRef: + name: *secret + key: PROWLARR__AUTH__APIKEY + PROWLARR__POSTGRES__HOST: *dbHost + PROWLARR__POSTGRES__PORT: "5432" + PROWLARR__POSTGRES__USER: + valueFrom: + secretKeyRef: + name: *secret + key: INIT_POSTGRES_USER + PROWLARR__POSTGRES__PASSWORD: + valueFrom: + secretKeyRef: + name: *secret + key: INIT_POSTGRES_PASS + PROWLARR__POSTGRES__MAINDB: *dbName + TZ: Europe/Sofia + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /ping + port: *port + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 100m + limits: + memory: 1Gi + + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 + fsGroupChangePolicy: OnRootMismatch + seccompProfile: { type: RuntimeDefault } + + service: + app: + controller: *app + ports: + http: + port: *port + + ingress: + app: + annotations: + external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN} + nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth" + nginx.ingress.kubernetes.io/auth-signin: "https://$host/oauth2/start?rd=$escaped_request_uri" + nginx.ingress.kubernetes.io/auth-snippet: | + if ($request_uri ~* "(\/|\/[0-9]+\/)api(/|$|[?])") { + return 200; + } + if ($request_uri ~* "^/ping") { + return 200; + } + if ($request_uri ~* "(\/|\/[0-9]+\/)download(/|$|[?])") { + return 200; + } + nginx.ingress.kubernetes.io/configuration-snippet: | + proxy_set_header Accept-Encoding ""; + sub_filter '' ''; + sub_filter_once on; + cert-manager.io/cluster-issuer: zerossl-prod + cert-manager.io/private-key-rotation-policy: Always + cert-manager.io/private-key-algorithm: ECDSA + cert-manager.io/private-key-size: "384" + className: internal + tls: + - hosts: + - &host "{{ .Release.Name }}.${PUBLIC_DOMAIN}" + secretName: prowlarr-tls + hosts: + - host: *host + paths: + - path: / + service: + identifier: app + port: http + + persistence: + config: + type: emptyDir + medium: Memory + + tmp: + type: emptyDir + medium: Memory diff --git 
a/kubernetes/main/apps/media/prowlarr/app/kustomization.yaml b/kubernetes/main/apps/media/prowlarr/app/kustomization.yaml new file mode 100755 index 000000000..0f24c444d --- /dev/null +++ b/kubernetes/main/apps/media/prowlarr/app/kustomization.yaml @@ -0,0 +1,16 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml +configMapGenerator: + - name: prowlarr-gatus-ep + options: + labels: + gatus.io/enabled: "true" + files: + - config.yaml=./resources/gatus-ep.yaml +generatorOptions: + disableNameSuffixHash: true diff --git a/kubernetes/main/apps/media/prowlarr/app/resources/gatus-ep.yaml b/kubernetes/main/apps/media/prowlarr/app/resources/gatus-ep.yaml new file mode 100755 index 000000000..24f8719f2 --- /dev/null +++ b/kubernetes/main/apps/media/prowlarr/app/resources/gatus-ep.yaml @@ -0,0 +1,15 @@ +endpoints: + - name: "Prowlarr" + group: guarded + url: "https://prowlarr.${PUBLIC_DOMAIN}/ping" + interval: 1m + ui: + hide-hostname: true + hide-url: true + client: + dns-resolver: tcp://172.17.0.10:53 + conditions: + - "[STATUS] == 200" + - "[BODY].status == OK" + alerts: + - type: pushover diff --git a/kubernetes/main/apps/media/prowlarr/ks.yaml b/kubernetes/main/apps/media/prowlarr/ks.yaml new file mode 100755 index 000000000..911f79fd6 --- /dev/null +++ b/kubernetes/main/apps/media/prowlarr/ks.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app prowlarr + namespace: flux-system +spec: + targetNamespace: media + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: cloudnative-pg-cluster + - name: external-secrets-stores + path: ./kubernetes/main/apps/media/prowlarr/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/media/qbittorrent/app/externalsecret.yaml b/kubernetes/main/apps/media/qbittorrent/app/externalsecret.yaml new file mode 100755 index 000000000..595f2dd1c --- /dev/null +++ b/kubernetes/main/apps/media/qbittorrent/app/externalsecret.yaml @@ -0,0 +1,50 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret qbittorrent-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + CROSS_SEED_API_KEY: "{{ .CROSS_SEED_API_KEY }}" + PUSHOVER_TOKEN: "{{ .QBITTORRENT_PUSHOVER_TOKEN }}" + PUSHOVER_USER_KEY: "{{ .PUSHOVER_USER_KEY }}" + config.js: | + module.exports = { + action: "inject", + apiKey: "{{.CROSS_SEED_API_KEY}}", + delay: 30, + duplicateCategories: false, + flatLinking: false, + includeEpisodes: true, + includeNonVideos: true, + includeSingleEpisodes: true, + linkCategory: "cross-seed", + linkDir: "/data/downloads/torrents/complete/cross-seed", + linkType: "hardlink", + matchMode: "safe", + outputDir: "/config", + port: 8080, + qbittorrentUrl: "http://localhost", + radarr: ["https://radarr.${PUBLIC_DOMAIN}/?apikey={{ .RADARR_API_KEY }}"], + skipRecheck: true, + sonarr: ["https://sonarr.${PUBLIC_DOMAIN}/?apikey={{ .SONARR_API_KEY }}"], + torrentDir: 
"/qbittorrent/qBittorrent/BT_backup", + torznab: [ + 3, // IPT + 1, // SA + 2, // TL + ].map(i => `https://prowlarr.${PUBLIC_DOMAIN}/$${i}/api?apikey={{ .PROWLARR_API_KEY }}`), + }; + dataFrom: + - extract: + key: secrets/api-keys + - extract: + key: secrets/pushover diff --git a/kubernetes/main/apps/media/qbittorrent/app/helmrelease.yaml b/kubernetes/main/apps/media/qbittorrent/app/helmrelease.yaml new file mode 100755 index 000000000..51f627a02 --- /dev/null +++ b/kubernetes/main/apps/media/qbittorrent/app/helmrelease.yaml @@ -0,0 +1,319 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: qbittorrent +spec: + interval: 30m + chart: + spec: + verify: + provider: cosign + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + dependsOn: + - name: volsync + namespace: volsync-system + + values: + controllers: + torrenting: + annotations: + reloader.stakater.com/auto: "true" + + containers: + qbittorrent: + image: + repository: ghcr.io/onedr0p/qbittorrent + tag: 5.0.2@sha256:b905a9e4eef9ffabd10ad8618ffdc2661d20c1a910441e5f921c851c10974444 + env: + UMASK: "022" + TZ: &TZ Europe/Sofia + QBT_WEBUI_PORT: &port 80 + QBT_TORRENTING_PORT: &torrentPort 50413 + CROSS_SEED_ENABLED: true + CROSS_SEED_HOST: localhost + CROSS_SEED_PORT: 8080 + CROSS_SEED_SLEEP_INTERVAL: 15 + PUSHOVER_ENABLED: true + CROSS_SEED_API_KEY: + valueFrom: + secretKeyRef: + name: &secret qbittorrent-secret + key: CROSS_SEED_API_KEY + PUSHOVER_TOKEN: + valueFrom: + secretKeyRef: + name: *secret + key: PUSHOVER_TOKEN + PUSHOVER_USER_KEY: + valueFrom: + secretKeyRef: + name: *secret + key: PUSHOVER_USER_KEY + probes: + readiness: + enabled: true + custom: true + spec: + exec: + command: + - /scripts/healthcheck.sh + initialDelaySeconds: 0 + periodSeconds: 10 + failureThreshold: 3 + liveness: + enabled: true + custom: true + spec: + exec: + command: + - /scripts/healthcheck.sh + initialDelaySeconds: 0 + periodSeconds: 10 + failureThreshold: 3 + securityContext: + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 100m + limits: + memory: 16Gi + + qbitmanage: + dependsOn: qbittorrent + image: + repository: ghcr.io/stuffanthings/qbit_manage + tag: v4.1.13@sha256:fa623102eeac2c9cda115aa23f7a5bb85af2ab2cffec766b5173c85a073926b9 + env: + TZ: *TZ + probes: + liveness: + enabled: true + readiness: + enabled: false + securityContext: + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 10m + limits: + memory: 8Gi + lifecycle: + postStart: + exec: + command: ["/bin/sh", "-c", "cp /secret/config.yml /config/config.yml"] + + cross-seed: + dependsOn: qbittorrent + image: + repository: ghcr.io/cross-seed/cross-seed + tag: 6.1.1@sha256:5cb54b46a614f362c0332a91e5754d6998d45ae27ccc2c77b04b1a3493816331 + env: + TZ: *TZ + args: ["daemon"] + probes: + liveness: + enabled: false + readiness: + enabled: false + securityContext: + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 10m + limits: + memory: 
512Mi + + vuetorrent: + dependsOn: qbittorrent + image: + repository: registry.k8s.io/git-sync/git-sync + tag: v4.3.0 + args: + - --repo=https://github.com/WDaan/VueTorrent + - --ref=latest-release + - --period=86400s + - --root=/addons + securityContext: + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: false + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 10m + memory: 25Mi + + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 + fsGroupChangePolicy: OnRootMismatch + seccompProfile: { type: RuntimeDefault } + terminationGracePeriodSeconds: 300 + + service: + qbittorrent: + controller: torrenting + type: LoadBalancer + allocateLoadBalancerNodePorts: false + annotations: + lbipam.cilium.io/ips: 192.168.91.82 + ports: + http: + port: *port + bittorrent-tcp: + enabled: true + port: *torrentPort + protocol: TCP + bittorrent-udp: + enabled: true + port: *torrentPort + protocol: UDP + + ingress: + qbittorrent: + annotations: + external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN} + nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth" + nginx.ingress.kubernetes.io/auth-signin: "https://$host/oauth2/start?rd=$escaped_request_uri" + nginx.ingress.kubernetes.io/auth-snippet: | + if ($request_uri ~* "^/api(/|$)") { + return 202; + } + nginx.ingress.kubernetes.io/configuration-snippet: | + proxy_set_header Accept-Encoding ""; + sub_filter '' ''; + sub_filter_once on; + cert-manager.io/cluster-issuer: zerossl-prod + cert-manager.io/private-key-rotation-policy: Always + cert-manager.io/private-key-algorithm: ECDSA + cert-manager.io/private-key-size: "384" + className: internal + tls: + - hosts: + - &host "{{ .Release.Name }}.${PUBLIC_DOMAIN}" + secretName: qbittorrent-tls + hosts: + - host: *host + paths: + - path: / + service: + identifier: qbittorrent + port: http + + persistence: + config: + enabled: true + type: persistentVolumeClaim + size: 5Gi + accessMode: ReadWriteOnce + storageClass: openebs-zfs-128k + advancedMounts: + torrenting: + qbittorrent: + - path: /config + qbitmanage: + - path: /qBittorrent/BT_backup + subPath: qBittorrent/BT_backup + readOnly: true + cross-seed: + - path: /qbittorrent/qBittorrent/BT_backup + subPath: qBittorrent/BT_backup + readOnly: true + scripts: + type: configMap + name: qbittorrent-scripts + defaultMode: 0550 + advancedMounts: + torrenting: + qbittorrent: + - readOnly: true + + media: + type: nfs + server: 192.168.91.40 + path: /mnt/exos20/data + advancedMounts: + torrenting: + qbitmanage: + - path: /data/downloads/torrents + subPath: downloads/torrents + qbittorrent: + - path: /data/downloads/torrents + subPath: downloads/torrents + cross-seed: + - path: /data/downloads/torrents/complete + subPath: downloads/torrents/complete + + config-qbitmanage: + type: emptyDir + advancedMounts: + torrenting: + qbitmanage: + - path: /app/config + - path: /config + + config-file-qbitmanage: + type: configMap + name: qbitmanage-config + defaultMode: 0600 + advancedMounts: + torrenting: + qbitmanage: + - path: /secret/config.yml + subPath: config.yml + readOnly: true + + config-cross-seed: + type: emptyDir + medium: Memory + advancedMounts: + torrenting: + cross-seed: + - path: /config + + config-file-cross-seed: + type: secret + name: qbittorrent-secret + advancedMounts: + torrenting: + cross-seed: + - path: /config/config.js + subPath: config.js + readOnly: true + + addons: + type: emptyDir + advancedMounts: + torrenting: + qbittorrent: + - 
path: /addons + readOnly: true + vuetorrent: + - path: /addons diff --git a/kubernetes/main/apps/media/qbittorrent/app/kustomization.yaml b/kubernetes/main/apps/media/qbittorrent/app/kustomization.yaml new file mode 100755 index 000000000..eab70bf1a --- /dev/null +++ b/kubernetes/main/apps/media/qbittorrent/app/kustomization.yaml @@ -0,0 +1,30 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml + - ./volsync-src.yaml +configMapGenerator: + - name: qbittorrent-loki-rules + files: + - qbittorrent.yaml=./resources/lokirule.yaml + options: + labels: + loki_rule: "true" + - name: qbittorrent-scripts + files: + - post-process.sh=./resources/post-process.sh + - healthcheck.sh=./resources/healthcheck.sh + - name: qbitmanage-config + files: + - config.yml=./resources/qbitmanage-config.yaml + - name: qbittorrent-gatus-ep + options: + labels: + gatus.io/enabled: "true" + files: + - config.yaml=./resources/gatus-ep.yaml +generatorOptions: + disableNameSuffixHash: true diff --git a/kubernetes/main/apps/media/qbittorrent/app/resources/gatus-ep.yaml b/kubernetes/main/apps/media/qbittorrent/app/resources/gatus-ep.yaml new file mode 100755 index 000000000..b33c83adb --- /dev/null +++ b/kubernetes/main/apps/media/qbittorrent/app/resources/gatus-ep.yaml @@ -0,0 +1,14 @@ +endpoints: + - name: "qBittorrent" + group: guarded + url: "https://qbittorrent.${PUBLIC_DOMAIN}/api/v2/app/version" + interval: 1m + ui: + hide-hostname: true + hide-url: true + conditions: + - "[STATUS] == 403" + alerts: + - type: pushover + headers: + Accept: application/json diff --git a/kubernetes/main/apps/media/qbittorrent/app/resources/healthcheck.sh b/kubernetes/main/apps/media/qbittorrent/app/resources/healthcheck.sh new file mode 100755 index 000000000..ba84b8f12 --- /dev/null +++ b/kubernetes/main/apps/media/qbittorrent/app/resources/healthcheck.sh @@ -0,0 +1,10 @@ +#!/bin/bash +http_code=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:80/api/v2/app/version) +if [[ $http_code != 200 ]]; then + log "App status: not up yet, did you enable \"Bypass authentication for clients on localhost\" in the Web UI options?" 
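+  # NOTE: `log` is not defined in this script; it is assumed to be provided
+  # by the container image's shell environment. A standalone run would need a
+  # fallback defined near the top, e.g.: log() { printf '%s\n' "$*"; }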
+  exit 1 +else +  log "App status: up and running" +fi + +exit 0 diff --git a/kubernetes/main/apps/media/qbittorrent/app/resources/lokirule.yaml b/kubernetes/main/apps/media/qbittorrent/app/resources/lokirule.yaml new file mode 100755 index 000000000..e2f6b0c76 --- /dev/null +++ b/kubernetes/main/apps/media/qbittorrent/app/resources/lokirule.yaml @@ -0,0 +1,14 @@ +--- +groups: + - name: qbittorrent + rules: + - alert: QbittorrentFastResumeRejected + expr: | + sum by (app) (count_over_time({app="qbittorrent"} |~ "(?i)fast resume rejected"[1h])) > 0 + for: 2m + labels: + severity: critical + category: logs + annotations: + app: "{{ $labels.app }}" + summary: "{{ $labels.app }} has a torrent with fast resume rejected" diff --git a/kubernetes/main/apps/media/qbittorrent/app/resources/post-process.sh b/kubernetes/main/apps/media/qbittorrent/app/resources/post-process.sh new file mode 100755 index 000000000..a12a4c286 --- /dev/null +++ b/kubernetes/main/apps/media/qbittorrent/app/resources/post-process.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash +# shellcheck disable=SC2154 + +set -euo pipefail + +# User-defined variables +CROSS_SEED_ENABLED="$${CROSS_SEED_ENABLED:-false}" +CROSS_SEED_HOST="$${CROSS_SEED_HOST:-required}" +CROSS_SEED_PORT="$${CROSS_SEED_PORT:-required}" +CROSS_SEED_API_KEY="$${CROSS_SEED_API_KEY:-required}" +CROSS_SEED_SLEEP_INTERVAL="$${CROSS_SEED_SLEEP_INTERVAL:-30}" +PUSHOVER_ENABLED="$${PUSHOVER_ENABLED:-false}" +PUSHOVER_USER_KEY="$${PUSHOVER_USER_KEY:-required}" +PUSHOVER_TOKEN="$${PUSHOVER_TOKEN:-required}" + +# Function to set release variables from SABnzbd +set_sab_vars() { + RELEASE_NAME="$${SAB_FILENAME:-}" + RELEASE_DIR="$${SAB_COMPLETE_DIR:-}" + RELEASE_CAT="$${SAB_CAT:-}" + RELEASE_SIZE="$${SAB_BYTES:-}" + RELEASE_STATUS="$${SAB_PP_STATUS:-}" + RELEASE_INDEXER="$${SAB_URL:-}" + RELEASE_TYPE="NZB" +} + +# Function to set release variables from qBittorrent +set_qb_vars() { + RELEASE_NAME="$1" # %N + RELEASE_DIR="$2" # %F + RELEASE_CAT="$3" # %L + RELEASE_SIZE="$4" # %Z + RELEASE_INDEXER="$5" # %T + RELEASE_STATUS=0 # Always 0 for qBittorrent + RELEASE_TYPE="Torrent" +} + +# Function to send pushover notification +send_pushover_notification() { + local pushover_message status_code json_data + printf -v pushover_message \ + "%s\nCategory: %s\nIndexer: %s\nSize: %s" \ + "$${RELEASE_NAME%.*}" \ + "$${RELEASE_CAT}" \ + "$(trurl --url "$${RELEASE_INDEXER}" --get '{idn:host}')" \ + "$(numfmt --to iec --format "%8.2f" "$${RELEASE_SIZE}")" + + json_data=$(jo \ + token="$${PUSHOVER_TOKEN}" \ + user="$${PUSHOVER_USER_KEY}" \ + title="$${RELEASE_TYPE} Downloaded" \ + message="$${pushover_message}" \ + priority="-2" \ + html="1" + ) + + status_code=$(curl \ + --silent \ + --write-out "%{http_code}" \ + --output /dev/null \ + --request POST \ + --header "Content-Type: application/json" \ + --data-binary "$${json_data}" \ + "https://api.pushover.net/1/messages.json" + ) + + printf "pushover notification returned with HTTP status code %s and payload: %s\n" \ + "$${status_code}" \ + "$(echo "$${json_data}" | jq --compact-output)" >&2 +} + +# Function to search for cross-seed +search_cross_seed() { + local status_code + status_code=$(curl \ + --silent \ + --output /dev/null \ + --write-out "%{http_code}" \ + --request POST \ + --data-urlencode "path=$${RELEASE_DIR}" \ + --header "X-Api-Key: $${CROSS_SEED_API_KEY}" \ + "http://$${CROSS_SEED_HOST}:$${CROSS_SEED_PORT}/api/webhook" + ) + + printf "cross-seed search returned with HTTP status code %s and path %s\n" \ + 
"$${status_code}" \ + "$${RELEASE_DIR}" >&2 + + sleep "$${CROSS_SEED_SLEEP_INTERVAL}" +} + +main() { + # Determine the source and set release variables accordingly + if env | grep -q "^SAB_"; then + set_sab_vars + else + set_qb_vars "$@" + fi + + # Check if post-processing was successful + if [[ "$${RELEASE_STATUS}" -ne 0 ]]; then + printf "post-processing failed with sabnzbd status code %s\n" \ + "$${RELEASE_STATUS}" >&2 + exit 1 + fi + + # Send pushover notification + if [[ "$${PUSHOVER_ENABLED}" == "true" ]]; then + send_pushover_notification + fi + + # Search for cross-seed + if [[ "$${CROSS_SEED_ENABLED}" == "true" ]]; then + search_cross_seed + fi +} + +main "$@" diff --git a/kubernetes/main/apps/media/qbittorrent/app/resources/qbitmanage-config.yaml b/kubernetes/main/apps/media/qbittorrent/app/resources/qbitmanage-config.yaml new file mode 100755 index 000000000..a2d58ce9e --- /dev/null +++ b/kubernetes/main/apps/media/qbittorrent/app/resources/qbitmanage-config.yaml @@ -0,0 +1,304 @@ +commands: + dry_run: false + cross_seed: false + recheck: false + cat_update: false + tag_update: true + rem_unregistered: true + tag_tracker_error: true + rem_orphaned: true + tag_nohardlinks: true + share_limits: true + skip_qb_version_check: true + skip_cleanup: false + +qbt: + host: localhost:80 + user: "" + pass: "" + +settings: + force_auto_tmm: false # Will force qBittorrent to enable Automatic Torrent Management for each torrent. + tracker_error_tag: issue # Will set the tag of any torrents that do not have a working tracker. + nohardlinks_tag: noHL # Will set the tag of any torrents with no hardlinks. + share_limits_tag: ~share_limit # Will add this tag when applying share limits to provide an easy way to filter torrents by share limit group/priority for each torrent + share_limits_min_seeding_time_tag: MinSeedTimeNotReached # Tag to be added to torrents that have not yet reached the minimum seeding time + share_limits_min_num_seeds_tag: MinSeedsNotMet # Tag to be added to torrents that have not yet reached the minimum number of seeds + share_limits_last_active_tag: LastActiveLimitNotReached # Tag to be added to torrents that have not yet reached the last active limit + cross_seed_tag: cross-seed # Will set the tag of any torrents that are added by cross-seed command + cat_filter_completed: true # Filters for completed torrents only when running cat_update command + share_limits_filter_completed: true # Filters for completed torrents only when running share_limits command + tag_nohardlinks_filter_completed: true # Filters for completed torrents only when running tag_nohardlinks command + cat_update_all: true # Checks and udpates all torrent categories if set to True when running cat_update command, otherwise only update torrents that are uncategorized + + force_auto_tmm_ignore_tags: cross-seed + disable_qbt_default_share_limits: true +directory: + # Do not remove these + # Cross-seed var: # Output directory of cross-seed + # root_dir var: # Root downloads directory used to check for orphaned files, noHL, and RecycleBin. + # remote_dir var: # Path of docker host mapping of root_dir. + # remote_dir must be set if you're running qbit_manage locally and qBittorrent/cross_seed is in a docker + # remote_dir should not be set if qbit_manage is running in a container + # recycle_bin var: # Path of the RecycleBin folder. Default location is set to remote_dir/.RecycleBin + # torrents_dir var: # Path of the your qbittorrent torrents directory. 
diff --git a/kubernetes/main/apps/media/qbittorrent/app/resources/qbitmanage-config.yaml b/kubernetes/main/apps/media/qbittorrent/app/resources/qbitmanage-config.yaml new file mode 100755 index 000000000..a2d58ce9e --- /dev/null +++ b/kubernetes/main/apps/media/qbittorrent/app/resources/qbitmanage-config.yaml @@ -0,0 +1,304 @@ +commands: + dry_run: false + cross_seed: false + recheck: false + cat_update: false + tag_update: true + rem_unregistered: true + tag_tracker_error: true + rem_orphaned: true + tag_nohardlinks: true + share_limits: true + skip_qb_version_check: true + skip_cleanup: false + +qbt: + host: localhost:80 + user: "" + pass: "" + +settings: + force_auto_tmm: false # Will force qBittorrent to enable Automatic Torrent Management for each torrent. + tracker_error_tag: issue # Will set the tag of any torrents that do not have a working tracker. + nohardlinks_tag: noHL # Will set the tag of any torrents with no hardlinks. + share_limits_tag: ~share_limit # Will add this tag when applying share limits to provide an easy way to filter torrents by share limit group/priority for each torrent + share_limits_min_seeding_time_tag: MinSeedTimeNotReached # Tag to be added to torrents that have not yet reached the minimum seeding time + share_limits_min_num_seeds_tag: MinSeedsNotMet # Tag to be added to torrents that have not yet reached the minimum number of seeds + share_limits_last_active_tag: LastActiveLimitNotReached # Tag to be added to torrents that have not yet reached the last active limit + cross_seed_tag: cross-seed # Will set the tag of any torrents that are added by cross-seed command + cat_filter_completed: true # Filters for completed torrents only when running cat_update command + share_limits_filter_completed: true # Filters for completed torrents only when running share_limits command + tag_nohardlinks_filter_completed: true # Filters for completed torrents only when running tag_nohardlinks command + cat_update_all: true # Checks and updates all torrent categories if set to True when running cat_update command, otherwise only updates torrents that are uncategorized + + force_auto_tmm_ignore_tags: cross-seed + disable_qbt_default_share_limits: true +directory: + # Do not remove these + # Cross-seed var: # Output directory of cross-seed + # root_dir var: # Root downloads directory used to check for orphaned files, noHL, and RecycleBin. + # remote_dir var: # Path of docker host mapping of root_dir. + # remote_dir must be set if you're running qbit_manage locally and qBittorrent/cross_seed is in a docker + # remote_dir should not be set if qbit_manage is running in a container + # recycle_bin var: # Path of the RecycleBin folder. Default location is set to remote_dir/.RecycleBin + # torrents_dir var: # Path of your qbittorrent torrents directory. Required for `save_torrents` attribute in recyclebin + # orphaned_dir var: # Path of the Orphaned Data folder. This is similar to RecycleBin, but only for orphaned data. + remote_dir: /data/downloads/torrents/complete + cross_seed: /your/path/here/ + root_dir: /data/downloads/torrents/complete + recycle_bin: /data/downloads/torrents/.RecycleBin + torrents_dir: /qBittorrent/BT_backup + orphaned_dir: /data/downloads/torrents/orphaned_data + +cat: + # Category & Path Parameters + # : # Path of your save directory. + +cat_change: + # This moves all the torrents from one category to another category. This executes on --cat-update + # WARNING: if the paths are different and Default Torrent Management Mode is set to automatic the files could be moved !!! + # : + +tracker: + # Mandatory + # Tag Parameters + # : # This is the keyword in the tracker url. You can define multiple tracker urls by splitting with `|` delimiter + # Set tag name. Can be a list of tags or a single tag + # tag: + # Set the category based on tracker URL. This category option takes priority over the category defined by save directory + # cat: + # Set this to the notifiarr react name. This is used to add indexer reactions to the notifications sent by Notifiarr + # notifiarr: + animebytes.tv: + tag: AnimeBytes + notifiarr: animebytes + avistaz: + tag: + - Avistaz + - tag2 + - tag3 + notifiarr: avistaz + beyond-hd: + tag: [Beyond-HD, tag2, tag3] + cat: movies + notifiarr: beyondhd + blutopia: + tag: Blutopia + notifiarr: blutopia + cartoonchaos: + tag: CartoonChaos + digitalcore: + tag: DigitalCore + notifiarr: digitalcore + gazellegames: + tag: GGn + hdts: + tag: HDTorrents + landof.tv: + tag: BroadcasTheNet + notifiarr: broadcasthenet + myanonamouse: + tag: MaM + passthepopcorn: + tag: PassThePopcorn + notifiarr: passthepopcorn + privatehd: + tag: PrivateHD + notifiarr: + torrentdb: + tag: TorrentDB + notifiarr: torrentdb + torrentleech|tleechreload: + tag: TorrentLeech + notifiarr: torrentleech + tv-vault: + tag: TV-Vault + stackoverflow|empirehost|bgp: + tag: IPTorrents + notifiarr: iptorrents + speedapp: + tag: speedapp.io + notifiarr: speedapp.io + # The "other" key is a special keyword and if defined will tag any other trackers that don't match the above trackers into this tag + other: + tag: other + +nohardlinks: + # Tag Movies/Series that are not hard linked outside the root directory + # Mandatory to fill out directory parameter above to use this function (root_dir/remote_dir) + # Set this to the category name of your completed movies/completed series in qbit; any category you would like to tag when no hardlinks are found is acceptable. + movies: + ignore_root_dir: true + movies-imported: + exclude_tags: + - Beyond-HD + - AnimeBytes + - MaM + ignore_root_dir: true + tv: + ignore_root_dir: true + tv-imported: + exclude_tags: + - Beyond-HD + - AnimeBytes + - MaM + ignore_root_dir: true + cross-seed: + ignore_root_dir: false
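+ # "No hardlinks" means a filesystem link count of 1; a rough standalone
+ # equivalent of the noHL check against this config's root_dir would be:
+ #   find /data/downloads/torrents/complete -type f -links 1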
+ +share_limits: + + noHL_cross-seed: + priority: 1 + include_all_tags: + - noHL + - cross-seed + categories: + - cross-seed + max_seeding_time: 10m + cleanup: true + custom_tag: sharelimits_noHL_cross-seed + noHL_TorrentLeech: + priority: 2 + include_all_tags: + - noHL + - TorrentLeech + categories: + - movies + - movies-imported + - tv + - tv-imported + max_seeding_time: 10d + cleanup: true + resume_torrent_after_change: true + add_group_to_tag: true + custom_tag: sharelimits_noHL_TorrentLeech + + noHL_speedapp.io: + priority: 3 + include_all_tags: + - noHL + - speedapp.io + categories: + - movies + - movies-imported + - tv + - tv-imported + max_seeding_time: 3d + cleanup: true + resume_torrent_after_change: true + add_group_to_tag: true + custom_tag: sharelimits_noHL_speedapp.io + + noHL_IPTorrents: + priority: 4 + include_all_tags: + - noHL + - IPTorrents + categories: + - movies + - movies-imported + - tv + - tv-imported + max_seeding_time: 16d + cleanup: true + resume_torrent_after_change: true + add_group_to_tag: true + custom_tag: sharelimits_noHL_IPTorrents + + TorrentLeech: + priority: 5 + include_all_tags: + - TorrentLeech + categories: + - movies + - movies-imported + - tv + - tv-imported + - cross-seed + max_seeding_time: 30d + cleanup: true + resume_torrent_after_change: true + add_group_to_tag: true + custom_tag: sharelimits_TorrentLeech + + IPTorrents: + priority: 6 + include_all_tags: + - IPTorrents + categories: + - movies + - movies-imported + - tv + - tv-imported + - cross-seed + max_seeding_time: 30d + cleanup: true + resume_torrent_after_change: true + add_group_to_tag: true + custom_tag: sharelimits_IPTorrents + + speedapp.io: + priority: 7 + include_all_tags: + - speedapp.io + categories: + - movies + - movies-imported + - tv + - tv-imported + - cross-seed + max_seeding_time: 30d + cleanup: true + resume_torrent_after_change: true + add_group_to_tag: true + custom_tag: sharelimits_speedapp.io + + general: + priority: 8 + include_any_tags: + - speedapp.io + - IPTorrents + - TorrentLeech + categories: + - general-completed + max_ratio: 5 + max_seeding_time: 30d + cleanup: false + resume_torrent_after_change: true + add_group_to_tag: true + custom_tag: sharelimits_general
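+ # Share limit groups are evaluated in ascending priority order; the first
+ # group whose tag/category filters match a torrent applies, and with
+ # cleanup: true the torrent is removed once its limit is reached (its files
+ # land in the recycle bin configured below). A rough manual equivalent of
+ # empty_after_x_days: 7 would be:
+ #   find /data/downloads/torrents/.RecycleBin -mindepth 1 -mtime +7 -delete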
+  # This will save a copy of your .torrent and .fastresume files in the recycle bin before deleting them from qBittorrent
+  save_torrents: true
+  # split_by_category var:
+  # This will split the recycle bin folder by the save path defined in the `cat` attribute
+  # and add the base folder name of the recycle bin that was defined in the `recycle_bin` sub-attribute under directory.
+  split_by_category: true
+
+orphaned:
+  # Orphaned files are those in the root_dir download directory that are not referenced by any active torrents.
+  # Will automatically remove all files and folders in orphaned data after x days. (Checked on every script run)
+  # If this variable is not defined, the orphaned data will never be emptied.
+  # WARNING: Setting this variable to 0 will delete all files immediately upon script run!
+  empty_after_x_days: 7
+  # File patterns that will not be considered orphaned files. Handy for generated files that aren't part of the torrent but belong with the torrent's files
+  exclude_patterns:
+    - '**/.DS_Store'
+    - '**/Thumbs.db'
+    - '**/@eaDir'
+    - '**/general/*'
+    - '**/*.!qB'
+    - '**/*_unpackerred'
+  max_orphaned_files_to_delete: 50000
diff --git a/kubernetes/main/apps/media/qbittorrent/app/volsync-dst.yaml b/kubernetes/main/apps/media/qbittorrent/app/volsync-dst.yaml
new file mode 100644
index 000000000..2cd96fa59
--- /dev/null
+++ b/kubernetes/main/apps/media/qbittorrent/app/volsync-dst.yaml
@@ -0,0 +1,18 @@
+apiVersion: volsync.backube/v1alpha1
+kind: ReplicationDestination
+metadata:
+  name: datavol-dest
+  namespace: media
+spec:
+  trigger:
+    manual: restore-once
+  restic:
+    repository: qbittorrent-volsync-secret
+    # Use an existing PVC, don't provision a new one
+    destinationPVC: qbittorrent-config
+    copyMethod: Snapshot
+    storageClassName: openebs-zfs-128k
+    moverSecurityContext:
+      runAsUser: 568
+      runAsGroup: 568
+      fsGroup: 568
diff --git a/kubernetes/main/apps/media/qbittorrent/app/volsync-src.yaml b/kubernetes/main/apps/media/qbittorrent/app/volsync-src.yaml
new file mode 100644
index 000000000..596be60af
--- /dev/null
+++ b/kubernetes/main/apps/media/qbittorrent/app/volsync-src.yaml
@@ -0,0 +1,48 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.ok8.sh/external-secrets.io/externalsecret_v1beta1.json
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+  name: &secret qbittorrent-volsync-secret
+spec:
+  secretStoreRef:
+    kind: ClusterSecretStore
+    name: vault-backend
+  target:
+    name: *secret
+    creationPolicy: Owner
+    template:
+      engineVersion: v2
+      data:
+        RESTIC_REPOSITORY: '{{ .RESTIC_REPOSITORY }}/qbittorrent/volsync/config-volsync-config'
+        RESTIC_PASSWORD: '{{ .ENCRYPTION_KEY }}'
+        AWS_ACCESS_KEY_ID: '{{ .CF_ACCESS_KEY_ID }}'
+        AWS_SECRET_ACCESS_KEY: '{{ .CF_SECRET_ACCESS_KEY }}'
+  dataFrom:
+    - extract:
+        key: secrets/volsync
+    - extract:
+        key: secrets/cloudflare
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.ok8.sh/volsync.backube/replicationsource_v1alpha1.json
+apiVersion: volsync.backube/v1alpha1
+kind: ReplicationSource
+metadata:
+  name: qbittorrent
+spec:
+  sourcePVC: qbittorrent-config
+  trigger:
+    schedule: "0 7 * * *"
+  restic:
+    copyMethod: Snapshot
+    storageClassName: openebs-zfs-128k
+    pruneIntervalDays: 7
+    repository: qbittorrent-volsync-secret
+    cacheCapacity: 2Gi
+    moverSecurityContext:
+      runAsUser: 568
+      runAsGroup: 568
+      fsGroup: 568
+    retain:
+      daily: 7
+      within: 3d
diff --git a/kubernetes/main/apps/media/qbittorrent/ks.yaml b/kubernetes/main/apps/media/qbittorrent/ks.yaml
new file mode
100755 index 000000000..502f68fe9 --- /dev/null +++ b/kubernetes/main/apps/media/qbittorrent/ks.yaml @@ -0,0 +1,22 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app qbittorrent + namespace: flux-system +spec: + targetNamespace: media + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/media/qbittorrent/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/media/radarr/app/externalsecret.yaml b/kubernetes/main/apps/media/radarr/app/externalsecret.yaml new file mode 100755 index 000000000..3b6feb2e8 --- /dev/null +++ b/kubernetes/main/apps/media/radarr/app/externalsecret.yaml @@ -0,0 +1,35 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret radarr-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + RADARR__AUTH__APIKEY: "{{ .RADARR_API_KEY }}" + RADARR__POSTGRES__HOST: &dbHost postgres17-rw.database.svc.cluster.local + RADARR__POSTGRES__PORT: "5432" + RADARR__POSTGRES__USER: &dbUser "{{ .RADARR_POSTGRES_USER }}" + RADARR__POSTGRES__PASSWORD: &dbPass "{{ .RADARR_POSTGRES_PASSWORD }}" + RADARR__POSTGRES__MAINDB: &dbName radarr + PUSHOVER_TOKEN: "{{ .RADARR_PUSHOVER_TOKEN }}" + PUSHOVER_USER_KEY: "{{ .PUSHOVER_USER_KEY }}" + INIT_POSTGRES_DBNAME: *dbName + INIT_POSTGRES_HOST: *dbHost + INIT_POSTGRES_USER: *dbUser + INIT_POSTGRES_PASS: *dbPass + INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}" + dataFrom: + - extract: + key: secrets/cloudnative-pg + - extract: + key: secrets/pushover + - extract: + key: secrets/api-keys diff --git a/kubernetes/main/apps/media/radarr/app/helmrelease.yaml b/kubernetes/main/apps/media/radarr/app/helmrelease.yaml new file mode 100755 index 000000000..1ef400fd9 --- /dev/null +++ b/kubernetes/main/apps/media/radarr/app/helmrelease.yaml @@ -0,0 +1,167 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app radarr +spec: + interval: 30m + chart: + spec: + verify: + provider: cosign + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + + values: + controllers: + radarr: + annotations: + reloader.stakater.com/auto: "true" + + initContainers: + init-db: + image: + repository: ghcr.io/onedr0p/postgres-init + tag: 16.6@sha256:35353a77777ee8f634d0f3945f495b4a40065134b8619e0d18bd49b0ee9c855b + envFrom: &envFrom + - secretRef: + name: radarr-secret + + containers: + app: + image: + repository: ghcr.io/onedr0p/radarr-develop + tag: 5.15.1.9463@sha256:e9144f8a76d8e2a98f57a2ff6170e05f5880c6839d9fc351a9cb854813f8bc69 + env: + RADARR__APP__INSTANCENAME: Radarr + RADARR__APP__THEME: dark + RADARR__AUTH__METHOD: External + RADARR__AUTH__REQUIRED: DisabledForLocalAddresses + RADARR__LOG__DBENABLED: "False" + 
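+            # Convention note: the Servarr apps read double-underscore env vars as
+            # <SECTION>__<KEY> config overrides, so RADARR__AUTH__METHOD above maps to
+            # the Auth section's Method setting, and the RADARR__POSTGRES__* values
+            # injected via envFrom (radarr-secret) configure the database the same way.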
RADARR__LOG__LEVEL: info + RADARR__SERVER__PORT: &port 80 + RADARR__UPDATE__BRANCH: develop + TZ: Europe/Sofia + envFrom: *envFrom + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /ping + port: *port + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + memory: 4Gi + + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 + fsGroupChangePolicy: OnRootMismatch + seccompProfile: { type: RuntimeDefault } + + service: + app: + controller: *app + ports: + http: + port: *port + + ingress: + app: + annotations: + nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth" + nginx.ingress.kubernetes.io/auth-signin: "https://$host/oauth2/start?rd=$escaped_request_uri" + nginx.ingress.kubernetes.io/auth-snippet: | + if ($request_uri ~* "^/api(/|$)") { + return 200; + } + if ($request_uri ~* "^/ping") { + return 200; + } + external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN} + nginx.ingress.kubernetes.io/configuration-snippet: | + proxy_set_header Accept-Encoding ""; + sub_filter '' ''; + sub_filter_once on; + cert-manager.io/cluster-issuer: zerossl-prod + cert-manager.io/private-key-rotation-policy: Always + cert-manager.io/private-key-algorithm: ECDSA + cert-manager.io/private-key-size: "384" + className: internal + tls: + - hosts: + - &host "{{ .Release.Name }}.${PUBLIC_DOMAIN}" + secretName: radarr-tls + hosts: + - host: *host + paths: + - path: / + service: + identifier: app + port: http + + persistence: + config: + enabled: true + type: persistentVolumeClaim + size: 15Gi + accessMode: ReadWriteOnce + storageClass: openebs-zfs-128k + advancedMounts: + radarr: + app: + - path: /config + + scripts: + type: configMap + name: radarr-configmap + defaultMode: 0550 + advancedMounts: + radarr: + app: + - path: /scripts/pushover-notify.sh + subPath: pushover-notify.sh + readOnly: true + + tmp: + type: emptyDir + medium: Memory + + media: + type: nfs + server: 192.168.91.40 + path: /mnt/exos20/data + advancedMounts: + radarr: + app: + - path: /data diff --git a/kubernetes/main/apps/media/radarr/app/kustomization.yaml b/kubernetes/main/apps/media/radarr/app/kustomization.yaml new file mode 100755 index 000000000..aa0235584 --- /dev/null +++ b/kubernetes/main/apps/media/radarr/app/kustomization.yaml @@ -0,0 +1,19 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml +configMapGenerator: + - name: radarr-configmap + files: + - pushover-notify.sh=./resources/pushover-notify.sh + - name: radarr-gatus-ep + options: + labels: + gatus.io/enabled: "true" + files: + - config.yaml=./resources/gatus-ep.yaml +generatorOptions: + disableNameSuffixHash: true diff --git a/kubernetes/main/apps/media/radarr/app/resources/gatus-ep.yaml b/kubernetes/main/apps/media/radarr/app/resources/gatus-ep.yaml new file mode 100755 index 000000000..0b4ff3148 --- /dev/null +++ b/kubernetes/main/apps/media/radarr/app/resources/gatus-ep.yaml @@ -0,0 +1,15 @@ +endpoints: + - name: "Radarr" + group: guarded + url: "https://radarr.${PUBLIC_DOMAIN}/ping" + interval: 1m + ui: + hide-hostname: true + hide-url: true + client: + dns-resolver: 
tcp://172.17.0.10:53 + conditions: + - "[STATUS] == 200" + - "[BODY].status == OK" + alerts: + - type: pushover diff --git a/kubernetes/main/apps/media/radarr/app/resources/pushover-notify.sh b/kubernetes/main/apps/media/radarr/app/resources/pushover-notify.sh new file mode 100755 index 000000000..d7079abf1 --- /dev/null +++ b/kubernetes/main/apps/media/radarr/app/resources/pushover-notify.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash +# shellcheck disable=SC2154 +set -euo pipefail + +# User defined variables for pushover +PUSHOVER_USER_KEY="$${PUSHOVER_USER_KEY:-required}" +PUSHOVER_TOKEN="$${PUSHOVER_TOKEN:-required}" +PUSHOVER_PRIORITY="$${PUSHOVER_PRIORITY:-"-2"}" + +if [[ "$${radarr_eventtype:-}" == "Test" ]]; then + PUSHOVER_PRIORITY="1" + printf -v PUSHOVER_TITLE \ + "Test Notification" + printf -v PUSHOVER_MESSAGE \ + "Howdy this is a test notification from %s" \ + "$${radarr_instancename:-Radarr}" + printf -v PUSHOVER_URL \ + "%s" \ + "$${radarr_applicationurl:-localhost}" + printf -v PUSHOVER_URL_TITLE \ + "Open %s" \ + "$${radarr_instancename:-Radarr}" +fi + +if [[ "$${radarr_eventtype:-}" == "Download" ]]; then + printf -v PUSHOVER_TITLE \ + "Movie %s" \ + "$( [[ "$${radarr_isupgrade}" == "True" ]] && echo "Upgraded" || echo "Downloaded" )" + printf -v PUSHOVER_MESSAGE \ + "%s (%s)\n%s\n\nClient: %s\nQuality: %s\nSize: %s" \ + "$${radarr_movie_title}" \ + "$${radarr_movie_year}" \ + "$${radarr_movie_overview}" \ + "$${radarr_download_client:-Unknown}" \ + "$${radarr_moviefile_quality:-Unknown}" \ + "$(numfmt --to iec --format "%8.2f" "$${radarr_release_size:-0}")" + printf -v PUSHOVER_URL \ + "%s/movie/%s" \ + "$${radarr_applicationurl:-localhost}" "$${radarr_movie_tmdbid}" + printf -v PUSHOVER_URL_TITLE \ + "View movie in %s" \ + "$${radarr_instancename:-Radarr}" +fi + +if [[ "$${radarr_eventtype:-}" == "ManualInteractionRequired" ]]; then + PUSHOVER_PRIORITY="1" + printf -v PUSHOVER_TITLE \ + "Movie import requires intervention" + printf -v PUSHOVER_MESSAGE \ + "%s (%s)\nClient: %s" \ + "$${radarr_movie_title}" \ + "$${radarr_movie_year}" \ + "$${radarr_download_client:-Unknown}" + printf -v PUSHOVER_URL \ + "%s/activity/queue" \ + "$${radarr_applicationurl:-localhost}" + printf -v PUSHOVER_URL_TITLE \ + "View queue in %s" \ + "$${radarr_instancename:-Radarr}" +fi + +json_data=$(jo \ + token="$${PUSHOVER_TOKEN}" \ + user="$${PUSHOVER_USER_KEY}" \ + title="$${PUSHOVER_TITLE}" \ + message="$${PUSHOVER_MESSAGE}" \ + url="$${PUSHOVER_URL}" \ + url_title="$${PUSHOVER_URL_TITLE}" \ + priority="$${PUSHOVER_PRIORITY}" \ + html="1" +) + +status_code=$(curl \ + --silent \ + --write-out "%{http_code}" \ + --output /dev/null \ + --request POST \ + --header "Content-Type: application/json" \ + --data-binary "$${json_data}" \ + "https://api.pushover.net/1/messages.json" \ +) + +printf "pushover notification returned with HTTP status code %s and payload: %s\n" \ + "$${status_code}" \ + "$(echo "$${json_data}" | jq --compact-output)" >&2 diff --git a/kubernetes/main/apps/media/radarr/ks.yaml b/kubernetes/main/apps/media/radarr/ks.yaml new file mode 100755 index 000000000..1381e7b71 --- /dev/null +++ b/kubernetes/main/apps/media/radarr/ks.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app radarr + namespace: flux-system +spec: + targetNamespace: media + commonMetadata: + labels: + app.kubernetes.io/name: *app + 
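+  # Flux defers reconciling this Kustomization until every dependsOn target below is
+  # Ready, so radarr is only applied once the CNPG cluster and secret stores exist.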
dependsOn:
+    - name: cloudnative-pg-cluster
+    - name: external-secrets-stores
+  path: ./kubernetes/main/apps/media/radarr/app
+  prune: true
+  sourceRef:
+    kind: GitRepository
+    name: home-kubernetes
+  wait: false
+  interval: 30m
+  timeout: 5m
diff --git a/kubernetes/main/apps/media/sabnzbd/app/externalsecret.yaml b/kubernetes/main/apps/media/sabnzbd/app/externalsecret.yaml
new file mode 100755
index 000000000..ce972da67
--- /dev/null
+++ b/kubernetes/main/apps/media/sabnzbd/app/externalsecret.yaml
@@ -0,0 +1,25 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+  name: &secret sabnzbd-secret
+spec:
+  secretStoreRef:
+    kind: ClusterSecretStore
+    name: vault-backend
+  target:
+    name: *secret
+    template:
+      engineVersion: v2
+      data:
+        CROSS_SEED_API_KEY: "{{ .CROSS_SEED_API_KEY }}"
+        PUSHOVER_TOKEN: "{{ .SABNZBD_PUSHOVER_TOKEN }}"
+        PUSHOVER_USER_KEY: "{{ .PUSHOVER_USER_KEY }}"
+        SABNZBD__API_KEY: &apiKey "{{ .SABNZBD_API_KEY }}"
+        SABNZBD__NZB_KEY: *apiKey
+  dataFrom:
+    - extract:
+        key: secrets/api-keys
+    - extract:
+        key: secrets/pushover
diff --git a/kubernetes/main/apps/media/sabnzbd/app/helmrelease.yaml b/kubernetes/main/apps/media/sabnzbd/app/helmrelease.yaml
new file mode 100755
index 000000000..5b11505bd
--- /dev/null
+++ b/kubernetes/main/apps/media/sabnzbd/app/helmrelease.yaml
@@ -0,0 +1,161 @@
+---
+# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json
+apiVersion: helm.toolkit.fluxcd.io/v2
+kind: HelmRelease
+metadata:
+  name: sabnzbd
+spec:
+  interval: 30m
+  chart:
+    spec:
+      verify:
+        provider: cosign
+      chart: app-template
+      version: 3.5.1
+      sourceRef:
+        kind: HelmRepository
+        name: bjw-s
+        namespace: flux-system
+  install:
+    remediation:
+      retries: 3
+  upgrade:
+    cleanupOnFail: true
+    remediation:
+      strategy: rollback
+      retries: 3
+  dependsOn:
+    - name: volsync
+      namespace: volsync-system
+
+  values:
+    controllers:
+      sabnzbd:
+        annotations:
+          reloader.stakater.com/auto: "true"
+
+        containers:
+          app:
+            image:
+              repository: ghcr.io/onedr0p/sabnzbd
+              tag: 4.3.3@sha256:86c645db93affcbf01cc2bce2560082bfde791009e1506dba68269b9c50bc341
+            env:
+              TZ: Europe/Sofia
+              SABNZBD__PORT: &port 80
+              SABNZBD__HOST_WHITELIST_ENTRIES: >-
+                sabnzbd,
+                sabnzbd.media,
+                sabnzbd.media.svc,
+                sabnzbd.media.svc.cluster,
+                sabnzbd.media.svc.cluster.local,
+                sabnzbd.${PUBLIC_DOMAIN}
+              CROSS_SEED_HOST: cross-seed.media.svc.cluster.local
+              CROSS_SEED_ENABLED: true
+              CROSS_SEED_PORT: 80
+              CROSS_SEED_SLEEP_INTERVAL: 30
+              PUSHOVER_ENABLED: true
+            envFrom:
+              - secretRef:
+                  name: sabnzbd-secret
+            probes:
+              liveness: &probes
+                enabled: true
+                custom: true
+                spec:
+                  httpGet:
+                    path: /api?mode=version
+                    port: *port
+                  initialDelaySeconds: 0
+                  periodSeconds: 10
+                  timeoutSeconds: 1
+                  failureThreshold: 3
+              readiness: *probes
+            securityContext:
+              allowPrivilegeEscalation: false
+              readOnlyRootFilesystem: true
+              capabilities: { drop: ["ALL"] }
+            resources:
+              requests:
+                cpu: 100m
+                memory: 512Mi
+              limits:
+                memory: 8Gi
+
+    defaultPodOptions:
+      securityContext:
+        runAsNonRoot: true
+        runAsUser: 568
+        runAsGroup: 568
+        fsGroup: 568
+        fsGroupChangePolicy: OnRootMismatch
+        seccompProfile: { type: RuntimeDefault }
+
+    service:
+      app:
+        controller: sabnzbd
+        ports:
+          http:
+            port: *port
+
+    ingress:
+      app:
+        annotations:
+          external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN}
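+          # ingress-nginx issues a subrequest to auth-url for each incoming request and
+          # admits it on any 2xx; the auth-snippet below answers API calls with 202 so
+          # clients presenting an apikey bypass the oauth2-proxy sign-in flow.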
nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth" + nginx.ingress.kubernetes.io/auth-signin: "https://$host/oauth2/start?rd=$escaped_request_uri" + nginx.ingress.kubernetes.io/auth-snippet: | + if ($request_uri ~* "(\/|\/[0-9]+\/)api(/|$|[?])") { + return 202; + } + nginx.ingress.kubernetes.io/configuration-snippet: | + proxy_set_header Accept-Encoding ""; + sub_filter '' ''; + sub_filter_once on; + cert-manager.io/cluster-issuer: zerossl-prod + cert-manager.io/private-key-rotation-policy: Always + cert-manager.io/private-key-algorithm: ECDSA + cert-manager.io/private-key-size: "384" + className: internal + tls: + - hosts: + - &host "{{ .Release.Name }}.${PUBLIC_DOMAIN}" + secretName: sabnzbd-tls + hosts: + - host: *host + paths: + - path: / + service: + identifier: app + port: http + + persistence: + config: + enabled: true + type: persistentVolumeClaim + size: 2Gi + storageClass: openebs-zfs-128k + accessMode: ReadWriteOnce + + logs: + type: emptyDir + globalMounts: + - path: /config/logs + + tmp: + type: emptyDir + medium: Memory + + scripts: + type: configMap + name: sabnzbd-scripts + defaultMode: 0550 + globalMounts: + - readOnly: true + + media: + type: nfs + server: 192.168.91.40 + path: /mnt/exos20/data + globalMounts: + - path: /data/downloads/usenet + subPath: downloads/usenet diff --git a/kubernetes/main/apps/media/sabnzbd/app/kustomization.yaml b/kubernetes/main/apps/media/sabnzbd/app/kustomization.yaml new file mode 100755 index 000000000..86f9a86cd --- /dev/null +++ b/kubernetes/main/apps/media/sabnzbd/app/kustomization.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml + - ./volsync.yaml +configMapGenerator: + - name: sabnzbd-scripts + files: + - post-process.sh=./resources/post-process.sh + - name: sabnzbd-gatus-ep + options: + labels: + gatus.io/enabled: "true" + files: + - config.yaml=./resources/gatus-ep.yaml +generatorOptions: + disableNameSuffixHash: true diff --git a/kubernetes/main/apps/media/sabnzbd/app/resources/gatus-ep.yaml b/kubernetes/main/apps/media/sabnzbd/app/resources/gatus-ep.yaml new file mode 100755 index 000000000..0b12bd646 --- /dev/null +++ b/kubernetes/main/apps/media/sabnzbd/app/resources/gatus-ep.yaml @@ -0,0 +1,15 @@ +endpoints: + - name: "SABnzbd" + group: guarded + url: "https://sabnzbd.${PUBLIC_DOMAIN}/api?mode=version" + interval: 1m + ui: + hide-hostname: true + hide-url: true + client: + dns-resolver: tcp://172.17.0.10:53 + conditions: + - "[STATUS] == 200" + - "has([BODY].version) == true" + alerts: + - type: pushover diff --git a/kubernetes/main/apps/media/sabnzbd/app/resources/post-process.sh b/kubernetes/main/apps/media/sabnzbd/app/resources/post-process.sh new file mode 100755 index 000000000..a12a4c286 --- /dev/null +++ b/kubernetes/main/apps/media/sabnzbd/app/resources/post-process.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash +# shellcheck disable=SC2154 + +set -euo pipefail + +# User-defined variables +CROSS_SEED_ENABLED="$${CROSS_SEED_ENABLED:-false}" +CROSS_SEED_HOST="$${CROSS_SEED_HOST:-required}" +CROSS_SEED_PORT="$${CROSS_SEED_PORT:-required}" +CROSS_SEED_API_KEY="$${CROSS_SEED_API_KEY:-required}" +CROSS_SEED_SLEEP_INTERVAL="$${CROSS_SEED_SLEEP_INTERVAL:-30}" +PUSHOVER_ENABLED="$${PUSHOVER_ENABLED:-false}" +PUSHOVER_USER_KEY="$${PUSHOVER_USER_KEY:-required}" +PUSHOVER_TOKEN="$${PUSHOVER_TOKEN:-required}" + +# Function to set release 
variables from SABnzbd +set_sab_vars() { + RELEASE_NAME="$${SAB_FILENAME:-}" + RELEASE_DIR="$${SAB_COMPLETE_DIR:-}" + RELEASE_CAT="$${SAB_CAT:-}" + RELEASE_SIZE="$${SAB_BYTES:-}" + RELEASE_STATUS="$${SAB_PP_STATUS:-}" + RELEASE_INDEXER="$${SAB_URL:-}" + RELEASE_TYPE="NZB" +} + +# Function to set release variables from qBittorrent +set_qb_vars() { + RELEASE_NAME="$1" # %N + RELEASE_DIR="$2" # %F + RELEASE_CAT="$3" # %L + RELEASE_SIZE="$4" # %Z + RELEASE_INDEXER="$5" # %T + RELEASE_STATUS=0 # Always 0 for qBittorrent + RELEASE_TYPE="Torrent" +} + +# Function to send pushover notification +send_pushover_notification() { + local pushover_message status_code json_data + printf -v pushover_message \ + "%s\nCategory: %s\nIndexer: %s\nSize: %s" \ + "$${RELEASE_NAME%.*}" \ + "$${RELEASE_CAT}" \ + "$(trurl --url "$${RELEASE_INDEXER}" --get '{idn:host}')" \ + "$(numfmt --to iec --format "%8.2f" "$${RELEASE_SIZE}")" + + json_data=$(jo \ + token="$${PUSHOVER_TOKEN}" \ + user="$${PUSHOVER_USER_KEY}" \ + title="$${RELEASE_TYPE} Downloaded" \ + message="$${pushover_message}" \ + priority="-2" \ + html="1" + ) + + status_code=$(curl \ + --silent \ + --write-out "%{http_code}" \ + --output /dev/null \ + --request POST \ + --header "Content-Type: application/json" \ + --data-binary "$${json_data}" \ + "https://api.pushover.net/1/messages.json" + ) + + printf "pushover notification returned with HTTP status code %s and payload: %s\n" \ + "$${status_code}" \ + "$(echo "$${json_data}" | jq --compact-output)" >&2 +} + +# Function to search for cross-seed +search_cross_seed() { + local status_code + status_code=$(curl \ + --silent \ + --output /dev/null \ + --write-out "%{http_code}" \ + --request POST \ + --data-urlencode "path=$${RELEASE_DIR}" \ + --header "X-Api-Key: $${CROSS_SEED_API_KEY}" \ + "http://$${CROSS_SEED_HOST}:$${CROSS_SEED_PORT}/api/webhook" + ) + + printf "cross-seed search returned with HTTP status code %s and path %s\n" \ + "$${status_code}" \ + "$${RELEASE_DIR}" >&2 + + sleep "$${CROSS_SEED_SLEEP_INTERVAL}" +} + +main() { + # Determine the source and set release variables accordingly + if env | grep -q "^SAB_"; then + set_sab_vars + else + set_qb_vars "$@" + fi + + # Check if post-processing was successful + if [[ "$${RELEASE_STATUS}" -ne 0 ]]; then + printf "post-processing failed with sabnzbd status code %s\n" \ + "$${RELEASE_STATUS}" >&2 + exit 1 + fi + + # Send pushover notification + if [[ "$${PUSHOVER_ENABLED}" == "true" ]]; then + send_pushover_notification + fi + + # Search for cross-seed + if [[ "$${CROSS_SEED_ENABLED}" == "true" ]]; then + search_cross_seed + fi +} + +main "$@" diff --git a/kubernetes/main/apps/media/sabnzbd/app/volsync-dst.yaml b/kubernetes/main/apps/media/sabnzbd/app/volsync-dst.yaml new file mode 100644 index 000000000..beef6ff66 --- /dev/null +++ b/kubernetes/main/apps/media/sabnzbd/app/volsync-dst.yaml @@ -0,0 +1,18 @@ +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationDestination +metadata: + name: sabnzbd-dst + namespace: media +spec: + trigger: + manual: restore-once + restic: + repository: sabnzbd-volsync-secret + copyMethod: Direct + storageClassName: openebs-zfs-128k + destinationPVC: sabnzbd-config + enableFileDeletion: true + moverSecurityContext: + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 diff --git a/kubernetes/main/apps/media/sabnzbd/app/volsync.yaml b/kubernetes/main/apps/media/sabnzbd/app/volsync.yaml new file mode 100755 index 000000000..b437e7c40 --- /dev/null +++ b/kubernetes/main/apps/media/sabnzbd/app/volsync.yaml @@ -0,0 +1,48 @@ 
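+# Pattern note (inferred from the manifests in this file): the ExternalSecret renders
+# the restic repository credentials out of Vault, and the ReplicationSource snapshots
+# sabnzbd-config to that repository daily at 07:00, pruning weekly and keeping 7 daily
+# snapshots plus everything from the last 3 days. One possible way to trigger the
+# companion restore in volsync-dst.yaml (sketch, not a tested command):
+#   kubectl -n media patch replicationdestination sabnzbd-dst --type merge \
+#     -p '{"spec":{"trigger":{"manual":"restore-once-2"}}}'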
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.ok8.sh/external-secrets.io/externalsecret_v1beta1.json
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+  name: &secret sabnzbd-volsync-secret
+spec:
+  secretStoreRef:
+    kind: ClusterSecretStore
+    name: vault-backend
+  target:
+    name: *secret
+    creationPolicy: Owner
+    template:
+      engineVersion: v2
+      data:
+        RESTIC_REPOSITORY: '{{ .RESTIC_REPOSITORY }}/sabnzbd/volsync/config-volsync-config'
+        RESTIC_PASSWORD: '{{ .ENCRYPTION_KEY }}'
+        AWS_ACCESS_KEY_ID: '{{ .CF_ACCESS_KEY_ID }}'
+        AWS_SECRET_ACCESS_KEY: '{{ .CF_SECRET_ACCESS_KEY }}'
+  dataFrom:
+    - extract:
+        key: secrets/volsync
+    - extract:
+        key: secrets/cloudflare
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.ok8.sh/volsync.backube/replicationsource_v1alpha1.json
+apiVersion: volsync.backube/v1alpha1
+kind: ReplicationSource
+metadata:
+  name: sabnzbd
+spec:
+  sourcePVC: sabnzbd-config
+  trigger:
+    schedule: "0 7 * * *"
+  restic:
+    copyMethod: Snapshot
+    storageClassName: openebs-zfs-128k
+    pruneIntervalDays: 7
+    repository: sabnzbd-volsync-secret
+    cacheCapacity: 2Gi
+    moverSecurityContext:
+      runAsUser: 568
+      runAsGroup: 568
+      fsGroup: 568
+    retain:
+      daily: 7
+      within: 3d
diff --git a/kubernetes/main/apps/media/sabnzbd/ks.yaml b/kubernetes/main/apps/media/sabnzbd/ks.yaml
new file mode 100755
index 000000000..a93524885
--- /dev/null
+++ b/kubernetes/main/apps/media/sabnzbd/ks.yaml
@@ -0,0 +1,26 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
+apiVersion: kustomize.toolkit.fluxcd.io/v1
+kind: Kustomization
+metadata:
+  name: &app sabnzbd
+  namespace: flux-system
+spec:
+  targetNamespace: media
+  commonMetadata:
+    labels:
+      app.kubernetes.io/name: *app
+  dependsOn:
+    - name: external-secrets-stores
+  path: ./kubernetes/main/apps/media/sabnzbd/app
+  prune: true
+  sourceRef:
+    kind: GitRepository
+    name: home-kubernetes
+  wait: false
+  interval: 30m
+  timeout: 5m
+  postBuild:
+    substitute:
+      APP: *app
+      GATUS_PATH: /api?mode=version
diff --git a/kubernetes/main/apps/media/sonarr/app/externalsecret.yaml b/kubernetes/main/apps/media/sonarr/app/externalsecret.yaml
new file mode 100755
index 000000000..2b5fa75af
--- /dev/null
+++ b/kubernetes/main/apps/media/sonarr/app/externalsecret.yaml
@@ -0,0 +1,35 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+  name: &secret sonarr-secret
+spec:
+  secretStoreRef:
+    kind: ClusterSecretStore
+    name: vault-backend
+  target:
+    name: *secret
+    template:
+      engineVersion: v2
+      data:
+        SONARR__AUTH__APIKEY: "{{ .SONARR_API_KEY }}"
+        SONARR__POSTGRES__HOST: &dbHost postgres17-rw.database.svc.cluster.local
+        SONARR__POSTGRES__PORT: "5432"
+        SONARR__POSTGRES__USER: &dbUser "{{ .SONARR_POSTGRES_USER }}"
+        SONARR__POSTGRES__PASSWORD: &dbPass "{{ .SONARR_POSTGRES_PASS }}"
+        SONARR__POSTGRES__MAINDB: &dbName sonarr
+        PUSHOVER_TOKEN: "{{ .SONARR_PUSHOVER_TOKEN }}"
+        PUSHOVER_USER_KEY: "{{ .PUSHOVER_USER_KEY }}"
+        INIT_POSTGRES_DBNAME: *dbName
+        INIT_POSTGRES_HOST: *dbHost
+        INIT_POSTGRES_USER: *dbUser
+        INIT_POSTGRES_PASS: *dbPass
+        INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}"
+  dataFrom:
+    - extract:
+        key: secrets/api-keys
+    - extract:
+        key: secrets/cloudnative-pg
+    - extract:
+        key: secrets/pushover
diff --git a/kubernetes/main/apps/media/sonarr/app/helmrelease.yaml
b/kubernetes/main/apps/media/sonarr/app/helmrelease.yaml new file mode 100755 index 000000000..289357eac --- /dev/null +++ b/kubernetes/main/apps/media/sonarr/app/helmrelease.yaml @@ -0,0 +1,164 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: sonarr +spec: + interval: 30m + chart: + spec: + verify: + provider: cosign + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + + values: + controllers: + sonarr: + annotations: + reloader.stakater.com/auto: "true" + + initContainers: + init-db: + image: + repository: ghcr.io/onedr0p/postgres-init + tag: 16.6@sha256:35353a77777ee8f634d0f3945f495b4a40065134b8619e0d18bd49b0ee9c855b + envFrom: &envFrom + - secretRef: + name: sonarr-secret + + containers: + app: + image: + repository: ghcr.io/onedr0p/sonarr-develop + tag: 4.0.11.2688@sha256:5bc8cdd83a98862807a1c8d047bcae2658b108ac0a3ee67bf562a229ed1895c6 + env: + SONARR__APP__INSTANCENAME: Sonarr + SONARR__APP__THEME: dark + SONARR__AUTH__METHOD: External + SONARR__AUTH__REQUIRED: DisabledForLocalAddresses + SONARR__LOG__DBENABLED: "False" + SONARR__LOG__LEVEL: info + SONARR__SERVER__PORT: &port 80 + SONARR__UPDATE__BRANCH: develop + TZ: Europe/Sofia + envFrom: *envFrom + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /ping + port: *port + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + memory: 4Gi + + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 + fsGroupChangePolicy: OnRootMismatch + supplementalGroups: [10000] + seccompProfile: { type: RuntimeDefault } + + service: + app: + controller: sonarr + ports: + http: + port: *port + + ingress: + app: + annotations: + external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN} + nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth" + nginx.ingress.kubernetes.io/auth-signin: "https://$host/oauth2/start?rd=$escaped_request_uri" + nginx.ingress.kubernetes.io/auth-snippet: | + if ($request_uri ~* "(\/|\/[0-9]+\/)api(/|$|[?])") { + return 200; + } + if ($request_uri ~* "^/ping") { + return 200; + } + nginx.ingress.kubernetes.io/configuration-snippet: | + proxy_set_header Accept-Encoding ""; + sub_filter '' ''; + sub_filter_once on; + cert-manager.io/cluster-issuer: zerossl-prod + cert-manager.io/private-key-rotation-policy: Always + cert-manager.io/private-key-algorithm: ECDSA + cert-manager.io/private-key-size: "384" + className: internal + tls: + - hosts: + - &host "{{ .Release.Name }}.${PUBLIC_DOMAIN}" + secretName: sonarr-tls + hosts: + - host: *host + paths: + - path: / + service: + identifier: app + port: http + + persistence: + config: + enabled: true + type: persistentVolumeClaim + size: 15Gi + storageClass: openebs-zfs-128k + accessMode: ReadWriteOnce + globalMounts: + - path: /config + + scripts: + type: configMap + name: sonarr-configmap + defaultMode: 0775 + globalMounts: + - path: /scripts/pushover-notify.sh + subPath: 
pushover-notify.sh + readOnly: true + - path: /scripts/refresh-series.sh + subPath: refresh-series.sh + readOnly: true + tmp: + type: emptyDir + medium: Memory + + media: + type: nfs + server: 192.168.91.40 + path: /mnt/exos20/data + globalMounts: + - path: /data diff --git a/kubernetes/main/apps/media/sonarr/app/kustomization.yaml b/kubernetes/main/apps/media/sonarr/app/kustomization.yaml new file mode 100755 index 000000000..c57ec19bc --- /dev/null +++ b/kubernetes/main/apps/media/sonarr/app/kustomization.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml +configMapGenerator: + - name: sonarr-configmap + files: + - pushover-notify.sh=./resources/pushover-notify.sh + - refresh-series.sh=./resources/refresh-series.sh + - name: sonarr-gatus-ep + options: + labels: + gatus.io/enabled: "true" + files: + - config.yaml=./resources/gatus-ep.yaml +generatorOptions: + disableNameSuffixHash: true diff --git a/kubernetes/main/apps/media/sonarr/app/resources/gatus-ep.yaml b/kubernetes/main/apps/media/sonarr/app/resources/gatus-ep.yaml new file mode 100755 index 000000000..6ab2e6566 --- /dev/null +++ b/kubernetes/main/apps/media/sonarr/app/resources/gatus-ep.yaml @@ -0,0 +1,15 @@ +endpoints: + - name: "Sonarr" + group: guarded + url: "https://sonarr.${PUBLIC_DOMAIN}/ping" + interval: 1m + ui: + hide-hostname: true + hide-url: true + client: + dns-resolver: tcp://172.17.0.10:53 + conditions: + - "[STATUS] == 200" + - "[BODY].status == OK" + alerts: + - type: pushover diff --git a/kubernetes/main/apps/media/sonarr/app/resources/pushover-notify.sh b/kubernetes/main/apps/media/sonarr/app/resources/pushover-notify.sh new file mode 100755 index 000000000..9f607fd1d --- /dev/null +++ b/kubernetes/main/apps/media/sonarr/app/resources/pushover-notify.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash +# shellcheck disable=SC2154 +set -euo pipefail + +# User defined variables for pushover +PUSHOVER_USER_KEY="$${PUSHOVER_USER_KEY:-required}" +PUSHOVER_TOKEN="$${PUSHOVER_TOKEN:-required}" +PUSHOVER_PRIORITY="$${PUSHOVER_PRIORITY:-"-2"}" + +if [[ "$${sonarr_eventtype:-}" == "Test" ]]; then + PUSHOVER_PRIORITY="1" + printf -v PUSHOVER_TITLE \ + "Test Notification" + printf -v PUSHOVER_MESSAGE \ + "Howdy this is a test notification from %s" \ + "$${sonarr_instancename:-Sonarr}" + printf -v PUSHOVER_URL \ + "%s" \ + "$${sonarr_applicationurl:-localhost}" + printf -v PUSHOVER_URL_TITLE \ + "Open %s" \ + "$${sonarr_instancename:-Sonarr}" +fi + +if [[ "$${sonarr_eventtype:-}" == "Download" ]]; then + printf -v PUSHOVER_TITLE \ + "Episode %s" \ + "$( [[ "$${sonarr_isupgrade}" == "True" ]] && echo "Upgraded" || echo "Downloaded" )" + printf -v PUSHOVER_MESSAGE \ + "%s (S%02dE%02d)\n%s\n\nQuality: %s\nClient: %s" \ + "$${sonarr_series_title}" \ + "$${sonarr_episodefile_seasonnumber}" \ + "$${sonarr_episodefile_episodenumbers}" \ + "$${sonarr_episodefile_episodetitles}" \ + "$${sonarr_episodefile_quality:-Unknown}" \ + "$${sonarr_download_client:-Unknown}" + printf -v PUSHOVER_URL \ + "%s/series/%s" \ + "$${sonarr_applicationurl:-localhost}" \ + "$${sonarr_series_titleslug}" + printf -v PUSHOVER_URL_TITLE \ + "View series in %s" \ + "$${sonarr_instancename:-Sonarr}" +fi + +if [[ "$${sonarr_eventtype:-}" == "ManualInteractionRequired" ]]; then + PUSHOVER_PRIORITY="1" + printf -v PUSHOVER_TITLE \ + "Episode import requires intervention" + printf -v 
PUSHOVER_MESSAGE \ + "%s\nClient: %s" \ + "$${sonarr_series_title}" \ + "$${sonarr_download_client:-Unknown}" + printf -v PUSHOVER_URL \ + "%s/activity/queue" \ + "$${sonarr_applicationurl:-localhost}" + printf -v PUSHOVER_URL_TITLE \ + "View queue in %s" \ + "$${sonarr_instancename:-Sonarr}" +fi + +json_data=$(jo \ + token="$${PUSHOVER_TOKEN}" \ + user="$${PUSHOVER_USER_KEY}" \ + title="$${PUSHOVER_TITLE}" \ + message="$${PUSHOVER_MESSAGE}" \ + url="$${PUSHOVER_URL}" \ + url_title="$${PUSHOVER_URL_TITLE}" \ + priority="$${PUSHOVER_PRIORITY}" \ + html="1" +) + +status_code=$(curl \ + --silent \ + --write-out "%{http_code}" \ + --output /dev/null \ + --request POST \ + --header "Content-Type: application/json" \ + --data-binary "$${json_data}" \ + "https://api.pushover.net/1/messages.json" \ +) + +printf "pushover notification returned with HTTP status code %s and payload: %s\n" \ + "$${status_code}" \ + "$(echo "$${json_data}" | jq --compact-output)" >&2 diff --git a/kubernetes/main/apps/media/sonarr/app/resources/refresh-series.sh b/kubernetes/main/apps/media/sonarr/app/resources/refresh-series.sh new file mode 100755 index 000000000..77a8ca791 --- /dev/null +++ b/kubernetes/main/apps/media/sonarr/app/resources/refresh-series.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# shellcheck disable=SC2154 +set -euo pipefail + +CURL_CMD=(curl -fsSL --header "X-Api-Key: $${SONARR__AUTH__APIKEY:-}") +SONARR_API_URL="http://localhost:$${SONARR__SERVER__PORT:-}/api/v3" + +if [[ "$${sonarr_eventtype:-}" == "Grab" ]]; then + tba=$("$${CURL_CMD[@]}" "$${SONARR_API_URL}/episode?seriesId=$${sonarr_series_id:-}" | jq --raw-output ' + [.[] | select((.title == "TBA") or (.title == "TBD"))] | length + ') + + if (( tba > 0 )); then + echo "INFO: Refreshing series $${sonarr_series_id:-} due to TBA/TBD episodes found" + "$${CURL_CMD[@]}" \ + --request POST \ + --header "Content-Type: application/json" \ + --data-binary '{"name": "RefreshSeries", "seriesId": '"$${sonarr_series_id:-}"'}' \ + "$${SONARR_API_URL}/command" &>/dev/null + fi +fi diff --git a/kubernetes/main/apps/media/sonarr/ks.yaml b/kubernetes/main/apps/media/sonarr/ks.yaml new file mode 100755 index 000000000..86b278184 --- /dev/null +++ b/kubernetes/main/apps/media/sonarr/ks.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app sonarr + namespace: flux-system +spec: + targetNamespace: media + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: cloudnative-pg-cluster + - name: external-secrets-stores + path: ./kubernetes/main/apps/media/sonarr/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/media/unpackerr/app/externalsecret.yaml b/kubernetes/main/apps/media/unpackerr/app/externalsecret.yaml new file mode 100755 index 000000000..7bc6e3f3c --- /dev/null +++ b/kubernetes/main/apps/media/unpackerr/app/externalsecret.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret unpackerr-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + UN_RADARR_0_API_KEY: "{{ .RADARR_API_KEY 
}}" + UN_SONARR_0_API_KEY: "{{ .SONARR_API_KEY }}" + dataFrom: + - extract: + key: secrets/api-keys diff --git a/kubernetes/main/apps/media/unpackerr/app/helmrelease.yaml b/kubernetes/main/apps/media/unpackerr/app/helmrelease.yaml new file mode 100755 index 000000000..e2217b8b8 --- /dev/null +++ b/kubernetes/main/apps/media/unpackerr/app/helmrelease.yaml @@ -0,0 +1,103 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app unpackerr +spec: + interval: 30m + chart: + spec: + verify: + provider: cosign + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + + values: + controllers: + unpackerr: + annotations: + reloader.stakater.com/auto: "true" + + containers: + app: + image: + repository: ghcr.io/unpackerr/unpackerr + tag: 0.14.5@sha256:dc72256942ce50d1c8a1aeb5aa85b6ae2680a36eefd2182129d8d210fce78044 + env: + TZ: Europe/Sofia + UN_WEBSERVER_METRICS: true + UN_WEBSERVER_LOG_FILE: /logs/webserver.log + UN_ACTIVITY: true + UN_SONARR_0_URL: https://sonarr.${PUBLIC_DOMAIN} + UN_SONARR_0_PATHS_0: /data/downloads/torrents/complete/tv + UN_RADARR_0_URL: https://radarr.${PUBLIC_DOMAIN} + UN_RADARR_0_PATHS_0: /data/downloads/torrents/complete/movies + envFrom: + - secretRef: + name: unpackerr-secret + probes: + liveness: + enabled: true + readiness: + enabled: true + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 10m + limits: + memory: 4Gi + + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 + fsGroupChangePolicy: OnRootMismatch + supplementalGroups: [10000] + seccompProfile: { type: RuntimeDefault } + + service: + app: + controller: *app + ports: + http: + port: 5656 + + serviceMonitor: + app: + serviceName: *app + endpoints: + - port: http + scheme: http + path: /metrics + interval: 1m + scrapeTimeout: 10s + + persistence: + logs: + type: emptyDir + + media: + type: nfs + server: 192.168.91.40 + path: /mnt/exos20/data + globalMounts: + - path: /data/downloads/torrents/complete + subPath: downloads/torrents/complete diff --git a/kubernetes/main/apps/media/unpackerr/app/kustomization.yaml b/kubernetes/main/apps/media/unpackerr/app/kustomization.yaml new file mode 100755 index 000000000..4eed917b9 --- /dev/null +++ b/kubernetes/main/apps/media/unpackerr/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/media/unpackerr/ks.yaml b/kubernetes/main/apps/media/unpackerr/ks.yaml new file mode 100755 index 000000000..cc1b2c96f --- /dev/null +++ b/kubernetes/main/apps/media/unpackerr/ks.yaml @@ -0,0 +1,22 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app unpackerr + namespace: flux-system +spec: + targetNamespace: media + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - 
name: external-secrets-stores + path: ./kubernetes/main/apps/media/unpackerr/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/network/cloudflared/app/dnsendpoint.yaml b/kubernetes/main/apps/network/cloudflared/app/dnsendpoint.yaml new file mode 100755 index 000000000..df84ec138 --- /dev/null +++ b/kubernetes/main/apps/network/cloudflared/app/dnsendpoint.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/externaldns.k8s.io/dnsendpoint_v1alpha1.json +apiVersion: externaldns.k8s.io/v1alpha1 +kind: DNSEndpoint +metadata: + name: cloudflared +spec: + endpoints: + - dnsName: external.${PUBLIC_DOMAIN} + recordType: CNAME + targets: ["${CLUSTER_CLOUDFLARE_TUNNEL_ID}.cfargotunnel.com"] diff --git a/kubernetes/main/apps/network/cloudflared/app/externalsecret.yaml b/kubernetes/main/apps/network/cloudflared/app/externalsecret.yaml new file mode 100755 index 000000000..3e3ca120b --- /dev/null +++ b/kubernetes/main/apps/network/cloudflared/app/externalsecret.yaml @@ -0,0 +1,24 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: cloudflared-tunnel +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: cloudflared-tunnel-secret + template: + engineVersion: v2 + data: + credentials.json: | + { + "AccountTag": "{{ .CF_ACCOUNT_TAG }}", + "TunnelSecret": "{{ .CF_TUNNEL_SECRET }}", + "TunnelID": "{{ .CF_TUNNEL_ID }}" + } + dataFrom: + - extract: + key: cloudflare diff --git a/kubernetes/main/apps/network/cloudflared/app/helmrelease.yaml b/kubernetes/main/apps/network/cloudflared/app/helmrelease.yaml new file mode 100755 index 000000000..a28a217e4 --- /dev/null +++ b/kubernetes/main/apps/network/cloudflared/app/helmrelease.yaml @@ -0,0 +1,117 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app cloudflared +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + dependsOn: + - name: nginx-external + namespace: network + values: + controllers: + cloudflared: + replicas: 2 + strategy: RollingUpdate + annotations: + reloader.stakater.com/auto: "true" + containers: + app: + image: + repository: docker.io/cloudflare/cloudflared + tag: 2024.11.1@sha256:665dda65335e35a782ed9319aa63e8404f88b34d2644d30adf3e91253604ffa0 + env: + NO_AUTOUPDATE: true + TUNNEL_CRED_FILE: /etc/cloudflared/creds/credentials.json + TUNNEL_METRICS: 0.0.0.0:8080 + TUNNEL_ORIGIN_ENABLE_HTTP2: true + TUNNEL_TRANSPORT_PROTOCOL: quic + TUNNEL_POST_QUANTUM: true + args: + - tunnel + - --config + - /etc/cloudflared/config/config.yaml + - run + - "${CLUSTER_CLOUDFLARE_TUNNEL_ID}" + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /ready + port: &port 8080 + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: 
["ALL"] } + resources: + requests: + cpu: 10m + limits: + memory: 256Mi + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + seccompProfile: { type: RuntimeDefault } + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + app.kubernetes.io/name: *app + service: + app: + controller: cloudflared + ports: + http: + port: *port + serviceMonitor: + app: + serviceName: cloudflared + endpoints: + - port: http + scheme: http + path: /metrics + interval: 1m + scrapeTimeout: 10s + persistence: + config: + type: configMap + name: cloudflared-configmap + globalMounts: + - path: /etc/cloudflared/config/config.yaml + subPath: config.yaml + readOnly: true + creds: + type: secret + name: cloudflared-tunnel-secret + globalMounts: + - path: /etc/cloudflared/creds/credentials.json + subPath: credentials.json + readOnly: true diff --git a/kubernetes/main/apps/network/cloudflared/app/kustomization.yaml b/kubernetes/main/apps/network/cloudflared/app/kustomization.yaml new file mode 100755 index 000000000..ec382010f --- /dev/null +++ b/kubernetes/main/apps/network/cloudflared/app/kustomization.yaml @@ -0,0 +1,14 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml + - ./dnsendpoint.yaml +configMapGenerator: + - name: cloudflared-configmap + files: + - config.yaml=./resources/config.yaml +generatorOptions: + disableNameSuffixHash: true diff --git a/kubernetes/main/apps/network/cloudflared/app/resources/config.yaml b/kubernetes/main/apps/network/cloudflared/app/resources/config.yaml new file mode 100755 index 000000000..156318af7 --- /dev/null +++ b/kubernetes/main/apps/network/cloudflared/app/resources/config.yaml @@ -0,0 +1,10 @@ +--- +originRequest: + originServerName: external.${PUBLIC_DOMAIN} + +ingress: + - hostname: ${PUBLIC_DOMAIN} + service: https://nginx-external-controller.network.svc.cluster.local:443 + - hostname: "*.${PUBLIC_DOMAIN}" + service: https://nginx-external-controller.network.svc.cluster.local:443 + - service: http_status:404 diff --git a/kubernetes/main/apps/network/cloudflared/ks.yaml b/kubernetes/main/apps/network/cloudflared/ks.yaml new file mode 100755 index 000000000..0e94bf167 --- /dev/null +++ b/kubernetes/main/apps/network/cloudflared/ks.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cloudflared + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-dns-cloudflare + - name: external-secrets-stores + path: ./kubernetes/main/apps/network/cloudflared/app + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/network/crowdsec/app/externalsecret.yaml b/kubernetes/main/apps/network/crowdsec/app/externalsecret.yaml new file mode 100755 index 000000000..85357c49c --- /dev/null +++ b/kubernetes/main/apps/network/crowdsec/app/externalsecret.yaml @@ -0,0 +1,33 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 
+kind: ExternalSecret +metadata: + name: &secret crowdsec-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + INIT_POSTGRES_DBNAME: crowdsec + INIT_POSTGRES_HOST: postgres17-rw.database.svc.cluster.local + INIT_POSTGRES_USER: "{{ .CROWDSEC_POSTGRES_USER }}" + INIT_POSTGRES_PASS: "{{ .CROWDSEC_POSTGRES_PASS }}" + INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}" + ENROLL_KEY: "{{ .ENROLL_KEY }}" + CROWDSEC_PUSHOVER_TOKEN: "{{ .CROWDSEC_PUSHOVER_TOKEN }}" + PUSHOVER_USER_KEY: "{{ .PUSHOVER_USER_KEY }}" + BOUNCER_KEY_NGINX_EXTERNAL: "{{ .BOUNCER_KEY_NGINX_EXTERNAL }}" + dataFrom: + - extract: + key: secrets/cloudnative-pg + - extract: + key: secrets/crowdsec + - extract: + key: secrets/pushover + - extract: + key: secrets/api-keys diff --git a/kubernetes/main/apps/network/crowdsec/app/helmrelease.yaml b/kubernetes/main/apps/network/crowdsec/app/helmrelease.yaml new file mode 100755 index 000000000..a020cff96 --- /dev/null +++ b/kubernetes/main/apps/network/crowdsec/app/helmrelease.yaml @@ -0,0 +1,280 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: crowdsec +spec: + interval: 30m + chart: + spec: + chart: crowdsec + version: 0.15.0 + sourceRef: + kind: HelmRepository + name: crowdsec + namespace: flux-system + maxHistory: 3 + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + postRenderers: + - kustomize: + patches: + - target: + version: v1 + kind: Deployment + name: crowdsec-appsec + patch: | + - op: replace + path: /spec/template/spec/initContainers/0/env/2/value + value: "https://crowdsec-api.${PUBLIC_DOMAIN}" + - op: replace + path: /spec/template/spec/initContainers/0/env/3/value + value: "crowdsec-api.${PUBLIC_DOMAIN}" + - op: replace + path: /spec/template/spec/initContainers/0/env/4/value + value: "443" + - target: + version: v1 + kind: DaemonSet + name: crowdsec-agent + patch: | + - op: replace + path: /spec/template/spec/containers/0/env/2/value + value: "https://crowdsec-api.${PUBLIC_DOMAIN}" + - op: replace + path: /spec/template/spec/initContainers/0/command + value: ["sh", "-c", "until nc crowdsec-api.${PUBLIC_DOMAIN} 443 -z; do echo waiting for lapi to start; sleep 5; done"] + values: + image: + repository: crowdsecurity/crowdsec + pullPolicy: IfNotPresent + tag: v1.6.4@sha256:091229068a9dab7f8c1ae41086669620da2980c6ccc26f9e358a75aaa7cb4a27 + config: + + profiles.yaml: | + name: default_ip_remediation + #debug: true + filters: + - Alert.Remediation == true && Alert.GetScope() == "Ip" + decisions: + - type: ban + duration: 4h + duration_expr: Sprintf('%dh', (GetDecisionsCount(Alert.GetValue()) + 1) * 4) + notifications: + - pushover + on_success: break + --- + name: default_range_remediation + #debug: true + filters: + - Alert.Remediation == true && Alert.GetScope() == "Range" + decisions: + - type: ban + duration: 4h + duration_expr: Sprintf('%dh', (GetDecisionsCount(Alert.GetValue()) + 1) * 4) + notifications: + - pushover + on_success: break + + notifications: + http.yaml: | + type: http + name: pushover + + log_level: info + + group_wait: "30s" + group_threshold: 10 + max_retry: 5 + timeout: "10s" + + #------------------------- + #plugin-specific options + + # The following template receives a list of models.Alert objects + # The output goes in the http 
request body + format: | + { + "token": $$CROWDSEC_PUSHOVER_TOKEN, + "user": $$PUSHOVER_USER_KEY, + "message": "{{`{{range . -}}{{$alert := . -}}{{range .Decisions -}}{{.Value}} will get {{.Type}} for the next {{.Duration}} for triggering {{.Scenario}}.\r\n https://www.shodan.io/host/{{.Value}}{{end -}}{{end -}}`}}", + "html": "1", + "title": "Scenario triggered on IDS/IPS !" + } + url: https://api.pushover.net/1/messages.json + method: POST + headers: + Content-Type: "application/json" + + config.yaml.local: | + api: + server: + auto_registration: + enabled: true + token: "$$REGISTRATION_TOKEN" + allowed_ranges: + - 172.16.0.0/16 + db_config: + type: postgresql + user: $${DB_USER} + password: $${DB_PASSWORD} + db_name: crowdsec + host: postgres17-rw.database.svc.cluster.local + port: 5432 + sslmode: require + + container_runtime: containerd + + agent: + wait_for_lapi: + repository: busybox + image: + tag: "1.37.0@sha256:5b0f33c83a97f5f7d12698df6732098b0cdb860d377f6307b68efe2c6821296f" + + acquisition: + - namespace: network + podName: nginx-external-controller-* + program: nginx + + - namespace: media + podName: jellyseerr-* + program: jellyseerr + + - namespace: vaultwarden + podName: vaultwarden-* + program: VAULTWARDEN + + - namespace: idp + podName: keycloak-0 + program: keycloak + + env: + - name: LOCAL_API_URL + value: "https://crowdsec-api.${PUBLIC_DOMAIN}" + + - name: PARSERS + value: "crowdsecurity/cri-logs crowdsecurity/whitelists crowdsecurity/geoip-enrich" + + - name: COLLECTIONS + value: "crowdsecurity/modsecurity inherent-io/keycloak LePresidente/jellyseerr crowdsecurity/nginx Dominic-Wagner/vaultwarden crowdsecurity/base-http-scenarios crowdsecurity/http-cve crowdsecurity/http-dos crowdsecurity/whitelist-good-actors" + + metrics: + enabled: true + serviceMonitor: + enabled: true + + lapi: + deployAnnotations: + reloader.stakater.com/auto: "true" + + extraInitContainers: + - name: initdb + image: "ghcr.io/onedr0p/postgres-init:16.6@sha256:35353a77777ee8f634d0f3945f495b4a40065134b8619e0d18bd49b0ee9c855b" + imagePullPolicy: IfNotPresent + + envFrom: + - secretRef: + name: &secret crowdsec-secret + + persistentVolume: + data: + enabled: false + + config: + enabled: false + + storeCAPICredentialsInSecret: true + + ingress: + enabled: true + + annotations: + nginx.ingress.kubernetes.io/backend-protocol: "HTTP" + external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN} + cert-manager.io/cluster-issuer: zerossl-prod + cert-manager.io/private-key-rotation-policy: Always + cert-manager.io/private-key-algorithm: ECDSA + cert-manager.io/private-key-size: "384" + + ingressClassName: internal + + host: &host "crowdsec-api.${PUBLIC_DOMAIN}" + + tls: + - hosts: + - *host + secretName: crowdsec-tls + + env: + - name: CROWDSEC_PUSHOVER_TOKEN + valueFrom: + secretKeyRef: + name: *secret + key: CROWDSEC_PUSHOVER_TOKEN + + - name: PUSHOVER_USER_KEY + valueFrom: + secretKeyRef: + name: *secret + key: PUSHOVER_USER_KEY + + - name: DISABLE_ONLINE_API + value: "false" + + - name: ENROLL_KEY + valueFrom: + secretKeyRef: + name: *secret + key: ENROLL_KEY + + - name: ENROLL_INSTANCE_NAME + value: "cluster" + + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: *secret + key: INIT_POSTGRES_PASS + + - name: DB_USER + valueFrom: + secretKeyRef: + name: *secret + key: INIT_POSTGRES_USER + + - name: BOUNCER_KEY_NGINX_EXTERNAL + valueFrom: + secretKeyRef: + name: *secret + key: BOUNCER_KEY_NGINX_EXTERNAL + + metrics: + enabled: true + serviceMonitor: + enabled: true + + appsec: + enabled: 
true + + acquisitions: + - source: appsec + listen_addr: "0.0.0.0:7422" + path: / + appsec_config: crowdsecurity/appsec-default + labels: + type: appsec + + env: + - name: COLLECTIONS + value: "crowdsecurity/appsec-virtual-patching crowdsecurity/appsec-generic-rules" + - name: APPSEC_CONFIGS + value: "crowdsecurity/appsec-default" + - name: LOCAL_API_URL + value: "https://crowdsec-api.${PUBLIC_DOMAIN}" diff --git a/kubernetes/main/apps/network/crowdsec/app/ingress-appsec.yaml b/kubernetes/main/apps/network/crowdsec/app/ingress-appsec.yaml new file mode 100644 index 000000000..7066eb424 --- /dev/null +++ b/kubernetes/main/apps/network/crowdsec/app/ingress-appsec.yaml @@ -0,0 +1,29 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: network-appsec + namespace: network + annotations: + cert-manager.io/cluster-issuer: zerossl-prod + external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN} + nginx.ingress.kubernetes.io/backend-protocol: HTTP + cert-manager.io/private-key-rotation-policy: Always + cert-manager.io/private-key-algorithm: ECDSA + cert-manager.io/private-key-size: "384" +spec: + ingressClassName: internal + tls: + - hosts: + - crowdsec-appsec.${PUBLIC_DOMAIN} + secretName: crowdsec-appsec-tls + rules: + - host: crowdsec-appsec.${PUBLIC_DOMAIN} + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: crowdsec-appsec-service + port: + name: appsec diff --git a/kubernetes/main/apps/network/crowdsec/app/kustomization.yaml b/kubernetes/main/apps/network/crowdsec/app/kustomization.yaml new file mode 100755 index 000000000..5d573bd0d --- /dev/null +++ b/kubernetes/main/apps/network/crowdsec/app/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml + - ./ingress-appsec.yaml diff --git a/kubernetes/main/apps/network/crowdsec/ks.yaml b/kubernetes/main/apps/network/crowdsec/ks.yaml new file mode 100755 index 000000000..5c9006f49 --- /dev/null +++ b/kubernetes/main/apps/network/crowdsec/ks.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app crowdsec + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + - name: cloudnative-pg-cluster + path: ./kubernetes/main/apps/network/crowdsec/app + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/network/external-dns/RFC3645/config.yaml b/kubernetes/main/apps/network/external-dns/RFC3645/config.yaml new file mode 100755 index 000000000..90d3507dd --- /dev/null +++ b/kubernetes/main/apps/network/external-dns/RFC3645/config.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + creationTimestamp: null + name: krb5.conf +data: + krb5.conf: | + [logging] + default = FILE:/var/log/krb5libs.log + kdc = FILE:/var/log/krb5kdc.log + admin_server = FILE:/var/log/kadmind.log + + [libdefaults] + dns_lookup_realm = false + ticket_lifetime = 24h + renew_lifetime = 7d + forwardable = true + rdns = false + pkinit_anchors = /etc/pki/tls/certs/ca-bundle.crt + default_ccache_name = KEYRING:persistent:%{uid} + + default_realm = ${AD_REALM} 
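+    # The realm stanza below is what external-dns's GSS-TSIG (RFC 3645) dynamic
+    # updates authenticate against: dc01 acts as both KDC and kadmin server.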
+ + [realms] + ${AD_REALM} = { + kdc = dc01.${PUBLIC_DOMAIN} + admin_server = dc01.${PUBLIC_DOMAIN} + } + + [domain_realm] + ${PUBLIC_DOMAIN} = ${AD_REALM} + .${PUBLIC_DOMAIN} = ${AD_REALM} diff --git a/kubernetes/main/apps/network/external-dns/RFC3645/helmrelease.yaml b/kubernetes/main/apps/network/external-dns/RFC3645/helmrelease.yaml new file mode 100755 index 000000000..7c7ba9fd8 --- /dev/null +++ b/kubernetes/main/apps/network/external-dns/RFC3645/helmrelease.yaml @@ -0,0 +1,57 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/kubernetes-sigs/external-dns/refs/heads/master/charts/external-dns/values.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app external-dns-bind +spec: + interval: 30m + chart: + spec: + chart: external-dns + version: 1.15.0 + sourceRef: + kind: HelmRepository + name: external-dns + namespace: flux-system + install: + crds: CreateReplace + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + crds: CreateReplace + remediation: + strategy: rollback + retries: 3 + values: + fullnameOverride: *app + provider: + name: rfc2136 + extraArgs: + - --events + - --ignore-ingress-tls-spec + - --rfc2136-gss-tsig + - --rfc2136-host=dc01.${PUBLIC_DOMAIN} + - --rfc2136-port=53 + - --rfc2136-zone=${PUBLIC_DOMAIN} + - --rfc2136-kerberos-username=externaldns_service + - --rfc2136-kerberos-password=${KERBEROS_PASSWORD} + - --rfc2136-kerberos-realm=${AD_REALM} + - --rfc2136-tsig-axfr + policy: sync + sources: ["ingress", "service"] + txtOwnerId: default + txtPrefix: k8s. + domainFilters: ["${PUBLIC_DOMAIN}"] + serviceMonitor: + enabled: true + extraVolumes: + - configMap: + defaultMode: 420 + name: krb5.conf + name: kerberos-config-volume + extraVolumeMounts: + - mountPath: /etc/krb5.conf + name: kerberos-config-volume + subPath: krb5.conf diff --git a/kubernetes/main/apps/network/external-dns/RFC3645/kustomization.yaml b/kubernetes/main/apps/network/external-dns/RFC3645/kustomization.yaml new file mode 100755 index 000000000..3ea08d247 --- /dev/null +++ b/kubernetes/main/apps/network/external-dns/RFC3645/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./config.yaml + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/network/external-dns/cloudflare/externalsecret.yaml b/kubernetes/main/apps/network/external-dns/cloudflare/externalsecret.yaml new file mode 100755 index 000000000..948a62faa --- /dev/null +++ b/kubernetes/main/apps/network/external-dns/cloudflare/externalsecret.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: external-dns-cloudflare +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: external-dns-cloudflare-secret + template: + engineVersion: v2 + data: + CF_API_EMAIL: "{{ .CF_API_EMAIL }}" + CF_API_TOKEN: "{{ .CF_API_TOKEN }}" + dataFrom: + - extract: + key: secrets/cloudflare diff --git a/kubernetes/main/apps/network/external-dns/cloudflare/helmrelease.yaml b/kubernetes/main/apps/network/external-dns/cloudflare/helmrelease.yaml new file mode 100755 index 000000000..8570ea084 --- /dev/null +++ b/kubernetes/main/apps/network/external-dns/cloudflare/helmrelease.yaml @@ -0,0 +1,58 @@ +--- +# yaml-language-server: 
$schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app external-dns-cloudflare +spec: + interval: 30m + chart: + spec: + chart: external-dns + version: 1.15.0 + sourceRef: + kind: HelmRepository + name: external-dns + namespace: flux-system + install: + crds: CreateReplace + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + crds: CreateReplace + remediation: + strategy: rollback + retries: 3 + values: + fullnameOverride: *app + provider: + name: cloudflare + env: + - name: &name CF_API_EMAIL + valueFrom: + secretKeyRef: + name: &secret external-dns-cloudflare-secret + key: *name + - name: &name CF_API_TOKEN + valueFrom: + secretKeyRef: + name: *secret + key: *name + extraArgs: + - --cloudflare-dns-records-per-page=1000 + - --cloudflare-proxied + - --crd-source-apiversion=externaldns.k8s.io/v1alpha1 + - --crd-source-kind=DNSEndpoint + - --events + - --ignore-ingress-tls-spec + - --ingress-class=external + policy: sync + sources: ["crd", "ingress"] + txtOwnerId: default + txtPrefix: k8s. + domainFilters: ["${PUBLIC_DOMAIN}"] + serviceMonitor: + enabled: true + podAnnotations: + secret.reloader.stakater.com/reload: *secret diff --git a/kubernetes/main/apps/network/external-dns/cloudflare/kustomization.yaml b/kubernetes/main/apps/network/external-dns/cloudflare/kustomization.yaml new file mode 100755 index 000000000..4eed917b9 --- /dev/null +++ b/kubernetes/main/apps/network/external-dns/cloudflare/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/network/external-dns/ks.yaml b/kubernetes/main/apps/network/external-dns/ks.yaml new file mode 100755 index 000000000..cd9526cfe --- /dev/null +++ b/kubernetes/main/apps/network/external-dns/ks.yaml @@ -0,0 +1,44 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app external-dns-cloudflare + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/network/external-dns/cloudflare + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app external-dns-rfc2136 + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/network/external-dns/RFC3645 + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/network/kustomization.yaml b/kubernetes/main/apps/network/kustomization.yaml new file mode 100755 index 000000000..96b348275 --- /dev/null +++ b/kubernetes/main/apps/network/kustomization.yaml @@ -0,0 +1,12 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: 
kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./cloudflared/ks.yaml + - ./external-dns/ks.yaml + - ./crowdsec/ks.yaml + - ./nginx/ks.yaml diff --git a/kubernetes/main/apps/network/namespace.yaml b/kubernetes/main/apps/network/namespace.yaml new file mode 100755 index 000000000..356e3dc5a --- /dev/null +++ b/kubernetes/main/apps/network/namespace.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: network + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled + volsync.backube/privileged-movers: "true" +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: network +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: network +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/network/nginx/external/externalsecret.yaml b/kubernetes/main/apps/network/nginx/external/externalsecret.yaml new file mode 100755 index 000000000..a5cddfac4 --- /dev/null +++ b/kubernetes/main/apps/network/nginx/external/externalsecret.yaml @@ -0,0 +1,39 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret nginx-external-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + MAXMIND_LICENSE_KEY: "{{ .MAXMIND_LICENSE_KEY }}" + BOUNCER_API_KEY: "{{ .BOUNCER_KEY_NGINX_EXTERNAL }}" + dataFrom: + - extract: + key: secrets/maxmind + - extract: + key: secrets/api-keys +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret nginx-external-dhparam +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + data: + - secretKey: dhparam.pem + remoteRef: + key: secrets/nginx-external + property: dhparam.pem diff --git a/kubernetes/main/apps/network/nginx/external/helmrelease.yaml b/kubernetes/main/apps/network/nginx/external/helmrelease.yaml new file mode 100755 index 000000000..6bcbf6121 --- /dev/null +++ b/kubernetes/main/apps/network/nginx/external/helmrelease.yaml @@ -0,0 +1,172 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: nginx-external +spec: + interval: 30m + chart: + spec: + chart: ingress-nginx + version: 4.11.3 + sourceRef: + kind: HelmRepository + name: ingress-nginx + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: 
true
+    remediation:
+      strategy: rollback
+      retries: 3
+  valuesFrom:
+    - targetPath: controller.maxmindLicenseKey
+      kind: Secret
+      name: nginx-external-secret
+      valuesKey: MAXMIND_LICENSE_KEY
+  values:
+    fullnameOverride: nginx-external
+    controller:
+      annotations:
+        reloader.stakater.com/auto: "true"
+      extraVolumes:
+        - name: crowdsec-bouncer-plugin
+          emptyDir: {}
+      extraInitContainers:
+        - name: init-clone-crowdsec-bouncer
+          image: crowdsecurity/lua-bouncer-plugin:v1.0.5@sha256:90f5c611bebbbe89b8aef3218dad1df3bd9fbe51554024384b56026c69c55925
+          imagePullPolicy: IfNotPresent
+          command: ['sh', '-c', "sh /docker_start.sh; mkdir -p /lua_plugins/crowdsec/; cp -R /crowdsec/* /lua_plugins/crowdsec/"]
+          env:
+            - name: BOUNCER_CONFIG
+              value: "/crowdsec/crowdsec-bouncer.conf"
+            - name: API_URL
+              value: "https://crowdsec-api.${PUBLIC_DOMAIN}"
+            - name: BAN_TEMPLATE_PATH
+              value: "/etc/nginx/lua/plugins/crowdsec/templates/ban.html"
+            - name: UPDATE_FREQUENCY
+              value: "60"
+            - name: MODE
+              value: stream
+            - name: API_KEY
+              valueFrom:
+                secretKeyRef:
+                  name: nginx-external-secret
+                  key: BOUNCER_API_KEY
+            - name: APPSEC_URL
+              value: "https://crowdsec-appsec.${PUBLIC_DOMAIN}"
+            - name: SSL_VERIFY
+              value: "true"
+            - name: APPSEC_FAILURE_ACTION
+              value: deny
+          securityContext:
+            capabilities:
+              drop:
+                - ALL
+            privileged: false
+            allowPrivilegeEscalation: false
+            runAsNonRoot: false
+            seccompProfile:
+              type: RuntimeDefault
+          volumeMounts:
+            - name: crowdsec-bouncer-plugin
+              mountPath: /lua_plugins
+      extraVolumeMounts:
+        - name: crowdsec-bouncer-plugin
+          mountPath: /etc/nginx/lua/plugins/crowdsec
+          subPath: crowdsec
+          readOnly: true
+      replicaCount: 2
+      service:
+        annotations:
+          external-dns.alpha.kubernetes.io/hostname: external.${PUBLIC_DOMAIN}
+          lbipam.cilium.io/ips: 192.168.91.95
+        allocateLoadBalancerNodePorts: false
+      ingressClassResource:
+        name: external
+        default: false
+        controllerValue: k8s.io/external
+      admissionWebhooks:
+        objectSelector:
+          matchExpressions:
+            - key: ingress-class
+              operator: In
+              values: ["external"]
+      allowSnippetAnnotations: true
+      config:
+        custom-http-errors: "504,503,500,429,410,404,403,401,400"
+        plugins: "crowdsec"
+        lua-shared-dicts: "crowdsec_cache: 100m"
+        server-snippet: |
+          lua_ssl_trusted_certificate "/etc/ssl/cert.pem";
+        block-user-agents: "GPTBot,~*GPTBot*,ChatGPT-User,~*ChatGPT-User*,Google-Extended,~*Google-Extended*,CCBot,~*CCBot*,Omgilibot,~*Omgilibot*,FacebookBot,~*FacebookBot*" # taken from https://github.com/superseriousbusiness/gotosocial/blob/main/internal/web/robots.go
+        client-body-buffer-size: 100M
+        client-body-timeout: 120
+        client-header-timeout: 120
+        enable-brotli: "true"
+        disable-ipv6: "true"
+        disable-ipv6-dns: "true"
+        enable-ocsp: "true"
+        enable-real-ip: "true"
+        hide-headers: Server,X-Powered-By
+        hsts-max-age: 31536000
+        force-ssl-redirect: "true"
+        keep-alive-requests: 10000
+        keep-alive: 120
+        log-format-escape-json: "true"
+        log-format-upstream: >
+          {"time": "$time_iso8601",
+          "remote_addr": "$remote_addr", "proxy_protocol_addr": "$proxy_protocol_addr", "x_forwarded_for": "$proxy_add_x_forwarded_for",
+          "request_id": "$req_id",
+          "remote_user": "$remote_user",
+          "bytes_sent": "$bytes_sent",
+          "request_time": "$request_time",
+          "status": "$status",
+          "vhost": "$host",
+          "request_proto": "$server_protocol",
+          "path": "$uri",
+          "request_query": "$args",
+          "request_length": "$request_length",
+          "duration": "$request_time",
+          "method": "$request_method",
+          "http_referrer": "$http_referer",
+          "http_user_agent": "$http_user_agent"}
+        proxy-body-size: 0
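+        # NOTE: 16k below is presumably sized for large auth headers; oauth2-proxy /
+        # Keycloak session cookies can exceed nginx's 4k default and fail with
+        # "upstream sent too big header" errors on smaller buffers.
+        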
proxy-buffer-size: 16k + ssl-protocols: TLSv1.3 + ssl-ciphers: ECDHE+AESGCM:DHE+AESGCM + ssl-dh-param: network/nginx-external-dhparam + use-geoip2: "false" + use-forwarded-headers: "true" + metrics: + enabled: true + serviceMonitor: + enabled: true + namespaceSelector: + any: true + extraArgs: + default-ssl-certificate: network/darkfellanet-tls + terminationGracePeriodSeconds: 60 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: nginx-external + app.kubernetes.io/component: controller + resources: + requests: + cpu: 100m + limits: + memory: 5Gi + defaultBackend: + enabled: true + image: + registry: ghcr.io + image: darkfella91/custom-error-pages + tag: v1.0.5@sha256:9bcb25b28c0bc5f3434845fa43e3ffe3f6a34c6a1c4da416298c916f5939c969 diff --git a/kubernetes/main/apps/network/nginx/external/kustomization.yaml b/kubernetes/main/apps/network/nginx/external/kustomization.yaml new file mode 100755 index 000000000..4eed917b9 --- /dev/null +++ b/kubernetes/main/apps/network/nginx/external/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/network/nginx/internal/externalsecret.yaml b/kubernetes/main/apps/network/nginx/internal/externalsecret.yaml new file mode 100644 index 000000000..8c671807c --- /dev/null +++ b/kubernetes/main/apps/network/nginx/internal/externalsecret.yaml @@ -0,0 +1,17 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret nginx-internal-dhparam +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + data: + - secretKey: dhparam.pem + remoteRef: + key: secrets/nginx-internal + property: dhparam.pem diff --git a/kubernetes/main/apps/network/nginx/internal/helmrelease.yaml b/kubernetes/main/apps/network/nginx/internal/helmrelease.yaml new file mode 100755 index 000000000..885d230f4 --- /dev/null +++ b/kubernetes/main/apps/network/nginx/internal/helmrelease.yaml @@ -0,0 +1,88 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: nginx-internal +spec: + interval: 30m + chart: + spec: + chart: ingress-nginx + version: 4.11.3 + sourceRef: + kind: HelmRepository + name: ingress-nginx + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + fullnameOverride: nginx-internal + controller: + replicaCount: 2 + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: internal.${PUBLIC_DOMAIN} + lbipam.cilium.io/ips: 192.168.91.97 + allocateLoadBalancerNodePorts: false + ingressClassResource: + name: internal + default: true + controllerValue: k8s.io/internal + admissionWebhooks: + objectSelector: + matchExpressions: + - key: ingress-class + operator: In + values: ["internal"] + allowSnippetAnnotations: true + config: + block-user-agents: 
"GPTBot,~*GPTBot*,ChatGPT-User,~*ChatGPT-User*,Google-Extended,~*Google-Extended*,CCBot,~*CCBot*,Omgilibot,~*Omgilibot*,FacebookBot,~*FacebookBot*" # taken from https://github.com/superseriousbusiness/gotosocial/blob/main/internal/web/robots.go + client-body-buffer-size: 100M + client-body-timeout: 120 + client-header-timeout: 120 + enable-brotli: "true" + disable-ipv6: "true" + disable-ipv6-dns: "true" + enable-ocsp: "true" + enable-real-ip: "true" + force-ssl-redirect: "true" + hide-headers: Server,X-Powered-By + hsts-max-age: 31449600 + keep-alive-requests: 10000 + keep-alive: 120 + log-format-escape-json: "true" + log-format-upstream: > + {"time": "$time_iso8601", "remote_addr": "proxy_protocol_addr": "$proxy_protocol_addr", "x_forwarded_for": "$proxy_add_x_forwarded_for", + "request_id": "$req_id", "remote_user": "$remote_user", "bytes_sent": $bytes_sent, "request_time": $request_time, + "status": $status, "vhost": "$host", "request_proto": "$server_protocol", "path": "$uri", "request_query": "$args", + "request_length": $request_length, "duration": $request_time, "method": "$request_method", "http_referrer": "$http_referer", + "http_user_agent": "$http_user_agent"} + proxy-body-size: 0 + proxy-buffer-size: 16k + ssl-protocols: TLSv1.3 + ssl-ciphers: ECDHE+AESGCM:DHE+AESGCM + ssl-dh-param: network/nginx-internal-dhparam + use-forwarded-headers: "true" + metrics: + enabled: true + serviceMonitor: + enabled: true + namespaceSelector: + any: true + extraArgs: + default-ssl-certificate: network/darkfellanet-tls + terminationGracePeriodSeconds: 120 + resources: + requests: + cpu: 100m + limits: + memory: 5Gi + defaultBackend: + enabled: false diff --git a/kubernetes/main/apps/network/nginx/internal/kustomization.yaml b/kubernetes/main/apps/network/nginx/internal/kustomization.yaml new file mode 100755 index 000000000..4eed917b9 --- /dev/null +++ b/kubernetes/main/apps/network/nginx/internal/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/network/nginx/ks.yaml b/kubernetes/main/apps/network/nginx/ks.yaml new file mode 100755 index 000000000..78a3364d0 --- /dev/null +++ b/kubernetes/main/apps/network/nginx/ks.yaml @@ -0,0 +1,42 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app nginx-external + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/network/nginx/external + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app nginx-internal + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/network/nginx/internal + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/oauth2-proxy/app/externalsecret.yaml 
b/kubernetes/main/apps/oauth2-proxy/app/externalsecret.yaml new file mode 100755 index 000000000..8d1c0705b --- /dev/null +++ b/kubernetes/main/apps/oauth2-proxy/app/externalsecret.yaml @@ -0,0 +1,25 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret oauth2-secret +spec: + refreshInterval: 5m + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + redis-password: "{{ .REDIS_PASSWORD }}" + cookie-secret: "{{ .OAUTH2_COOKIE_SECRET }}" + client-secret: "{{ .OAUTH2_CLIENT_SECRET }}" + client-id: "{{ .OAUTH2_CLIENT_ID }}" + dataFrom: + - extract: + key: secrets/redis + - extract: + key: secrets/oauth2 diff --git a/kubernetes/main/apps/oauth2-proxy/app/helmrelease.yaml b/kubernetes/main/apps/oauth2-proxy/app/helmrelease.yaml new file mode 100755 index 000000000..21e2ad87f --- /dev/null +++ b/kubernetes/main/apps/oauth2-proxy/app/helmrelease.yaml @@ -0,0 +1,459 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: oauth2-proxy +spec: + interval: 30m + chart: + spec: + chart: oauth2-proxy + version: 7.8.0 + sourceRef: + kind: HelmRepository + name: oauth2-proxy + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + global: {} + # To help compatibility with other charts which use global.imagePullSecrets. + # global: + # imagePullSecrets: + # - name: pullSecret1 + # - name: pullSecret2 + + ## Override the deployment namespace + ## + namespaceOverride: "" + + # Force the target Kubernetes version (it uses Helm `.Capabilities` if not set). 
+ # This is especially useful for `helm template` as capabilities are always empty + # due to the fact that it doesn't query an actual cluster + kubeVersion: + + config: + annotations: {} + existingSecret: oauth2-secret + cookieName: "_oauth2_proxy" + google: {} + # adminEmail: xxxx + # useApplicationDefaultCredentials: true + # targetPrincipal: xxxx + # serviceAccountJson: xxxx + # Alternatively, use an existing secret (see google-secret.yaml for required fields) + # Example: + # existingSecret: google-secret + # groups: [] + # Example: + # - group1@example.com + # - group2@example.com + # Default configuration, to be overridden + configFile: |- + email_domains = [ "*" ] + upstreams = [ "file:///dev/null" ] + reverse_proxy = true + scope = "openid profile email" + cookie_secure = true + provider = "keycloak-oidc" + oidc_issuer_url = "https://accounts.${PUBLIC_DOMAIN}/realms/DarkfellaNET" + code_challenge_method = "S256" + cookie_domains = ".${PUBLIC_DOMAIN}" + whitelist_domains = ".${PUBLIC_DOMAIN}" + backend_logout_url = "https://accounts.${PUBLIC_DOMAIN}/realms/DarkfellaNET/protocol/openid-connect/logout?id_token_hint={id_token}" + skip_provider_button = true + cookie_expire = "29m30s" + cookie_refresh = "1m" + # Custom configuration file: oauth2_proxy.cfg + # configFile: |- + # pass_basic_auth = false + # pass_access_token = true + # Use an existing config map (see configmap.yaml for required fields) + # Example: + # existingConfig: config + + alphaConfig: + enabled: false + # Add config annotations + annotations: {} + # Arbitrary configuration data to append to the server section + serverConfigData: {} + # Arbitrary configuration data to append to the metrics section + metricsConfigData: {} + # Arbitrary configuration data to append + configData: {} + # Arbitrary configuration to append + # This is treated as a Go template and rendered with the root context + configFile: "" + # Use an existing config map (see secret-alpha.yaml for required fields) + existingConfig: ~ + # Use an existing secret + existingSecret: ~ + + image: + repository: "quay.io/oauth2-proxy/oauth2-proxy" + tag: "v7.7.1-alpine@sha256:a6a1e44374d5d9b72cddf6e3d1177361b91698ae9c6da7e247139094494d3b93" + pullPolicy: "IfNotPresent" + command: [] + + # Optionally specify an array of imagePullSecrets. + # Secrets must be manually created in the namespace. + # ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod + imagePullSecrets: [] + # - name: myRegistryKeySecretName + + # Set a custom containerPort if required. + # This will default to 4180 if this value is not set and the httpScheme set to http + # This will default to 4443 if this value is not set and the httpScheme set to https + # containerPort: 4180 + + extraArgs: {} + extraEnv: [] + + envFrom: [] + # Load environment variables from a ConfigMap(s) and/or Secret(s) + # that already exists (created and managed by you). + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#configure-all-key-value-pairs-in-a-configmap-as-container-environment-variables + # + # PS: Changes in these ConfigMaps or Secrets will not be automatically + # detected and you must manually restart the relevant Pods after changes. 
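+  # (In this cluster that caveat is mostly covered by Reloader: workloads carry
+  # reloader.stakater.com/auto or secret.reloader.stakater.com/reload annotations,
+  # as the nginx and external-dns releases above do.)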
+  #
+  # - configMapRef:
+  #     name: special-config
+  # - secretRef:
+  #     name: special-config-secret
+
+  # -- Custom labels to add into metadata
+  customLabels: {}
+
+  # To authorize individual email addresses
+  # This is part of extraArgs, but since it needs special treatment it gets a separate section
+  authenticatedEmailsFile:
+    enabled: false
+    # Defines how the email addresses file will be projected, via a configmap or secret
+    persistence: configmap
+    # template is the name of the configmap that contains the email user list but is managed outside this chart.
+    # It's a simpler way to maintain only one configmap (user list) instead of changing it for each oauth2-proxy service.
+    # Be aware that the key under data in the external configmap needs to be named "restricted_user_access", or match the
+    # value provided in the restrictedUserAccessKey field.
+    template: ""
+    # The configmap/secret key under which the list of email access is stored
+    # Defaults to "restricted_user_access" if not filled-in, but can be overridden to allow flexibility
+    restrictedUserAccessKey: ""
+    # One email per line
+    # example:
+    # restricted_access: |-
+    #   name1@domain
+    #   name2@domain
+    # If you override the config with restricted_access, it will configure a user list within this chart, which takes care of the
+    # configmap resource.
+    restricted_access: ""
+    annotations: {}
+    # helm.sh/resource-policy: keep
+
+  service:
+    type: ClusterIP
+    # when service.type is ClusterIP ...
+    # clusterIP: 192.0.2.20
+    # when service.type is LoadBalancer ...
+    # loadBalancerIP: 198.51.100.40
+    # loadBalancerSourceRanges: 203.0.113.0/24
+    # when service.type is NodePort ...
+    # nodePort: 80
+    portNumber: 80
+    # Protocol set on the service
+    appProtocol: http
+    annotations: {}
+    # foo.io/bar: "true"
+    # configure externalTrafficPolicy
+    externalTrafficPolicy: ""
+    # configure internalTrafficPolicy
+    internalTrafficPolicy: ""
+
+  ## Create or use ServiceAccount
+  serviceAccount:
+    ## Specifies whether a ServiceAccount should be created
+    enabled: true
+    ## The name of the ServiceAccount to use.
+    ## If not set and create is true, a name is generated using the fullname template
+    name:
+    automountServiceAccountToken: true
+    annotations: {}
+
+  ingress:
+    enabled: true
+    annotations:
+      external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN}
+    className: internal
+    path: /ready
+    # Only used if API capabilities (networking.k8s.io/v1) allow it
+    pathType: Prefix
+    # Used to create an Ingress record.
+    hosts:
+      - oauth2-proxy.${PUBLIC_DOMAIN}
+
+  resources: {}
+  # limits:
+  #   cpu: 100m
+  #   memory: 300Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 300Mi
+
+  extraVolumes: []
+  # - name: ca-bundle-cert
+  #   secret:
+  #     secretName:
+
+  extraVolumeMounts: []
+  # - mountPath: /etc/ssl/certs/
+  #   name: ca-bundle-cert
+
+  # Additional containers to be added to the pod.
+  extraContainers: []
+  # - name: my-sidecar
+  #   image: nginx:latest
+
+  priorityClassName: ""
+
+  # hostAliases is a list of aliases to be added to /etc/hosts for network name resolution
+  hostAliases: []
+  # - ip: "10.xxx.xxx.xxx"
+  #   hostnames:
+  #     - "auth.example.com"
+  # - ip: 127.0.0.1
+  #   hostnames:
+  #     - chart-example.local
+  #     - example.local
+
+  # [TopologySpreadConstraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) configuration.
+ # Ref: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling + # topologySpreadConstraints: [] + + # Affinity for pod assignment + # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + # affinity: {} + + # Tolerations for pod assignment + # Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + tolerations: [] + + # Node labels for pod assignment + # Ref: https://kubernetes.io/docs/user-guide/node-selection/ + nodeSelector: {} + + # Whether to use secrets instead of environment values for setting up OAUTH2_PROXY variables + proxyVarsAsSecrets: true + + # Configure Kubernetes liveness and readiness probes. + # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + # Disable both when deploying with Istio 1.0 mTLS. https://istio.io/help/faq/security/#k8s-health-checks + livenessProbe: + enabled: true + initialDelaySeconds: 0 + timeoutSeconds: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 0 + timeoutSeconds: 5 + periodSeconds: 10 + successThreshold: 1 + + # Configure Kubernetes security context for container + # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: + enabled: true + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 2000 + runAsGroup: 2000 + seccompProfile: + type: RuntimeDefault + + deploymentAnnotations: {} + podAnnotations: {} + podLabels: {} + replicaCount: 1 + revisionHistoryLimit: 10 + strategy: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + podDisruptionBudget: + enabled: true + minAvailable: 1 + + ## Horizontal Pod Autoscaling + ## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 10 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + annotations: {} + + podSecurityContext: {} + + httpScheme: http + + initContainers: + waitForRedis: + enabled: true + image: + repository: "alpine" + tag: "latest" + pullPolicy: "IfNotPresent" + # uses the kubernetes version of the cluster + # the chart is deployed on, if not set + kubectlVersion: "" + securityContext: + enabled: true + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + seccompProfile: + type: RuntimeDefault + timeout: 180 + resources: {} + # limits: + # cpu: 100m + # memory: 300Mi + # requests: + # cpu: 100m + # memory: 300Mi + + # Additionally authenticate against a htpasswd file. Entries must be created with "htpasswd -B" for bcrypt encryption. + # Alternatively supply an existing secret which contains the required information. 
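+  # (Left disabled here: authentication is delegated to Keycloak through the
+  # keycloak-oidc provider set in configFile above.)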
+ htpasswdFile: + enabled: false + existingSecret: "" + entries: [] + # One row for each user + # example: + # entries: + # - testuser:$2y$05$gY6dgXqjuzFhwdhsiFe7seM9q9Tile4Y3E.CBpAZJffkeiLaC21Gy + + sessionStorage: + type: redis + redis: + existingSecret: "oauth2-secret" + passwordKey: "redis-password" + clientType: "standalone" + standalone: + connectionUrl: "redis://redis-master.database.svc.cluster.local:6379" + + redis: + enabled: false + + # Enables apiVersion deprecation checks + checkDeprecation: true + + # Allows graceful shutdown + # terminationGracePeriodSeconds: 65 + # lifecycle: + # preStop: + # exec: + # command: [ "sh", "-c", "sleep 60" ] + + metrics: + # Enable Prometheus metrics endpoint + enabled: true + # Serve Prometheus metrics on this port + port: 44180 + # when service.type is NodePort ... + # nodePort: 44180 + # Protocol set on the service for the metrics port + service: + appProtocol: http + serviceMonitor: + # Enable Prometheus Operator ServiceMonitor + enabled: false + # Define the namespace where to deploy the ServiceMonitor resource + namespace: "" + # Prometheus Instance definition + prometheusInstance: default + # Prometheus scrape interval + interval: 60s + # Prometheus scrape timeout + scrapeTimeout: 30s + # Add custom labels to the ServiceMonitor resource + labels: {} + + ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. + scheme: "" + + ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. + ## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig + tlsConfig: {} + + ## bearerTokenFile: Path to bearer token file. + bearerTokenFile: "" + + ## Used to pass annotations that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + annotations: {} + + ## Metric relabel configs to apply to samples before ingestion. + ## [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs) + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## Relabel configs to apply to samples before ingestion. 
+ ## [Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + # Extra K8s manifests to deploy + extraObjects: [] + # - apiVersion: secrets-store.csi.x-k8s.io/v1 + # kind: SecretProviderClass + # metadata: + # name: oauth2-proxy-secrets-store + # spec: + # provider: aws + # parameters: + # objects: | + # - objectName: "oauth2-proxy" + # objectType: "secretsmanager" + # jmesPath: + # - path: "client_id" + # objectAlias: "client-id" + # - path: "client_secret" + # objectAlias: "client-secret" + # - path: "cookie_secret" + # objectAlias: "cookie-secret" + # secretObjects: + # - data: + # - key: client-id + # objectName: client-id + # - key: client-secret + # objectName: client-secret + # - key: cookie-secret + # objectName: cookie-secret + # secretName: oauth2-proxy-secrets-store + # type: Opaque diff --git a/kubernetes/main/apps/oauth2-proxy/app/ingress-external.yaml b/kubernetes/main/apps/oauth2-proxy/app/ingress-external.yaml new file mode 100755 index 000000000..916d08f83 --- /dev/null +++ b/kubernetes/main/apps/oauth2-proxy/app/ingress-external.yaml @@ -0,0 +1,28 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: oauth2-proxy-external + namespace: oauth2-proxy + annotations: + external-dns.alpha.kubernetes.io/ingress-hostname-source: annotation-only + cert-manager.io/cluster-issuer: zerossl-prod + cert-manager.io/private-key-rotation-policy: Always + cert-manager.io/private-key-algorithm: ECDSA + cert-manager.io/private-key-size: "384" +spec: + ingressClassName: external + tls: + - hosts: + - "*.${PUBLIC_DOMAIN}" + secretName: oauth2-proxy-tls + rules: + - host: vaultwarden.${PUBLIC_DOMAIN} + http: + paths: + - path: /oauth2 + pathType: ImplementationSpecific + backend: + service: + name: oauth2-proxy + port: + number: 80 diff --git a/kubernetes/main/apps/oauth2-proxy/app/ingress-internal.yaml b/kubernetes/main/apps/oauth2-proxy/app/ingress-internal.yaml new file mode 100755 index 000000000..8c36db499 --- /dev/null +++ b/kubernetes/main/apps/oauth2-proxy/app/ingress-internal.yaml @@ -0,0 +1,74 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: oauth2-proxy-internal + namespace: oauth2-proxy + annotations: + external-dns.alpha.kubernetes.io/ingress-hostname-source: annotation-only +spec: + ingressClassName: internal + tls: + - hosts: + - "*.${PUBLIC_DOMAIN}" + secretName: oauth2-proxy-tls + rules: + - host: radarr.${PUBLIC_DOMAIN} + http: + paths: + - path: /oauth2 + pathType: ImplementationSpecific + backend: + service: + name: oauth2-proxy + port: + number: 80 + - host: sonarr.${PUBLIC_DOMAIN} + http: + paths: + - path: /oauth2 + pathType: ImplementationSpecific + backend: + service: + name: oauth2-proxy + port: + number: 80 + - host: prowlarr.${PUBLIC_DOMAIN} + http: + paths: + - path: /oauth2 + pathType: ImplementationSpecific + backend: + service: + name: oauth2-proxy + port: + number: 80 + - host: qbittorrent.${PUBLIC_DOMAIN} + http: + paths: + - path: /oauth2 + pathType: ImplementationSpecific + backend: + service: + name: oauth2-proxy + port: + number: 80 + - host: sabnzbd.${PUBLIC_DOMAIN} + http: + paths: + - path: /oauth2 + pathType: ImplementationSpecific + backend: + service: + name: oauth2-proxy + port: + number: 80 + - host: bazarr.${PUBLIC_DOMAIN} + http: + paths: + - path: /oauth2 + pathType: ImplementationSpecific 
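+          # (Every vhost guarded by oauth2-proxy repeats this same /oauth2 rule so
+          # the auth_request callback stays on the protected host itself.)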
+ backend: + service: + name: oauth2-proxy + port: + number: 80 diff --git a/kubernetes/main/apps/oauth2-proxy/app/kustomization.yaml b/kubernetes/main/apps/oauth2-proxy/app/kustomization.yaml new file mode 100755 index 000000000..f936eea5d --- /dev/null +++ b/kubernetes/main/apps/oauth2-proxy/app/kustomization.yaml @@ -0,0 +1,18 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml + - ./ingress-external.yaml + - ./ingress-internal.yaml +configMapGenerator: + - name: oauth2-proxy-gatus-ep + options: + labels: + gatus.io/enabled: "true" + files: + - config.yaml=./resources/gatus-ep.yaml +generatorOptions: + disableNameSuffixHash: true diff --git a/kubernetes/main/apps/oauth2-proxy/app/resources/gatus-ep.yaml b/kubernetes/main/apps/oauth2-proxy/app/resources/gatus-ep.yaml new file mode 100755 index 000000000..127b700c1 --- /dev/null +++ b/kubernetes/main/apps/oauth2-proxy/app/resources/gatus-ep.yaml @@ -0,0 +1,15 @@ +endpoints: + - name: "OAuth2-Proxy" + group: guarded + url: "https://oauth2-proxy.${PUBLIC_DOMAIN}/ready" + interval: 1m + ui: + hide-hostname: true + hide-url: true + client: + dns-resolver: tcp://172.17.0.10:53 + conditions: + - "[STATUS] == 200" + - "[BODY] == OK" + alerts: + - type: pushover diff --git a/kubernetes/main/apps/oauth2-proxy/ks.yaml b/kubernetes/main/apps/oauth2-proxy/ks.yaml new file mode 100755 index 000000000..81107efda --- /dev/null +++ b/kubernetes/main/apps/oauth2-proxy/ks.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app oauth2-proxy + namespace: flux-system +spec: + targetNamespace: oauth2-proxy + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/oauth2-proxy/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/oauth2-proxy/kustomization.yaml b/kubernetes/main/apps/oauth2-proxy/kustomization.yaml new file mode 100755 index 000000000..ad2040382 --- /dev/null +++ b/kubernetes/main/apps/oauth2-proxy/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + - ./ks.yaml diff --git a/kubernetes/main/apps/oauth2-proxy/namespace.yaml b/kubernetes/main/apps/oauth2-proxy/namespace.yaml new file mode 100755 index 000000000..5b602cac9 --- /dev/null +++ b/kubernetes/main/apps/oauth2-proxy/namespace.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: oauth2-proxy diff --git a/kubernetes/main/apps/observability/gatus/app/externalsecret.yaml b/kubernetes/main/apps/observability/gatus/app/externalsecret.yaml new file mode 100755 index 000000000..38de3b20b --- /dev/null +++ b/kubernetes/main/apps/observability/gatus/app/externalsecret.yaml @@ -0,0 +1,32 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret gatus-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + 
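# NOTE: two password forms are kept on purpose: INIT_POSTGRES_PASS feeds the
+      # postgres-init init container, while GATUS_POSTGRES_ENCODED_PASS is the
+      # URL-encoded variant interpolated into the postgres:// URL in config.yaml.
+      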
engineVersion: v2 + data: + PUSHOVER_TOKEN: "{{ .GATUS_PUSHOVER_TOKEN }}" + PUSHOVER_USER_KEY: "{{ .PUSHOVER_USER_KEY }}" + INIT_POSTGRES_DBNAME: gatus + INIT_POSTGRES_HOST: postgres17-rw.database.svc.cluster.local + INIT_POSTGRES_USER: "{{ .GATUS_POSTGRES_USER }}" + INIT_POSTGRES_PASS: "{{ .GATUS_POSTGRES_PASS }}" + GATUS_POSTGRES_ENCODED_PASS: "{{ .GATUS_POSTGRES_ENCODED_PASS }}" + INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}" + OIDC_CLIENT_ID: "{{ .OIDC_CLIENT_ID }}" + OIDC_CLIENT_SECRET: "{{ .OIDC_CLIENT_SECRET }}" + dataFrom: + - extract: + key: secrets/cloudnative-pg + - extract: + key: secrets/pushover + - extract: + key: secrets/gatus diff --git a/kubernetes/main/apps/observability/gatus/app/helmrelease.yaml b/kubernetes/main/apps/observability/gatus/app/helmrelease.yaml new file mode 100755 index 000000000..e62da92cd --- /dev/null +++ b/kubernetes/main/apps/observability/gatus/app/helmrelease.yaml @@ -0,0 +1,146 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: gatus +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + controllers: + gatus: + annotations: + reloader.stakater.com/auto: "true" + initContainers: + init-db: + image: + repository: ghcr.io/onedr0p/postgres-init + tag: 16.6@sha256:35353a77777ee8f634d0f3945f495b4a40065134b8619e0d18bd49b0ee9c855b + envFrom: &envFrom + - secretRef: + name: gatus-secret + init-config: + dependsOn: init-db + image: + repository: ghcr.io/kiwigrid/k8s-sidecar + tag: 1.28.0@sha256:4166a019eeafd1f0fef4d867dc5f224f18d84ec8681dbb31f3ca258ecf07bcf2 + env: + FOLDER: /config + LABEL: gatus.io/enabled + NAMESPACE: ALL + RESOURCE: both + UNIQUE_FILENAMES: true + METHOD: WATCH + restartPolicy: Always + resources: &resources + requests: + cpu: 20m + limits: + memory: 256Mi + containers: + app: + image: + repository: ghcr.io/twin/gatus + tag: v5.13.1@sha256:24842a8adebd3dd4bd04a4038ffa27cb2fe72bb50631415e0fb2915063fc1993 + env: + TZ: Europe/Sofia + GATUS_CONFIG_PATH: /config + GATUS_DELAY_START_SECONDS: 5 + WEB_PORT: &port 80 + envFrom: *envFrom + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /health + port: *port + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: *resources + defaultPodOptions: + dnsConfig: + options: + - { name: ndots, value: "1" } + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + fsGroup: 65534 + fsGroupChangePolicy: OnRootMismatch + seccompProfile: { type: RuntimeDefault } + service: + app: + controller: gatus + ports: + http: + port: *port + serviceMonitor: + app: + serviceName: gatus + endpoints: + - port: http + scheme: http + path: /metrics + interval: 1m + scrapeTimeout: 10s + ingress: + app: + annotations: + external-dns.alpha.kubernetes.io/target: external.${PUBLIC_DOMAIN} + nginx.ingress.kubernetes.io/configuration-snippet: | + more_set_headers "access-control-allow-origin https://status.${PUBLIC_DOMAIN}"; + more_set_headers 
"Content-Security-Policy: default-src 'none'; script-src 'self' 'sha256-vOq0p1C22jhkdRTdIHA8DSPgcwXkh5mq8FK4cumu/wU='; style-src https://status.${PUBLIC_DOMAIN}/css/app.css; img-src 'self'; connect-src https://status.${PUBLIC_DOMAIN}; manifest-src 'self'"; + cert-manager.io/cluster-issuer: zerossl-prod + cert-manager.io/private-key-rotation-policy: Always + cert-manager.io/private-key-algorithm: ECDSA + cert-manager.io/private-key-size: "384" + className: external + tls: + - hosts: + - &host "status.${PUBLIC_DOMAIN}" + secretName: gatus-tls + hosts: + - host: *host + paths: + - path: / + service: + identifier: app + port: http + serviceAccount: + create: true + name: gatus + persistence: + config: + type: emptyDir + config-file: + type: configMap + name: gatus-configmap + globalMounts: + - path: /config/config.yaml + subPath: config.yaml + readOnly: true diff --git a/kubernetes/main/apps/observability/gatus/app/kustomization.yaml b/kubernetes/main/apps/observability/gatus/app/kustomization.yaml new file mode 100755 index 000000000..30bf43b95 --- /dev/null +++ b/kubernetes/main/apps/observability/gatus/app/kustomization.yaml @@ -0,0 +1,14 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./rbac.yaml + - ./helmrelease.yaml +configMapGenerator: + - name: gatus-configmap + files: + - config.yaml=./resources/config.yaml +generatorOptions: + disableNameSuffixHash: true diff --git a/kubernetes/main/apps/observability/gatus/app/rbac.yaml b/kubernetes/main/apps/observability/gatus/app/rbac.yaml new file mode 100755 index 000000000..0f12c439b --- /dev/null +++ b/kubernetes/main/apps/observability/gatus/app/rbac.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: gatus +rules: + - apiGroups: [""] + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: gatus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: gatus +subjects: + - kind: ServiceAccount + name: gatus + namespace: observability diff --git a/kubernetes/main/apps/observability/gatus/app/resources/config.yaml b/kubernetes/main/apps/observability/gatus/app/resources/config.yaml new file mode 100755 index 000000000..7830278e3 --- /dev/null +++ b/kubernetes/main/apps/observability/gatus/app/resources/config.yaml @@ -0,0 +1,57 @@ +--- +# Note: Gatus vars should be escaped with $${VAR_NAME} to avoid interpolation by Flux +security: + oidc: + issuer-url: https://accounts.${PUBLIC_DOMAIN}/realms/DarkfellaNET + client-id: $${OIDC_CLIENT_ID} + client-secret: $${OIDC_CLIENT_SECRET} + redirect-url: https://status.${PUBLIC_DOMAIN}/authorization-code/callback + scopes: [openid] +web: + port: $${WEB_PORT} +storage: + type: postgres + path: postgres://$${INIT_POSTGRES_USER}:$${GATUS_POSTGRES_ENCODED_PASS}@$${INIT_POSTGRES_HOST}:5432/$${INIT_POSTGRES_DBNAME}?sslmode=disable + caching: true +metrics: true +debug: false +ui: + title: Status | Gatus + header: Status +alerting: + pushover: + title: Gatus + application-token: $${PUSHOVER_TOKEN} + user-key: $${PUSHOVER_USER_KEY} + priority: 1 + default-alert: + description: health-check failed + send-on-resolved: true + failure-threshold: 3 + success-threshold: 3 +connectivity: + checker: + target: 1.1.1.1:53 + interval: 1m +endpoints: + - name: status + group: external + url: 
https://status.${PUBLIC_DOMAIN}/health + interval: 1m + client: + dns-resolver: tcp://1.1.1.1:53 + conditions: + - "[STATUS] == 200" + - "[BODY].status == UP" + alerts: + - type: pushover + - name: flux-webhook + group: external + url: https://flux-webhook.${PUBLIC_DOMAIN} + interval: 1m + client: + dns-resolver: tcp://1.1.1.1:53 + conditions: + - "[STATUS] == 404" + alerts: + - type: pushover diff --git a/kubernetes/main/apps/observability/gatus/ks.yaml b/kubernetes/main/apps/observability/gatus/ks.yaml new file mode 100755 index 000000000..878b1e591 --- /dev/null +++ b/kubernetes/main/apps/observability/gatus/ks.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app gatus + namespace: flux-system +spec: + targetNamespace: observability + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: cloudnative-pg-cluster + - name: external-secrets-stores + path: ./kubernetes/main/apps/observability/gatus/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/observability/grafana/app/externalsecret.yaml b/kubernetes/main/apps/observability/grafana/app/externalsecret.yaml new file mode 100755 index 000000000..7c186bddd --- /dev/null +++ b/kubernetes/main/apps/observability/grafana/app/externalsecret.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret grafana-oauth-client-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + client_id: "{{ .GRAFANA_OAUTH_ID }}" + client_secret: "{{ .GRAFANA_OAUTH_SECRET }}" + dataFrom: + - extract: + key: secrets/grafana diff --git a/kubernetes/main/apps/observability/grafana/app/helmrelease.yaml b/kubernetes/main/apps/observability/grafana/app/helmrelease.yaml new file mode 100755 index 000000000..61c1df6f0 --- /dev/null +++ b/kubernetes/main/apps/observability/grafana/app/helmrelease.yaml @@ -0,0 +1,258 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: grafana +spec: + interval: 30m + chart: + spec: + chart: grafana + version: 8.6.3 + sourceRef: + kind: HelmRepository + name: grafana + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + annotations: + reloader.stakater.com/auto: "true" + extraSecretMounts: + - name: oauth-client-credentials + secretName: grafana-oauth-client-secret + defaultMode: 0440 + mountPath: /etc/secrets/oauth-client-credentials + readOnly: true + deploymentStrategy: + type: Recreate + env: + GF_DATE_FORMATS_USE_BROWSER_LOCALE: true + GF_EXPLORE_ENABLED: true + GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS: natel-discrete-panel,panodata-map-panel + GF_SECURITY_ANGULAR_SUPPORT_ENABLED: false + GF_SERVER_ROOT_URL: https://grafana.${PUBLIC_DOMAIN} + grafana.ini: + auth: + oauth_auto_login: true + disable_login_form: true + oauth_skip_org_role_update_sync: false + skip_org_role_sync: false + auth.generic_oauth: + name: 
Keycloak-OAuth + enabled: true + use_refresh_token: true + email_attribute_path: email + login_attribute_path: preferred_username + name_attribute_path: name + client_id: "$__file{/etc/secrets/oauth-client-credentials/client_id}" + client_secret: "$__file{/etc/secrets/oauth-client-credentials/client_secret}" + scopes: "openid email profile roles" + auth_url: "https://accounts.${PUBLIC_DOMAIN}/realms/DarkfellaNET/protocol/openid-connect/auth" + token_url: "https://accounts.${PUBLIC_DOMAIN}/realms/DarkfellaNET/protocol/openid-connect/token" + api_url: "https://accounts.${PUBLIC_DOMAIN}/realms/DarkfellaNET/protocol/openid-connect/userinfo" + signout_redirect_url: "https://accounts.${PUBLIC_DOMAIN}/realms/DarkfellaNET/protocol/openid-connect/logout?post_logout_redirect_uri=https%3A%2F%2Fgrafana.${PUBLIC_DOMAIN}%2Flogin" + role_attribute_path: contains(resource_access."46f5398b-91b2-4706-bfc3-5f2f4ad624fc".roles[*], 'admin') && 'Admin' || contains(resource_access."46f5398b-91b2-4706-bfc3-5f2f4ad624fc".roles[*], 'editor') && 'Editor' || 'Viewer' + role_attribute_strict: true + allow_assign_grafana_admin: true + use_pkce: true + users: + auto_assign_org: true + auto_assign_org_role: Viewer + auto_assign_org_id: 1 + security: + disable_initial_admin_creation: true + allow_embedding: false + cookie_secure: true + + analytics: + check_for_updates: false + check_for_plugin_updates: false + reporting_enabled: false + auth.anonymous: + enabled: false + auth.basic: + enabled: false + news: + news_feed_enabled: false + datasources: + datasources.yaml: + apiVersion: 1 + deleteDatasources: + - { name: Alertmanager, orgId: 1 } + - { name: Loki, orgId: 1 } + - { name: Prometheus, orgId: 1 } + datasources: + - name: Prometheus + type: prometheus + uid: prometheus + access: proxy + url: http://prometheus-operated.observability.svc.cluster.local:9090 + jsonData: + timeInterval: 1m + isDefault: true + - name: Loki + type: loki + uid: loki + access: proxy + url: http://loki-headless.observability.svc.cluster.local:3100 + jsonData: + maxLines: 250 + - name: Alertmanager + type: alertmanager + uid: alertmanager + access: proxy + url: http://alertmanager-operated.observability.svc.cluster.local:9093 + jsonData: + implementation: prometheus + dashboardProviders: + dashboardproviders.yaml: + apiVersion: 1 + providers: + - name: default + orgId: 1 + folder: "" + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards/default + dashboards: + default: + nginx: + url: https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/grafana/dashboards/nginx.json + datasource: Prometheus + nginx-request-handling-performance: + url: https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/grafana/dashboards/request-handling-performance.json + datasource: Prometheus + cert-manager: + url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/cert-manager/dashboards/overview.json + datasource: Prometheus + cloudflared: + # renovate: depName="Cloudflare Tunnels (cloudflared)" + gnetId: 17457 + revision: 6 + datasource: + - { name: DS_PROMETHEUS, value: Prometheus } + external-dns: + # renovate: depName="External-dns" + gnetId: 15038 + revision: 3 + datasource: Prometheus + external-secrets: + url: https://raw.githubusercontent.com/external-secrets/external-secrets/main/docs/snippets/dashboard.json + datasource: Prometheus + flux-cluster: + url: 
https://raw.githubusercontent.com/fluxcd/flux2-monitoring-example/main/monitoring/configs/dashboards/cluster.json + datasource: Prometheus + flux-control-plane: + url: https://raw.githubusercontent.com/fluxcd/flux2-monitoring-example/main/monitoring/configs/dashboards/control-plane.json + datasource: Prometheus + flux-logs: + url: https://raw.githubusercontent.com/fluxcd/flux2-monitoring-example/main/monitoring/configs/dashboards/logs.json + datasource: Loki + kubernetes-api-server: + # renovate: depName="Kubernetes / System / API Server" + gnetId: 15761 + revision: 18 + datasource: Prometheus + kubernetes-coredns: + # renovate: depName="Kubernetes / System / CoreDNS" + gnetId: 15762 + revision: 19 + datasource: Prometheus + kubernetes-global: + # renovate: depName="Kubernetes / Views / Global" + gnetId: 15757 + revision: 42 + datasource: Prometheus + kubernetes-namespaces: + # renovate: depName="Kubernetes / Views / Namespaces" + gnetId: 15758 + revision: 41 + datasource: Prometheus + kubernetes-nodes: + # renovate: depName="Kubernetes / Views / Nodes" + gnetId: 15759 + revision: 32 + datasource: Prometheus + kubernetes-pods: + # renovate: depName="Kubernetes / Views / Pods" + gnetId: 15760 + revision: 34 + datasource: Prometheus + kubernetes-volumes: + # renovate: depName="K8s / Storage / Volumes / Cluster" + gnetId: 11454 + revision: 14 + datasource: Prometheus + node-exporter-full: + # renovate: depName="Node Exporter Full" + gnetId: 1860 + revision: 37 + datasource: Prometheus + prometheus: + # renovate: depName="Prometheus" + gnetId: 19105 + revision: 6 + datasource: Prometheus + unpackerr: + # renovate: depName="Unpackerr" + gnetId: 18817 + revision: 1 + datasource: + - { name: DS_PROMETHEUS, value: Prometheus } + volsync: + # renovate: depName="VolSync Dashboard" + gnetId: 21356 + revision: 3 + datasource: + - { name: DS_PROMETHEUS, value: Prometheus } + - { name: VAR_REPLICATIONDESTNAME, value: .*-dst } + sidecar: + dashboards: + enabled: true + searchNamespace: ALL + label: grafana_dashboard + folderAnnotation: grafana_folder + provider: + disableDelete: true + foldersFromFilesStructure: true + datasources: + enabled: true + searchNamespace: ALL + labelValue: "" + plugins: + - grafana-clock-panel + - grafana-piechart-panel + - grafana-worldmap-panel + - natel-discrete-panel + - vonage-status-panel + serviceMonitor: + enabled: true + ingress: + enabled: true + annotations: + external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN} + cert-manager.io/cluster-issuer: zerossl-prod + cert-manager.io/private-key-rotation-policy: Always + cert-manager.io/private-key-algorithm: ECDSA + cert-manager.io/private-key-size: "384" + ingressClassName: internal + tls: + - hosts: + - &host "grafana.${PUBLIC_DOMAIN}" + secretName: grafana-tls + hosts: + - *host + persistence: + enabled: false + testFramework: + enabled: false diff --git a/kubernetes/main/apps/observability/grafana/app/kustomization.yaml b/kubernetes/main/apps/observability/grafana/app/kustomization.yaml new file mode 100755 index 000000000..4eed917b9 --- /dev/null +++ b/kubernetes/main/apps/observability/grafana/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/observability/grafana/ks.yaml b/kubernetes/main/apps/observability/grafana/ks.yaml new file mode 100755 index 000000000..dd21bf3f3 --- 
/dev/null +++ b/kubernetes/main/apps/observability/grafana/ks.yaml @@ -0,0 +1,22 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app grafana + namespace: flux-system +spec: + targetNamespace: observability + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/observability/grafana/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/observability/kube-prometheus-stack/app/externalsecret.yaml b/kubernetes/main/apps/observability/kube-prometheus-stack/app/externalsecret.yaml new file mode 100755 index 000000000..7d61f5181 --- /dev/null +++ b/kubernetes/main/apps/observability/kube-prometheus-stack/app/externalsecret.yaml @@ -0,0 +1,90 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: alertmanager +spec: + refreshInterval: 5m + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: alertmanager-secret + template: + engineVersion: v2 + data: + # Yo dawg I heard you like go templating so I put go templates in your go templates + alertmanager.yaml: | + global: + resolve_timeout: 5m + route: + group_by: ["alertname", "job"] + group_interval: 10m + group_wait: 1m + receiver: pushover + repeat_interval: 12h + routes: + - receiver: heartbeat + group_interval: 5m + group_wait: 0s + matchers: + - alertname =~ "Watchdog" + repeat_interval: 5m + - receiver: "null" + matchers: + - alertname =~ "InfoInhibitor" + - receiver: pushover + continue: true + matchers: + - severity = "critical" + inhibit_rules: + - equal: ["alertname", "namespace"] + source_matchers: + - severity = "critical" + target_matchers: + - severity = "warning" + receivers: + - name: heartbeat + webhook_configs: + - send_resolved: true + url: "{{ .ALERTMANAGER_CLUSTER_MAIN_HEARTBEAT_URL }}" + - name: "null" + - name: pushover + pushover_configs: + - html: true + message: |- + {{ "{{-" }} range .Alerts {{ "}}" }} + {{ "{{-" }} if ne .Annotations.description "" {{ "}}" }} + {{ "{{" }} .Annotations.description {{ "}}" }} + {{ "{{-" }} else if ne .Annotations.summary "" {{ "}}" }} + {{ "{{" }} .Annotations.summary {{ "}}" }} + {{ "{{-" }} else if ne .Annotations.message "" {{ "}}" }} + {{ "{{" }} .Annotations.message {{ "}}" }} + {{ "{{-" }} else {{ "}}" }} + Alert description not available + {{ "{{-" }} end {{ "}}" }} + {{ "{{-" }} if gt (len .Labels.SortedPairs) 0 {{ "}}" }} + + {{ "{{-" }} range .Labels.SortedPairs {{ "}}" }} + {{ "{{" }} .Name {{ "}}" }}: {{ "{{" }} .Value {{ "}}" }} + {{ "{{-" }} end {{ "}}" }} + + {{ "{{-" }} end {{ "}}" }} + {{ "{{-" }} end {{ "}}" }} + priority: |- + {{ "{{" }} if eq .Status "firing" {{ "}}" }}1{{ "{{" }} else {{ "}}" }}0{{ "{{" }} end {{ "}}" }} + send_resolved: true + sound: gamelan + # ttl: 1d + title: >- + [{{ "{{" }} .Status | toUpper {{ "}}" }}{{ "{{" }} if eq .Status "firing" {{ "}}" }}:{{ "{{" }} .Alerts.Firing | len {{ "}}" }}{{ "{{" }} end {{ "}}" }}] + {{ "{{" }} .CommonLabels.alertname {{ "}}" }} + token: "{{ .ALERTMANAGER_PUSHOVER_TOKEN }}" + url_title: View in Alertmanager + user_key: "{{ .PUSHOVER_USER_KEY }}" + dataFrom: + - extract: + key: secrets/pushover + 
- extract: + key: secrets/alertmanager diff --git a/kubernetes/main/apps/observability/kube-prometheus-stack/app/helmrelease.yaml b/kubernetes/main/apps/observability/kube-prometheus-stack/app/helmrelease.yaml new file mode 100755 index 000000000..7a26412ea --- /dev/null +++ b/kubernetes/main/apps/observability/kube-prometheus-stack/app/helmrelease.yaml @@ -0,0 +1,147 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: kube-prometheus-stack +spec: + interval: 30m + timeout: 15m + chart: + spec: + chart: kube-prometheus-stack + version: 66.3.0 + sourceRef: + kind: HelmRepository + name: prometheus-community + namespace: flux-system + install: + crds: Skip + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + crds: Skip + remediation: + strategy: rollback + retries: 3 + dependsOn: + - name: prometheus-operator-crds + namespace: observability + values: + crds: + enabled: false + cleanPrometheusOperatorObjectNames: true + alertmanager: + ingress: + enabled: true + annotations: + external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN} + cert-manager.io/cluster-issuer: zerossl-prod + cert-manager.io/private-key-rotation-policy: Always + cert-manager.io/private-key-algorithm: ECDSA + cert-manager.io/private-key-size: "384" + ingressClassName: internal + tls: + - hosts: &host ["alertmanager.${PUBLIC_DOMAIN}"] + secretName: alertmanager-tls + hosts: *host + pathType: Prefix + alertmanagerSpec: + useExistingSecret: true + configSecret: alertmanager-secret + storage: + volumeClaimTemplate: + spec: + storageClassName: openebs-zfs-128k + resources: + requests: + storage: 1Gi + kubeApiServer: + serviceMonitor: + selector: + k8s-app: kube-apiserver + kubeScheduler: + service: + selector: + k8s-app: kube-scheduler + kubeControllerManager: &kubeControllerManager + service: + selector: + k8s-app: kube-controller-manager + kubeEtcd: + <<: *kubeControllerManager # etcd runs on control plane nodes + kubeProxy: + enabled: false + prometheus: + ingress: + enabled: true + annotations: + external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN} + cert-manager.io/cluster-issuer: zerossl-prod + cert-manager.io/private-key-rotation-policy: Always + cert-manager.io/private-key-algorithm: ECDSA + cert-manager.io/private-key-size: "384" + ingressClassName: internal + tls: + - hosts: &host ["prometheus.${PUBLIC_DOMAIN}"] + secretName: prometheus-tls + hosts: *host + pathType: Prefix + prometheusSpec: + scrapeInterval: 1m # Must match interval in Grafana Helm chart + podMonitorSelector: &selector + matchLabels: null + probeSelector: *selector + ruleSelector: *selector + scrapeConfigSelector: *selector + serviceMonitorSelector: *selector + enableAdminAPI: true + walCompression: true + enableFeatures: + - auto-gomemlimit + - memory-snapshot-on-shutdown + - new-service-discovery-manager + retention: 14d + retentionSize: 70GB + resources: + requests: + cpu: 100m + limits: + memory: 1500Mi + storageSpec: + volumeClaimTemplate: + spec: + storageClassName: openebs-zfs-128k + resources: + requests: + storage: 75Gi + prometheus-node-exporter: + fullnameOverride: node-exporter + prometheus: + monitor: + enabled: true + relabelings: + - action: replace + regex: (.*) + replacement: $1 + sourceLabels: ["__meta_kubernetes_pod_node_name"] + targetLabel: kubernetes_node + kube-state-metrics: + fullnameOverride: kube-state-metrics + metricLabelsAllowlist: + - 
pods=[*] + - deployments=[*] + - persistentvolumeclaims=[*] + prometheus: + monitor: + enabled: true + relabelings: + - action: replace + regex: (.*) + replacement: $1 + sourceLabels: ["__meta_kubernetes_pod_node_name"] + targetLabel: kubernetes_node + grafana: + enabled: false + forceDeployDashboards: true diff --git a/kubernetes/main/apps/observability/kube-prometheus-stack/app/kustomization.yaml b/kubernetes/main/apps/observability/kube-prometheus-stack/app/kustomization.yaml new file mode 100755 index 000000000..9cffb524f --- /dev/null +++ b/kubernetes/main/apps/observability/kube-prometheus-stack/app/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml + - ./prometheusrule.yaml diff --git a/kubernetes/main/apps/observability/kube-prometheus-stack/app/prometheusrule.yaml b/kubernetes/main/apps/observability/kube-prometheus-stack/app/prometheusrule.yaml new file mode 100755 index 000000000..4d880fa20 --- /dev/null +++ b/kubernetes/main/apps/observability/kube-prometheus-stack/app/prometheusrule.yaml @@ -0,0 +1,25 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/monitoring.coreos.com/prometheusrule_v1.json +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: miscellaneous-rules +spec: + groups: + - name: dockerhub + rules: + - alert: BootstrapRateLimitRisk + annotations: + summary: Kubernetes cluster at risk of being rate limited by dockerhub on bootstrap + expr: count(time() - container_last_seen{image=~"(docker.io).*",container!=""} < 30) > 100 + for: 15m + labels: + severity: critical + - name: oom + rules: + - alert: OOMKilled + annotations: + summary: Container {{ $labels.container }} in pod {{ $labels.namespace }}/{{ $labels.pod }} has been OOMKilled {{ $value }} times in the last 10 minutes. 
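+ # Annotation added for clarity: the expression below joins two vectors with "and". The left-hand side detects at least one container restart over the last 10m, + # while the right-hand side (matched with the extra "reason" label ignored) confirms the last termination reason stayed OOMKilled throughout that window.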
+ expr: (kube_pod_container_status_restarts_total - kube_pod_container_status_restarts_total offset 10m >= 1) and ignoring (reason) min_over_time(kube_pod_container_status_last_terminated_reason{reason="OOMKilled"}[10m]) == 1 + labels: + severity: critical diff --git a/kubernetes/main/apps/observability/kube-prometheus-stack/ks.yaml b/kubernetes/main/apps/observability/kube-prometheus-stack/ks.yaml new file mode 100755 index 000000000..60ee96561 --- /dev/null +++ b/kubernetes/main/apps/observability/kube-prometheus-stack/ks.yaml @@ -0,0 +1,22 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app kube-prometheus-stack + namespace: flux-system +spec: + targetNamespace: observability + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/observability/kube-prometheus-stack/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 15m diff --git a/kubernetes/main/apps/observability/kustomization.yaml b/kubernetes/main/apps/observability/kustomization.yaml new file mode 100755 index 000000000..e75753359 --- /dev/null +++ b/kubernetes/main/apps/observability/kustomization.yaml @@ -0,0 +1,14 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./grafana/ks.yaml + - ./kube-prometheus-stack/ks.yaml + - ./loki/ks.yaml + - ./prometheus-operator-crds/ks.yaml + - ./promtail/ks.yaml + - ./gatus/ks.yaml diff --git a/kubernetes/main/apps/observability/loki/app/helmrelease.yaml b/kubernetes/main/apps/observability/loki/app/helmrelease.yaml new file mode 100755 index 000000000..9a3ff9302 --- /dev/null +++ b/kubernetes/main/apps/observability/loki/app/helmrelease.yaml @@ -0,0 +1,83 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: loki +spec: + interval: 30m + timeout: 15m + chart: + spec: + chart: loki + version: 6.21.0 + sourceRef: + kind: HelmRepository + name: grafana + namespace: flux-system + install: + crds: Skip + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + crds: Skip + remediation: + strategy: rollback + retries: 3 + values: + deploymentMode: SingleBinary + loki: + auth_enabled: false + analytics: + reporting_enabled: false + server: + log_level: info + commonConfig: + replication_factor: 1 + compactor: + working_directory: /var/loki/compactor/retention + delete_request_store: filesystem + retention_enabled: true + ingester: + chunk_encoding: snappy + storage: + type: filesystem + schemaConfig: + configs: + - from: "2024-04-01" # quote + store: tsdb + object_store: filesystem + schema: v13 + index: + prefix: loki_index_ + period: 24h + limits_config: + retention_period: 14d + singleBinary: + replicas: 1 + persistence: + enabled: true + size: 50Gi + storageClass: openebs-zfs-128k + gateway: + replicas: 0 + backend: + replicas: 0 + read: + replicas: 0 + write: + replicas: 0 + chunksCache: + enabled: false + resultsCache: + enabled: false + lokiCanary: + enabled: false + test: + enabled: false + sidecar: + image: + repository: ghcr.io/kiwigrid/k8s-sidecar + 
rules: + searchNamespace: ALL diff --git a/kubernetes/main/apps/observability/loki/app/kustomization.yaml b/kubernetes/main/apps/observability/loki/app/kustomization.yaml new file mode 100755 index 000000000..17cbc72b2 --- /dev/null +++ b/kubernetes/main/apps/observability/loki/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/observability/loki/ks.yaml b/kubernetes/main/apps/observability/loki/ks.yaml new file mode 100755 index 000000000..2c1907079 --- /dev/null +++ b/kubernetes/main/apps/observability/loki/ks.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app loki + namespace: flux-system +spec: + targetNamespace: observability + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/observability/loki/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 15m diff --git a/kubernetes/main/apps/observability/namespace.yaml b/kubernetes/main/apps/observability/namespace.yaml new file mode 100755 index 000000000..cef355262 --- /dev/null +++ b/kubernetes/main/apps/observability/namespace.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: observability + labels: + kustomize.toolkit.fluxcd.io/prune: disabled +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: observability +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: observability +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/observability/prometheus-operator-crds/app/helmrelease.yaml b/kubernetes/main/apps/observability/prometheus-operator-crds/app/helmrelease.yaml new file mode 100755 index 000000000..8f33403cf --- /dev/null +++ b/kubernetes/main/apps/observability/prometheus-operator-crds/app/helmrelease.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: prometheus-operator-crds +spec: + interval: 30m + chart: + spec: + chart: prometheus-operator-crds + version: 16.0.1 + sourceRef: + kind: HelmRepository + name: prometheus-community + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 diff --git a/kubernetes/main/apps/observability/prometheus-operator-crds/app/kustomization.yaml 
b/kubernetes/main/apps/observability/prometheus-operator-crds/app/kustomization.yaml new file mode 100755 index 000000000..17cbc72b2 --- /dev/null +++ b/kubernetes/main/apps/observability/prometheus-operator-crds/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/observability/prometheus-operator-crds/ks.yaml b/kubernetes/main/apps/observability/prometheus-operator-crds/ks.yaml new file mode 100755 index 000000000..76d2ab437 --- /dev/null +++ b/kubernetes/main/apps/observability/prometheus-operator-crds/ks.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app prometheus-operator-crds + namespace: flux-system +spec: + targetNamespace: observability + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/observability/prometheus-operator-crds/app + prune: false # should never be deleted + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/observability/promtail/app/helmrelease.yaml b/kubernetes/main/apps/observability/promtail/app/helmrelease.yaml new file mode 100755 index 000000000..6489e5090 --- /dev/null +++ b/kubernetes/main/apps/observability/promtail/app/helmrelease.yaml @@ -0,0 +1,30 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: promtail +spec: + interval: 30m + chart: + spec: + chart: promtail + version: 6.16.6 + sourceRef: + kind: HelmRepository + name: grafana + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + fullnameOverride: promtail + config: + clients: + - url: http://loki-headless.observability.svc.cluster.local:3100/loki/api/v1/push + serviceMonitor: + enabled: true diff --git a/kubernetes/main/apps/observability/promtail/app/kustomization.yaml b/kubernetes/main/apps/observability/promtail/app/kustomization.yaml new file mode 100755 index 000000000..17cbc72b2 --- /dev/null +++ b/kubernetes/main/apps/observability/promtail/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/observability/promtail/ks.yaml b/kubernetes/main/apps/observability/promtail/ks.yaml new file mode 100755 index 000000000..01932a708 --- /dev/null +++ b/kubernetes/main/apps/observability/promtail/ks.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app promtail + namespace: flux-system +spec: + targetNamespace: observability + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/observability/promtail/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git
a/kubernetes/main/apps/system-upgrade/kustomization.yaml b/kubernetes/main/apps/system-upgrade/kustomization.yaml new file mode 100755 index 000000000..affe04660 --- /dev/null +++ b/kubernetes/main/apps/system-upgrade/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./system-upgrade-controller/ks.yaml diff --git a/kubernetes/main/apps/system-upgrade/namespace.yaml b/kubernetes/main/apps/system-upgrade/namespace.yaml new file mode 100755 index 000000000..cb902258a --- /dev/null +++ b/kubernetes/main/apps/system-upgrade/namespace.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: system-upgrade + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled + volsync.backube/privileged-movers: "true" +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: system-upgrade +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: system-upgrade +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/system-upgrade/system-upgrade-controller/app/helmrelease.yaml b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/app/helmrelease.yaml new file mode 100755 index 000000000..67b25eac8 --- /dev/null +++ b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/app/helmrelease.yaml @@ -0,0 +1,101 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app system-upgrade-controller +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + controllers: + system-upgrade-controller: + strategy: RollingUpdate + containers: + app: + image: + repository: docker.io/rancher/system-upgrade-controller + tag: v0.14.2@sha256:3cdbfdd90f814702cefb832fc4bdb09ea93865a4d06c6bafd019d1dc6a9f34c9 + env: + SYSTEM_UPGRADE_CONTROLLER_DEBUG: false + SYSTEM_UPGRADE_CONTROLLER_THREADS: 2 + SYSTEM_UPGRADE_JOB_ACTIVE_DEADLINE_SECONDS: 900 + SYSTEM_UPGRADE_JOB_BACKOFF_LIMIT: 99 + SYSTEM_UPGRADE_JOB_IMAGE_PULL_POLICY: IfNotPresent + SYSTEM_UPGRADE_JOB_KUBECTL_IMAGE: registry.k8s.io/kubectl:v1.31.3@sha256:f5735dae787c62a225536142b5eb0fbfd5515f7a80fcf2b3a5035401b681d1db + SYSTEM_UPGRADE_JOB_POD_REPLACEMENT_POLICY: Failed + SYSTEM_UPGRADE_JOB_PRIVILEGED: true + SYSTEM_UPGRADE_JOB_TTL_SECONDS_AFTER_FINISH: 900 + 
SYSTEM_UPGRADE_PLAN_POLLING_INTERVAL: 15m + SYSTEM_UPGRADE_CONTROLLER_NAME: *app + SYSTEM_UPGRADE_CONTROLLER_NAMESPACE: + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + seccompProfile: + type: RuntimeDefault + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + seccompProfile: { type: RuntimeDefault } + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + serviceAccount: + create: true + name: system-upgrade + persistence: + tmp: + type: emptyDir + etc-ssl: + type: hostPath + hostPath: /etc/ssl + hostPathType: DirectoryOrCreate + globalMounts: + - readOnly: true + etc-pki: + type: hostPath + hostPath: /etc/pki + hostPathType: DirectoryOrCreate + globalMounts: + - readOnly: true + etc-ca-certificates: + type: hostPath + hostPath: /etc/ca-certificates + hostPathType: DirectoryOrCreate + globalMounts: + - readOnly: true diff --git a/kubernetes/main/apps/system-upgrade/system-upgrade-controller/app/kustomization.yaml b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/app/kustomization.yaml new file mode 100755 index 000000000..10a8a8289 --- /dev/null +++ b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - helmrelease.yaml + - rbac.yaml diff --git a/kubernetes/main/apps/system-upgrade/system-upgrade-controller/app/rbac.yaml b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/app/rbac.yaml new file mode 100755 index 000000000..e9f4d789c --- /dev/null +++ b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/app/rbac.yaml @@ -0,0 +1,21 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system-upgrade +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: system-upgrade + namespace: system-upgrade +--- +apiVersion: talos.dev/v1alpha1 +kind: ServiceAccount +metadata: + name: talos +spec: + roles: + - os:admin diff --git a/kubernetes/main/apps/system-upgrade/system-upgrade-controller/ks.yaml b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/ks.yaml new file mode 100755 index 000000000..0b094ee35 --- /dev/null +++ b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/ks.yaml @@ -0,0 +1,49 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app system-upgrade-controller + namespace: flux-system +spec: + targetNamespace: system-upgrade + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/system-upgrade/system-upgrade-controller/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m +--- +# yaml-language-server: 
$schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app system-upgrade-controller-plans + namespace: flux-system +spec: + targetNamespace: system-upgrade + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: system-upgrade-controller + path: ./kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m + postBuild: + substitute: + TALOS_SCHEMATIC_ID: 101a114bc59d4d7bf92fac60cbf651afbfe84e0b3d0b84d5688e3d20bb43cf43 + # renovate: datasource=docker depName=ghcr.io/siderolabs/installer + TALOS_VERSION: v1.8.3 + # renovate: datasource=docker depName=ghcr.io/siderolabs/kubelet + KUBERNETES_VERSION: v1.31.3 diff --git a/kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans/kubernetes.yaml b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans/kubernetes.yaml new file mode 100755 index 000000000..ae8704eac --- /dev/null +++ b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans/kubernetes.yaml @@ -0,0 +1,45 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/upgrade.cattle.io/plan_v1.json +apiVersion: upgrade.cattle.io/v1 +kind: Plan +metadata: + name: kubernetes +spec: + version: ${KUBERNETES_VERSION} + serviceAccountName: system-upgrade + secrets: + - name: talos + path: /var/run/secrets/talos.dev + ignoreUpdates: true + concurrency: 1 + exclusive: true + nodeSelector: + matchExpressions: + - key: feature.node.kubernetes.io/system-os_release.ID + operator: In + values: ["talos"] + - key: node-role.kubernetes.io/control-plane + operator: Exists + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + prepare: &prepare + image: ghcr.io/siderolabs/talosctl:${TALOS_VERSION} + envs: + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + args: + - --nodes=$(NODE_IP) + - health + - --server=true + upgrade: + <<: *prepare + args: + - --nodes=$(NODE_IP) + - upgrade-k8s + - --to=$(SYSTEM_UPGRADE_PLAN_LATEST_VERSION) diff --git a/kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans/kustomization.yaml b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans/kustomization.yaml new file mode 100755 index 000000000..061d8ad0d --- /dev/null +++ b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./kubernetes.yaml + - ./talos.yaml diff --git a/kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans/talos.yaml b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans/talos.yaml new file mode 100755 index 000000000..8cbae8f10 --- /dev/null +++ b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans/talos.yaml @@ -0,0 +1,48 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/upgrade.cattle.io/plan_v1.json +apiVersion: upgrade.cattle.io/v1 +kind: Plan +metadata: + name: talos +spec: + version: ${TALOS_VERSION} + serviceAccountName: system-upgrade + secrets: + - name: talos + path: /var/run/secrets/talos.dev + ignoreUpdates: true + concurrency: 1 + exclusive: 
true + nodeSelector: + matchExpressions: + - key: feature.node.kubernetes.io/system-os_release.ID + operator: In + values: ["talos"] + - key: feature.node.kubernetes.io/system-os_release.VERSION_ID + operator: NotIn + values: ["${TALOS_VERSION}"] + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + prepare: &prepare + image: ghcr.io/siderolabs/talosctl:${TALOS_VERSION} + envs: + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + args: + - --nodes=$(NODE_IP) + - health + - --server=true + upgrade: + <<: *prepare + args: + - --nodes=$(NODE_IP) + - upgrade + - --image=factory.talos.dev/installer-secureboot/${TALOS_SCHEMATIC_ID}:$(SYSTEM_UPGRADE_PLAN_LATEST_VERSION) + - --preserve=true + - --wait=false diff --git a/kubernetes/main/apps/vault/kustomization.yaml b/kubernetes/main/apps/vault/kustomization.yaml new file mode 100755 index 000000000..3d21bd1cd --- /dev/null +++ b/kubernetes/main/apps/vault/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./vault/ks.yaml diff --git a/kubernetes/main/apps/vault/namespace.yaml b/kubernetes/main/apps/vault/namespace.yaml new file mode 100755 index 000000000..33be07ae2 --- /dev/null +++ b/kubernetes/main/apps/vault/namespace.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: vault diff --git a/kubernetes/main/apps/vault/vault/app/helmrelease.yaml b/kubernetes/main/apps/vault/vault/app/helmrelease.yaml new file mode 100755 index 000000000..d9a757719 --- /dev/null +++ b/kubernetes/main/apps/vault/vault/app/helmrelease.yaml @@ -0,0 +1,455 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app vault + namespace: vault +spec: + interval: 30m + chart: + spec: + chart: vault + version: 0.29.1 + sourceRef: + kind: HelmRepository + name: vault + namespace: flux-system + maxHistory: 3 + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + uninstall: + keepHistory: false + + values: + + global: + enabled: true + tlsDisable: false + serverTelemetry: + prometheusOperator: false + + injector: + enabled: false + + server: + enabled: true + image: + repository: "hashicorp/vault" + tag: "1.18.2" + pullPolicy: IfNotPresent + + updateStrategyType: "OnDelete" + + logLevel: "info" + logFormat: "json" + + resources: {} + + authDelegator: + enabled: true + + extraInitContainers: null + + extraContainers: null + + shareProcessNamespace: false + + extraArgs: "" + + # extraPorts is a list of extra ports. Specified as a YAML list. + # This is useful if you need to add additional ports to the statefulset in a dynamic way.
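+ # (Annotation: extraPorts is left at null below since the chart's statefulset presumably already exposes the API (8200) and cluster (8201) ports.)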
+ extraPorts: null + # - containerPort: 8300 + # name: http-monitoring + + # Used to define custom readinessProbe settings + readinessProbe: + enabled: true + port: 8200 + failureThreshold: 2 + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 3 + livenessProbe: + enabled: true + path: "/v1/sys/health?standbyok=true" + port: 8200 + failureThreshold: 2 + initialDelaySeconds: 60 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 3 + + terminationGracePeriodSeconds: 10 + + preStopSleepSeconds: 5 + + # Used to define commands to run after the pod is ready. + # This can be used to automate processes such as initialization + # or bootstrapping auth methods. + postStart: [] + # - /bin/sh + # - -c + # - /vault/userconfig/myscript/run.sh + + extraEnvironmentVars: + VAULT_TLSCERT: /vault/tls/tls.crt + VAULT_TLSKEY: /vault/tls/tls.key + + extraSecretEnvironmentVars: [] + + volumes: + - name: vault-tls + secret: + defaultMode: 420 + secretName: vault-tls + + volumeMounts: + - mountPath: /vault/tls + name: vault-tls + readOnly: true + + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/name: {{ template "vault.name" . }} + app.kubernetes.io/instance: "{{ .Release.Name }}" + component: server + topologyKey: kubernetes.io/hostname + + # Topology settings for server pods + # ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + # This should be either a multi-line string or YAML matching the topologySpreadConstraints array + # in a PodSpec. + topologySpreadConstraints: [] + + # Toleration Settings for server pods + # This should be either a multi-line string or YAML matching the Toleration array + # in a PodSpec. + tolerations: [] + + # nodeSelector labels for server pod assignment, formatted as a multi-line string or YAML map. + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: + # beta.kubernetes.io/arch: amd64 + nodeSelector: {} + + # Enables network policy for server pods + networkPolicy: + enabled: false + egress: [] + # egress: + # - to: + # - ipBlock: + # cidr: 10.0.0.0/24 + # ports: + # - protocol: TCP + # port: 443 + ingress: + - from: + - namespaceSelector: {} + ports: + - port: 8200 + protocol: TCP + - port: 8201 + protocol: TCP + + # Priority class for server pods + priorityClassName: "" + + # Extra labels to attach to the server pods + # This should be a YAML map of the labels to apply to the server pods + extraLabels: {} + + # Extra annotations to attach to the server pods + # This can either be YAML or a YAML-formatted multi-line templated string map + # of the annotations to apply to the server pods + annotations: {} + + # Add an annotation to the server configmap and the statefulset pods, + # vaultproject.io/config-checksum, that is a hash of the Vault configuration. + # This can be used together with an OnDelete deployment strategy to help + # identify which pods still need to be deleted during a deployment to pick up + # any configuration changes. + includeConfigAnnotation: false + + # Enables a headless service to be used by the Vault Statefulset + service: + enabled: false + # Enable or disable the vault-active service, which selects Vault pods that + # have labeled themselves as the cluster leader with `vault-active: "true"`. + active: + enabled: false + # Extra annotations for the service definition.
This can either be YAML or a + # YAML-formatted multi-line templated string map of the annotations to apply + # to the active service. + annotations: {} + # Enable or disable the vault-standby service, which selects Vault pods that + # have labeled themselves as a cluster follower with `vault-active: "false"`. + standby: + enabled: false + # Extra annotations for the service definition. This can either be YAML or a + # YAML-formatted multi-line templated string map of the annotations to apply + # to the standby service. + annotations: {} + # If enabled, the service selectors will include `app.kubernetes.io/instance: {{ .Release.Name }}` + # When disabled, services may select Vault pods not deployed from the chart. + # Does not affect the headless vault-internal service with `ClusterIP: None` + instanceSelector: + enabled: true + # clusterIP controls whether a Cluster IP address is attached to the + # Vault service within Kubernetes. By default, the Vault service will + # be given a Cluster IP address, set to None to disable. When disabled + # Kubernetes will create a "headless" service. Headless services can be + # used to communicate with pods directly through DNS instead of a round-robin + # load balancer. + # clusterIP: None + + # Configures the service type for the main Vault service. Can be ClusterIP + # or NodePort. + #type: ClusterIP + + # The IP family and IP families options are to set the behaviour in a dual-stack environment. + # Omitting these values will let the service fall back to whatever the CNI dictates the defaults + # should be. + # These are only supported for kubernetes versions >=1.23.0 + # + # Configures the service's supported IP family policy, can be either: + # SingleStack: Single-stack service. The control plane allocates a cluster IP for the Service, using the first configured service cluster IP range. + # PreferDualStack: Allocates IPv4 and IPv6 cluster IPs for the Service. + # RequireDualStack: Allocates Service .spec.ClusterIPs from both IPv4 and IPv6 address ranges. + ipFamilyPolicy: "" + + # Sets the families that should be supported and the order in which they should be applied to ClusterIP as well. + # Can be IPv4 and/or IPv6. + ipFamilies: [] + + # Do not wait for pods to be ready before including them in the services' + # targets. Does not apply to the headless service, which is used for + # cluster-internal communication. + publishNotReadyAddresses: true + + # The externalTrafficPolicy can be set to either Cluster or Local + # and is only valid for LoadBalancer and NodePort service types. + # The default value is Cluster. + # ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-traffic-policy + externalTrafficPolicy: Cluster + + # If type is set to "NodePort", a specific nodePort value can be configured, + # will be random if left blank. + #nodePort: 30000 + + # When HA mode is enabled + # If type is set to "NodePort", a specific nodePort value can be configured, + # will be random if left blank. + #activeNodePort: 30001 + + # When HA mode is enabled + # If type is set to "NodePort", a specific nodePort value can be configured, + # will be random if left blank. + #standbyNodePort: 30002 + + # Port on which Vault server is listening + port: 8200 + # Target port to which the service should be mapped to + targetPort: 8200 + # Extra annotations for the service definition. This can either be YAML or a + # YAML-formatted multi-line templated string map of the annotations to apply + # to the service. 
+ annotations: {} + + dataStorage: + enabled: true + size: 20Gi + mountPath: "/vault/data" + accessMode: ReadWriteOnce + storageClass: openebs-zfs-128k + + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain + + auditStorage: + enabled: true + size: 10Gi + mountPath: "/vault/audit" + accessMode: ReadWriteOnce + storageClass: openebs-zfs-128k + + standalone: + enabled: false + + ha: + enabled: true + replicas: 1 + raft: + enabled: true + setNodeId: false + config: | + + seal "awskms" { + region = "us-east-1" + access_key = "${AWS_ACCESS_KEY}" + secret_key = "${AWS_SECRET_KEY}" + kms_key_id = "${AWS_KEY_ID}" + } + + ui = true + api_addr = "https://vault.${PUBLIC_DOMAIN}:8200" + cluster_addr = "https://vault.${PUBLIC_DOMAIN}:8201" + + listener "tcp" { + tls_disable = 0 + address = "0.0.0.0:8200" + cluster_address = "0.0.0.0:8201" + tls_cert_file = "/vault/tls/tls.crt" + tls_key_file = "/vault/tls/tls.key" + tls_min_version = "tls13" + + # Enable unauthenticated metrics access (necessary for Prometheus Operator) + #telemetry { + # unauthenticated_metrics_access = "true" + #} + } + + storage "raft" { + path = "/vault/data" + } + + service_registration "kubernetes" {} + + disruptionBudget: + enabled: true + maxUnavailable: null + + serviceAccount: + create: true + name: "" + createSecret: false + serviceDiscovery: + enabled: true + + statefulSet: + annotations: {} + securityContext: + pod: {} + container: {} + + hostNetwork: false + + ui: + enabled: true + publishNotReadyAddresses: true + activeVaultPodOnly: false + serviceType: "LoadBalancer" + serviceNodePort: null + externalPort: 8200 + targetPort: 8200 + externalTrafficPolicy: Cluster + annotations: + external-dns.alpha.kubernetes.io/hostname: vault.${PUBLIC_DOMAIN} + lbipam.cilium.io/ips: 192.168.91.96 + + serverTelemetry: + # Enable support for the Prometheus Operator. If authorization is not set for authenticating + # to Vault's metrics endpoint, the following Vault server `telemetry{}` config must be included + # in the `listener "tcp"{}` stanza + # telemetry { + # unauthenticated_metrics_access = "true" + # } + # + # See the `standalone.config` for a more complete example of this. + # + # In addition, a top level `telemetry{}` stanza must also be included in the Vault configuration: + # + # example: + # telemetry { + # prometheus_retention_time = "30s" + # disable_hostname = true + # } + # + # Configuration for monitoring the Vault server. + serviceMonitor: + # The Prometheus operator *must* be installed before enabling this feature, + # if not the chart will fail to install due to missing CustomResourceDefinitions + # provided by the operator. + # + # Instructions on how to install the Helm chart can be found here: + # https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack + # More information can be found here: + # https://github.com/prometheus-operator/prometheus-operator + # https://github.com/prometheus-operator/kube-prometheus + + # Enable deployment of the Vault Server ServiceMonitor CustomResource. + enabled: false + + # Selector labels to add to the ServiceMonitor. + # When empty, defaults to: + # release: prometheus + selectors: {} + + # Interval at which Prometheus scrapes metrics + interval: 30s + + # Timeout for Prometheus scrapes + scrapeTimeout: 10s + + # tlsConfig used for scraping the Vault metrics API.
+ # See API reference: https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.TLSConfig + # example: + # tlsConfig: + # ca: + # secret: + # name: vault-metrics-client + # key: ca.crt + tlsConfig: {} + + # authorization used for scraping the Vault metrics API. + # See API reference: https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.SafeAuthorization + # example: + # authorization: + # credentials: + # name: vault-metrics-client + # key: token + authorization: {} + + prometheusRules: + # The Prometheus operator *must* be installed before enabling this feature, + # if not the chart will fail to install due to missing CustomResourceDefinitions + # provided by the operator. + + # Deploy the PrometheusRule custom resource for AlertManager based alerts. + # Requires that AlertManager is properly deployed. + enabled: false + + # Selector labels to add to the PrometheusRules. + # When empty, defaults to: + # release: prometheus + selectors: {} + + # Some example rules. + rules: [] + # - alert: vault-HighResponseTime + # annotations: + # message: The response time of Vault is over 500ms on average over the last 5 minutes. + # expr: vault_core_handle_request{quantile="0.5", namespace="mynamespace"} > 500 + # for: 5m + # labels: + # severity: warning + # - alert: vault-HighResponseTime + # annotations: + # message: The response time of Vault is over 1s on average over the last 5 minutes. + # expr: vault_core_handle_request{quantile="0.5", namespace="mynamespace"} > 1000 + # for: 5m + # labels: + # severity: critical \ No newline at end of file diff --git a/kubernetes/main/apps/vault/vault/app/kustomization.yaml b/kubernetes/main/apps/vault/vault/app/kustomization.yaml new file mode 100755 index 000000000..d7a5e9a07 --- /dev/null +++ b/kubernetes/main/apps/vault/vault/app/kustomization.yaml @@ -0,0 +1,15 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml +configMapGenerator: + - name: vault-gatus-ep + options: + labels: + gatus.io/enabled: "true" + files: + - config.yaml=./resources/gatus-ep.yaml +generatorOptions: + disableNameSuffixHash: true diff --git a/kubernetes/main/apps/vault/vault/app/resources/gatus-ep.yaml b/kubernetes/main/apps/vault/vault/app/resources/gatus-ep.yaml new file mode 100755 index 000000000..7eacb9d5d --- /dev/null +++ b/kubernetes/main/apps/vault/vault/app/resources/gatus-ep.yaml @@ -0,0 +1,16 @@ +endpoints: + - name: "HashiCorp Vault" + group: guarded + url: "https://vault.${PUBLIC_DOMAIN}:8200/v1/sys/health" + interval: 1m + ui: + hide-hostname: true + hide-url: true + client: + dns-resolver: tcp://172.17.0.10:53 + conditions: + - "[STATUS] == 200" + - "[BODY].initialized == true" + - "[BODY].sealed == false" + alerts: + - type: pushover diff --git a/kubernetes/main/apps/vault/vault/ks.yaml b/kubernetes/main/apps/vault/vault/ks.yaml new file mode 100755 index 000000000..1a9df2cc4 --- /dev/null +++ b/kubernetes/main/apps/vault/vault/ks.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app vault + namespace: flux-system +spec: + targetNamespace: vault + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/vault/vault/app + prune: true + sourceRef: + kind: GitRepository + name: 
home-kubernetes + wait: false + interval: 30m + timeout: 5m + postBuild: + substitute: + APP: *app diff --git a/kubernetes/main/apps/vaultwarden/app/externalsecret.yaml b/kubernetes/main/apps/vaultwarden/app/externalsecret.yaml new file mode 100644 index 000000000..ec762d27a --- /dev/null +++ b/kubernetes/main/apps/vaultwarden/app/externalsecret.yaml @@ -0,0 +1,32 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret vaultwarden-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + admin-token: "{{ .VAULTWARDEN_ADMIN_TOKEN }}" + VAULTWARDEN_DATABASE_URI: "{{ .VAULTWARDEN_DATABASE_URI }}" + smtp-user: "{{ .SMTP_USERNAME }}" + smtp-password: "{{ .SMTP_VAULTWARDEN_PASS }}" + SMTP_SENDER: "{{ .SMTP_SENDER }}" + INIT_POSTGRES_DBNAME: vaultwarden + INIT_POSTGRES_HOST: postgres17-rw.database.svc.cluster.local + INIT_POSTGRES_USER: "{{ .VAULTWARDEN_POSTGRESS_USER }}" + INIT_POSTGRES_PASS: "{{ .VAULTWARDEN_POSTGRES_PASS }}" + INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}" + dataFrom: + - extract: + key: secrets/vaultwarden + - extract: + key: secrets/Brevo + - extract: + key: secrets/cloudnative-pg diff --git a/kubernetes/main/apps/vaultwarden/app/helmrelease.yaml b/kubernetes/main/apps/vaultwarden/app/helmrelease.yaml new file mode 100644 index 000000000..7d3b6bbfa --- /dev/null +++ b/kubernetes/main/apps/vaultwarden/app/helmrelease.yaml @@ -0,0 +1,228 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: vaultwarden +spec: + interval: 30m + chart: + spec: + chart: vaultwarden + version: 1.2.4 + sourceRef: + kind: HelmRepository + name: vaultwarden + namespace: flux-system + maxHistory: 3 + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + uninstall: + keepHistory: false + values: + replicaCount: 1 + database: + type: postgresql + existingSecret: &secret vaultwarden-secret + existingSecretKey: VAULTWARDEN_DATABASE_URI + vaultwarden: + domain: https://vaultwarden.${PUBLIC_DOMAIN} + allowSignups: true + signupDomains: + - ${PUBLIC_DOMAIN} + verifySignup: true + requireEmail: false + emailAttempts: 3 + emailTokenExpiration: 600 + allowInvitation: true + invitationExpiration: 120 + passwordHintsAllowed: true + showPasswordHint: false + defaultInviteName: DarkfellaNET + enableWebVault: true + enableSends: true + orgCreationUsers: all + ## Limit attachment disk usage per organization. + #attachmentLimitOrg: + ## Limit attachment disk usage per user. + #attachmentLimitUser: + ## Limit send disk usage per user. + #sendLimitUser: + ## HaveIBeenPwned API Key. Can be purchased at https://haveibeenpwned.com/API/Key. + #hibpApiKey: + ## Number of days to auto-delete trashed items. By default items are not auto-deleted. + #autoDeleteDays: + ## Organization event logging + #orgEvents: false + ## Organization event retention. Leave empty to not delete. + #orgEventsRetention: "" + ## Allow users to change their email. + #emailChangeAllowed: true + ## Map of custom environment variables. Use carefully.
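+ ## Annotation: in the commented example below, a plain string presumably becomes a literal env value, while the secretKeyRef/configMapKeyRef forms are rendered as valueFrom sources by the chart.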
+ #extraEnv: + # IP_HEADER: CF-Connecting-IP + # ALLOWED_IFRAME_ANCESTORS: myintranet.local + # COOL_VARIABLE: + # secretKeyRef: + # name: my-secret + # key: my-secret-key + # ANOTHER_VARIABLE: + # configMapKeyRef: + # name: my-config-map + # key: my-config-map-key + + admin: + enabled: true + disableAdminToken: false + existingSecret: *secret + + emergency: + enabled: true + ## Schedule to send expiration reminders to emergency access grantors. Cron schedule format. + #reminder: "0 3 * * * *" + ## Schedule to grant emergency access requests that have met the required wait time. Cron schedule format. + #timeout: "0 3 * * * *" + + # Enable SMTP. https://github.com/dani-garcia/vaultwarden/wiki/SMTP-configuration + smtp: + enabled: true + host: smtp-relay.brevo.com + from: noreply@${PUBLIC_DOMAIN} + #fromName: "" + security: starttls + port: 587 + authMechanism: Login + timeout: 15 + invalidHostname: false + invalidCertificate: false + existingSecret: *secret + embedImages: true + + log: + file: "" + level: "" + push: + enabled: false + + service: + type: ClusterIP + httpPort: 80 + externalTrafficPolicy: Cluster + + ingress: + enabled: true + className: external + host: &host vaultwarden.${PUBLIC_DOMAIN} + annotations: + nginx.ingress.kubernetes.io/configuration-snippet: | + more_set_headers "access-control-allow-origin https://vaultwarden.${PUBLIC_DOMAIN}"; + external-dns.alpha.kubernetes.io/target: external.${PUBLIC_DOMAIN} + nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth" + nginx.ingress.kubernetes.io/auth-signin: "https://$host/oauth2/start?rd=$escaped_request_uri" + nginx.ingress.kubernetes.io/auth-snippet: | + # Bypass authentication for specific paths + if ($request_uri ~* "^/$") { + return 200; + } + if ($request_uri ~* "^/.*\.js") { + return 200; + } + if ($request_uri ~* "^/#/.*") { + return 200; + } + if ($request_uri ~* "^/#/login") { + return 200; + } + if ($request_uri ~* "^/#/2fa") { + return 200; + } + if ($request_uri ~* "^/#/vault") { + return 200; + } + if ($request_uri ~* "^/api/.*") { + return 200; + } + if ($request_uri ~* "^/images/.*") { + return 200; + } + if ($request_uri ~* "^/identity/.*") { + return 200; + } + if ($request_uri ~* "^/app/.*") { + return 200; + } + if ($request_uri ~* "^/locales/.*") { + return 200; + } + if ($request_uri ~* "^/alive") { + return 200; + } + cert-manager.io/cluster-issuer: zerossl-prod + cert-manager.io/private-key-rotation-policy: Always + cert-manager.io/private-key-algorithm: ECDSA + cert-manager.io/private-key-size: "384" + tls: + - secretName: vaultwarden-tls + hosts: + - *host + + persistence: + enabled: true + size: 1Gi + accessMode: ReadWriteOnce + storageClass: openebs-zfs-128k + + image: + pullPolicy: IfNotPresent + repository: vaultwarden/server + + nameOverride: "" + fullnameOverride: "" + + serviceAccount: + create: false + + deploymentAnnotations: {} + probes: {} + #liveness: + #timeoutSeconds: 1 + #periodSeconds: 10 + #successThreshold: 1 + #failureThreshold: 3 + #readiness: + #timeoutSeconds: 1 + #periodSeconds: 10 + #successThreshold: 1 + #failureThreshold: 3 + + sidecars: [] + # - name: sidecar + # image: sidecarimage:1.2.3 + # env: + # - name: SIDECAR_END + # value: "sidecar" + # volumeMounts: + # - name: vaultwarden + # mountPath: /data + + podSecurityContext: + fsGroup: 65534 + + securityContext: + runAsUser: 65534 + runAsGroup: 65534 + + strategy: {} + + resources: {} + + nodeSelector: {} + + tolerations: [] + + affinity: {} diff --git a/kubernetes/main/apps/vaultwarden/app/ks.yaml 
b/kubernetes/main/apps/vaultwarden/app/ks.yaml new file mode 100644 index 000000000..828c0283f --- /dev/null +++ b/kubernetes/main/apps/vaultwarden/app/ks.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app vaultwarden + namespace: flux-system +spec: + dependsOn: + - name: external-secrets-stores + - name: cloudnative-pg-cluster + targetNamespace: vaultwarden + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/vaultwarden/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/vaultwarden/app/kustomization.yaml b/kubernetes/main/apps/vaultwarden/app/kustomization.yaml new file mode 100644 index 000000000..2895d5898 --- /dev/null +++ b/kubernetes/main/apps/vaultwarden/app/kustomization.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml +configMapGenerator: + - name: vaultwarden-gatus-ep + options: + labels: + gatus.io/enabled: "true" + files: + - config.yaml=./resources/gatus-ep.yaml +generatorOptions: + disableNameSuffixHash: true diff --git a/kubernetes/main/apps/vaultwarden/app/resources/gatus-ep.yaml b/kubernetes/main/apps/vaultwarden/app/resources/gatus-ep.yaml new file mode 100644 index 000000000..bd1b079cc --- /dev/null +++ b/kubernetes/main/apps/vaultwarden/app/resources/gatus-ep.yaml @@ -0,0 +1,11 @@ +endpoints: + - name: "Vaultwarden" + group: external + url: "https://vaultwarden.${PUBLIC_DOMAIN}/alive" + interval: 1m + client: + dns-resolver: tcp://1.1.1.1:53 + conditions: + - "[STATUS] == 200" + alerts: + - type: pushover diff --git a/kubernetes/main/apps/vaultwarden/kustomization.yaml b/kubernetes/main/apps/vaultwarden/kustomization.yaml new file mode 100755 index 000000000..513caea04 --- /dev/null +++ b/kubernetes/main/apps/vaultwarden/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./app/ks.yaml diff --git a/kubernetes/main/apps/vaultwarden/namespace.yaml b/kubernetes/main/apps/vaultwarden/namespace.yaml new file mode 100755 index 000000000..9201211cd --- /dev/null +++ b/kubernetes/main/apps/vaultwarden/namespace.yaml @@ -0,0 +1,35 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: vaultwarden +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: vaultwarden +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: vaultwarden +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - 
"dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/virtualization/cdi/app/cr.yaml b/kubernetes/main/apps/virtualization/cdi/app/cr.yaml new file mode 100644 index 000000000..ad94c9631 --- /dev/null +++ b/kubernetes/main/apps/virtualization/cdi/app/cr.yaml @@ -0,0 +1,15 @@ +apiVersion: cdi.kubevirt.io/v1beta1 +kind: CDI +metadata: + name: cdi + namespace: cdi +spec: + config: + scratchSpaceStorageClass: openebs-zfs-1m + podResourceRequirements: + requests: + cpu: "100m" + memory: "60M" + limits: + cpu: "750m" + memory: "2Gi" diff --git a/kubernetes/main/apps/virtualization/cdi/app/kustomization.yaml b/kubernetes/main/apps/virtualization/cdi/app/kustomization.yaml new file mode 100644 index 000000000..03b9d17d0 --- /dev/null +++ b/kubernetes/main/apps/virtualization/cdi/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - https://github.com/kubevirt/containerized-data-importer/releases/download/v1.60.4/cdi-operator.yaml + - ./cr.yaml diff --git a/kubernetes/main/apps/virtualization/cdi/ks.yaml b/kubernetes/main/apps/virtualization/cdi/ks.yaml new file mode 100644 index 000000000..f02084ad6 --- /dev/null +++ b/kubernetes/main/apps/virtualization/cdi/ks.yaml @@ -0,0 +1,19 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cdi + namespace: flux-system +spec: + targetNamespace: cdi + dependsOn: + - name: kubevirt + path: ./kubernetes/main/apps/virtualization/cdi/app + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/virtualization/kubevirt-manager/app/ingress.yaml b/kubernetes/main/apps/virtualization/kubevirt-manager/app/ingress.yaml new file mode 100644 index 000000000..e69de29bb diff --git a/kubernetes/main/apps/virtualization/kubevirt-manager/app/kustomization.yaml b/kubernetes/main/apps/virtualization/kubevirt-manager/app/kustomization.yaml new file mode 100644 index 000000000..fe47efc57 --- /dev/null +++ b/kubernetes/main/apps/virtualization/kubevirt-manager/app/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - https://raw.githubusercontent.com/kubevirt-manager/kubevirt-manager/main/kubernetes/bundled.yaml diff --git a/kubernetes/main/apps/virtualization/kubevirt-manager/ks.yaml b/kubernetes/main/apps/virtualization/kubevirt-manager/ks.yaml new file mode 100644 index 000000000..fa94e803d --- /dev/null +++ b/kubernetes/main/apps/virtualization/kubevirt-manager/ks.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: kubevirt-manager + namespace: flux-system +spec: + targetNamespace: kubevirt-manager + dependsOn: + - name: kubevirt + - name: cdi + path: ./kubernetes/main/apps/virtualization/kubevirt-manager/app + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/virtualization/kubevirt/app/cr.yaml b/kubernetes/main/apps/virtualization/kubevirt/app/cr.yaml new file mode 100644 index 000000000..559987bfe --- /dev/null +++ b/kubernetes/main/apps/virtualization/kubevirt/app/cr.yaml @@ 
-0,0 +1,17 @@ +--- +apiVersion: kubevirt.io/v1 +kind: KubeVirt +metadata: + name: kubevirt +spec: + configuration: + developerConfiguration: + featureGates: + - HostDevices + - DataVolumes + permittedHostDevices: + pciHostDevices: + - pciVendorSelector: 1000:0097 + resourceName: devices.kubevirt.io/sas + - pciVendorSelector: 8086:2701 + resourceName: devices.kubevirt.io/nvme diff --git a/kubernetes/main/apps/virtualization/kubevirt/app/kustomization.yaml b/kubernetes/main/apps/virtualization/kubevirt/app/kustomization.yaml new file mode 100644 index 000000000..c41ee447f --- /dev/null +++ b/kubernetes/main/apps/virtualization/kubevirt/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - https://github.com/kubevirt/kubevirt/releases/download/v1.4.0/kubevirt-operator.yaml + - ./cr.yaml diff --git a/kubernetes/main/apps/virtualization/kubevirt/ks.yaml b/kubernetes/main/apps/virtualization/kubevirt/ks.yaml new file mode 100644 index 000000000..67523fde6 --- /dev/null +++ b/kubernetes/main/apps/virtualization/kubevirt/ks.yaml @@ -0,0 +1,17 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: kubevirt + namespace: flux-system +spec: + targetNamespace: kubevirt + path: ./kubernetes/main/apps/virtualization/kubevirt/app + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/virtualization/kustomization.yaml b/kubernetes/main/apps/virtualization/kustomization.yaml new file mode 100644 index 000000000..f41616836 --- /dev/null +++ b/kubernetes/main/apps/virtualization/kustomization.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./cdi/ks.yaml + - ./kubevirt/ks.yaml + - ./kubevirt-manager/ks.yaml + - ./virtual-machines/truenas-scale/ks.yaml + - ./virtual-machines/windows-server/ks.yaml diff --git a/kubernetes/main/apps/virtualization/namespace.yaml b/kubernetes/main/apps/virtualization/namespace.yaml new file mode 100644 index 000000000..259caa6bd --- /dev/null +++ b/kubernetes/main/apps/virtualization/namespace.yaml @@ -0,0 +1,28 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: virtualization + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled +--- +apiVersion: v1 +kind: Namespace +metadata: + name: kubevirt + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled +--- +apiVersion: v1 +kind: Namespace +metadata: + name: kubevirt-manager + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled +--- +apiVersion: v1 +kind: Namespace +metadata: + name: cdi + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/main/apps/virtualization/virtual-machines/truenas-scale/app/kustomization.yaml b/kubernetes/main/apps/virtualization/virtual-machines/truenas-scale/app/kustomization.yaml new file mode 100644 index 000000000..59b5dfbf2 --- /dev/null +++ b/kubernetes/main/apps/virtualization/virtual-machines/truenas-scale/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./pvc.yaml + - ./virtualmachine.yaml diff --git a/kubernetes/main/apps/virtualization/virtual-machines/truenas-scale/app/pvc.yaml b/kubernetes/main/apps/virtualization/virtual-machines/truenas-scale/app/pvc.yaml new 
file mode 100644 index 000000000..4af31dc9a --- /dev/null +++ b/kubernetes/main/apps/virtualization/virtual-machines/truenas-scale/app/pvc.yaml @@ -0,0 +1,28 @@ +--- +apiVersion: cdi.kubevirt.io/v1beta1 +kind: DataVolume +metadata: + name: truenas-scale-iso +spec: + source: + http: + url: "https://download.sys.truenas.net/TrueNAS-SCALE-ElectricEel/24.10.0.2/TrueNAS-SCALE-24.10.0.2.iso" + storage: + resources: + requests: + storage: "2Gi" + storageClassName: "openebs-zfs-128k" + accessModes: + - ReadWriteOnce +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: truenas-scale-os-disk +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 32Gi + storageClassName: openebs-zfs-128k diff --git a/kubernetes/main/apps/virtualization/virtual-machines/truenas-scale/app/virtualmachine.yaml b/kubernetes/main/apps/virtualization/virtual-machines/truenas-scale/app/virtualmachine.yaml new file mode 100644 index 000000000..779e4064d --- /dev/null +++ b/kubernetes/main/apps/virtualization/virtual-machines/truenas-scale/app/virtualmachine.yaml @@ -0,0 +1,68 @@ +apiVersion: kubevirt.io/v1 +kind: VirtualMachine +metadata: + name: truenas-scale +spec: + running: true + template: + metadata: + labels: + kubevirt.io/domain: truenas-scale + spec: + networks: + - name: bridge-1 + multus: + default: true + networkName: kube-system/bridge-truenas-1 + - name: bridge-2 + multus: + networkName: kube-system/bridge-truenas-2 + domain: + clock: + utc: {} + cpu: + cores: 4 + model: host-passthrough + features: + acpi: {} + apic: {} + smm: + enabled: false + firmware: + bootloader: + efi: + secureBoot: false + devices: + hostDevices: + - deviceName: devices.kubevirt.io/nvme + name: intel-optane + - deviceName: devices.kubevirt.io/sas + name: sas + interfaces: + - name: bridge-1 + bridge: {} + model: virtio + macAddress: 00:16:3e:bc:da:12 + - name: bridge-2 + bridge: {} + model: virtio + macAddress: 52:54:00:fa:3d:88 + disks: + - disk: + bus: virtio + name: truenas-scale-os-disk + - cdrom: + bus: sata + name: truenas-scale-iso + machine: + type: q35 + resources: + requests: + memory: 48G + volumes: + - name: truenas-scale-os-disk + persistentVolumeClaim: + claimName: truenas-scale-os-disk + - name: truenas-scale-iso + persistentVolumeClaim: + claimName: truenas-scale-iso diff --git a/kubernetes/main/apps/virtualization/virtual-machines/truenas-scale/ks.yaml b/kubernetes/main/apps/virtualization/virtual-machines/truenas-scale/ks.yaml new file mode 100644 index 000000000..3f60fe0c6 --- /dev/null +++ b/kubernetes/main/apps/virtualization/virtual-machines/truenas-scale/ks.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: truenas-scale + namespace: flux-system +spec: + targetNamespace: virtualization + dependsOn: + - name: kubevirt + - name: cdi + path: ./kubernetes/main/apps/virtualization/virtual-machines/truenas-scale/app + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/virtualization/virtual-machines/windows-server/app/kustomization.yaml b/kubernetes/main/apps/virtualization/virtual-machines/windows-server/app/kustomization.yaml new file mode 100644 index 000000000..59b5dfbf2 --- /dev/null +++ b/kubernetes/main/apps/virtualization/virtual-machines/windows-server/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- 
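The truenas-scale VirtualMachine above consumes the two PCI resources whitelisted via pciVendorSelector in the KubeVirt CR (devices.kubevirt.io/sas and devices.kubevirt.io/nvme) and binds its NICs to the Multus networks kube-system/bridge-truenas-1 and -2. Those NetworkAttachmentDefinitions are not part of this commit; with KubeVirt's bridge binding they would typically look like the following sketch (the CNI plugin type and host bridge name are assumptions):

apiVersion: k8s.cni.cncf.io/v1
kind: NetworkAttachmentDefinition
metadata:
  name: bridge-truenas-1
  namespace: kube-system
spec:
  config: |
    {
      "cniVersion": "0.3.1",
      "name": "bridge-truenas-1",
      "type": "bridge",
      "bridge": "br0",
      "ipam": {}
    }

The empty ipam block leaves addressing entirely to the guest, which fits a TrueNAS VM that manages its own network configuration.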
+apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./pvc.yaml + - ./virtualmachine.yaml diff --git a/kubernetes/main/apps/virtualization/virtual-machines/windows-server/app/pvc.yaml b/kubernetes/main/apps/virtualization/virtual-machines/windows-server/app/pvc.yaml new file mode 100644 index 000000000..250acf8da --- /dev/null +++ b/kubernetes/main/apps/virtualization/virtual-machines/windows-server/app/pvc.yaml @@ -0,0 +1,29 @@ +--- +apiVersion: cdi.kubevirt.io/v1beta1 +kind: DataVolume +metadata: + name: windows-server-iso +spec: + source: + http: + url: "https://go.microsoft.com/fwlink/p/?LinkID=2195280&clcid=0x409&culture=en-us&country=US" + storage: + resources: + requests: + storage: "5Gi" + storageClassName: "openebs-zfs-128k" + accessModes: + - ReadWriteOnce + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: windows-server-os-disk +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 64Gi + storageClassName: openebs-zfs-128k diff --git a/kubernetes/main/apps/virtualization/virtual-machines/windows-server/app/virtualmachine.yaml b/kubernetes/main/apps/virtualization/virtual-machines/windows-server/app/virtualmachine.yaml new file mode 100644 index 000000000..4c761a4a0 --- /dev/null +++ b/kubernetes/main/apps/virtualization/virtual-machines/windows-server/app/virtualmachine.yaml @@ -0,0 +1,58 @@ +apiVersion: kubevirt.io/v1 +kind: VirtualMachine +metadata: + name: windows-server-2022 +spec: + running: true + template: + metadata: + labels: + kubevirt.io/domain: windows-server-2022 + spec: + networks: + - name: bridge + multus: + default: true + networkName: kube-system/bridge-windows-server + domain: + clock: + timezone: Europe/Sofia + timer: {} + cpu: + cores: 2 + model: host-passthrough + features: + acpi: {} + apic: {} + smm: + enabled: true + firmware: + bootloader: + efi: {} + uuid: 43a838b8-9c2f-41ef-84b0-2370fd097a97 + devices: + interfaces: + - name: bridge + bridge: {} + model: virtio + macAddress: 52:54:00:ab:cd:ef + tpm: {} + disks: + - cdrom: + bus: sata + name: windows-server-iso + - disk: + bus: sata + name: windows-server-os-disk + machine: + type: q35 + resources: + requests: + memory: 4G + volumes: + - name: windows-server-iso + persistentVolumeClaim: + claimName: windows-server-iso + - name: windows-server-os-disk + persistentVolumeClaim: + claimName: windows-server-os-disk diff --git a/kubernetes/main/apps/virtualization/virtual-machines/windows-server/ks.yaml b/kubernetes/main/apps/virtualization/virtual-machines/windows-server/ks.yaml new file mode 100644 index 000000000..5be152f09 --- /dev/null +++ b/kubernetes/main/apps/virtualization/virtual-machines/windows-server/ks.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: windows-server + namespace: flux-system +spec: + targetNamespace: virtualization + dependsOn: + - name: kubevirt + - name: cdi + path: ./kubernetes/main/apps/virtualization/virtual-machines/windows-server/app + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/volsync-system/kustomization.yaml b/kubernetes/main/apps/volsync-system/kustomization.yaml new file mode 100755 index 000000000..2270ac3eb --- /dev/null +++ b/kubernetes/main/apps/volsync-system/kustomization.yaml @@ -0,0 +1,9 @@ +--- +#
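The Windows VM above deliberately puts both the installer ISO and the OS disk on the sata bus, which the Windows installer can drive out of the box; the virtio NIC, by contrast, only works once the virtio-win guest drivers are installed. A common pattern is to attach the driver bundle as a second cdrom backed by a containerDisk; a sketch of the extra disk/volume entries (the image reference is an assumption, not part of this commit):

          disks:
            # ...existing cdrom and disk entries as above...
            - cdrom:
                bus: sata
                name: virtio-drivers
      volumes:
        # ...existing volumes as above...
        - name: virtio-drivers
          containerDisk:
            image: quay.io/kubevirt/virtio-container-disk  # assumed registry/image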
yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./volsync/ks.yaml diff --git a/kubernetes/main/apps/volsync-system/namespace.yaml b/kubernetes/main/apps/volsync-system/namespace.yaml new file mode 100755 index 000000000..082ad6dac --- /dev/null +++ b/kubernetes/main/apps/volsync-system/namespace.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: volsync-system + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled + volsync.backube/privileged-movers: "true" +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: volsync-system +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: volsync-system +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/volsync-system/volsync/app/helmrelease.yaml b/kubernetes/main/apps/volsync-system/volsync/app/helmrelease.yaml new file mode 100755 index 000000000..8cbbad6ff --- /dev/null +++ b/kubernetes/main/apps/volsync-system/volsync/app/helmrelease.yaml @@ -0,0 +1,28 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: volsync +spec: + interval: 30m + chart: + spec: + chart: volsync + version: 0.11.0 + sourceRef: + kind: HelmRepository + name: backube + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + manageCRDs: true + metrics: + disableAuth: true diff --git a/kubernetes/main/apps/volsync-system/volsync/app/kustomization.yaml b/kubernetes/main/apps/volsync-system/volsync/app/kustomization.yaml new file mode 100755 index 000000000..5e0988437 --- /dev/null +++ b/kubernetes/main/apps/volsync-system/volsync/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml + - ./prometheusrule.yaml diff --git a/kubernetes/main/apps/volsync-system/volsync/app/prometheusrule.yaml b/kubernetes/main/apps/volsync-system/volsync/app/prometheusrule.yaml new file mode 100755 index 000000000..880d67388 --- /dev/null +++ b/kubernetes/main/apps/volsync-system/volsync/app/prometheusrule.yaml @@ -0,0 +1,28 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/monitoring.coreos.com/prometheusrule_v1.json +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: volsync +spec: + groups: + - name: volsync.rules + rules: + - alert: VolSyncComponentAbsent + annotations: + summary: VolSync 
component has disappeared from Prometheus target discovery. + expr: | + absent(up{job="volsync-metrics"}) + for: 15m + labels: + severity: critical + - alert: VolSyncVolumeOutOfSync + annotations: + summary: >- + {{ $labels.obj_namespace }}/{{ $labels.obj_name }} volume + is out of sync. + expr: | + volsync_volume_out_of_sync == 1 + for: 15m + labels: + severity: critical diff --git a/kubernetes/main/apps/volsync-system/volsync/ks.yaml b/kubernetes/main/apps/volsync-system/volsync/ks.yaml new file mode 100755 index 000000000..5d2093e89 --- /dev/null +++ b/kubernetes/main/apps/volsync-system/volsync/ks.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app volsync + namespace: flux-system +spec: + targetNamespace: volsync-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/volsync-system/volsync/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/zfs/kustomization.yaml b/kubernetes/main/apps/zfs/kustomization.yaml new file mode 100644 index 000000000..fbe3406d2 --- /dev/null +++ b/kubernetes/main/apps/zfs/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./zfs-scrubber/ks.yaml diff --git a/kubernetes/main/apps/zfs/namespace.yaml b/kubernetes/main/apps/zfs/namespace.yaml new file mode 100644 index 000000000..1ad04b86d --- /dev/null +++ b/kubernetes/main/apps/zfs/namespace.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: zfs + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled + volsync.backube/privileged-movers: "true" +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: zfs +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: zfs +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/zfs/zfs-scrubber/app/externalsecret.yaml b/kubernetes/main/apps/zfs/zfs-scrubber/app/externalsecret.yaml new file mode 100644 index 000000000..913adc960 --- /dev/null +++ b/kubernetes/main/apps/zfs/zfs-scrubber/app/externalsecret.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret zfs-scrubber-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + PUSHOVER_USER_KEY: "{{ .PUSHOVER_USER_KEY }}" + PUSHOVER_API_TOKEN: "{{ .SCRUBBER_PUSHOVER_TOKEN }}" + 
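The VolSync chart above only installs the operator; the volsync_volume_out_of_sync alert then fires per replication object, which applications declare themselves. A minimal restic-based ReplicationSource, as a sketch of what the operator reconciles (name, PVC, schedule and Secret are hypothetical):

apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
  name: vaultwarden            # hypothetical
  namespace: vaultwarden
spec:
  sourcePVC: vaultwarden       # PVC to back up
  trigger:
    schedule: "0 * * * *"      # hourly
  restic:
    repository: vaultwarden-volsync-secret   # Secret holding restic repo URL + credentials (assumed)
    copyMethod: Snapshot
    pruneIntervalDays: 7
    retain:
      daily: 7

The volsync.backube/privileged-movers: "true" annotation on the namespaces above is what allows the mover pods to run privileged when the storage requires it.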
dataFrom: + - extract: + key: secrets/pushover diff --git a/kubernetes/main/apps/zfs/zfs-scrubber/app/helmrelease.yaml b/kubernetes/main/apps/zfs/zfs-scrubber/app/helmrelease.yaml new file mode 100644 index 000000000..84fcea53e --- /dev/null +++ b/kubernetes/main/apps/zfs/zfs-scrubber/app/helmrelease.yaml @@ -0,0 +1,56 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: zfs-scrubber +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + + values: + controllers: + zfs-scrubber: + type: cronjob + cronjob: + schedule: "0 0 1,15 * *" + successfulJobsHistory: 1 + failedJobsHistory: 1 + concurrencyPolicy: Forbid + timeZone: Europe/Sofia + backoffLimit: 0 + containers: + app: + image: + repository: ghcr.io/heavybullets8/zfs-scrubber + tag: 1.0.6@sha256:548a7e1a55088ab2d4738a38708104b264b4401e098348594daa96cad58402bd + env: + ZFS_POOL: "ssd_pool" + PUSHOVER_NOTIFICATION: true + TALOS_VERSION: ${TALOS_VERSION} + envFrom: + - secretRef: + name: zfs-scrubber-secret + securityContext: + privileged: true + + persistence: + dev: + type: hostPath + hostPath: /dev/zfs + globalMounts: + - path: /dev/zfs diff --git a/kubernetes/main/apps/zfs/zfs-scrubber/app/kustomization.yaml b/kubernetes/main/apps/zfs/zfs-scrubber/app/kustomization.yaml new file mode 100644 index 000000000..4eed917b9 --- /dev/null +++ b/kubernetes/main/apps/zfs/zfs-scrubber/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/zfs/zfs-scrubber/ks.yaml b/kubernetes/main/apps/zfs/zfs-scrubber/ks.yaml new file mode 100644 index 000000000..ff58b9191 --- /dev/null +++ b/kubernetes/main/apps/zfs/zfs-scrubber/ks.yaml @@ -0,0 +1,24 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app zfs-scrubber + namespace: flux-system +spec: + targetNamespace: zfs + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/zfs/zfs-scrubber/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m + postBuild: + substitute: + # renovate: datasource=docker depName=ghcr.io/siderolabs/installer + TALOS_VERSION: v1.8.3 diff --git a/kubernetes/main/bootstrap/flux/age-key.secret.sops.yaml b/kubernetes/main/bootstrap/flux/age-key.secret.sops.yaml new file mode 100755 index 000000000..fe6f113e7 --- /dev/null +++ b/kubernetes/main/bootstrap/flux/age-key.secret.sops.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Secret +metadata: + name: sops-age + namespace: flux-system +stringData: + age.agekey: ENC[AES256_GCM,data:hHup+dFxDjW9ci4cKhkxl4DSfIT7CK08HgfeQ+lce5SeeIn6unWM2LXcIA5mkwZGoLOzngAKopwt5aeThWvsLy66Q/XFp0mDHus=,iv:U4CxsaosLMXFngGGWSnutE2GHiUWKxkBDho10AtGsFQ=,tag:3PBqK9ZCbvB89b3rbgv3qw==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: 
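app-template renders controllers.zfs-scrubber (type: cronjob) into a batch/v1 CronJob. Roughly, and assuming the chart keeps the bare release name because the controller is named after it, the values above map as in this sketch:

apiVersion: batch/v1
kind: CronJob
metadata:
  name: zfs-scrubber        # naming assumption
  namespace: zfs
spec:
  schedule: "0 0 1,15 * *"
  timeZone: Europe/Sofia
  concurrencyPolicy: Forbid
  successfulJobsHistoryLimit: 1   # from successfulJobsHistory
  failedJobsHistoryLimit: 1       # from failedJobsHistory
  jobTemplate:
    spec:
      backoffLimit: 0
      template:
        spec:
          containers:
            - name: app
              image: ghcr.io/heavybullets8/zfs-scrubber:1.0.6   # digest omitted in this sketch
              securityContext:
                privileged: true

Privileged mode plus the hostPath mount of /dev/zfs is what lets the scrub job talk to the node's ZFS kernel module.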
age1frhtpr8u3u99pvcuq5mjevxdq9agjfpkd8fjtnpj9qymzh5v845q53f37d + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBuYTBhNTk0Ni9QMHEyYldK + cW1zUXV2c0kzNXovMjVxU2ZGWVJLcTJtYWpzCjMxWTk4UnluYVA2VGJNZXdzcjVS + OVZCVXJrWVI1aUhDY0M3YW16Y21qQUUKLS0tIHZaTXBldGNYWVkyemlWOTJQaEdw + azE3ZDJMZW5vdkZaUXBkNTN5MFhGS00KUYM+zFG0a0sej18dQCAS6AkCVd8FrHZp + Lq99sdhQ86xPTIPq2MzxATsPKWM3qOiVUG3lehlF7WKe0YTAAHC3/w== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2024-10-08T21:48:12Z" + mac: ENC[AES256_GCM,data:1jN/pAPbxS/dnA19zPsh3qwFkmS3ve93LJZnOjkhKL//+ohDN6OquhE9xwYjq8/35xJERot68uB3iJ7bu2bJJoLGZHyDtvBuKANwrorSo346g9VDa2rpNx8GhsaoYtpz5fyasmZhAnh5f9ss27DpSSh9b6kp+ToNAWnFXbA/wQg=,iv:3HEC9jObBuaJNMj8dywsX+luIhWi2PbrLgCWpPyCZwQ=,tag:s453WKwqB1QvgF2Yfz5qzw==,type:str] + pgp: [] + encrypted_regex: ^(data|stringData)$ + mac_only_encrypted: true + version: 3.9.1 diff --git a/kubernetes/main/bootstrap/flux/deploy-key.secret.sops.yaml b/kubernetes/main/bootstrap/flux/deploy-key.secret.sops.yaml new file mode 100755 index 000000000..5a72656c3 --- /dev/null +++ b/kubernetes/main/bootstrap/flux/deploy-key.secret.sops.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Secret +metadata: + name: deploy-key + namespace: flux-system +stringData: + identity: ENC[AES256_GCM,data:IBYRcWjfwODirFE/YQMTQ0LHaKr9KIXYw3CD6TsKMJ6wIASDV3offgQiykho3XLEdnSBfXWmD7Zs2ooRihK9gVuqs1TkVlcv4E0rVP3Ytp5a25SP6Dd5WcU5m4lXdWBPCG92cU6cNzrdJ7JJT34Ohqg53eaokC5szGQcBT2dpbQCHfekDqNHZ4f6ZZKVdpyHVPRxwDxTqxQNOrDdpd2GdKUXRZd3v8lqGEIWnlys4WelUq4jTaZ8fjGVCpag6zSMtfR6WFHDJ8P7FH2M7li4V2ff+uzkhXgxEz6VgtyFBAq77IAz+jnf6lB0w1+Y0J7ws8LmYK6whWXBkpmNfDs/ehhqTLpCfbDg4sm864k6AaHYKXSGNKMRjNaq+/zUP0xqGc9iYpv++IXyvAHMxVx+CrMNetesXG/SPPm5jpHJILAL0RWxoCh/0fb8uQDxXFy1nk4haY6N2O9zhqi/ofm18RDmBGvkEheeMGDDv2ZSNbqoG2peXBhXJTyWZiuA0Ox/,iv:3rTgh86myohUye3x7r6WFb0gGx+CWP7y8Rzmbj+RVwg=,tag:oKXo8MMwDSQhUbogs6r2Sw==,type:str] + identity.pub: ENC[AES256_GCM,data:6fvhqnzxmGMa2KmuBW+ge6uPUqmJ9v92dIHEgK/jPQjQUbmyWQ+Nh2Azh5VGdiXyFFT085Zmd+KSXI4FelQT3tKZ1YiOJ79loyY/vcP9MVBhbVwYkdmDMFwmSHWB9bnAlC8QMhDNprRDqMYc8GE8IjC/qQfCz0EWuelFvfHgafnGTN1zQNLiDK5YGYG9fsZoSQceOcbwF3Xc2aWRYa812+rQcNxv5dGZsFCY7AxIs13QsfFoSKX945ildls7as0Lsc/UJjrbTq93YOgx2b2zIt7RUWVaKLKoY33vlr5LhhYVRjf+7OVRERk8rgHp90pPvcHg6P1IxX6YdRE31w==,iv:OrPRv3Y7rX9tx/vMrt6MUh71gwWDAvLm7fsGt++v99M=,tag:SQeHvymWVofWDR2vWh3QXw==,type:str] + known_hosts: ENC[AES256_GCM,data:mJ+w+UCm6vGNgwSty0Rnt55f0WYXvWSA47RHEMKJypH9AOvG8seWaHmPsYsaNT19gLqTb7m+R41Ppr13SZdqJK02vqbbR1bvPZBhMEEf2JY15XfqEvFPNColNLcpRCbEScU7tItoXmJ2KRNqZOKT6laRoyDemxnpToxU5v2jX4zmryAvCSap3s6s6eZ2rCOzVJwfXopxyfttDYS+RCj4kxKrqCFFd3PkGvKn,iv:580mGiQNGqQo068EBBsu3wO6FHnFCaoT+XR4TOwmRGE=,tag:S3KE2HiAfCzF7rfW0x/31g==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1frhtpr8u3u99pvcuq5mjevxdq9agjfpkd8fjtnpj9qymzh5v845q53f37d + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAwUlp6Q29OREFPRmZUNmRh + TGN6ZUpnM2NwVXpPbDZSQkFSQjJxNU9IcGdjCkdmSzNzQUFBVXJGWEo5Rm9jaGZt + UCtKcXFQaFdQaXQ3UGxjaWRMS0N3VVkKLS0tIEVMUkVlTVArOXBqTTdJTmVsSzVB + QnYvYUpxWDMweWFKOFQyYk15QzdQZlkKKqlwfunxvU43OeNEQA9qm8w9GNzPywde + C732F7XEYZls8rYpmZdxpIA+x4Yjsl9PpXDuwjBfKwX+AXj32NRRog== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2024-10-08T20:40:20Z" + mac: 
ENC[AES256_GCM,data:YmHPvl10nOr3fXtJjcL7jaZgxbk6rmSTVPyvdxjUw49eqUnrudO6WfuQVg6+TIWH2npPxfrJxwYT6iQ7bAJd9+2iGFMVUSed6YyV/QSSf2kzXg4WqUA/SyoHNLKZmdgTxIyZkKbFA3ymsFH6qsbV0ulfO2p/I3Z77/qVYvLYb6Y=,iv:6T5rFR+HoY6VsRbLgk33+P5VIBS/nfvSGfCJ+k2NymE=,tag:zxVI0nmbsvq24DJkFAnMyw==,type:str] + pgp: [] + encrypted_regex: ^(data|stringData)$ + mac_only_encrypted: true + version: 3.9.1 diff --git a/kubernetes/main/bootstrap/flux/kustomization.yaml b/kubernetes/main/bootstrap/flux/kustomization.yaml new file mode 100755 index 000000000..988cf48cb --- /dev/null +++ b/kubernetes/main/bootstrap/flux/kustomization.yaml @@ -0,0 +1,135 @@ +# IMPORTANT: This file is not tracked by flux and should never be. Its +# purpose is to only install the Flux components & CRDs into your cluster. +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - github.com/fluxcd/flux2/manifests/install?ref=v2.4.0 +patches: + # Remove image automation and image reflector controllers + - patch: | + $patch: delete + apiVersion: apps/v1 + kind: Deployment + metadata: + name: all + target: + kind: Deployment + name: (image-automation-controller|image-reflector-controller) + # Increase the number of workers and limits + # Ref: https://fluxcd.io/flux/installation/configuration/vertical-scaling/#increase-the-number-of-workers-and-limits + - patch: | + - op: add + path: /spec/template/spec/containers/0/args/- + value: --concurrent=10 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --requeue-dependency=5s + target: + kind: Deployment + name: (kustomize-controller|helm-controller|source-controller) + - patch: | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: all + spec: + template: + spec: + containers: + - name: manager + resources: + limits: + memory: 2Gi + target: + kind: Deployment + name: (kustomize-controller|helm-controller|source-controller) + # Enable in-memory kustomize builds + # Ref: https://fluxcd.io/flux/installation/configuration/vertical-scaling/#enable-in-memory-kustomize-builds + - patch: | + - op: add + path: /spec/template/spec/containers/0/args/- + value: --concurrent=20 + - op: replace + path: /spec/template/spec/volumes/0 + value: + name: temp + emptyDir: + medium: Memory + target: + kind: Deployment + name: kustomize-controller + # Enable Helm repositories caching + # Ref: https://fluxcd.io/flux/installation/configuration/vertical-scaling/#enable-helm-repositories-caching + - patch: | + - op: add + path: /spec/template/spec/containers/0/args/- + value: --helm-cache-max-size=10 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --helm-cache-ttl=60m + - op: add + path: /spec/template/spec/containers/0/args/- + value: --helm-cache-purge-interval=5m + target: + kind: Deployment + name: source-controller + # Flux near OOM detection for Helm + # Ref: https://fluxcd.io/flux/installation/configuration/helm-oom-detection/ + - patch: | + - op: add + path: /spec/template/spec/containers/0/args/- + value: --feature-gates=OOMWatch=true + - op: add + path: /spec/template/spec/containers/0/args/- + value: --oom-watch-memory-threshold=95 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --oom-watch-interval=500ms + target: + kind: Deployment + name: helm-controller + # Resources renamed to match those installed by oci://ghcr.io/fluxcd/flux-manifests + - target: + kind: ResourceQuota + name: critical-pods + patch: | + - op: replace + path: /metadata/name + value: 
critical-pods-flux-system + - target: + kind: ClusterRoleBinding + name: cluster-reconciler + patch: | + - op: replace + path: /metadata/name + value: cluster-reconciler-flux-system + - target: + kind: ClusterRoleBinding + name: crd-controller + patch: | + - op: replace + path: /metadata/name + value: crd-controller-flux-system + - target: + kind: ClusterRole + name: crd-controller + patch: | + - op: replace + path: /metadata/name + value: crd-controller-flux-system + - target: + kind: ClusterRole + name: flux-edit + patch: | + - op: replace + path: /metadata/name + value: flux-edit-flux-system + - target: + kind: ClusterRole + name: flux-view + patch: | + - op: replace + path: /metadata/name + value: flux-view-flux-system \ No newline at end of file diff --git a/kubernetes/main/bootstrap/helmfile.yaml b/kubernetes/main/bootstrap/helmfile.yaml new file mode 100755 index 000000000..8a5d7ec5e --- /dev/null +++ b/kubernetes/main/bootstrap/helmfile.yaml @@ -0,0 +1,78 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/helmfile +helmDefaults: + wait: true + waitForJobs: true + timeout: 600 + force: true + recreatePods: true + +repositories: + - name: cilium + url: https://helm.cilium.io + - name: coredns + url: https://coredns.github.io/helm + - name: postfinance + url: https://postfinance.github.io/kubelet-csr-approver + - name: cert-manager + url: https://charts.jetstack.io + - name: prometheus-community + url: https://prometheus-community.github.io/helm-charts + - name: zfs-localpv + url: https://openebs.github.io/zfs-localpv + +releases: + - name: prometheus-operator-crds + namespace: observability + chart: prometheus-community/prometheus-operator-crds + version: 16.0.1 + - name: cilium + namespace: kube-system + chart: cilium/cilium + version: 1.16.4 + values: + - ../apps/kube-system/cilium/app/helm-values.yaml + needs: + - observability/prometheus-operator-crds + - name: coredns + namespace: kube-system + chart: coredns/coredns + version: 1.36.1 + values: + - ../apps/kube-system/coredns/app/helm-values.yaml + needs: + - observability/prometheus-operator-crds + - kube-system/cilium + - name: kubelet-csr-approver + namespace: kube-system + chart: postfinance/kubelet-csr-approver + version: 1.2.3 + values: + - ../apps/kube-system/kubelet-csr-approver/app/helm-values.yaml + needs: + - observability/prometheus-operator-crds + - kube-system/cilium + - kube-system/coredns + - name: cert-manager + namespace: cert-manager + chart: cert-manager/cert-manager + version: v1.16.2 + values: + - ../apps/cert-manager/cert-manager/app/helm-values.yaml + needs: + - observability/prometheus-operator-crds + - kube-system/cilium + - kube-system/coredns + - kube-system/kubelet-csr-approver + - name: zfs-localpv + namespace: kube-system + chart: zfs-localpv/zfs-localpv + version: 2.7.0-develop + values: + - ../apps/kube-system/openebs-zfs-localpv/app/helm-values.yaml + needs: + - observability/prometheus-operator-crds + - kube-system/cilium + - kube-system/coredns + - kube-system/kubelet-csr-approver + - cert-manager/cert-manager diff --git a/kubernetes/main/bootstrap/talos/k8s-0.secret.sops.yaml b/kubernetes/main/bootstrap/talos/k8s-0.secret.sops.yaml new file mode 100644 index 000000000..8d6cedde5 --- /dev/null +++ b/kubernetes/main/bootstrap/talos/k8s-0.secret.sops.yaml @@ -0,0 +1,219 @@ +version: v1alpha1 +debug: false +persist: true +machine: + systemDiskEncryption: + ephemeral: + provider: luks2 + keys: + - slot: 0 + tpm: {} + state: + provider: luks2 + keys: + - slot: 0 + tpm: {} + type: 
controlplane + token: ENC[AES256_GCM,data:4QZZ94S4xG5Or2eX5omLD5KtJ6YfOmQ=,iv:MntQbgf+h0i3GHpy2uSdrWd0z7coNNUuMPCqyJz+5Ic=,tag:S3GTC9eAZQZku7GLp+kRIA==,type:str] + ca: + crt: ENC[AES256_GCM,data:miBYalmdXjIaOMZk8gsezCxY4QaWgUmEuj1Y0uiW03y4Jljaf8UkxJiVZij+P/uWc2OY0rtROsWBL0bpPBete/I1k0SVVn72VIs/HpcKTZ3lJZiTi9H419BgRvFi2Tkvn6EwFqQngp/wv3mDP3UY2UWvfej37BCMJLFMgPsz6qJeA+KNHIeVKf/bfGRLQrzi+hQVwHMGFv/pd7PC9trMT7aZi3PivScT8cylwHnOKW0njkj9b28jBX/1RoJx3dEDxgBJdJM7uifHEaCHGb7ljETB3949CGbwoMrOvK0q29HPRdAg2eeui33BdhPfcQu+ioVGls6eGqb0od2H4/BNQ0OSpS3L6dbuefY4pzvmIYzeAAZYJXN4TQxpWreh6Kce6vuXq9uCcYhZeuO+qgye5bLWfZ20EP42yLjOJlY8mqYfV2s5shjrUhrZ4yI7bQGexAp7zkPQj/YmCdXlEmTroRkm/YuvNyyBLI18H9mh/iGmZL/FnSaSnqBNAFzgCA3v7uAaxnq2RzRLNBiKopAzyVmFtLSnYtm/RF/UKuQi6p+M46kcbBSRl5MAZhnYOZmv2g1NHuCNyceJPm1E9WUWATuYvlf6XAXp1IDywkkxI75kNKOJ0pBVW/bUXpKn7R8RzzFjxSi6hz7L7QeeRcB/JYrgjILMqkIPUvg/ubADEUrYpnzuzj4GfgZmrwyOX1U7xdny9aHttweY3R+0cyrz92YMu1+ppwQczD66hwb6eumu2uTy8aQIgiUYbeATYm0GoLzBeqXXQp2OHQwTu54IhHIGnd+LdqcvG7w/uFvRkVY6floZSm5vNIwpASOfUx4bWdVdOXJfjQvpQol0RfF+VCw0pZLz6yvewtnb2nCo9FoBPC1l,iv:7NJqmkatKBq3aLn0BTzt8fF4xXEGscTQkGqPOKxTzRU=,tag:JJEd9BfLHyi8a8eNGYUVAg==,type:str] + key: ENC[AES256_GCM,data:w1bTVUzOqgwqNK/PutIXZGlNCbMbn8ILm/MTovjLOAUmlBe1wGGVgpnyd2URoEFTNvWMxxVNTl30Tn1O7JVaZfv77DsbOAaBOf60sFq/wLItVTQbUgKxwwvbGOf6PlcwjUW7tNrKa1bgrg6627MlvWK6ac6PACBoqhFIY5htw7DoMr8MdQEVzP5xJ8WuiVN5PRCIwqdSlC1dCvWziZAavjcd1oky0cgvDaGlpQdn/DNoTsQd,iv:woeQlxSOXrmGzs+W02MEADflMDSdY/fFgwrH70kK01o=,tag:efYOvIrbyAxVY9LWKZx9jg==,type:str] + certSANs: + - 127.0.0.1 + - 192.168.91.10 + kubelet: + image: ghcr.io/siderolabs/kubelet:${KUBERNETES_VERSION} + extraArgs: + rotate-server-certificates: "true" + extraConfig: + maxPods: 250 + shutdownGracePeriod: 120s + shutdownGracePeriodCriticalPods: 120s + defaultRuntimeSeccompProfileEnabled: true + disableManifestsDirectory: true + network: + hostname: k8s-0 + nameservers: + - 192.168.91.1 + interfaces: + - interface: bond0 + bond: + mode: 802.3ad + lacpRate: fast + xmitHashPolicy: layer3+4 + miimon: 100 + updelay: 200 + downdelay: 200 + interfaces: + - eth0 + - eth1 + - interface: br0 + routes: + - network: 0.0.0.0/0 + gateway: 192.168.91.1 + addresses: + - 192.168.91.9/24 + bridge: + stp: + enabled: false + interfaces: + - bond0 + vip: + ip: 192.168.91.10 + install: + diskSelector: + model: CT1000P3PSSD8 + extraKernelArgs: + - net.ifnames=0 + - amd_iommu=on + - iommu=pt + - amd_pstate=passive + - amd_pstate.shared_mem=1 + image: factory.talos.dev/installer-secureboot/${TALOS_SCHEMATIC_ID}:${TALOS_VERSION} + wipe: false + files: + - op: create + path: /etc/cri/conf.d/20-customization.part + content: | + [plugins."io.containerd.cri.v1.runtime".containerd] + default_runtime_name = "nvidia" + [plugins."io.containerd.grpc.v1.cri"] + enable_unprivileged_ports = true + enable_unprivileged_icmp = true + [plugins."io.containerd.grpc.v1.cri".containerd] + discard_unpacked_layers = false + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + discard_unpacked_layers = false + - op: overwrite + path: /etc/nfsmount.conf + permissions: 420 + content: | + [ NFSMount_Global_Options ] + nfsvers=4.2 + hard=True + noatime=True + nconnect=16 + time: + disabled: false + servers: + - time.cloudflare.com + sysctls: + fs.inotify.max_queued_events: "65536" + fs.inotify.max_user_instances: "8192" + fs.inotify.max_user_watches: "524288" + net.core.bpf_jit_harden: 1 + net.core.default_qdisc: fq + net.core.rmem_max: "16777216" + net.core.wmem_max: "16777216" + net.core.rmem_default: "8388608" + 
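The 20-customization.part file dropped in above makes nvidia the default containerd runtime, so every container picks it up implicitly. The explicit, opt-in alternative would be a RuntimeClass whose handler matches that containerd runtime name, as in this sketch:

apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
  name: nvidia
handler: nvidia    # must match the containerd runtime name

A pod would then select it with spec.runtimeClassName: nvidia instead of relying on the node-wide default.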
net.core.wmem_default: "8388608" + net.core.netdev_max_backlog: "250000" + net.ipv4.tcp_congestion_control: bbr + net.ipv4.tcp_max_syn_backlog: "8192" + net.ipv4.tcp_rmem: 4096 131072 16777216 + net.ipv4.tcp_wmem: 4096 65536 16777216 + features: + rbac: true + stableHostname: true + kubernetesTalosAPIAccess: + enabled: true + allowedRoles: + - os:admin + allowedKubernetesNamespaces: + - system-upgrade + apidCheckExtKeyUsage: true + diskQuotaSupport: true + kubePrism: + enabled: true + port: 7445 + hostDNS: + enabled: true + forwardKubeDNSToHost: false + resolveMemberNames: false + kernel: + modules: + - name: vfio_pci + - name: vfio + - name: vfio_iommu_type1 + - name: nvidia + - name: nvidia_uvm + - name: nvidia_drm + - name: nvidia_modeset + - name: zfs + nodeLabels: + node.kubernetes.io/exclude-from-external-load-balancers: "" +cluster: + id: ENC[AES256_GCM,data:NGVmoKE+s/fZ/V1yrWcqw6N6dbWC7pD60Y8HnygNmoAVk2UTuzj/RBPLzkc=,iv:OvL0766a+nnblPtMBaVngECKuw57GSMFRYA4qY6IGYw=,tag:E61BnbomE5UMThDvXMxQ6w==,type:str] + secret: ENC[AES256_GCM,data:6Ep3DtPnfkl9+qwnhz7IoujfCFAXwLfUx0c71dvOfCR1ISfr1859EPahb3M=,iv:wmXPXn4ZhZK0MT2fGnzbXEddsvVaFSVAYEd/RZPsPK8=,tag:+0LDgdEwRcykKfSvtsJ31A==,type:str] + controlPlane: + endpoint: https://192.168.91.10:6443 + clusterName: main + network: + cni: + name: none + dnsDomain: cluster.local + podSubnets: + - 172.16.0.0/16 + serviceSubnets: + - 172.17.0.0/16 + coreDNS: + disabled: true + token: ENC[AES256_GCM,data:sSVT8wxlba/laEZFk7XhmAY1aQbIjJw=,iv:HJkcolwUgqOKOu2ycNYc6eSMCyXGDk9Zjb6hXRGigd8=,tag:zIfsQvLXIiNx2BroMLNXAA==,type:str] + secretboxEncryptionSecret: ENC[AES256_GCM,data:HCZwCQYWXFDmw8jtRvep4bJJllmb9WqIjAUGxwRxPUxWOHY+HbCmh79RC9s=,iv:QNMxfzY6LI63pDJuUSgva5CEJMywlBdHQCYaDW4AY1Q=,tag:9MYeENBK8iAVfkRNjIMTHQ==,type:str] + ca: + crt: ENC[AES256_GCM,data:XMB5ZzBo0AwH1fovCkfBl2bA1B3f5PWwlPYT6sC4jom8ryeSSqhmA4CeDWA+d/W4vWmmHKNTytOB4egzz6taiTraP2k8FkqA1hTEBNBJe0rfp0Dr3ZTGAJz9J2twgD6h7cZ7T+8rb9yJhpGQW7yNsCSJ55zb2eVm6M79M9AdwpSY+joEXaYjy2r3AAbfkOvuiaVrT+UGW72Kb7YBhtqAAUOl0ULLBaxt6O7Y8jT5hXNCidwe8gcnT2xUjtKc1/Ale5Hz18KI4tZeOvNgqC7+vmdgD5V9VYmGGjl9r+HvD5yUYPGQkmLJxnzqtf4zQ+HOLIOuyulte9DPE0RPlF3RNg7JwRmawFXoXmBKft8Zbwe02YTlZCaFFokYwUo98UXnTvTMR8dHYfHiRoWtxSys3XY66I9yvzGJkhkfG/oL7UzqjEZE1ZlAOGm/s+xvI0h7CpZuNNLEiK+ex0gTfabZ6UO11YVrRqVOr8TgbbvOIVvzqoCtp0OWeM7kCRslvuDuLeOE0YOOARH/MgHlDYV35QqykwfvYbuecf//XzOSBSpUZAR4ThiIOsFe6LteL0cuBs7AWJC11NVxvy0ZaQcMBZIEaROw1dEXXuTLX+C6FxTJR2PeT+SbPmgWX+qM/ssxuoSAuhdyHWt7FHlIRxIFTqJWm8l9Zv3/YYneuFRqHht8YkZlKoYSNy38dVLVD6TE+GlDU6fvhj8Rk44JAxdKcDeAW+Dtx6dP5krXJIenR2iDIFnnIZQtPsWaS72XhTU0QGeMzVaGJ/RUsihJj42Ns95i+7S+/xeFg+gGZnDcfPx3R7H9J6cs3q4AAM6+8nWmHY9iV0kzAIu4UQ6Z3NTY6xxw5lWeCeniRr9O5usVMiITqdcaf2Sjhq+KkianJO8M6s5jenHp3AKBls9+4V+sqKe1jo/K73fCTCHjHS8nlEpShW2WM0AilZ/0MIMwRJATy94uMf+usFMm8bpvuRyHAamT+M6Is3HeQgjgNC8+8Ob0HwIHXQZlixTSE7HIjGL4NjC6OOio/JRYiAzsaevwYnCP06w=,iv:OQa8JxAkGHrM7b62P0RsC+mbWyprKMCdr69UpLJldYs=,tag:MSaZvj41oX9Ks4fzpQcurA==,type:str] + key: ENC[AES256_GCM,data:TN7ChreImta6Yy8au65Eae9xwQtrseHVBSQQv7V/uvqXaWpgleC1jgoDWudTKySDGhBakUFaqLyWAqrcpg30YBuXMmkfB9rxspdt+ThWrNjtL82Nb1nfB1luMPvB+wK7Bcspqlw7J6P41v+98zicF41Q1mhsOwCXc0hZv1Qo0fVOBrtbF7bJiaHXj+PzQO9Rn7ps4hXgUsyHOPuvMYV0PL/RXmgMQxkMurdg6CqxDYWZnoaaS9qc1CoElqkwH1+AxR7rMPBR32oGMeooHnw4yGsykGZjf6+qDuUebd42aoCNlovgmbkrUGjcWv855P7SjKct1Lar2awbqNV2Z7xpRoFf+QeDti5fYYKHFNyPvmrGrjhPogiurFgo1USAl2YApHCYBgc+1boVpjO+dXffQw==,iv:sgtq9WX68JlM8YomqyXmxzd1K1Aoov/WMfiC9tflHC4=,tag:D/MvsPuz09YeooKRSbDkRw==,type:str] + aggregatorCA: + crt: 
ENC[AES256_GCM,data:9N205kt7XPiNXWnDYG/FXw3jT6xlVGPaQA3G32djGcbus5axcSNQ8Kr5HfL1xexCk/rhIBDP/fHDl5/c2iWs8t0536REUvcnkv+RI8/pX81dJpFMLntCu++LhBgm1jGkXnW2YZeyPdL1hO0XPPpFtZFZlpN8K384RfWqoUDO58vONdLHO1mxKf3rQy3WfIu6zC24ZdXWJeMAW5z7fbTyvM9vyikR3Qq66Jj5oTE8AXWP9vgFtPYiysvgaapBWpcMICWqF8jdaitpTDfXU45kFn25b6+OYalGq9meRRNIHq/keF2tF5qjSKVG0f714k8G59uI7NTQtAjJMipvnrRoKUs7ueuWveMDPx4syobxaa6thPF/IMmv5r7kQhWOS9r4iIyZJ6VXol9r01K+3x9YSXW7DWqRaUnxd1Ih37FxWHTdSqOnDkd4j0Ur8VrSR06v2J7ujEqIwwJ60wKjCQDUG6nBkUQ86kpsZp1m2+ULJSVzb3Yr8mhJhhLX7hcgYUbJqqEsmjwdDXIO8B+gLdeWCRhmXQp5YG9UgRGNn5lczAOqgV9o7HJdhC+DK+waUR5oiHoOg/y8cCHQSzIogT2PhDJSswHl6404FVtKxNSS+ZhN7O40q9Ft2S/NJ9jUf+y5BSop3eumJSRiRvdmi8iWIIJkemqSB44/dmiiKTA9Y5BDvWlephyQCW8KJljFBmq5cM4Hl2voiE2KquCKJdAXKSYrubiRuXRXG2ApX5kZ8ebHfJgNPwdZsRDG4eU7shU09zP+F8WThCCl37nrLe5vGiYTjtXUGCOEnTmyp12tW9z3CQJ04jvEu2RKWn5LfH4QcOAnH67K5xYMgHRVq+m4kCd6Yoz9ZnZbh/SVto7Y4Hv1P1N8mfiY2hzAU5zVTDcq8JmbYxhgQaQ0WoDeYw4cnG87us5B1F+R03Y2aTAfUky1yWRIz8GN3Q==,iv:ldTjmxK61tc7tqiCX1TTxmbtNZjiI35QsyHvHnfEU8g=,tag:xHiYkv2cUADvJqwVGFUlSQ==,type:str] + key: ENC[AES256_GCM,data:Pn9N7BxQohOodYb3Yjaw3SVCFNxm9opYvBtcPQMP5TU62Aud+t1sKpyST4Jw8IoQjULzAIrzV/DYZea+0/Q9k9pecDjJDpSDmzAtk2nRVoJ2Z1+Hpid6c4xl+x6ShV0TTU5CkuMcKdz+DqYvjDp33QXZTe76dO+C3niTQa+2pd5Iqz9IaRxiMCdFnWG2HCpPWFhxLIwZjVEotV5oC2pZrY07GWxxrAch47ruKm0QsvQUV+DgJplD65eMkDUonUpR8I/wHvXRh/eaZgJDAv7uS7Ut5phViHcWSm0zxq7D9p081tLfAk8k+A2ocdlUNndbAZSppqs0gbkALmJgbqYOZG4ErVi2hWBdBV3uCfAlXJMq1qWUoBQmengmGMGJw6sV46u2f/y7Elj2MaA2J9BHNw==,iv:TL42kQwgax4DfiCt7ISntRqyM0p3XuCz1sa72sinBPU=,tag:pwSiGVM6HJTZH+Z70RLkHw==,type:str] + serviceAccount: + key: ENC[AES256_GCM,data:JDSGxaTzNttQtypZjPkSDEGYJNvwmLCHjNsEEc+pBHCp1xH+6N4oFLOO3Ry4Lvg7YsFeFwg+1wYFVDdITCmgrmLFy5xV2gnps2Qko9k4lBrpX7gsQWFoumV+I0UrQFhCken5JtHHodof8DF6vZGBNLg/+L4Sqc1JCPVAQXAPOTcMytC3e5VY/AyPaHAeSBHZEesJd0uQ8LEsFTZpIxhz0I+erP4BQofGGYzE9xJoWSVRW/uBQBrDGrglK3Nun4htPv0Vf1IjzzzjpcwFbXl7J/9FutIDSSvXl7969GW7e9t281VzIF8p2U5+oJGcgToG7/HQ939yDwJIFTdFKTm5jy4TArwTZKbGxErsyz9D65zb/x3Cs9suLcT5CNeUkJ1Sv+qLaAXMdAIUZbNtdsJanAdLQPFPB2Zr2W9MPC8BsWeEtx68vBszMYuoO0CWJRjS6h9XjWbaVUvFcM5evdrU+mXQ/gs1wJXQ6cWfEZEj2moqamdaL7kd4HUspZGZJ89lzYMOVEk9mMTgHt5Ar+YvNTT6/l+0Hu/ZA8DJGFg7LWuyCGbWkNdVNAJ9dgiuvvwb8bsdYpbSRp7chwqrkfJrT5CgIYk/FVoWSWLna1We/q6Ud6/LI5niGoHlcsP260jtPs5fu4ssF6rKn6EUWZHwyp7GnwBaBnY2CxUVshd7/Ot849KQyc4NEixM1967gVavFEsF7N+fZGQYEbMp5WBcUM88so9eGNkWumLuUXIuFbZzMBDAchv+u9nbU7h0Nkept9ljbhGJ8to3Q2sYlhuevLdpkTWv54TcdiQDFnXFyyTOBz+HSUKDt/q5QEM44vjqW1Xn8ch8hwhFavAqAW99bDSXM/X1qpr76trLmQoXyyWN2Qy9h0hdAY5wBChMq1wI9vSz+OzZ7vNv/C76Cp7Wjhmsac9pajLpVd6pkHREoB1pCCgcBoYPGgcAX4zsR1xrBPYeANM2fzFZw1Bqikx+X1C23jBs5nBGXUVXp6oMn8f7FyBQHFQ6iGtuvGcTELcZnx1EcpBSZo5e1/cM0mFU7Mj/YwMfIvJlpIwUtCRM6IBiulwXMUKK4zOwY5K7WCeFVo82bK6yGUPUv2LXmDQxaXcdBXQ1Rc0OWE5goqNqJpU0lDckFc5KleNQzxGQ5qM2T8jK80bFcYjF7uFffV+SzTIojVWfhTRN93r0bwa7XHd+QusDImOfs7DiIbFghv93ErF0G8b+4SyT2RFbscGqIGrEn5sc69PxNgqa7elOzeF3OGjnNjrnKQieblRQZay6PpzX7zzdl82vrlQGCcQB8nqrmhSDB3I0HALeA5csutFsjYr7v6LXdog5OGGILzHeTwTGKMPT6KFUClcLSgIyJH5BOharTYQz+2HL9s5xhHA7tMI8xumZDPnjSnYAaIAr/+j9obUTdwEZ/VCDyDkrdIbh2WozucQQRAAJCK0z1fgwQuKYRdv+rbJtEE2brZdxWzBwsIShKPnniwK3xclOhPx5x9sGR527h2M5u6IPy0HMKdcbKGTBkCissknatAsHSxSOSgvKNblqEqPhCCjgXLLm+oU0IWfpxynTf7opnK54fB2/zKDcwkOHLojgiDSketUokvPRdP8k3F5fZ7qPPGbloms/nSdwZl52gdsnc0cd2BS6nIf1TBT+cIHT4lBXKVcw4fb/Qri1N5rUGDFr4Jqcv0asVDc0pj/6JIgy9chmYAyEAAk4N5w0lN3UkyEpjdaEu+l02kjMdTQjiRtcyZSqHl/UPPGNSKUlYcM4IRd9VTf1rc1mTCDg93V4ng8CayzRsSwjC6p9yDHpseBIyuuYqjDMJV5h9axVR2jum3zuo3zmPeweYNbkHKff/2dGRU0BDoww8QOMOK3kFa+DrRKgrFtG8+JsmzeBAtIQu3h+DKNC5a/rEVlbWsjGgcsjfBxtWS3
MkgKa+QX5++RDLtkYi2gnvJBIlrqkjTRdzvoYUR5hp+g8xTDooVcZQ0ZyiHs6klxsW33r17jsl2V+CHDtVDRgt2BAh3Yq/NC0YG4Xq3KkRYaDzIRH4pf4/MJ0He2w3Zh0tCG/5azNo39TyIHanJIIys3WMbigcTFleKgqC+wJu8+wkKrFU0eUpFPnBNiCpqz4GC2/JaswPq5IC9ADiXj0xh49DbKkjvh5dwozXQYDVocyA9cQ3auT5Qpd6/+KaHKpfxk389DEhBJyf5KpRz0qTIL/2DLHc29aiyVgxVFw7zxcQfgkRm5Nkl7sAox9bEAUpY9RxbZUV7iBmvVcMnJWEMeqkPLVkDZt6uTcYFERIDJE0cfYGwbpjTuv1v+rCuNTz3QOKu9eHIW3QGn2paWtFym1h5E4qOn34WLpoIOw1xgsHNdFGg3DQZTuDrsXMUGl23wg7ri1uQQcy2q3Sz4A4MMRJ5D8H5k1HRRAfbxHWY0MSj+lTCOl4xnW85GJrBdR2i3+IyLiw0Xc7Bscrge20RJNYWl4JeL+FhmhV7I9sCmAI22yn0E/fr3rRbnwwL37MBq+V4z1a/SfZHuFioWCbqjNwpoqClOgl2IjCsQrKxLZ8M6pLlz6NFS0xfcZytN0Eq00uuaITCTTdB6vBmVs50fuJHcaIum2WUtAU4QNdhf2naUgNMvz3p/fylKuSeMFpuCuZ3+F3lFNtsskzGoRnvuv8DEYN4HPa2MKAx93Sh39d4A5L1itm98LtUod8b6nTiSVsd9H/MQCXGOMeNDpJnctiH2IeRRmAF0Can/jGA5sExWtgzdrSjymBbWZdLfNFTABIlAViJc243buBTPOZb/pFglR1CxZtoyPHDh/VDE3MlPnsf8GNj1TWcLHpuPDA6NEpf+pamnSqpCNexgsNjKlK792CH3pKCDt+ZJnGKbH4fCTSRhzukl6DcpJyK3O6nJjaouQKTCD1m01yFr0B88tIBm9cIFKuNQQKhmQQmN3C1QWbnlQjIXAmFMJF/mu1PS4Pil6+DDS1/YHKkP8qMk8JtY3XJJ3kkS/d+21G1wOU8HwZXUN/o3EcLfb5d3NuwOuR5hakPqtx+Q1ERcFcoqODznF/ns+HOHsBv1xkccQTbrOE36qMk11UydNgBzX1OrMpgHd+jqZrKMX/6gQplgYK/TrW92M6L2nFW3Dsp876i4bfb6+I58q8SCo4XDw4WL3mw5dX+4YCeWg4T3Ad5PWVC0YKIIEv0AO/JOKqIkRO4RmuzbYCh7isNWj8UpfP6dQYLRhUVYmgQUDoLV4cob74uKGOIs2IqqbhiWLQAnPz7b9ADgv6JXF91Di/qSM5vFiDUgEddMv9icaBTE5mkvedc/+gf12gd54gb4OVRK2hTgXALEFYSuMq9ULY16n4v0a3D95AGVN/nxqBWPLv+PG+/5+OxdYtcgOzIhJ4N22oq6DbmoMzGqnzRDj9IlqaXpfDc37KZ6RjrWyX/ukUxDulwP2ZMfDx1giP83MWGFyfAhlH+JMt0DCrHBlaXWDoFdcG8lS7uOz3m+pckHLBK0AwiAYO6H+ur3cUH6Z4JzU+/QBm+XhxsYdELV/5ZfKX/R1GLB/PVpZv63Cy5cR6YMYhegrCVTOyojqEmNeIGlfzwMLAq2e8wCswIqzOQYxdo1ZDdfKw/VegOwqMwnLoOX/Smgd3lLcecZbSD1iN3TKMDrr83Qxqov8X/9Z74rswDvJLZIYVIAzjvq3QcLM4Q1nibowGk9SrzvZNsnX173cMcAe+SkeDgi4+u2gsWEaGwB55KOw/0yjVDsl2oMxa0LIphUnKAH6dmLW956n06eWTXkS66BgF0Mr8HZ+VBcxLdY0p4oJ8DX3bvlgE8lflthmxgHsxCVrkUuaRbDlbPABjjsHe5O8bMh0pf0Y3mBKPmn+FFkxlJssH/g7EhnN7FR1qrpQEdw9m6u6JmCdFs+T3FVBBFibfLyRVij/zjnsLGWSSKgMgPe+0XyLFbTczhVLDjBU3feNAU+kDPfF1WNXi186keUmYXwCNZ5LiEIEt3rohc4gptrM3P15Co9lX3r1fnjrR460kLgT6AZgyrBspb4NmcxAfCN8NyfWX82/CjyK3lcUjiA2VkdhVynWTqUChrkZrQfonTzkmkWeBVDB8YsAy3BslKIlnC+s7UOZk/qMJomdh2mnre2HmR/y7xKljUvwVIG6PwgTeDDvlOWSgShd1JOD5pMXTDJUGYyeIO3duQf4fYAERjkLkljhR/XNi2+PWM/YvQHzyUQpMDa4LNDfO/geWgiZlHg7vW0XaEB2a9/4IFsjI4Dbw0kMjq1W1c4mEISUJAPIm/tI0rkp3mDHkl18FSU9OTwnfzYHattaWtwyxWNi+COQGANqVbDeiHLgJaFB9wBVCT6ilcrvNzibwJefeok2apmeAUH1ugjgg+PtvR1r0q2mev4+mUuPagD+omnrRE9VmEtnK20/ydGWRLrRKFvOEmBWLl4hIAao6KyhgvOR0w0xbQ8IMmjAWFbTLItVvk3Jo7K1w67yGNEAT0sa7SF+4fgzJqieFt42NQZCDCxPatyf5nrfeHPyKlvj9rqpjNPh2yQpVOySQJn4n6/6VIFZm/VjLJjTveaXQ6kaRI+BJbVyu56xF3iotmelHuygkfuwmrrVWOcMWrAbaoLkBAnXzWRd9T9tqSg70pibrnTPznt7tDGzBccahEuZC8gFEGE0AzTdvVxHKZ0cQpNp2nRcaWCevVsGMAxLb6hepOu/f68GPPvb2veQQqEHZBt2wX787s8Ss836BFWbx5G+t1tYq3DKBOUcmMR3Q9Y6v5OdXr9bOxk218SkRcHc4Vh3vRjEWG3aqZ0hVf0LmvAebzIO4FVs5R6BTdSehIwBSMdhWnoh7Fcxb19MkUl8kwXEu8aryGapSEnkn4XQTMs5f+aRGHex6qF7cxnedk00zQo45979zngg3t89c8jiU7rKct0elJV7ym+k3dcZw/ByT8tr9Ync8DlPjyzeAfE/73Q183b0TDs+Qjo1LEcBbw45BfVTpaXR++4//PJLIN8U92zPQICclOpI+kxFrBiFzqK74/iCT/cNO4m6uAm15re4TfinHWKTHd2lgi1JcqWAU1zonkuKGCCv6un6eJ6n94ygv+cOU1Dt5x9vdp0QABnj2KvR8Jeb2e3EKkUi5QKotrmWkXGst5fRSMtfEb9LA0SyltywxvXZV99r+D73DlTvX4JFudPChx7TKvBBSX20cu+Z0SHDNrKIa2dcDr4TQHA8aZ/Jm/Z/pX3lKEbw8uR9JqKd+1YbdimDv7G+dDTezAxpY8ObQi4QUxjiwhUrb66RjGZMkbNI5stDj/GHRh7Y8bmcsijCwk3lUIQupszh6C5YL8zhuHn0lcSDNSVyn5EyxD1bjLEmZQaX6YTXwoES6+R+0mIJGlx2mN7UFMgHXVOv+ASTzmWEdokL0YzK0oqp9L8m9Ui7CJmZULFHqOB5fdY3HhRlNcX4GkYkfK/h/hGyi/gMRDQB6T+OIoEHNghmTReRdB6izFDiXKNto2Pn31
h3FOSqvY5BJoJNBm6nTtoVeRkj+dLbzT1TGc/QaevUpfnz34799NRHBzxBM+NBVkXBlOS73g1OH4PKXANg0yYqFI31YktcoRy2QBySaKOpxs/KCdpoYEA3GmP1lifYC9NMgyq/AR9ycmbopPNoMjpKpmClaNbK4E24CLk0Gy+W+qUZmZCXw3MkveZ4j2+w+cS6qEN27pQHnV1eyvqgN9ojowWznyWB2HY2iDL8hX1RRsXqVqs+y73UuHoxCNm/w8wqZ9mfZnvwPAiYg7eKmsFyCUJoYPgoWK1BQnJ7,iv:wD4mPa+Y6DokFBEfcmv6AwboDE01/GjznV2N36MJ6FE=,tag:nRP9r9oXlhyCuE6GU1hVUw==,type:str] + apiServer: + image: registry.k8s.io/kube-apiserver:${KUBERNETES_VERSION} + certSANs: + - 127.0.0.1 + - 192.168.91.10 + disablePodSecurityPolicy: true + auditPolicy: + apiVersion: audit.k8s.io/v1 + kind: Policy + rules: + - level: Metadata + controllerManager: + image: registry.k8s.io/kube-controller-manager:${KUBERNETES_VERSION} + extraArgs: + bind-address: 0.0.0.0 + proxy: + disabled: true + scheduler: + image: registry.k8s.io/kube-scheduler:${KUBERNETES_VERSION} + extraArgs: + bind-address: 0.0.0.0 + discovery: + enabled: false + etcd: + ca: + crt: ENC[AES256_GCM,data:TW7wF/YnpTw/MbWXABNlj0VA4VfT6rYTvm5iIns1XM+/BGN35vb4o8yHbFAElqrCL6FCekW4D5sKGPgr+QYGdhMfVYG0ZFkzZvUcLKT0bzvGECnHr3Ix2U1JwUd2t/gk9frmpAcZMmxIltRiXBDs0h+qMzKmh72TkypYU5St0y8z7/1EnLDmlkFpnuy32uh6iJPWe86lXaIJdBI+rA88KBQ+zbehammsBu1aaOFP98vK1ZopNCPoF0sv40BUr5Lia73lwPf25OH7zdDzGNzkvGSN1idMQrH35mUQYsWbDFOMQx0cSDLtNd2yOrdm3Zz2njonUhTPyuwPo4X4y/xUg77tCNFaaN7qzXwGvGQxpaPQQMmdblnA//z5PHXYpZ+LaNq/zhW7fVTNbdmQeZg0U6Iiy1NRd+tz8QVkDv1Qsik4oxcNa/QT271Y4EkEzuTdnwGVg7u8cDDGmJPCk3mVWk3OLoJL+8z3wv2xABSKlQCqZ/u7IZMR0jRCd7W1g+b+kfrzLqaIkvmkC3/POn2+0oHbA2KXSMeXPlLHJE5+m7sCyS+rkRCir4UNkgsOPwti8BeJd0O9QDqCp0zOFmCwSU0ewti1blsdXHevRgKw5HftqftBOdXEgIxylin0NZ49s2mvn8z5Qv6StsLPVrLhDskWJyqWCqF0dolwSTxlN7LWZtsRvJVOdqs/5WP3dN8Hfuk6nXv6bHb3dGkef1idfRLFZb0eBAmzOEvqP7FOtmavLkgpg5w36WXfCNRw/3jreuXP60moiT0s79/lpb0lNSI/Q15/OO+0L3kj/c01ZTxvpPwFJcomOq8DQS18/fl+1hOk6Z9EQR4V8w3wDOr57xuQQ/fu98Yr+hNExWGixYLXpEulL4+pV9t3CrU+BlciyLt3uLNmgQ757C0flYmspZLdmiRHAFMnnn/bCPjegEUq/tWmn6r6KicLYWTTw8xU63zje1FntIXMfc2NBGClFGF9AtoI6FutHLohbTO2dERboHTs/CwKPPOouJlgpX26uLWXaA==,iv:CJDJaNr07b8Q23nfz8+SsufdjH/bu3L3JZtKAinksis=,tag:D721N9BiV90mctlQVATWrw==,type:str] + key: ENC[AES256_GCM,data:Nx4JWl4I35D9cmjWRCKfKOcag9etmstv/tY3F2K52xkW9arF2wiLNDIrDqll2A7BrsNU1hHnLIl/TXDXJs5xatMnF3rN+vV/EUIGd68uI1DOgPxKExf9kkAnatIevAIT3d5uLrCTA273GqFSYvUXSFxONI7BpnMXvIZGnOaBBk1g1QKrgYIFkG9pBbrgIpD5S4iLtJ4QmUBAFbR8USgiKXMuIyPyST/1pOMEyeuE3PNYfJEzIAqA691+AGmmmTOUUi2spxZBOw7CBTWku7XTIEIdwT3fYujb4CWotvenPC52OjAyp+iPP4YPmtkxvHBgVViMFP2YzcaMTAuFPzr5l6hDWGWsSFzpeTwT/utWk/Ljx0RmHA9jJ2ED4Fs4ulCVJ75rJQgJzrQdwjGiHcaGSA==,iv:DoKILZ3oyP4LO1E8sGQ41Cj8kOnewbHqjpQKS0Ni0Yw=,tag:wURpnRGp+3ZAdbIkUJXAXw==,type:str] + allowSchedulingOnControlPlanes: true +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1frhtpr8u3u99pvcuq5mjevxdq9agjfpkd8fjtnpj9qymzh5v845q53f37d + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBFbXpjcTgzSlR1SlQzYTZE + TG4waFQ4YU5qWklIa1R5aERZNmpYa25SM21NCnM1VU5lb2JzQmwvSnROcmhXSW9T + ZnNBQmNDM0s3aUVqUytuSitKeXErKzQKLS0tIE9nVXpKYXQwK1NSM3oyT0ZyRDN3 + ZVU5MWNwWVlFbENGRFVPWXRiWjVhUHcKzObF23w2RB0KQ4mTUOM8G1hnScMV0fXX + hcF5Q8CpLpo1JAZHl3iUJscWHDzluUkaCJEZ7qTwAP2JawaptH9/5Q== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2024-11-27T15:48:22Z" + mac: ENC[AES256_GCM,data:y5PS9jIMSu/WskN98f4FgqcYd0s34DDVgKuJM71RdZrua3rzBnmvtmY1I4G48nPqkSHVj8ZHzqSmJeX0wGp3by3dLWrXTzvSnGS+eSPocCF8tkoQ+PQ3qO899bkS1mimKtC4fDmGn5TjCq97YORsAyRHLSWf3/yzsu+NtCcyPV4=,iv:zpnv9qxePHnlrRl1ZLY3SgvmHbVhveavCpZNygye2as=,tag:A6u1jjKx12GIbPg+2HqlhQ==,type:str] + pgp: [] + encrypted_regex: 
^(token|crt|key|id|secret|secretboxEncryptionSecret|ca|urls|extraManifests)$ + mac_only_encrypted: true + version: 3.9.1 diff --git a/kubernetes/main/flux/apps.yaml b/kubernetes/main/flux/apps.yaml new file mode 100755 index 000000000..59cb09fad --- /dev/null +++ b/kubernetes/main/flux/apps.yaml @@ -0,0 +1,41 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster-apps + namespace: flux-system +spec: + interval: 10m + path: ./kubernetes/main/apps + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substituteFrom: + - kind: Secret + name: cluster-secrets + patches: + - patch: |- + apiVersion: kustomize.toolkit.fluxcd.io/v1 + kind: Kustomization + metadata: + name: not-used + spec: + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substituteFrom: + - kind: Secret + name: cluster-secrets + target: + group: kustomize.toolkit.fluxcd.io + kind: Kustomization + labelSelector: substitution.flux.home.arpa/disabled notin (true) diff --git a/kubernetes/main/flux/config/cluster.yaml b/kubernetes/main/flux/config/cluster.yaml new file mode 100755 index 000000000..e9699ac4a --- /dev/null +++ b/kubernetes/main/flux/config/cluster.yaml @@ -0,0 +1,43 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/gitrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: home-kubernetes + namespace: flux-system +spec: + interval: 30m + url: ssh://git@github.com/Darkfella91/home-ops + ref: + branch: main + secretRef: + name: deploy-key + ignore: | + # exclude all + /* + # include flux directories + !/kubernetes/main +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster + namespace: flux-system +spec: + interval: 30m + path: ./kubernetes/main/flux + prune: true + wait: false + sourceRef: + kind: GitRepository + name: home-kubernetes + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substituteFrom: + - kind: Secret + name: cluster-secrets + optional: false diff --git a/kubernetes/main/flux/config/flux.yaml b/kubernetes/main/flux/config/flux.yaml new file mode 100755 index 000000000..e845cfbce --- /dev/null +++ b/kubernetes/main/flux/config/flux.yaml @@ -0,0 +1,111 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/ocirepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: flux-manifests + namespace: flux-system +spec: + interval: 10m + url: oci://ghcr.io/fluxcd/flux-manifests + ref: + tag: v2.4.0 +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: flux + namespace: flux-system +spec: + interval: 10m + path: ./ + prune: true + wait: true + sourceRef: + kind: OCIRepository + name: flux-manifests + patches: + # Remove image automation and image reflector controllers + - patch: | + $patch: delete + apiVersion: apps/v1 + kind: Deployment + metadata: + name: all + target: + kind: Deployment + name: 
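The cluster-apps patch above is what gives every child Kustomization SOPS decryption and ${...} substitution from cluster-secrets without repeating that boilerplate: the target labelSelector "substitution.flux.home.arpa/disabled notin (true)" applies the patch to all Kustomizations except those that opt out. A child that manages its own variables would carry the label like this sketch (name and path are hypothetical):

apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: example-app           # hypothetical
  namespace: flux-system
  labels:
    substitution.flux.home.arpa/disabled: "true"   # skips the injected decryption/substitution
spec:
  path: ./kubernetes/main/apps/example             # hypothetical
  prune: true
  sourceRef:
    kind: GitRepository
    name: home-kubernetes
  interval: 30m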
(image-automation-controller|image-reflector-controller) + # Increase the number of workers and limits + # Ref: https://fluxcd.io/flux/installation/configuration/vertical-scaling/#increase-the-number-of-workers-and-limits + - patch: | + - op: add + path: /spec/template/spec/containers/0/args/- + value: --concurrent=10 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --requeue-dependency=5s + target: + kind: Deployment + name: (kustomize-controller|helm-controller|source-controller) + - patch: | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: all + spec: + template: + spec: + containers: + - name: manager + resources: + limits: + memory: 2Gi + target: + kind: Deployment + name: (kustomize-controller|helm-controller|source-controller) + # Enable in-memory kustomize builds + # Ref: https://fluxcd.io/flux/installation/configuration/vertical-scaling/#enable-in-memory-kustomize-builds + - patch: | + - op: add + path: /spec/template/spec/containers/0/args/- + value: --concurrent=20 + - op: replace + path: /spec/template/spec/volumes/0 + value: + name: temp + emptyDir: + medium: Memory + target: + kind: Deployment + name: kustomize-controller + # Enable Helm repositories caching + # Ref: https://fluxcd.io/flux/installation/configuration/vertical-scaling/#enable-helm-repositories-caching + - patch: | + - op: add + path: /spec/template/spec/containers/0/args/- + value: --helm-cache-max-size=10 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --helm-cache-ttl=60m + - op: add + path: /spec/template/spec/containers/0/args/- + value: --helm-cache-purge-interval=5m + target: + kind: Deployment + name: source-controller + # Flux near OOM detection for Helm + # Ref: https://fluxcd.io/flux/installation/configuration/helm-oom-detection/ + - patch: | + - op: add + path: /spec/template/spec/containers/0/args/- + value: --feature-gates=OOMWatch=true + - op: add + path: /spec/template/spec/containers/0/args/- + value: --oom-watch-memory-threshold=95 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --oom-watch-interval=500ms + target: + kind: Deployment + name: helm-controller \ No newline at end of file diff --git a/kubernetes/main/flux/config/kustomization.yaml b/kubernetes/main/flux/config/kustomization.yaml new file mode 100755 index 000000000..2ff3c784d --- /dev/null +++ b/kubernetes/main/flux/config/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./flux.yaml + - ./cluster.yaml diff --git a/kubernetes/main/flux/repositories/helm/actions-runner-controller.yaml b/kubernetes/main/flux/repositories/helm/actions-runner-controller.yaml new file mode 100755 index 000000000..54fa67be8 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/actions-runner-controller.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: actions-runner-controller + namespace: flux-system +spec: + type: oci + interval: 5m + url: oci://ghcr.io/actions/actions-runner-controller-charts diff --git a/kubernetes/main/flux/repositories/helm/backube.yaml b/kubernetes/main/flux/repositories/helm/backube.yaml new file mode 100755 index 000000000..4ba0742ca --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/backube.yaml @@ -0,0 +1,10 @@ +--- +# 
yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: backube + namespace: flux-system +spec: + interval: 2h + url: https://backube.github.io/helm-charts/ diff --git a/kubernetes/main/flux/repositories/helm/bitnami.yaml b/kubernetes/main/flux/repositories/helm/bitnami.yaml new file mode 100755 index 000000000..9f84188c5 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/bitnami.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: bitnami + namespace: flux-system +spec: + type: oci + interval: 5m + url: oci://registry-1.docker.io/bitnamicharts diff --git a/kubernetes/main/flux/repositories/helm/bjw-s.yaml b/kubernetes/main/flux/repositories/helm/bjw-s.yaml new file mode 100755 index 000000000..c32ccd8de --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/bjw-s.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: bjw-s + namespace: flux-system +spec: + type: oci + interval: 5m + url: oci://ghcr.io/bjw-s/helm diff --git a/kubernetes/main/flux/repositories/helm/cilium.yaml b/kubernetes/main/flux/repositories/helm/cilium.yaml new file mode 100755 index 000000000..2cd7146d9 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/cilium.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: cilium + namespace: flux-system +spec: + interval: 2h + url: https://helm.cilium.io diff --git a/kubernetes/main/flux/repositories/helm/cloudnative-pg.yaml b/kubernetes/main/flux/repositories/helm/cloudnative-pg.yaml new file mode 100755 index 000000000..4b2f0e615 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/cloudnative-pg.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: cloudnative-pg + namespace: flux-system +spec: + interval: 2h + url: https://cloudnative-pg.github.io/charts diff --git a/kubernetes/main/flux/repositories/helm/coredns.yaml b/kubernetes/main/flux/repositories/helm/coredns.yaml new file mode 100755 index 000000000..ed0bb65a9 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/coredns.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: coredns + namespace: flux-system +spec: + interval: 2h + url: https://coredns.github.io/helm diff --git a/kubernetes/main/flux/repositories/helm/crowdsec.yaml b/kubernetes/main/flux/repositories/helm/crowdsec.yaml new file mode 100755 index 000000000..ff698010b --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/crowdsec.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: 
HelmRepository +metadata: + name: crowdsec + namespace: flux-system +spec: + interval: 2h + url: https://crowdsecurity.github.io/helm-charts diff --git a/kubernetes/main/flux/repositories/helm/csi-driver-nfs.yaml b/kubernetes/main/flux/repositories/helm/csi-driver-nfs.yaml new file mode 100755 index 000000000..869fce395 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/csi-driver-nfs.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: csi-driver-nfs + namespace: flux-system +spec: + interval: 2h + url: https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts diff --git a/kubernetes/main/flux/repositories/helm/descheduler.yaml b/kubernetes/main/flux/repositories/helm/descheduler.yaml new file mode 100755 index 000000000..147045cd2 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/descheduler.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: descheduler + namespace: flux-system +spec: + interval: 2h + url: https://kubernetes-sigs.github.io/descheduler diff --git a/kubernetes/main/flux/repositories/helm/emberstack.yaml b/kubernetes/main/flux/repositories/helm/emberstack.yaml new file mode 100755 index 000000000..9ca4b3149 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/emberstack.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: emberstack + namespace: flux-system +spec: + interval: 24h + url: https://emberstack.github.io/helm-charts diff --git a/kubernetes/main/flux/repositories/helm/external-dns.yaml b/kubernetes/main/flux/repositories/helm/external-dns.yaml new file mode 100755 index 000000000..2392dac23 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/external-dns.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: external-dns + namespace: flux-system +spec: + interval: 2h + url: https://kubernetes-sigs.github.io/external-dns diff --git a/kubernetes/main/flux/repositories/helm/external-secrets.yaml b/kubernetes/main/flux/repositories/helm/external-secrets.yaml new file mode 100755 index 000000000..2acd768af --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/external-secrets.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: external-secrets + namespace: flux-system +spec: + interval: 2h + url: https://charts.external-secrets.io diff --git a/kubernetes/main/flux/repositories/helm/grafana.yaml b/kubernetes/main/flux/repositories/helm/grafana.yaml new file mode 100755 index 000000000..eb1a6fb0c --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/grafana.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: 
HelmRepository +metadata: + name: grafana + namespace: flux-system +spec: + interval: 2h + url: https://grafana.github.io/helm-charts diff --git a/kubernetes/main/flux/repositories/helm/ingress-nginx.yaml b/kubernetes/main/flux/repositories/helm/ingress-nginx.yaml new file mode 100755 index 000000000..8e107adc6 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/ingress-nginx.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: ingress-nginx + namespace: flux-system +spec: + interval: 2h + url: https://kubernetes.github.io/ingress-nginx diff --git a/kubernetes/main/flux/repositories/helm/jetstack.yaml b/kubernetes/main/flux/repositories/helm/jetstack.yaml new file mode 100755 index 000000000..4bc09d02a --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/jetstack.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: jetstack + namespace: flux-system +spec: + interval: 2h + url: https://charts.jetstack.io/ diff --git a/kubernetes/main/flux/repositories/helm/kustomization.yaml b/kubernetes/main/flux/repositories/helm/kustomization.yaml new file mode 100755 index 000000000..3423dda10 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/kustomization.yaml @@ -0,0 +1,33 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./actions-runner-controller.yaml + - ./backube.yaml + - ./bitnami.yaml + - ./bjw-s.yaml + - ./cilium.yaml + - ./cloudnative-pg.yaml + - ./crowdsec.yaml + - ./coredns.yaml + - ./csi-driver-nfs.yaml + - ./descheduler.yaml + - ./emberstack.yaml + - ./external-dns.yaml + - ./external-secrets.yaml + - ./vaultwarden.yaml + - ./grafana.yaml + - ./jetstack.yaml + - ./kyverno.yaml + - ./metrics-server.yaml + - ./node-feature-discovery.yaml + - ./piraeus.yaml + - ./postfinance.yaml + - ./prometheus-community.yaml + - ./stakater.yaml + - ./nvidia-device-plugin.yaml + - ./ingress-nginx.yaml + - ./vault.yaml + - ./oauth2-proxy.yaml + - ./zfs-localpv.yaml diff --git a/kubernetes/main/flux/repositories/helm/kyverno.yaml b/kubernetes/main/flux/repositories/helm/kyverno.yaml new file mode 100755 index 000000000..b86efb0a7 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/kyverno.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: kyverno + namespace: flux-system +spec: + type: oci + interval: 5m + url: oci://ghcr.io/kyverno/charts diff --git a/kubernetes/main/flux/repositories/helm/metrics-server.yaml b/kubernetes/main/flux/repositories/helm/metrics-server.yaml new file mode 100755 index 000000000..5b2d20f03 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/metrics-server.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: metrics-server + namespace: flux-system +spec: + interval: 2h + url: https://kubernetes-sigs.github.io/metrics-server diff --git 
a/kubernetes/main/flux/repositories/helm/node-feature-discovery.yaml b/kubernetes/main/flux/repositories/helm/node-feature-discovery.yaml new file mode 100755 index 000000000..5e45d5a82 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/node-feature-discovery.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: node-feature-discovery + namespace: flux-system +spec: + interval: 2h + url: https://kubernetes-sigs.github.io/node-feature-discovery/charts diff --git a/kubernetes/main/flux/repositories/helm/nvidia-device-plugin.yaml b/kubernetes/main/flux/repositories/helm/nvidia-device-plugin.yaml new file mode 100755 index 000000000..9d6501049 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/nvidia-device-plugin.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: nvdp + namespace: flux-system +spec: + interval: 2h + url: https://nvidia.github.io/k8s-device-plugin diff --git a/kubernetes/main/flux/repositories/helm/oauth2-proxy.yaml b/kubernetes/main/flux/repositories/helm/oauth2-proxy.yaml new file mode 100755 index 000000000..807fe58d2 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/oauth2-proxy.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: oauth2-proxy + namespace: flux-system +spec: + interval: 2h + url: https://oauth2-proxy.github.io/manifests diff --git a/kubernetes/main/flux/repositories/helm/piraeus.yaml b/kubernetes/main/flux/repositories/helm/piraeus.yaml new file mode 100755 index 000000000..4fe31ddb0 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/piraeus.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: piraeus + namespace: flux-system +spec: + interval: 2h + url: https://piraeus.io/helm-charts/ diff --git a/kubernetes/main/flux/repositories/helm/postfinance.yaml b/kubernetes/main/flux/repositories/helm/postfinance.yaml new file mode 100755 index 000000000..015568bfc --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/postfinance.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: postfinance + namespace: flux-system +spec: + interval: 2h + url: https://postfinance.github.io/kubelet-csr-approver diff --git a/kubernetes/main/flux/repositories/helm/prometheus-community.yaml b/kubernetes/main/flux/repositories/helm/prometheus-community.yaml new file mode 100755 index 000000000..78c4f0c0f --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/prometheus-community.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: prometheus-community + namespace: flux-system +spec: + type: oci + interval: 5m + url: 
oci://ghcr.io/prometheus-community/charts diff --git a/kubernetes/main/flux/repositories/helm/stakater.yaml b/kubernetes/main/flux/repositories/helm/stakater.yaml new file mode 100755 index 000000000..838185d06 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/stakater.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: stakater + namespace: flux-system +spec: + type: oci + interval: 5m + url: oci://ghcr.io/stakater/charts diff --git a/kubernetes/main/flux/repositories/helm/vault.yaml b/kubernetes/main/flux/repositories/helm/vault.yaml new file mode 100755 index 000000000..8ee1755ec --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/vault.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: vault + namespace: flux-system +spec: + interval: 2h + url: https://helm.releases.hashicorp.com diff --git a/kubernetes/main/flux/repositories/helm/vaultwarden.yaml b/kubernetes/main/flux/repositories/helm/vaultwarden.yaml new file mode 100755 index 000000000..b0cfcf84c --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/vaultwarden.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: vaultwarden + namespace: flux-system +spec: + interval: 2h + url: https://gissilabs.github.io/charts/ diff --git a/kubernetes/main/flux/repositories/helm/zfs-localpv.yaml b/kubernetes/main/flux/repositories/helm/zfs-localpv.yaml new file mode 100644 index 000000000..7b7b0f7e8 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/zfs-localpv.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: zfs-localpv + namespace: flux-system +spec: + interval: 2h + url: https://openebs.github.io/zfs-localpv diff --git a/kubernetes/main/flux/repositories/kustomization.yaml b/kubernetes/main/flux/repositories/kustomization.yaml new file mode 100755 index 000000000..d6b26ce53 --- /dev/null +++ b/kubernetes/main/flux/repositories/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # - ./git + - ./helm + # - ./oci diff --git a/kubernetes/main/flux/vars/cluster-secrets.secret.sops.yaml b/kubernetes/main/flux/vars/cluster-secrets.secret.sops.yaml new file mode 100755 index 000000000..6c92dc61e --- /dev/null +++ b/kubernetes/main/flux/vars/cluster-secrets.secret.sops.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: Secret +metadata: + name: cluster-secrets + namespace: flux-system +stringData: + PUBLIC_DOMAIN: ENC[AES256_GCM,data:KeiBfsOYbAilanRFWTniJY8iApE=,iv:6sdpiOX7gZODMfv0vwO9HlROX7vILjbvNkYofNQKzjE=,tag:dCJkFwrof79py5fJ1yLSWg==,type:str] + S3URL: ENC[AES256_GCM,data:4eXYOQuo2tDd+kAXZfg2XNUltBjM3OfEX7CUTwaz9mqr6UHSnVFFl2nxwXaE92Df6TVOe66w2780s0tcHmeVM81pBi0=,iv:fgwPt4wdCtL0NP8Xuz+pFRrlCEBzQ2uN0HNPx96dS5c=,tag:6ZQTrqsJ6Mm89uQfm8Y/VQ==,type:str] + CLUSTER_CLOUDFLARE_TUNNEL_ID: 
ENC[AES256_GCM,data:PwtYbcCui5UsC+rimZC5QNgwwZ+LZDQHjV4o+ARrWe53WIZf,iv:GVqwLq/4SR3sFj/0qRZ5SYt31j8ezGaeJsj6u6kQQqk=,tag:oBreoO25bpcGWcrImuugAg==,type:str] + AD_REALM: ENC[AES256_GCM,data:Bp+HSFGmnbVcxApev3YqKQiZFfY=,iv:AIFfzdF3O4n1WYxlwXCU64+VVKYGis6t4OQdYHYuHo4=,tag:63QA8Lmd14zlFZsA8ljwLg==,type:str] + KERBEROS_PASSWORD: ENC[AES256_GCM,data:lZZho8xEGfnVSag50A==,iv:JmrrZymVwUiPQueOG3n9cmd5smN4B2fsojOwIpxVY60=,tag:V7WAqaEZWCFdA8DNZB1c8Q==,type:str] + S3PREFIX: ENC[AES256_GCM,data:4WpmyuPVslvk246t,iv:QnZEv0J8vz9FBwdC4kV+wczCFFowNKpvTh/12v+Z8B8=,tag:yVLCGFqEsssvM/l9XPS9Pw==,type:str] + AWS_ACCESS_KEY: ENC[AES256_GCM,data:mPJ9xbM54GP9BkhOz/O7qaPsFk0=,iv:7SQQ2S89d7c0lVoxQP0JvbmpxELA8DXSYfCxaLBDF4I=,tag:tLQ/IVx6Dxuuq+K+I1vKhw==,type:str] + AWS_SECRET_KEY: ENC[AES256_GCM,data:+acohLCugLGUY6iMpS7dVjMbMag4teGImMfC35MlQIvvpmY1HdhUqw==,iv:JzSTxol9vwjDbnihICmD5pkDazoX3GGdnNnCJzvaMdo=,tag:vaZvofDsjoPGE/G+1XPZ7Q==,type:str] + AWS_KEY_ID: ENC[AES256_GCM,data:hLSFvhsaOzZF/2kEMKwD7bsk5QZTy2VAdDZcgRGDHjGjkQJFdVjsAROCV8T9pZ6ZuraCOjsGrBfq2/ZQ3+vYDOlKrTWBZh5DtlI4,iv:eDBs/XH6km+nOLuQ5ct1CnBPyObzgG9AA7zhsjcrES4=,tag:cW+ANHpGEbObt5wiPzIwJw==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1frhtpr8u3u99pvcuq5mjevxdq9agjfpkd8fjtnpj9qymzh5v845q53f37d + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA5T0RNbStKa0ttS3RiUk03 + WStycVN4dmhvUDBUT1o2a1R6Qnd5dkVPVFQ0ClozTS82RmlCc2o4TEQ2UGRoOGp3 + TDIxeGZHNSt1VHpGVkMweXA0RW5nY2sKLS0tIFJMeGMvWVdyRkpaVlMwa3ZBQis4 + QW1lVW5BaVpDR1JReEI2L3k1WGxwRW8KcjNPmPf05RKize92JHHw3qcWT3j5H7gE + 089qhWDhyldAZJAjJVK+6MJvFQj3JeCHHyAgfFsnhSWYBBoSCU48IQ== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2024-11-23T18:05:39Z" + mac: ENC[AES256_GCM,data:nCrGQb8ObOun4AtBMHu2USLA3AfRORyB1sfigWya4eou0/9BeJs+CRWmN6Qx0QXz5wORT3CJquqK5+ksShJ3XvK5TcAyAYM+1gZl6a+Kk6RpJh5dA3BxSnIaSwVpLQ2jj0IlGmI+DFwpxsqT9lurG1lvD/obme6CzGXMdDOAmMg=,iv:wuAidob22yAC6C6IZqcTAZKoZqLTR9UVvJCQJQ6cLnk=,tag:nUamRi+HnbUPMaaxkYEQqA==,type:str] + pgp: [] + encrypted_regex: ^(data|stringData)$ + mac_only_encrypted: true + version: 3.9.1
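
Note on decryption: every Kustomization above sets decryption.provider: sops with a
secretRef of sops-age, but the key Secret itself is (deliberately) not part of this
commit. Per the Flux SOPS guide, kustomize-controller reads any entry with an .agekey
suffix from that Secret. A minimal sketch of its expected shape, with placeholder key
material:

---
apiVersion: v1
kind: Secret
metadata:
  name: sops-age
  namespace: flux-system
stringData:
  age.agekey: AGE-SECRET-KEY-1EXAMPLE...   # placeholder; generate a real key with age-keygen

The age recipient pinned in .sops.yaml must correspond to this private key, or the
cluster and cluster-apps Kustomizations will fail to decrypt and build.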
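
Note on substitution: postBuild.substituteFrom runs an envsubst-style pass over the
built manifests, replacing ${VAR} tokens with keys from the cluster-secrets Secret
(PUBLIC_DOMAIN, S3URL, and so on). A sketch using a hypothetical Ingress that is not
part of this commit:

---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: echo           # hypothetical resource, for illustration only
  namespace: default
spec:
  rules:
    - host: echo.${PUBLIC_DOMAIN}   # resolved at reconcile time from cluster-secrets
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: echo
                port:
                  number: 8080

Because the patch in flux/apps.yaml copies the same postBuild block onto every child
Kustomization, the token resolves identically anywhere under ./kubernetes/main/apps.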
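
That patch is scoped by labelSelector: substitution.flux.home.arpa/disabled notin (true),
which gives individual Kustomizations an opt-out — useful for apps whose manifests
contain literal ${...} strings that must not be substituted. A sketch, with name and
path hypothetical:

---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: example-app    # hypothetical
  namespace: flux-system
  labels:
    substitution.flux.home.arpa/disabled: "true"   # excluded from the cluster-apps patch
spec:
  interval: 30m
  path: ./kubernetes/main/apps/example
  prune: true
  sourceRef:
    kind: GitRepository
    name: home-kubernetes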
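
Note on the HelmRepository sources: each one only registers a chart index; nothing is
deployed until a HelmRelease elsewhere under ./kubernetes/main/apps references it. The
general pattern, sketched against the cilium repository defined above (the chart
version here is illustrative):

---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: cilium
  namespace: kube-system
spec:
  interval: 30m
  chart:
    spec:
      chart: cilium
      version: 1.16.x          # illustrative version range
      sourceRef:
        kind: HelmRepository
        name: cilium
        namespace: flux-system

For the OCI-type repositories (bjw-s, bitnami, kyverno, and so on) the same sourceRef
works unchanged; only the HelmRepository spec differs (type: oci plus an oci:// URL).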